Compare commits

No commits in common. "29996fa2fc10bd25863a435cc0ca157d85ac9e46" and "main" have entirely different histories.

428 changed files with 73 additions and 232364 deletions

@@ -1,7 +1,6 @@
---
kind: pipeline
name: bottin
type: kubernetes
steps:
- name: build
@@ -19,23 +18,6 @@ steps:
commands:
- ash test/runner.sh
- name: notify
image: plugins/slack
when:
status:
- success
- failure
settings:
channel: Drone
webhook: https://webhooks.earthnet.ch/api/v1/matrix/hook/ruNHRBPLDviP3HZGwNbk0HTLaAon4oD16xWeAL2Ul3wKkTBwvoKpoHNxHNU4ObK5
template: >
Build {{ repo.name }}
{{#success build.status}}
{{build.number}} succeeded. Good job.
{{else}}
{{build.number}} failed. Fix me please.
{{/success}}
---
kind: signature
hmac: ff246a04c3df8a2f39c8b446dea920622d61950e6caaac886931bdb05d0706ed

.vscode/launch.json vendored

@@ -1,17 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Launch",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}",
"env": {},
"args": []
}
]
}

@@ -1,6 +1,4 @@
FROM alpine:3.14
RUN apk add --no-cache openldap-clients
FROM scratch
ADD bottin.static /bottin

@@ -1,6 +1,6 @@
BIN=bottin
SRC=main.go ssha.go util.go acl.go read.go write.go memberof.go
DOCKER=192.168.6.10:5000/bottin
DOCKER=lxpz/bottin_amd64
all: $(BIN)
@@ -13,7 +13,7 @@ $(BIN).static: $(SRC)
CGO_ENABLED=0 GOOS=linux go build -a -v -o $(BIN).static
docker: $(BIN).static
docker build -t $(DOCKER):latest .
# docker push $(DOCKER):$(TAG)
# docker tag $(DOCKER):$(TAG) $(DOCKER):latest
docker build -t $(DOCKER):$(TAG) .
docker push $(DOCKER):$(TAG)
docker tag $(DOCKER):$(TAG) $(DOCKER):latest
docker push $(DOCKER):latest

@@ -1,14 +1,13 @@
{
"suffix": "dc=earthnet,dc=local",
"suffix": "dc=bottin,dc=eu",
"bind": "127.0.0.1:1389",
"acl": [
"ANONYMOUS::bind:*,ou=users,dc=earthnet,dc=local:",
"ANONYMOUS::bind:*,ou=People,dc=earthnet,dc=local:",
"ANONYMOUS::bind:cn=admin,dc=earthnet,dc=local:",
"*,dc=earthnet,dc=local::read:*:* !userpassword",
"ANONYMOUS::bind:*,ou=users,dc=bottin,dc=eu:",
"ANONYMOUS::bind:cn=admin,dc=bottin,dc=eu:",
"*,dc=bottin,dc=eu::read:*:* !userpassword",
"*::read modify:SELF:*",
"cn=admin,dc=earthnet,dc=local::read add modify delete:*:*",
"*:cn=admin,ou=groups,dc=earthnet,dc=local:read add modify delete:*:*"
"cn=admin,dc=bottin,dc=eu::read add modify delete:*:*",
"*:cn=admin,ou=groups,dc=bottin,dc=eu:read add modify delete:*:*"
]
}

go.mod

@@ -3,9 +3,9 @@ module bottin
go 1.13
require (
github.com/go-ldap/ldap/v3 v3.3.0
github.com/google/uuid v1.1.1
github.com/hashicorp/consul/api v1.3.0
github.com/jsimonetti/pwscheme v0.0.0-20220125093853-4d9895f5db73
github.com/sirupsen/logrus v1.4.2
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 // indirect
)

go.sum

@@ -1,3 +1,5 @@
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28=
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -7,6 +9,11 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8=
github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ldap/ldap v2.5.1+incompatible h1:Opaoft5zMW8IU/VRULB0eGMBQ9P5buRvCW6sFTRmMn8=
github.com/go-ldap/ldap/v3 v3.3.0 h1:lwx+SJpgOHd8tG6SumBQZXCmNX51zM8B1cfxJ5gv4tQ=
github.com/go-ldap/ldap/v3 v3.3.0/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
@@ -42,8 +49,6 @@ github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG67
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/jsimonetti/pwscheme v0.0.0-20220125093853-4d9895f5db73 h1:ZhC4QngptYaGx53+ph1RjxcH8fkCozBaY+935TNX4i8=
github.com/jsimonetti/pwscheme v0.0.0-20220125093853-4d9895f5db73/go.mod h1:t0Q9JvoMTfTYdAWIk2MF69iz+Qpdk9D+PgVu6fVmaDI=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -83,7 +88,6 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -1,4 +0,0 @@
dn: uid=john.lemon,ou=People,dc=earthnet,dc=local
changetype: modify
replace: userPassword
userPassword: {SSHA}ag22A7i2XavJYvYriYwpj+ZHI3+JA28x

@@ -1,330 +0,0 @@
# LDIF Export for dc=earthnet,dc=local
# Server: ldap (ldap)
# Search Scope: sub
# Search Filter: (objectClass=*)
# Total Entries: 30
#
# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on February 8, 2022 7:42 pm
# Version: 1.2.5
version: 1
# Entry 1: dc=earthnet,dc=local
dn: dc=earthnet,dc=local
dc: earthnet
objectclass: top
objectclass: domain
# Entry 2: ou=Groups,dc=earthnet,dc=local
dn: ou=Groups,dc=earthnet,dc=local
objectclass: top
objectclass: organizationalunit
ou: Groups
# Entry 3: cn=admin,ou=Groups,dc=earthnet,dc=local
dn: cn=admin,ou=Groups,dc=earthnet,dc=local
cn: admin
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
member: uid=admin,ou=People,dc=earthnet,dc=local
# Entry 4: cn=bookstack,ou=Groups,dc=earthnet,dc=local
dn: cn=bookstack,ou=Groups,dc=earthnet,dc=local
cn: bookstack
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
member: uid=regula.trueb,ou=People,dc=earthnet,dc=local
# Entry 5: cn=default-roles-master,ou=Groups,dc=earthnet,dc=local
dn: cn=default-roles-master,ou=Groups,dc=earthnet,dc=local
cn: default-roles-master
objectclass: top
objectclass: group
member: cn=empty-membership-placeholder
member: uid=replication test,ou=People,dc=earthnet,dc=local
# Entry 6: cn=gitea,ou=Groups,dc=earthnet,dc=local
dn: cn=gitea,ou=Groups,dc=earthnet,dc=local
cn: gitea
objectclass: top
objectclass: group
member: uid=Dixit Nemesis,ou=People,dc=earthnet,dc=local
member: uid=matthias.luppi,ou=People,dc=earthnet,dc=local
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 7: cn=grafana,ou=Groups,dc=earthnet,dc=local
dn: cn=grafana,ou=Groups,dc=earthnet,dc=local
cn: grafana
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 8: cn=graylog,ou=Groups,dc=earthnet,dc=local
dn: cn=graylog,ou=Groups,dc=earthnet,dc=local
cn: graylog
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 9: cn=hostsigner,ou=Groups,dc=earthnet,dc=local
dn: cn=hostsigner,ou=Groups,dc=earthnet,dc=local
cn: hostsigner
objectclass: top
objectclass: group
member: uid=hostsigner,ou=People,dc=earthnet,dc=local
# Entry 10: cn=jellyfin,ou=Groups,dc=earthnet,dc=local
dn: cn=jellyfin,ou=Groups,dc=earthnet,dc=local
cn: jellyfin
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
member: uid=regula.trueb,ou=People,dc=earthnet,dc=local
member: uid=john.lemon,ou=People,dc=earthnet,dc=local
member: uid=Dixit Nemesis,ou=People,dc=earthnet,dc=local
member: uid=joel.fluri,ou=People,dc=earthnet,dc=local
# Entry 11: cn=komga,ou=Groups,dc=earthnet,dc=local
dn: cn=komga,ou=Groups,dc=earthnet,dc=local
cn: komga
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 12: cn=mail,ou=Groups,dc=earthnet,dc=local
dn: cn=mail,ou=Groups,dc=earthnet,dc=local
cn: mail
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
member: uid=mail user,ou=People,dc=earthnet,dc=local
member: uid=rsom,ou=People,dc=earthnet,dc=local
member: uid=john.lemon,ou=People,dc=earthnet,dc=local
# Entry 13: cn=offline_access,ou=Groups,dc=earthnet,dc=local
dn: cn=offline_access,ou=Groups,dc=earthnet,dc=local
cn: offline_access
objectclass: top
objectclass: group
member: cn=empty-membership-placeholder
member: uid=rsom,ou=People,dc=earthnet,dc=local
member: uid=hostsigner,ou=People,dc=earthnet,dc=local
member: uid=admin,ou=People,dc=earthnet,dc=local
member: uid=test,ou=People,dc=earthnet,dc=local
member: uid=test.user,ou=People,dc=earthnet,dc=local
member: uid=kid.gamer,ou=People,dc=earthnet,dc=local
member: uid=yannick.bammert,ou=People,dc=earthnet,dc=local
# Entry 14: cn=reader,ou=Groups,dc=earthnet,dc=local
dn: cn=reader,ou=Groups,dc=earthnet,dc=local
cn: reader
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 15: cn=seafile,ou=Groups,dc=earthnet,dc=local
dn: cn=seafile,ou=Groups,dc=earthnet,dc=local
cn: seafile
objectclass: top
objectclass: group
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 16: cn=synapse,ou=Groups,dc=earthnet,dc=local
dn: cn=synapse,ou=Groups,dc=earthnet,dc=local
cn: synapse
objectclass: top
objectclass: group
member: uid=Dixit Nemesis,ou=People,dc=earthnet,dc=local
member: uid=dimension dimension,ou=People,dc=earthnet,dc=local
member: uid=matthias.luppi,ou=People,dc=earthnet,dc=local
member: uid=regula.trueb,ou=People,dc=earthnet,dc=local
member: uid=john.lemon,ou=People,dc=earthnet,dc=local
member: uid=yannick.bammert,ou=People,dc=earthnet,dc=local
member: uid=simon.beck,ou=People,dc=earthnet,dc=local
# Entry 17: cn=uma_authorization,ou=Groups,dc=earthnet,dc=local
dn: cn=uma_authorization,ou=Groups,dc=earthnet,dc=local
cn: uma_authorization
objectclass: top
objectclass: group
member: cn=empty-membership-placeholder
member: uid=admin,ou=People,dc=earthnet,dc=local
member: uid=test,ou=People,dc=earthnet,dc=local
member: uid=test.user,ou=People,dc=earthnet,dc=local
member: uid=kid.gamer,ou=People,dc=earthnet,dc=local
member: uid=yannick.bammert,ou=People,dc=earthnet,dc=local
# Entry 18: ou=People,dc=earthnet,dc=local
dn: ou=People,dc=earthnet,dc=local
objectclass: top
objectclass: organizationalunit
ou: People
# Entry 19: uid=admin,ou=People,dc=earthnet,dc=local
dn: uid=admin,ou=People,dc=earthnet,dc=local
cn:
givenname:
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn:
uid: admin
# Entry 20: uid=dimension dimension,ou=People,dc=earthnet,dc=local
dn: uid=dimension dimension,ou=People,dc=earthnet,dc=local
cn: dimension dimension
givenname: dimension
mail: dimension@earthnet.local
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: dimension
uid: dimension
uid: dimension dimension
userpassword: {SSHA512}wesLTWs6x6/p4kOsn3fJQiaN/hc3u0IFsv1AWJmESp5Y1RHUYlXhW
xSGAbtynCzRDn3uW0ihc3++GE1kT612s1vM4xW102Ek
# Entry 21: uid=Dixit Nemesis,ou=People,dc=earthnet,dc=local
dn: uid=Dixit Nemesis,ou=People,dc=earthnet,dc=local
cn: Lua Fluri
givenname: Lua
mail: DixitNemesis@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Fluri
uid: DixitNemesis
uid: Dixit Nemesis
uid: dixit.nemesis
userpassword: {SSHA512}JZXzqjpHRZODVMBBusZRWbm05ot7FblEa7EXWME8+AOISbBnB7jss
lwia4PHniar5wG4NuLS3YC6BXcj7+Ef1AxoltzXDhx4
# Entry 22: uid=hostsigner,ou=People,dc=earthnet,dc=local
dn: uid=hostsigner,ou=People,dc=earthnet,dc=local
cn: hostsigner
givenname: hostsigner
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: hostsigner
uid: hostsigner
userpassword: {SSHA512}eQQPlm+LdMld6rOVzMdHYR54P4n2IEEKFHh51ovmfZQkJHYHciQTT
15/RlmZpCz/Ym1PgR8KmfH9t0i6ATMa+b3r2LaC4GZa
# Entry 23: uid=joel.fluri,ou=People,dc=earthnet,dc=local
dn: uid=joel.fluri,ou=People,dc=earthnet,dc=local
cn: Joel Fluri
givenname: Joel
mail: fluri.joel@gmail.com
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Fluri
uid: joel.fluri
userpassword: {SSHA}a1zBFJT08ki+NhdV+WgWoDQrCSDRYU14Gog9bw==
# Entry 24: uid=john.lemon,ou=People,dc=earthnet,dc=local
dn: uid=john.lemon,ou=People,dc=earthnet,dc=local
cn: John Lemon
givenname: John
mail: john.lemon@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Lemon
uid: john.lemon
userpassword: {SSHA512}1vkCNmm7u8yqGXauYdl83ycT5BLViD1RANG8H1cXozHFqsJk8O5p/
S39diDnW4KFV7Y1L9iMM6jDRDRIevLkulUCLxg6hyXb
# Entry 25: uid=mail user,ou=People,dc=earthnet,dc=local
dn: uid=mail user,ou=People,dc=earthnet,dc=local
cn: Mail User
givenname: mail
mail: mail.user@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: user
uid: mail.user
uid: mail user
userpassword: {SSHA512}/y+9GyIPnJFpUPxRbNK0H2VAG1MwImewEy7t7PbYPFJdlthpefdxg
hsOms/mpqcMitjQeREqKsBUE5B46362FsSA3lQ55YpRhnxVstJvis0lJDZo33IEIUuL33UBhfwI
# Entry 26: uid=matthias.luppi,ou=People,dc=earthnet,dc=local
dn: uid=matthias.luppi,ou=People,dc=earthnet,dc=local
cn: Matthias Luppi
givenname: Matthias
mail: matthias.luppi@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Luppi
uid: matthias.luppi
userpassword: {SSHA512}XEslka1tvBXfeMmcKfXQuWi0/MHnQZuEjfiSzb0Xzq+m84mCQGnD6
CENUpERGFFz4gNEh9ZVHlRtUZYY3lzT07gWgynGLJx5
# Entry 27: uid=regula.trueb,ou=People,dc=earthnet,dc=local
dn: uid=regula.trueb,ou=People,dc=earthnet,dc=local
cn:: UmVndWxhIFRyw7xi
givenname: Regula
mail: regula.trueb@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn:: VHLDvGI=
uid: regula.trueb
userpassword: {SSHA512}rj9jQDcpd9ebgSa/q+E2KbETMowgulsVYUFPRHwR9WcN3strV3WNF
tbIzJKvpCzLXD1jwwi3pUj2AHe3SlwF9aScGwE2wrQq
# Entry 28: uid=rsom,ou=People,dc=earthnet,dc=local
dn: uid=rsom,ou=People,dc=earthnet,dc=local
cn: rsom
givenname: R
mail: rsom@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Som
uid: rsom
userpassword: {SSHA512}XDpUiDKXSAjqenBPpwdZaogox7s9SwmHuaO3X9Yf/EubKmBAsswOf
FTCeV++IaMlxoJ8uBWnnUUcl6B0TKLLZDjXAsFM8fJj
# Entry 29: uid=simon.beck,ou=People,dc=earthnet,dc=local
dn: uid=simon.beck,ou=People,dc=earthnet,dc=local
cn: Simon Beck
givenname: Simon
mail: simon.beck@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Beck
uid: simon.beck
userpassword: {SSHA512}0II9QrOs6743o80XpOvypLUdKDfo+wrYbHuhIHLTEl3CvllWw1nDn
855nZAp5TyzThsOXzym2mQa5jLhlX4MZO4Jij4oBC5B2oF9u9/idkXkZL1Db955ZCYn2wtlxXWE
# Entry 30: uid=yannick.bammert,ou=People,dc=earthnet,dc=local
dn: uid=yannick.bammert,ou=People,dc=earthnet,dc=local
cn: Yannick Bammert
givenname: Yannick
mail: yan.bam@gmail.com
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Bammert
uid: yannick.bammert
userpassword: {SSHA512}SgYMVqnVqI8xzrj5RzfZWKqYPOX6baW7b7fC4KUSfVb4vZPFvqumZ
Yo7JNSGDT3IN7zkVI59Wz9BTiS1DiPWPkO+ndLY/b1R

@@ -1,17 +0,0 @@
# LDIF Export for cn=gitea,ou=Groups,dc=earthnet,dc=local
# Server: ldap (ldap)
# Search Scope: base
# Search Filter: (objectClass=*)
# Total Entries: 1
#
# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on February 8, 2022 1:51 pm
# Version: 1.2.5
version: 1
# Entry 1: cn=gitea,ou=Groups,dc=earthnet,dc=local
dn: cn=gitea,ou=Groups,dc=earthnet,dc=local
cn: gitea
objectclass: top
objectclass: group
member: uid=john.lemon,ou=people,dc=earthnet,dc=local

@@ -1,16 +0,0 @@
# LDIF Export for ou=Groups,dc=earthnet,dc=local
# Server: ldap (ldap)
# Search Scope: base
# Search Filter: (objectClass=*)
# Total Entries: 1
#
# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on February 8, 2022 1:50 pm
# Version: 1.2.5
version: 1
# Entry 1: ou=Groups,dc=earthnet,dc=local
dn: ou=Groups,dc=earthnet,dc=local
objectclass: top
objectclass: organizationalunit
ou: Groups

@@ -1,17 +0,0 @@
# LDIF Export for ou=People,dc=earthnet,dc=local
# Server: ldap (ldap)
# Search Scope: base
# Search Filter: (objectClass=*)
# Total Entries: 1
#
# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on February 8, 2022 10:39 am
# Version: 1.2.5
version: 1
# Entry 1: ou=People,dc=earthnet,dc=local
dn: ou=People,dc=earthnet,dc=local
objectclass: top
objectclass: organizationalunit
ou: People

@@ -1,25 +0,0 @@
# LDIF Export for uid=john.lemon,ou=People,dc=earthnet,dc=local
# Server: ldap (ldap)
# Search Scope: base
# Search Filter: (objectClass=*)
# Total Entries: 1
#
# Generated by phpLDAPadmin (http://phpldapadmin.sourceforge.net) on February 8, 2022 4:23 pm
# Version: 1.2.5
version: 1
# Entry 1: uid=john.lemon,ou=People,dc=earthnet,dc=local
dn: uid=john.lemon,ou=People,dc=earthnet,dc=local
cn: John Lemon
givenname: John
mail: john.lemon@earthnet.ch
objectclass: top
objectclass: inetOrgPerson
objectclass: organizationalPerson
objectclass: person
sn: Lemon
uid: john.lemon
userpassword: {SSHA512}1vkCNmm7u8yqGXauYdl83ycT5BLViD1RANG8H1cXozHFqsJk8O5p/
S39diDnW4KFV7Y1L9iMM6jDRDRIevLkulUCLxg6hyXb

main.go

@@ -12,8 +12,8 @@ import (
"os/signal"
"syscall"
message "bottin/goldap"
ldap "bottin/ldapserver"
message "bottin/goldap"
consul "github.com/hashicorp/consul/api"
log "github.com/sirupsen/logrus"
@@ -320,6 +320,7 @@ func (server *Server) init() error {
return err
}
admin_pass_str, environnement_variable_exist := os.LookupEnv("BOTTIN_DEFAULT_ADMIN_PW")
if !environnement_variable_exist {
admin_pass := make([]byte, 8)
@@ -328,15 +329,11 @@ func (server *Server) init() error {
return err
}
admin_pass_str = base64.RawURLEncoding.EncodeToString(admin_pass)
} else {
} else {
server.logger.Debug("BOTTIN_DEFAULT_ADMIN_PW environment variable is set, using it for admin's password")
}
admin_pass_hash, err := SSHAEncode(admin_pass_str)
if err != nil {
server.logger.Error("can't create admin password")
panic(err)
}
admin_pass_hash := SSHAEncode([]byte(admin_pass_str))
admin_dn := "cn=admin," + server.config.Suffix
admin_attributes := Entry{
@@ -437,8 +434,8 @@ func (server *Server) handleBindInternal(state *State, r *message.BindRequest) (
}
for _, hash := range passwd {
valid, err := SSHAMatches(hash, string(r.AuthenticationSimple()))
if valid && err == nil {
valid := SSHAMatches(hash, []byte(r.AuthenticationSimple()))
if valid {
groups, err := server.getAttribute(string(r.Name()), ATTR_MEMBEROF)
if err != nil {
return ldap.LDAPResultOperationsError, err
@@ -447,32 +444,8 @@ func (server *Server) handleBindInternal(state *State, r *message.BindRequest) (
user: string(r.Name()),
groups: groups,
}
updatePasswordHash(string(r.AuthenticationSimple()), hash, server, string(r.Name()))
return ldap.LDAPResultSuccess, nil
} else {
return ldap.LDAPResultInvalidCredentials, fmt.Errorf("can't authenticate: %w", err)
}
}
return ldap.LDAPResultInvalidCredentials, fmt.Errorf("No password match")
}
// Update the hash if it's not already SSHA512
func updatePasswordHash(password string, currentHash string, server *Server, dn string) {
hashType, err := determineHashType(currentHash)
if err != nil {
server.logger.Errorf("can't determine hash type of password")
return
}
if hashType != SSHA512 {
reencodedPassword, err := SSHAEncode(password)
if err != nil {
server.logger.Errorf("can't encode password")
return
}
server.putAttributes(dn, Entry{
ATTR_USERPASSWORD: []string{reencodedPassword},
})
}
}

@@ -213,11 +213,10 @@ func (server *Server) handleSearchInternal(state *State, w ldap.ResponseWriter,
continue
}
// Send result
resultVals := []message.AttributeValue{}
for _, v := range val {
resultVals = append(resultVals, message.AttributeValue(v))
e.AddAttribute(message.AttributeDescription(attr),
message.AttributeValue(v))
}
e.AddAttribute(message.AttributeDescription(attr), resultVals...)
}
w.Write(e)
}

ssha.go

@@ -1,53 +1,59 @@
package main
import (
"errors"
"bytes"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"fmt"
"strings"
"github.com/jsimonetti/pwscheme/ssha"
"github.com/jsimonetti/pwscheme/ssha256"
"github.com/jsimonetti/pwscheme/ssha512"
log "github.com/sirupsen/logrus"
)
const (
SSHA = "{SSHA}"
SSHA256 = "{SSHA256}"
SSHA512 = "{SSHA512}"
)
// Encode encodes the string to ssha512
func SSHAEncode(rawPassPhrase string) (string, error) {
return ssha512.Generate(rawPassPhrase, 16)
// SSHAEncode encodes a raw password ([]byte) into an {ssha} hash string
func SSHAEncode(rawPassPhrase []byte) string {
hash := makeSSHAHash(rawPassPhrase, makeSalt())
b64 := base64.StdEncoding.EncodeToString(hash)
return fmt.Sprintf("{ssha}%s", b64)
}
// SSHAMatches reports whether the raw password matches the encoded hash
func SSHAMatches(encodedPassPhrase string, rawPassPhrase string) (bool, error) {
hashType, err := determineHashType(encodedPassPhrase)
func SSHAMatches(encodedPassPhrase string, rawPassPhrase []byte) bool {
if !strings.EqualFold(encodedPassPhrase[:6], "{ssha}") {
return false
}
bhash, err := base64.StdEncoding.DecodeString(encodedPassPhrase[6:])
if err != nil {
return false, errors.New("invalid password hash stored")
return false
}
salt := bhash[20:]
switch hashType {
case SSHA:
return ssha.Validate(rawPassPhrase, encodedPassPhrase)
case SSHA256:
return ssha256.Validate(rawPassPhrase, encodedPassPhrase)
case SSHA512:
return ssha512.Validate(rawPassPhrase, encodedPassPhrase)
newssha := makeSSHAHash(rawPassPhrase, salt)
if bytes.Compare(newssha, bhash) != 0 {
return false
}
return false, errors.New("no matching hash type found")
return true
}
func determineHashType(hash string) (string, error) {
if len(hash) >= 7 && string(hash[0:6]) == SSHA {
return SSHA, nil
// makeSalt makes a 32-byte slice containing random bytes.
func makeSalt() []byte {
sbytes := make([]byte, 32)
_, err := rand.Read(sbytes)
if err != nil {
log.Panicf("Could not read random bytes: %s", err)
}
if len(hash) >= 10 && string(hash[0:9]) == SSHA256 {
return SSHA256, nil
}
if len(hash) >= 10 && string(hash[0:9]) == SSHA512 {
return SSHA512, nil
}
return "", errors.New("no valid hash found")
return sbytes
}
// makeSSHAHash hashes the passphrase and salt with SHA-1 and appends the salt. This is not the final output: the result still has to be base64-encoded and prefixed with the {ssha} string.
func makeSSHAHash(passphrase, salt []byte) []byte {
sha := sha1.New()
sha.Write(passphrase)
sha.Write(salt)
h := sha.Sum(nil)
return append(h, salt...)
}
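Taken together, the main-side helpers round-trip as follows; a minimal usage sketch (the password value is illustrative, not from the repository):

```go
password := []byte("s3cret")  // hypothetical example value
hash := SSHAEncode(password)  // "{ssha}" + base64(sha1(password+salt) + salt)
if SSHAMatches(hash, password) {
	// bind can proceed: the presented password matches the stored hash
}
```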

@@ -30,9 +30,7 @@ func dnToConsul(dn string) (string, error) {
rdns[i], rdns[j] = rdns[j], rdns[i]
}
concattedPath := strings.Join(rdns, "/")
return strings.ToLower(concattedPath), nil
return strings.Join(rdns, "/"), nil
}
func consulToDN(key string) (string, string, error) {

@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
/metrics.out

@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 Armon Dadgar
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -1,91 +0,0 @@
go-metrics
==========
This library provides a `metrics` package which can be used to instrument code,
expose application metrics, and profile runtime performance in a flexible manner.
Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
Sinks
-----
The `metrics` package makes use of a `MetricSink` interface to support delivery
to any type of backend. Currently the following sinks are provided:
* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
* InmemSink : Provides in-memory aggregation, can be used to export stats
* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
* BlackholeSink : Sinks to nowhere
In addition to the sinks, the `InmemSignal` can be used to catch a signal,
and dump a formatted output of recent metrics. For example, when a process gets
a SIGUSR1, it can dump recent performance metrics to stderr for debugging.
Labels
------
Most metrics have an equivalent method ending in `WithLabels`; such methods
allow pushing metrics with labels and using features of the underlying sinks
(e.g. translation into Prometheus labels).
Since some of these labels may greatly increase the cardinality of metrics, the
library allows filtering labels using a blacklist/whitelist system
which is global to all metrics.
* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default.
* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks.
By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
no labels are filtered at all, but this allows a user to globally block some high-cardinality
labels at the application level.
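A minimal sketch of label filtering, assuming a `sink` built as in the examples below; the label names are illustrative:

```go
cfg := metrics.DefaultConfig("service-name")
cfg.AllowedLabels = []string{"method"}      // whitelist: only the "method" label is emitted
cfg.BlockedLabels = []string{"remote_addr"} // blacklist: drop a high-cardinality label
metrics.NewGlobal(cfg, sink)
```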
Examples
--------
Here is an example of using the package:
```go
func SlowMethod() {
// Profiling the runtime of a method
defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
}
// Configure a statsite sink as the global metrics sink
sink, _ := metrics.NewStatsiteSink("statsite:8125")
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
// Emit a Key/Value pair
metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
```
Here is an example of setting up a signal handler:
```go
// Setup the inmem sink and signal handler
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
sig := metrics.DefaultInmemSignal(inm)
metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
// Run some code
inm.SetGauge([]string{"foo"}, 42)
inm.EmitKey([]string{"bar"}, 30)
inm.IncrCounter([]string{"baz"}, 42)
inm.IncrCounter([]string{"baz"}, 1)
inm.IncrCounter([]string{"baz"}, 80)
inm.AddSample([]string{"method", "wow"}, 42)
inm.AddSample([]string{"method", "wow"}, 100)
inm.AddSample([]string{"method", "wow"}, 22)
....
```
When a signal comes in, output like the following will be dumped to stderr:
[2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
[2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
[2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
[2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513

@@ -1,12 +0,0 @@
// +build !windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
DefaultSignal = syscall.SIGUSR1
)

@@ -1,13 +0,0 @@
// +build windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
// Windows has no SIGUSR1, use SIGBREAK
DefaultSignal = syscall.Signal(21)
)

@@ -1,348 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"math"
"net/url"
"strings"
"sync"
"time"
)
// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
type InmemSink struct {
// How long is each aggregation interval
interval time.Duration
// Retain controls how many metrics interval we keep
retain time.Duration
// maxIntervals is the maximum length of intervals.
// It is retain / interval.
maxIntervals int
// intervals is a slice of the retained intervals
intervals []*IntervalMetrics
intervalLock sync.RWMutex
rateDenom float64
}
// IntervalMetrics stores the aggregated metrics
// for a specific interval
type IntervalMetrics struct {
sync.RWMutex
// The start time of the interval
Interval time.Time
// Gauges maps the key to the last set value
Gauges map[string]GaugeValue
// Points maps the string to the list of emitted values
// from EmitKey
Points map[string][]float32
// Counters maps the string key to a sum of the counter
// values
Counters map[string]SampledValue
// Samples maps the key to an AggregateSample,
// which has the rolled up view of a sample
Samples map[string]SampledValue
}
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
return &IntervalMetrics{
Interval: intv,
Gauges: make(map[string]GaugeValue),
Points: make(map[string][]float32),
Counters: make(map[string]SampledValue),
Samples: make(map[string]SampledValue),
}
}
// AggregateSample is used to hold aggregate metrics
// about a sample
type AggregateSample struct {
Count int // The count of emitted pairs
Rate float64 // The values rate per time unit (usually 1 second)
Sum float64 // The sum of values
SumSq float64 `json:"-"` // The sum of squared values
Min float64 // Minimum value
Max float64 // Maximum value
LastUpdated time.Time `json:"-"` // When value was last updated
}
// Computes a Stddev of the values
func (a *AggregateSample) Stddev() float64 {
num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
div := float64(a.Count * (a.Count - 1))
if div == 0 {
return 0
}
return math.Sqrt(num / div)
}
// Computes a mean of the values
func (a *AggregateSample) Mean() float64 {
if a.Count == 0 {
return 0
}
return a.Sum / float64(a.Count)
}
// Ingest is used to update a sample
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
a.Count++
a.Sum += v
a.SumSq += (v * v)
if v < a.Min || a.Count == 1 {
a.Min = v
}
if v > a.Max || a.Count == 1 {
a.Max = v
}
a.Rate = float64(a.Sum) / rateDenom
a.LastUpdated = time.Now()
}
func (a *AggregateSample) String() string {
if a.Count == 0 {
return "Count: 0"
} else if a.Stddev() == 0 {
return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
} else {
return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
}
}
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
params := u.Query()
interval, err := time.ParseDuration(params.Get("interval"))
if err != nil {
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
}
retain, err := time.ParseDuration(params.Get("retain"))
if err != nil {
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
}
return NewInmemSink(interval, retain), nil
}
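A hedged usage sketch via the generic URL constructor defined in sink.go (the durations are illustrative):

```go
// Equivalent to NewInmemSink(10*time.Second, time.Minute).
sink, _ := metrics.NewMetricSinkFromURL("inmem://?interval=10s&retain=1m")
```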
// NewInmemSink is used to construct a new in-memory sink.
// Uses an aggregation interval and maximum retention period.
func NewInmemSink(interval, retain time.Duration) *InmemSink {
rateTimeUnit := time.Second
i := &InmemSink{
interval: interval,
retain: retain,
maxIntervals: int(retain / interval),
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
}
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
return i
}
func (i *InmemSink) SetGauge(key []string, val float32) {
i.SetGaugeWithLabels(key, val, nil)
}
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
}
func (i *InmemSink) EmitKey(key []string, val float32) {
k := i.flattenKey(key)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
vals := intv.Points[k]
intv.Points[k] = append(vals, val)
}
func (i *InmemSink) IncrCounter(key []string, val float32) {
i.IncrCounterWithLabels(key, val, nil)
}
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Counters[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Counters[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
func (i *InmemSink) AddSample(key []string, val float32) {
i.AddSampleWithLabels(key, val, nil)
}
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Samples[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Samples[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
// Data is used to retrieve all the aggregated metrics
// Intervals may be in use, and a read lock should be acquired
func (i *InmemSink) Data() []*IntervalMetrics {
// Get the current interval, forces creation
i.getInterval()
i.intervalLock.RLock()
defer i.intervalLock.RUnlock()
n := len(i.intervals)
intervals := make([]*IntervalMetrics, n)
copy(intervals[:n-1], i.intervals[:n-1])
current := i.intervals[n-1]
// make its own copy for current interval
intervals[n-1] = &IntervalMetrics{}
copyCurrent := intervals[n-1]
current.RLock()
*copyCurrent = *current
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
for k, v := range current.Gauges {
copyCurrent.Gauges[k] = v
}
// saved values will not change; just copy the reference
copyCurrent.Points = make(map[string][]float32, len(current.Points))
for k, v := range current.Points {
copyCurrent.Points[k] = v
}
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
for k, v := range current.Counters {
copyCurrent.Counters[k] = v
}
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
for k, v := range current.Samples {
copyCurrent.Samples[k] = v
}
current.RUnlock()
return intervals
}
func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
i.intervalLock.RLock()
defer i.intervalLock.RUnlock()
n := len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
return i.intervals[n-1]
}
return nil
}
func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {
i.intervalLock.Lock()
defer i.intervalLock.Unlock()
// Check for an existing interval
n := len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
return i.intervals[n-1]
}
// Add the current interval
current := NewIntervalMetrics(intv)
i.intervals = append(i.intervals, current)
n++
// Truncate the intervals if they are too long
if n >= i.maxIntervals {
copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
i.intervals = i.intervals[:i.maxIntervals]
}
return current
}
// getInterval returns the current interval to write to
func (i *InmemSink) getInterval() *IntervalMetrics {
intv := time.Now().Truncate(i.interval)
if m := i.getExistingInterval(intv); m != nil {
return m
}
return i.createInterval(intv)
}
// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
buf := &bytes.Buffer{}
replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
buf := &bytes.Buffer{}
replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
key := buf.String()
for _, label := range labels {
replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
}

@@ -1,118 +0,0 @@
package metrics
import (
"fmt"
"net/http"
"sort"
"time"
)
// MetricsSummary holds a roll-up of metrics info for a given interval
type MetricsSummary struct {
Timestamp string
Gauges []GaugeValue
Points []PointValue
Counters []SampledValue
Samples []SampledValue
}
type GaugeValue struct {
Name string
Hash string `json:"-"`
Value float32
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
type PointValue struct {
Name string
Points []float32
}
type SampledValue struct {
Name string
Hash string `json:"-"`
*AggregateSample
Mean float64
Stddev float64
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
var interval *IntervalMetrics
n := len(data)
switch {
case n == 0:
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
interval = i.intervals[0]
default:
// Show the most recent finished interval if we have one
interval = i.intervals[n-2]
}
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
Points: make([]PointValue, 0, len(interval.Points)),
}
// Format and sort the output of each metric type, so it gets displayed in a
// deterministic order.
for name, points := range interval.Points {
summary.Points = append(summary.Points, PointValue{name, points})
}
sort.Slice(summary.Points, func(i, j int) bool {
return summary.Points[i].Name < summary.Points[j].Name
})
for hash, value := range interval.Gauges {
value.Hash = hash
value.DisplayLabels = make(map[string]string)
for _, label := range value.Labels {
value.DisplayLabels[label.Name] = label.Value
}
value.Labels = nil
summary.Gauges = append(summary.Gauges, value)
}
sort.Slice(summary.Gauges, func(i, j int) bool {
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
})
summary.Counters = formatSamples(interval.Counters)
summary.Samples = formatSamples(interval.Samples)
return summary, nil
}
func formatSamples(source map[string]SampledValue) []SampledValue {
output := make([]SampledValue, 0, len(source))
for hash, sample := range source {
displayLabels := make(map[string]string)
for _, label := range sample.Labels {
displayLabels[label.Name] = label.Value
}
output = append(output, SampledValue{
Name: sample.Name,
Hash: hash,
AggregateSample: sample.AggregateSample,
Mean: sample.AggregateSample.Mean(),
Stddev: sample.AggregateSample.Stddev(),
DisplayLabels: displayLabels,
})
}
sort.Slice(output, func(i, j int) bool {
return output[i].Hash < output[j].Hash
})
return output
}

@@ -1,117 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"io"
"os"
"os/signal"
"strings"
"sync"
"syscall"
)
// InmemSignal is used to listen for a given signal, and when received,
// to dump the current metrics from the InmemSink to an io.Writer
type InmemSignal struct {
signal syscall.Signal
inm *InmemSink
w io.Writer
sigCh chan os.Signal
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// NewInmemSignal creates a new InmemSignal which listens for a given signal,
// and dumps the current metrics out to a writer
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
i := &InmemSignal{
signal: sig,
inm: inmem,
w: w,
sigCh: make(chan os.Signal, 1),
stopCh: make(chan struct{}),
}
signal.Notify(i.sigCh, sig)
go i.run()
return i
}
// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
// and writes output to stderr. Windows uses SIGBREAK
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
}
// Stop is used to stop the InmemSignal from listening
func (i *InmemSignal) Stop() {
i.stopLock.Lock()
defer i.stopLock.Unlock()
if i.stop {
return
}
i.stop = true
close(i.stopCh)
signal.Stop(i.sigCh)
}
// run is a long running routine that handles signals
func (i *InmemSignal) run() {
for {
select {
case <-i.sigCh:
i.dumpStats()
case <-i.stopCh:
return
}
}
}
// dumpStats is used to dump the data to output writer
func (i *InmemSignal) dumpStats() {
buf := bytes.NewBuffer(nil)
data := i.inm.Data()
// Skip the last period which is still being aggregated
for j := 0; j < len(data)-1; j++ {
intv := data[j]
intv.RLock()
for _, val := range intv.Gauges {
name := i.flattenLabels(val.Name, val.Labels)
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
}
for name, vals := range intv.Points {
for _, val := range vals {
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
}
}
for _, agg := range intv.Counters {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
for _, agg := range intv.Samples {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
intv.RUnlock()
}
// Write out the bytes
i.w.Write(buf.Bytes())
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
buf := bytes.NewBufferString(name)
replacer := strings.NewReplacer(" ", "_", ":", "_")
for _, label := range labels {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, label.Value)
}
return buf.String()
}

@@ -1,278 +0,0 @@
package metrics
import (
"runtime"
"strings"
"time"
"github.com/hashicorp/go-immutable-radix"
)
type Label struct {
Name string
Value string
}
func (m *Metrics) SetGauge(key []string, val float32) {
m.SetGaugeWithLabels(key, val, nil)
}
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" {
if m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
} else if m.EnableHostname {
key = insert(0, m.HostName, key)
}
}
if m.EnableTypePrefix {
key = insert(0, "gauge", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) EmitKey(key []string, val float32) {
if m.EnableTypePrefix {
key = insert(0, "kv", key)
}
if m.ServiceName != "" {
key = insert(0, m.ServiceName, key)
}
allowed, _ := m.allowMetric(key, nil)
if !allowed {
return
}
m.sink.EmitKey(key, val)
}
func (m *Metrics) IncrCounter(key []string, val float32) {
m.IncrCounterWithLabels(key, val, nil)
}
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "counter", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) AddSample(key []string, val float32) {
m.AddSampleWithLabels(key, val, nil)
}
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "sample", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) MeasureSince(key []string, start time.Time) {
m.MeasureSinceWithLabels(key, start, nil)
}
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "timer", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
now := time.Now()
elapsed := now.Sub(start)
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
}
// UpdateFilter overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilter(allow, block []string) {
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
}
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
m.filterLock.Lock()
defer m.filterLock.Unlock()
m.AllowedPrefixes = allow
m.BlockedPrefixes = block
if allowedLabels == nil {
// Having a white list means we take only elements from it
m.allowedLabels = nil
} else {
m.allowedLabels = make(map[string]bool)
for _, v := range allowedLabels {
m.allowedLabels[v] = true
}
}
m.blockedLabels = make(map[string]bool)
for _, v := range blockedLabels {
m.blockedLabels[v] = true
}
m.AllowedLabels = allowedLabels
m.BlockedLabels = blockedLabels
m.filter = iradix.New()
for _, prefix := range m.AllowedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
}
for _, prefix := range m.BlockedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
}
}
// labelIsAllowed returns true if the label should be included in the metric;
// the caller should lock m.filterLock while calling this method
func (m *Metrics) labelIsAllowed(label *Label) bool {
labelName := (*label).Name
if m.blockedLabels != nil {
_, ok := m.blockedLabels[labelName]
if ok {
// If present, let's remove this label
return false
}
}
if m.allowedLabels != nil {
_, ok := m.allowedLabels[labelName]
return ok
}
// Allow by default
return true
}
// filterLabels returns only the allowed labels;
// the caller should lock m.filterLock while calling this method
func (m *Metrics) filterLabels(labels []Label) []Label {
if labels == nil {
return nil
}
toReturn := labels[:0]
for _, label := range labels {
if m.labelIsAllowed(&label) {
toReturn = append(toReturn, label)
}
}
return toReturn
}
// Returns whether the metric should be allowed based on configured prefix filters
// Also returns the applicable labels
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
m.filterLock.RLock()
defer m.filterLock.RUnlock()
if m.filter == nil || m.filter.Len() == 0 {
return m.Config.FilterDefault, m.filterLabels(labels)
}
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
if !ok {
return m.Config.FilterDefault, m.filterLabels(labels)
}
return allowed.(bool), m.filterLabels(labels)
}
// Periodically collects runtime stats to publish
func (m *Metrics) collectStats() {
for {
time.Sleep(m.ProfileInterval)
m.emitRuntimeStats()
}
}
// Emits various runtime statistics
func (m *Metrics) emitRuntimeStats() {
// Export number of Goroutines
numRoutines := runtime.NumGoroutine()
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
// Export memory stats
var stats runtime.MemStats
runtime.ReadMemStats(&stats)
m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
// Export info about the last few GC runs
num := stats.NumGC
// Handle wrap around
if num < m.lastNumGC {
m.lastNumGC = 0
}
// Ensure we don't scan more than 256
if num-m.lastNumGC >= 256 {
m.lastNumGC = num - 255
}
for i := m.lastNumGC; i < num; i++ {
pause := stats.PauseNs[i%256]
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
}
m.lastNumGC = num
}
// Inserts a string value at an index into the slice
func insert(i int, v string, s []string) []string {
s = append(s, "")
copy(s[i+1:], s[i:])
s[i] = v
return s
}

@@ -1,115 +0,0 @@
package metrics
import (
"fmt"
"net/url"
)
// The MetricSink interface is used to transmit metrics information
// to an external system
type MetricSink interface {
// A Gauge should retain the last value it is set to
SetGauge(key []string, val float32)
SetGaugeWithLabels(key []string, val float32, labels []Label)
// Should emit a Key/Value pair for each call
EmitKey(key []string, val float32)
// Counters should accumulate values
IncrCounter(key []string, val float32)
IncrCounterWithLabels(key []string, val float32, labels []Label)
// Samples are for timing information, where quantiles are used
AddSample(key []string, val float32)
AddSampleWithLabels(key []string, val float32, labels []Label)
}
// BlackholeSink is used to just blackhole messages
type BlackholeSink struct{}
func (*BlackholeSink) SetGauge(key []string, val float32) {}
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) EmitKey(key []string, val float32) {}
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) AddSample(key []string, val float32) {}
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
// FanoutSink is used to fan out values to multiple sinks
type FanoutSink []MetricSink
func (fh FanoutSink) SetGauge(key []string, val float32) {
fh.SetGaugeWithLabels(key, val, nil)
}
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.SetGaugeWithLabels(key, val, labels)
}
}
func (fh FanoutSink) EmitKey(key []string, val float32) {
for _, s := range fh {
s.EmitKey(key, val)
}
}
func (fh FanoutSink) IncrCounter(key []string, val float32) {
fh.IncrCounterWithLabels(key, val, nil)
}
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.IncrCounterWithLabels(key, val, labels)
}
}
func (fh FanoutSink) AddSample(key []string, val float32) {
fh.AddSampleWithLabels(key, val, nil)
}
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.AddSampleWithLabels(key, val, labels)
}
}
// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
// by each sink type
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
// sinkRegistry supports the generic NewMetricSink function by mapping URL
// schemes to metric sink factory functions
var sinkRegistry = map[string]sinkURLFactoryFunc{
"statsd": NewStatsdSinkFromURL,
"statsite": NewStatsiteSinkFromURL,
"inmem": NewInmemSinkFromURL,
}
// NewMetricSinkFromURL allows a generic URL input to configure any of the
// supported sinks. The scheme of the URL identifies the type of the sink, and
// the query parameters are used to set options.
//
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
// as the "addr" of the sink
//
// "statsite://" - Initializes a StatsiteSink. The host and port become the
// "addr" of the sink
//
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
// "interval" and "duration" query parameters must be specified with valid
// durations, see NewInmemSink for details.
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
if sinkURLFactoryFunc == nil {
return nil, fmt.Errorf(
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
}
return sinkURLFactoryFunc(u)
}
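As a usage sketch (the address is illustrative), the URL form replaces calling a concrete constructor directly:

```go
sink, _ := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
```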

@@ -1,141 +0,0 @@
package metrics
import (
"os"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/go-immutable-radix"
)
// Config is used to configure metrics settings
type Config struct {
ServiceName string // Prefixed with keys to separate services
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
EnableHostname bool // Enable prefixing gauge values with hostname
EnableHostnameLabel bool // Enable adding hostname to labels
EnableServiceLabel bool // Enable adding service to labels
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
TimerGranularity time.Duration // Granularity of timers.
ProfileInterval time.Duration // Interval to profile runtime metrics
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
FilterDefault bool // Whether to allow metrics by default
}
// Metrics represents an instance of a metrics sink that can
// be used to emit
type Metrics struct {
Config
lastNumGC uint32
sink MetricSink
filter *iradix.Tree
allowedLabels map[string]bool
blockedLabels map[string]bool
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
}
// Shared global metrics instance
var globalMetrics atomic.Value // *Metrics
func init() {
// Initialize to a blackhole sink to avoid errors
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
}
// DefaultConfig provides a sane default configuration
func DefaultConfig(serviceName string) *Config {
c := &Config{
ServiceName: serviceName, // Use client provided service
HostName: "",
EnableHostname: true, // Enable hostname prefix
EnableRuntimeMetrics: true, // Enable runtime profiling
EnableTypePrefix: false, // Disable type prefix
TimerGranularity: time.Millisecond, // Timers are in milliseconds
ProfileInterval: time.Second, // Poll runtime every second
FilterDefault: true, // Don't filter metrics by default
}
// Try to get the hostname
name, _ := os.Hostname()
c.HostName = name
return c
}
// New is used to create a new instance of Metrics
func New(conf *Config, sink MetricSink) (*Metrics, error) {
met := &Metrics{}
met.Config = *conf
met.sink = sink
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
// Start the runtime collector
if conf.EnableRuntimeMetrics {
go met.collectStats()
}
return met, nil
}
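// Example (illustrative sketch; the service name and address are
// placeholders): wiring a Metrics instance to a statsd sink with the
// default configuration.
//
//	conf := DefaultConfig("my-service")
//	sink, err := NewStatsdSink("127.0.0.1:8125")
//	if err == nil {
//		m, _ := New(conf, sink)
//		m.SetGauge([]string{"boot"}, 1)
//	}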
// NewGlobal is the same as New, but it assigns the metrics object to be
// used globally as well as returning it.
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
metrics, err := New(conf, sink)
if err == nil {
globalMetrics.Store(metrics)
}
return metrics, err
}
// Proxy all the methods to the globalMetrics instance
func SetGauge(key []string, val float32) {
globalMetrics.Load().(*Metrics).SetGauge(key, val)
}
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
}
func EmitKey(key []string, val float32) {
globalMetrics.Load().(*Metrics).EmitKey(key, val)
}
func IncrCounter(key []string, val float32) {
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
}
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
}
func AddSample(key []string, val float32) {
globalMetrics.Load().(*Metrics).AddSample(key, val)
}
func AddSampleWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
}
func MeasureSince(key []string, start time.Time) {
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
}
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
}
func UpdateFilter(allow, block []string) {
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
}
// UpdateFilterAndLabels sets the allow/block prefixes for metrics, while
// allowedLabels and blockedLabels - when not nil - filter labels globally,
// allowing or blocking them outright (especially useful when a given label
// has a large number of values). See README.md for more information about usage.
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
}
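// Example (illustrative usage; the key names are placeholders): once
// NewGlobal has installed an instance, the package-level helpers above
// forward to it.
//
//	IncrCounter([]string{"ldap", "bind"}, 1)
//	defer MeasureSince([]string{"ldap", "search"}, time.Now())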


@ -1,184 +0,0 @@
package metrics
import (
"bytes"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// statsdMaxLen is the maximum size of a packet
// to send to statsd
statsdMaxLen = 1400
)
// StatsdSink provides a MetricSink that can be used
// with a statsite or statsd metrics server. It uses
// only UDP packets, while StatsiteSink uses TCP.
type StatsdSink struct {
addr string
metricQueue chan string
}
// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsdSink(u.Host)
}
// NewStatsdSink is used to create a new StatsdSink
func NewStatsdSink(addr string) (*StatsdSink, error) {
s := &StatsdSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Close is used to stop flushing to statsd
func (s *StatsdSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsdSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsdSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsdSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
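// For example, flattenKey([]string{"api", "latency ms"}) yields
// "api.latency_ms": the parts are joined with '.' and both ':' and ' ' are
// mapped to '_'.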
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsdSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsdSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Create a buffer
buf := bytes.NewBuffer(nil)
// Attempt to connect
sock, err = net.Dial("udp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
goto WAIT
}
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Check if this would overflow the packet size
if len(metric)+buf.Len() > statsdMaxLen {
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error writing to statsd! Err: %s", err)
goto WAIT
}
}
// Append to the buffer
buf.WriteString(metric)
case <-ticker.C:
if buf.Len() == 0 {
continue
}
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}


@ -1,172 +0,0 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// We force flush the statsite metrics after this period of
// inactivity. Prevents stats from getting stuck in a buffer
// forever.
flushInterval = 100 * time.Millisecond
)
// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsiteSink(u.Host)
}
// StatsiteSink provides a MetricSink that can be used with a
// statsite metrics server
type StatsiteSink struct {
addr string
metricQueue chan string
}
// NewStatsiteSink is used to create a new StatsiteSink
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
s := &StatsiteSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Close is used to stop flushing to statsite
func (s *StatsiteSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsiteSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsiteSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
var buffered *bufio.Writer
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Attempt to connect
sock, err = net.Dial("tcp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
goto WAIT
}
// Create a buffered writer
buffered = bufio.NewWriter(sock)
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Try to send to statsite
_, err := buffered.Write([]byte(metric))
if err != nil {
log.Printf("[ERR] Error writing to statsite! Err: %s", err)
goto WAIT
}
case <-ticker.C:
if err := buffered.Flush(); err != nil {
log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}


@ -1,9 +0,0 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...


@ -1,10 +0,0 @@
# How to contribute
We definitely welcome patches and contributions to this project!
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.


@ -1,9 +0,0 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza


@ -1,27 +0,0 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,19 +0,0 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
`go get github.com/google/uuid`
###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/google/uuid

80
vendor/github.com/google/uuid/dce.go generated vendored

@ -1,80 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}

12
vendor/github.com/google/uuid/doc.go generated vendored

@ -1,12 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid


@ -1 +0,0 @@
module github.com/google/uuid


@ -1,53 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:])
h.Write(data)
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
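// Example (illustrative usage): name-based UUIDs are deterministic, so the
// same namespace and name always yield the same value.
//
//	id := NewSHA1(NameSpaceDNS, []byte("example.com"))
//	// id is identical on every call with these inputs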


@ -1,37 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err == nil {
*uuid = id
}
return err
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}
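// Example (illustrative usage): because UUID implements
// encoding.TextMarshaler, encoding/json renders it in its canonical string
// form.
//
//	b, err := json.Marshal(New()) // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"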


@ -1,90 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}


@ -1,12 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }


@ -1,33 +0,0 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

59
vendor/github.com/google/uuid/sql.go generated vendored

@ -1,59 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}
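// Example (illustrative sketch; db, the table and the query are
// hypothetical): Scan lets a UUID be read straight from a database column.
//
//	var id UUID
//	err := db.QueryRow("SELECT id FROM users LIMIT 1").Scan(&id)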

123
vendor/github.com/google/uuid/time.go generated vendored

@ -1,123 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds since the
// Unix epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
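// Example (illustrative usage): converting a Time to a standard library
// time.Time.
//
//	sec, nsec := t.UnixTime()
//	wall := time.Unix(sec, nsec)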
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}


@ -1,43 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}
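// For example, xtob('3', 'f') returns (0x3f, true), while a non-hex input
// such as xtob('g', '0') reports ok == false.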

245
vendor/github.com/google/uuid/uuid.go generated vendored

@ -1,245 +0,0 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
// Parse decodes s into a UUID or returns an error. Both the standard UUID
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if strings.ToLower(s[:9]) != "urn:uuid:" {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
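// Example (illustrative usage): all four accepted encodings decode to the
// same value.
//
//	u1, _ := Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
//	u2, _ := Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
//	u3, _ := Parse("{6ba7b810-9dad-11d1-80b4-00c04fd430c8}")
//	u4, _ := Parse("6ba7b8109dad11d180b400c04fd430c8")
//	// u1 == u2 == u3 == u4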
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
}
// b is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}


@ -1,44 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns Nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nodeMu.Unlock()
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
copy(uuid[10:], nodeID[:])
return uuid, nil
}


@ -1,38 +0,0 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
var uuid UUID
_, err := io.ReadFull(rander, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
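// Example (illustrative usage):
//
//	id := New()      // panics only if the random source fails
//	s := id.String() // canonical textual form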


@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor’s Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party’s
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients’ rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients’
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party’s negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party’s ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.


@ -1,67 +0,0 @@
Consul API client
=================
This package provides the `api` package which attempts to
provide programmatic access to the full Consul API.
Currently, all of the Consul APIs included in version 0.6.0 are supported.
Documentation
=============
The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
Usage
=====
Below is an example of using the Consul client:
```go
package main
import "github.com/hashicorp/consul/api"
import "fmt"
func main() {
// Get a new client
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
panic(err)
}
// Get a handle to the KV API
kv := client.KV()
// PUT a new KV pair
p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
_, err = kv.Put(p, nil)
if err != nil {
panic(err)
}
// Lookup the pair
pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
if err != nil {
panic(err)
}
fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
}
```
To run this example, start a Consul server:
```bash
consul agent -dev
```
Copy the code above into a file such as `main.go`.
Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
```bash
$ go get
$ go run main.go
KV: REDIS_MAXCLIENTS 1000
```
After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv

File diff suppressed because it is too large


@ -1,999 +0,0 @@
package api
import (
"bufio"
"bytes"
"fmt"
"io"
"net/http"
"net/url"
)
// ServiceKind is the kind of service being registered.
type ServiceKind string
const (
// ServiceKindTypical is a typical, classic Consul service. This is
// represented by the absence of a value. This was chosen for ease of
// backwards compatibility: existing services in the catalog would
// default to the typical service.
ServiceKindTypical ServiceKind = ""
// ServiceKindConnectProxy is a proxy for the Connect feature. This
// service proxies another service within Consul and speaks the connect
// protocol.
ServiceKindConnectProxy ServiceKind = "connect-proxy"
// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
// service will proxy connections based on the SNI header set by other
// connect proxies
ServiceKindMeshGateway ServiceKind = "mesh-gateway"
)
// UpstreamDestType is the type of upstream discovery mechanism.
type UpstreamDestType string
const (
// UpstreamDestTypeService discovers instances via healthy service lookup.
UpstreamDestTypeService UpstreamDestType = "service"
// UpstreamDestTypePreparedQuery discovers instances via prepared query
// execution.
UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
)
// AgentCheck represents a check known to the agent
type AgentCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
Type string
Definition HealthCheckDefinition
}
// AgentWeights represent optional weights for a service
type AgentWeights struct {
Passing int
Warning int
}
// AgentService represents a service known to the agent
type AgentService struct {
Kind ServiceKind `json:",omitempty"`
ID string
Service string
Tags []string
Meta map[string]string
Port int
Address string
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
Weights AgentWeights
EnableTagOverride bool
CreateIndex uint64 `json:",omitempty" bexpr:"-"`
ModifyIndex uint64 `json:",omitempty" bexpr:"-"`
ContentHash string `json:",omitempty" bexpr:"-"`
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentServiceChecksInfo returns information about a Service and its checks
type AgentServiceChecksInfo struct {
AggregatedStatus string
Service *AgentService
Checks HealthChecks
}
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
Native bool `json:",omitempty"`
SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
}
// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
// ServiceDefinition or response.
type AgentServiceConnectProxyConfig struct {
DestinationServiceName string `json:",omitempty"`
DestinationServiceID string `json:",omitempty"`
LocalServiceAddress string `json:",omitempty"`
LocalServicePort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
Upstreams []Upstream `json:",omitempty"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
Expose ExposeConfig `json:",omitempty"`
}
// AgentMember represents a cluster member known to the agent
type AgentMember struct {
Name string
Addr string
Port uint16
Tags map[string]string
Status int
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
// AllSegments is used to select for all segments in MembersOpts.
const AllSegments = "_all"
// MembersOpts is used for querying member information.
type MembersOpts struct {
// WAN is whether to show members from the WAN.
WAN bool
// Segment is the LAN segment to show members for. Setting this to the
// AllSegments value above will show members in all segments.
Segment string
}
// AgentServiceRegistration is used to register a new service
type AgentServiceRegistration struct {
Kind ServiceKind `json:",omitempty"`
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Tags []string `json:",omitempty"`
Port int `json:",omitempty"`
Address string `json:",omitempty"`
TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
EnableTagOverride bool `json:",omitempty"`
Meta map[string]string `json:",omitempty"`
Weights *AgentWeights `json:",omitempty"`
Check *AgentServiceCheck
Checks AgentServiceChecks
Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
Connect *AgentServiceConnect `json:",omitempty"`
}
// AgentCheckRegistration is used to register a new check
type AgentCheckRegistration struct {
ID string `json:",omitempty"`
Name string `json:",omitempty"`
Notes string `json:",omitempty"`
ServiceID string `json:",omitempty"`
AgentServiceCheck
}
// AgentServiceCheck is used to define a node or service level check
type AgentServiceCheck struct {
CheckID string `json:",omitempty"`
Name string `json:",omitempty"`
Args []string `json:"ScriptArgs,omitempty"`
DockerContainerID string `json:",omitempty"`
Shell string `json:",omitempty"` // Only supported for Docker.
Interval string `json:",omitempty"`
Timeout string `json:",omitempty"`
TTL string `json:",omitempty"`
HTTP string `json:",omitempty"`
Header map[string][]string `json:",omitempty"`
Method string `json:",omitempty"`
TCP string `json:",omitempty"`
Status string `json:",omitempty"`
Notes string `json:",omitempty"`
TLSSkipVerify bool `json:",omitempty"`
GRPC string `json:",omitempty"`
GRPCUseTLS bool `json:",omitempty"`
AliasNode string `json:",omitempty"`
AliasService string `json:",omitempty"`
// In Consul 0.7 and later, checks that are associated with a service
// may also contain this optional DeregisterCriticalServiceAfter field,
// which is a timeout in the same Go time format as Interval and TTL. If
// a check is in the critical state for more than this configured value,
// then its associated service (and all of its associated checks) will
// automatically be deregistered.
DeregisterCriticalServiceAfter string `json:",omitempty"`
}
type AgentServiceChecks []*AgentServiceCheck
// AgentToken is used when updating ACL tokens for an agent.
type AgentToken struct {
Token string
}
// Metrics info is used to store different types of metric values from the agent.
type MetricsInfo struct {
Timestamp string
Gauges []GaugeValue
Points []PointValue
Counters []SampledValue
Samples []SampledValue
}
// GaugeValue stores one value that is updated as time goes on, such as
// the amount of memory allocated.
type GaugeValue struct {
Name string
Value float32
Labels map[string]string
}
// PointValue holds a series of points for a metric.
type PointValue struct {
Name string
Points []float32
}
// SampledValue stores info about a metric that is incremented over time,
// such as the number of requests to an HTTP endpoint.
type SampledValue struct {
Name string
Count int
Sum float64
Min float64
Max float64
Mean float64
Stddev float64
Labels map[string]string
}
// AgentAuthorizeParams are the request parameters for authorizing a request.
type AgentAuthorizeParams struct {
Target string
ClientCertURI string
ClientCertSerial string
}
// AgentAuthorize is the response structure for Connect authorization.
type AgentAuthorize struct {
Authorized bool
Reason string
}
// ConnectProxyConfig is the response structure for agent-local proxy
// configuration.
type ConnectProxyConfig struct {
ProxyServiceID string
TargetServiceID string
TargetServiceName string
ContentHash string
Config map[string]interface{} `bexpr:"-"`
Upstreams []Upstream
}
// Upstream is the response structure for a proxy upstream configuration.
type Upstream struct {
DestinationType UpstreamDestType `json:",omitempty"`
DestinationNamespace string `json:",omitempty"`
DestinationName string
Datacenter string `json:",omitempty"`
LocalBindAddress string `json:",omitempty"`
LocalBindPort int `json:",omitempty"`
Config map[string]interface{} `json:",omitempty" bexpr:"-"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
}
// Agent can be used to query the Agent endpoints
type Agent struct {
c *Client
// cache the node name
nodeName string
}
// Agent returns a handle to the agent endpoints
func (c *Client) Agent() *Agent {
return &Agent{c: c}
}
// Self is used to query the agent we are speaking to for
// information about itself
func (a *Agent) Self() (map[string]map[string]interface{}, error) {
r := a.c.newRequest("GET", "/v1/agent/self")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Host is used to retrieve information about the host the
// agent is running on, such as CPU, memory, and disk. Requires
// an operator:read ACL token.
func (a *Agent) Host() (map[string]interface{}, error) {
r := a.c.newRequest("GET", "/v1/agent/host")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]interface{}
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Metrics is used to query the agent we are speaking to for
// its current internal metric data
func (a *Agent) Metrics() (*MetricsInfo, error) {
r := a.c.newRequest("GET", "/v1/agent/metrics")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out *MetricsInfo
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Reload triggers a configuration reload for the agent we are connected to.
func (a *Agent) Reload() error {
r := a.c.newRequest("PUT", "/v1/agent/reload")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// NodeName is used to get the node name of the agent
func (a *Agent) NodeName() (string, error) {
if a.nodeName != "" {
return a.nodeName, nil
}
info, err := a.Self()
if err != nil {
return "", err
}
name := info["Config"]["NodeName"].(string)
a.nodeName = name
return name, nil
}
// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
return a.ChecksWithFilter("")
}
// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
r := a.c.newRequest("GET", "/v1/agent/checks")
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentCheck
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
return a.ServicesWithFilter("")
}
// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
r := a.c.newRequest("GET", "/v1/agent/services")
r.filterQuery(filter)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out map[string]*AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// AgentHealthServiceByID returns, for a given serviceID, the aggregated health status and the service definition, or an error if any
// - If the service is not found, will return status (critical, nil, nil)
// - If the service is found, will return (critical|passing|warning, AgentServiceChecksInfo, nil)
// - In all other cases, will return an error
func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) {
path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID))
r := a.c.newRequest("GET", path)
r.params.Add("format", "json")
r.header.Set("Accept", "application/json")
_, resp, err := a.c.doRequest(r)
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
// Service not Found
if resp.StatusCode == http.StatusNotFound {
return HealthCritical, nil, nil
}
var out *AgentServiceChecksInfo
if err := decodeBody(resp, &out); err != nil {
return HealthCritical, out, err
}
switch resp.StatusCode {
case http.StatusOK:
return HealthPassing, out, nil
case http.StatusTooManyRequests:
return HealthWarning, out, nil
case http.StatusServiceUnavailable:
return HealthCritical, out, nil
}
return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
}
// AgentHealthServiceByName returns, for a given service name, the aggregated health status for all services
// having the specified name.
// - If no service is found, will return status (critical, [], nil)
// - If the service is found, will return (critical|passing|warning, []api.AgentServiceChecksInfo, nil)
// - In all other cases, will return an error
func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
r := a.c.newRequest("GET", path)
r.params.Add("format", "json")
r.header.Set("Accept", "application/json")
_, resp, err := a.c.doRequest(r)
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
// Service not Found
if resp.StatusCode == http.StatusNotFound {
return HealthCritical, nil, nil
}
var out []AgentServiceChecksInfo
if err := decodeBody(resp, &out); err != nil {
return HealthCritical, out, err
}
switch resp.StatusCode {
case http.StatusOK:
return HealthPassing, out, nil
case http.StatusTooManyRequests:
return HealthWarning, out, nil
case http.StatusServiceUnavailable:
return HealthCritical, out, nil
}
return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
}
// Service returns a locally registered service instance and allows for
// hash-based blocking.
//
// Note that this uses an unconventional blocking mechanism since it's
// agent-local state. That means there is no persistent raft index so we block
// based on object hash instead.
func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *AgentService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Members returns the known gossip members. The WAN
// flag can be used to query a server for WAN members.
func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// MembersOpts returns the known gossip members and can be passed
// additional options for WAN/segment filtering.
func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
r := a.c.newRequest("GET", "/v1/agent/members")
r.params.Set("segment", opts.Segment)
if opts.WAN {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*AgentMember
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// ServiceRegister is used to register a new service with
// the local agent
func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/service/register")
r.obj = service
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ServiceDeregister is used to deregister a service with
// the local agent
func (a *Agent) ServiceDeregister(serviceID string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// PassTTL is used to set a TTL check to the passing state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) PassTTL(checkID, note string) error {
return a.updateTTL(checkID, note, "pass")
}
// WarnTTL is used to set a TTL check to the warning state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) WarnTTL(checkID, note string) error {
return a.updateTTL(checkID, note, "warn")
}
// FailTTL is used to set a TTL check to the failing state.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 or changed to use
// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
func (a *Agent) FailTTL(checkID, note string) error {
return a.updateTTL(checkID, note, "fail")
}
// updateTTL is used to update the TTL of a check. This is the internal
// method that uses the old API that's present in Consul versions prior to
// 0.6.4. Since Consul didn't have an analogous "update" API before, it seemed
// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
// but keep the old Pass/Warn/Fail methods using the old API under the hood.
//
// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
// The client interface will be removed in 0.8 and the server endpoints will
// be removed in 0.9.
func (a *Agent) updateTTL(checkID, note, status string) error {
switch status {
case "pass":
case "warn":
case "fail":
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
r := a.c.newRequest("PUT", endpoint)
r.params.Set("note", note)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// checkUpdate is the payload for a PUT for a check update.
type checkUpdate struct {
// Status is one of the api.Health* states: HealthPassing
// ("passing"), HealthWarning ("warning"), or HealthCritical
// ("critical").
Status string
// Output is the information to post to the UI for operators as the
// output of the process that decided to hit the TTL check. This is
// different from the note field that's associated with the check
// itself.
Output string
}
// UpdateTTL is used to update the TTL of a check. This uses the newer API
// that was introduced in Consul 0.6.4 and later. We translate the old status
// strings for compatibility (though a newer version of Consul will still be
// required to use this API).
func (a *Agent) UpdateTTL(checkID, output, status string) error {
switch status {
case "pass", HealthPassing:
status = HealthPassing
case "warn", HealthWarning:
status = HealthWarning
case "fail", HealthCritical:
status = HealthCritical
default:
return fmt.Errorf("Invalid status: %s", status)
}
endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
r := a.c.newRequest("PUT", endpoint)
r.obj = &checkUpdate{
Status: status,
Output: output,
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckRegister is used to register a new check with
// the local agent
func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
r := a.c.newRequest("PUT", "/v1/agent/check/register")
r.obj = check
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// CheckDeregister is used to deregister a check with
// the local agent
func (a *Agent) CheckDeregister(checkID string) error {
r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Join is used to instruct the agent to attempt a join to
// another cluster member
func (a *Agent) Join(addr string, wan bool) error {
r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
if wan {
r.params.Set("wan", "1")
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Leave is used to have the agent gracefully leave the cluster and shut down
func (a *Agent) Leave() error {
r := a.c.newRequest("PUT", "/v1/agent/leave")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ForceLeave is used to have the agent eject a failed node
func (a *Agent) ForceLeave(node string) error {
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ForceLeavePrune is used to have a failed agent removed
// from the list of members
func (a *Agent) ForceLeavePrune(node string) error {
r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
r.params.Set("prune", "1")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// ConnectAuthorize is used to authorize an incoming connection
// to a natively integrated Connect service.
func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
r := a.c.newRequest("POST", "/v1/agent/connect/authorize")
r.obj = auth
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out AgentAuthorize
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// ConnectCARoots returns the list of roots.
func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots")
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out CARootList
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// ConnectCALeaf gets the leaf certificate for the given service ID.
func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID)
r.setQueryOptions(q)
rtt, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out LeafCert
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// EnableServiceMaintenance toggles service maintenance mode on
// for the given service ID.
func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
r.params.Set("enable", "true")
r.params.Set("reason", reason)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// DisableServiceMaintenance toggles service maintenance mode off
// for the given service ID.
func (a *Agent) DisableServiceMaintenance(serviceID string) error {
r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
r.params.Set("enable", "false")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// EnableNodeMaintenance toggles node maintenance mode on for the
// agent we are connected to.
func (a *Agent) EnableNodeMaintenance(reason string) error {
r := a.c.newRequest("PUT", "/v1/agent/maintenance")
r.params.Set("enable", "true")
r.params.Set("reason", reason)
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// DisableNodeMaintenance toggles node maintenance mode off for the
// agent we are connected to.
func (a *Agent) DisableNodeMaintenance() error {
r := a.c.newRequest("PUT", "/v1/agent/maintenance")
r.params.Set("enable", "false")
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// Monitor returns a channel which will receive streaming logs from the agent.
// Providing a non-nil stopCh can be used to close the connection and stop the
// log stream. An empty string will be sent down the given channel when there's
// nothing left to stream, after which the caller should close the stopCh.
func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
r := a.c.newRequest("GET", "/v1/agent/monitor")
r.setQueryOptions(q)
if loglevel != "" {
r.params.Add("loglevel", loglevel)
}
_, resp, err := requireOK(a.c.doRequest(r))
if err != nil {
return nil, err
}
logCh := make(chan string, 64)
go func() {
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
for {
select {
case <-stopCh:
close(logCh)
return
default:
}
if scanner.Scan() {
// An empty string signals to the caller that
// the scan is done, so make sure we only emit
// that when the scanner says it's done, not if
// we happen to ingest an empty line.
if text := scanner.Text(); text != "" {
logCh <- text
} else {
logCh <- " "
}
} else {
logCh <- ""
}
}
}()
return logCh, nil
}
// UpdateACLToken updates the agent's "acl_token". See updateToken for more
// details.
//
// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above
func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateToken("acl_token", token, q)
}
// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
// for more details.
//
// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above
func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateToken("acl_agent_token", token, q)
}
// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
// updateToken for more details.
//
// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above
func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateToken("acl_agent_master_token", token, q)
}
// UpdateACLReplicationToken updates the agent's "acl_replication_token". See
// updateToken for more details.
//
// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above
func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateToken("acl_replication_token", token, q)
}
// UpdateDefaultACLToken updates the agent's "default" token. See updateToken
// for more details
func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateTokenFallback("default", "acl_token", token, q)
}
// UpdateAgentACLToken updates the agent's "agent" token. See updateToken
// for more details
func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateTokenFallback("agent", "acl_agent_token", token, q)
}
// UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken
// for more details
func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q)
}
// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
// for more details
func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
return a.updateTokenFallback("replication", "acl_replication_token", token, q)
}
// updateToken can be used to update one of an agent's ACL tokens after the agent has
// started. The tokens may not be persisted, so they will need to be updated again if
// the agent is restarted unless the agent is configured to persist them.
func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
meta, _, err := a.updateTokenOnce(target, token, q)
return meta, err
}
func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) {
meta, status, err := a.updateTokenOnce(target, token, q)
if err != nil && status == 404 {
meta, _, err = a.updateTokenOnce(fallback, token, q)
}
return meta, err
}
func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
r.setWriteOptions(q)
r.obj = &AgentToken{Token: token}
rtt, resp, err := a.c.doRequest(r)
if err != nil {
return nil, 0, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
return wm, resp.StatusCode, nil
}
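
For orientation, here is a hedged sketch of how the Agent client above was typically driven: register a service with a TTL check, then report the check as passing. The service name, ID, and port are illustrative:

package main

import (
    "log"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    agent := client.Agent()

    // Register a service with a TTL check (see AgentServiceRegistration
    // and AgentServiceCheck above). All names here are illustrative.
    reg := &api.AgentServiceRegistration{
        ID:   "bottin-1",
        Name: "bottin",
        Port: 1389,
        Check: &api.AgentServiceCheck{
            TTL:                            "15s",
            DeregisterCriticalServiceAfter: "1m",
        },
    }
    if err := agent.ServiceRegister(reg); err != nil {
        log.Fatal(err)
    }

    // Report the TTL check as passing; Consul derives the check ID
    // "service:<service-id>" for a check registered with a service.
    if err := agent.UpdateTTL("service:bottin-1", "all good", api.HealthPassing); err != nil {
        log.Fatal(err)
    }
}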

View file

@ -1,974 +0,0 @@
package api
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
)
const (
// HTTPAddrEnvName defines an environment variable name which sets
// the HTTP address if there is no -http-addr specified.
HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
// HTTPTokenEnvName defines an environment variable name which sets
// the HTTP token.
HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
// HTTPTokenFileEnvName defines an environment variable name which sets
// the HTTP token file.
HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
// HTTPAuthEnvName defines an environment variable name which sets
// the HTTP authentication header.
HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
// HTTPSSLEnvName defines an environment variable name which sets
// whether or not to use HTTPS.
HTTPSSLEnvName = "CONSUL_HTTP_SSL"
// HTTPCAFile defines an environment variable name which sets the
// CA file to use for talking to Consul over TLS.
HTTPCAFile = "CONSUL_CACERT"
// HTTPCAPath defines an environment variable name which sets the
// path to a directory of CA certs to use for talking to Consul over TLS.
HTTPCAPath = "CONSUL_CAPATH"
// HTTPClientCert defines an environment variable name which sets the
// client cert file to use for talking to Consul over TLS.
HTTPClientCert = "CONSUL_CLIENT_CERT"
// HTTPClientKey defines an environment variable name which sets the
// client key file to use for talking to Consul over TLS.
HTTPClientKey = "CONSUL_CLIENT_KEY"
// HTTPTLSServerName defines an environment variable name which sets the
// server name to use as the SNI host when connecting via TLS
HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
// HTTPSSLVerifyEnvName defines an environment variable name which sets
// whether or not to disable certificate checking.
HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
// GRPCAddrEnvName defines an environment variable name which sets the gRPC
// address for consul connect envoy. Note this isn't actually used by the api
// client in this package but is defined here for consistency with all the
// other ENV names we use.
GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
)
// QueryOptions are used to parameterize a query
type QueryOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// AllowStale allows any Consul server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
// RequireConsistent forces the read to be fully consistent.
// This is more expensive but prevents ever performing a stale
// read.
RequireConsistent bool
// UseCache requests that the agent cache results locally. See
// https://www.consul.io/api/features/caching.html for more details on the
// semantics.
UseCache bool
// MaxAge limits how old a cached value will be returned if UseCache is true.
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
// StaleIfError to a longer duration to change this behavior. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/features/caching.html for more details.
MaxAge time.Duration
// StaleIfError specifies how stale the client will accept a cached response
// if the servers are unavailable to fetch a fresh one. Only makes sense when
// UseCache is true and MaxAge is set to a lower, non-zero value. It is
// ignored if the endpoint supports background refresh caching. See
// https://www.consul.io/api/features/caching.html for more details.
StaleIfError time.Duration
// WaitIndex is used to enable a blocking query. Waits
// until the timeout or the next index is reached
WaitIndex uint64
// WaitHash is used by some endpoints instead of WaitIndex to perform blocking
// on state based on a hash of the response rather than a monotonic index.
// This is required when the state being blocked on is not stored in Raft, for
// example agent-local proxy configuration.
WaitHash string
// WaitTime is used to bound the duration of a wait.
// Defaults to that of the Config, but can be overridden.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// Near is used to provide a node name that will sort the results
// in ascending order based on the estimated round trip time from
// that node. Setting this to "_agent" will use the agent's node
// for the sort.
Near string
// NodeMeta is used to filter results by nodes with the given
// metadata key/value pairs. Currently, only one key/value pair can
// be provided for filtering.
NodeMeta map[string]string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
// LocalOnly is used in keyring list operation to force the keyring
// query to only hit local servers (no WAN traffic).
LocalOnly bool
// Connect filters prepared query execution to only include Connect-capable
// services. This currently affects prepared query execution.
Connect bool
// ctx is an optional context pass through to the underlying HTTP
// request layer. Use Context() and WithContext() to manage this.
ctx context.Context
// Filter requests filtering data prior to it being returned. The string
// is a go-bexpr compatible expression.
Filter string
}
func (o *QueryOptions) Context() context.Context {
if o != nil && o.ctx != nil {
return o.ctx
}
return context.Background()
}
func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
o2 := new(QueryOptions)
if o != nil {
*o2 = *o
}
o2.ctx = ctx
return o2
}
// WriteOptions are used to parameterize a write
type WriteOptions struct {
// Providing a datacenter overwrites the DC provided
// by the Config
Datacenter string
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// RelayFactor is used in keyring operations to cause responses to be
// relayed back to the sender through N other random nodes. Must be
// a value from 0 to 5 (inclusive).
RelayFactor uint8
// ctx is an optional context pass through to the underlying HTTP
// request layer. Use Context() and WithContext() to manage this.
ctx context.Context
}
func (o *WriteOptions) Context() context.Context {
if o != nil && o.ctx != nil {
return o.ctx
}
return context.Background()
}
func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
o2 := new(WriteOptions)
if o != nil {
*o2 = *o
}
o2.ctx = ctx
return o2
}
// QueryMeta is used to return meta data about a query
type QueryMeta struct {
// LastIndex. This can be used as a WaitIndex to perform
// a blocking query
LastIndex uint64
// LastContentHash. This can be used as a WaitHash to perform a blocking query
// for endpoints that support hash-based blocking. Endpoints that do not
// support it will return an empty hash.
LastContentHash string
// Time of last contact from the leader for the
// server servicing the request
LastContact time.Duration
// Is there a known leader
KnownLeader bool
// How long did the request take
RequestTime time.Duration
// Is address translation enabled for HTTP responses on this agent
AddressTranslationEnabled bool
// CacheHit is true if the result was served from agent-local cache.
CacheHit bool
// CacheAge is set if request was ?cached and indicates how stale the cached
// response is.
CacheAge time.Duration
}
// WriteMeta is used to return meta data about a write
type WriteMeta struct {
// How long did the request take
RequestTime time.Duration
}
// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
type HttpBasicAuth struct {
// Username to use for HTTP Basic Authentication
Username string
// Password to use for HTTP Basic Authentication
Password string
}
// Config is used to configure the creation of a client
type Config struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
TokenFile string
TLSConfig TLSConfig
}
// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
// Consul using TLS.
type TLSConfig struct {
// Address is the optional address of the Consul server. The port, if any
// will be removed from here and this will be set to the ServerName of the
// resulting config.
Address string
// CAFile is the optional path to the CA certificate used for Consul
// communication, defaults to the system bundle if not specified.
CAFile string
// CAPath is the optional path to a directory of CA certificates to use for
// Consul communication, defaults to the system bundle if not specified.
CAPath string
// CertFile is the optional path to the certificate for Consul
// communication. If this is set then you need to also set KeyFile.
CertFile string
// KeyFile is the optional path to the private key for Consul communication.
// If this is set then you need to also set CertFile.
KeyFile string
// InsecureSkipVerify if set to true will disable TLS host verification.
InsecureSkipVerify bool
}
// DefaultConfig returns a default configuration for the client. By default this
// will pool and reuse idle connections to Consul. If you have a long-lived
// client object, this is the desired behavior and should make the most efficient
// use of the connections to Consul. If you don't reuse a client object, which
// is not recommended, then you may notice idle connections building up over
// time. To avoid this, use the DefaultNonPooledConfig() instead.
func DefaultConfig() *Config {
return defaultConfig(cleanhttp.DefaultPooledTransport)
}
// DefaultNonPooledConfig returns a default configuration for the client which
// does not pool connections. This isn't a recommended configuration because it
// will reconnect to Consul on every request, but this is useful to avoid the
// accumulation of idle connections if you make many client objects during the
// lifetime of your application.
func DefaultNonPooledConfig() *Config {
return defaultConfig(cleanhttp.DefaultTransport)
}
// defaultConfig returns the default configuration for the client, using the
// given function to make the transport.
func defaultConfig(transportFn func() *http.Transport) *Config {
config := &Config{
Address: "127.0.0.1:8500",
Scheme: "http",
Transport: transportFn(),
}
if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
config.Address = addr
}
if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
config.TokenFile = tokenFile
}
if token := os.Getenv(HTTPTokenEnvName); token != "" {
config.Token = token
}
if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
var username, password string
if strings.Contains(auth, ":") {
split := strings.SplitN(auth, ":", 2)
username = split[0]
password = split[1]
} else {
username = auth
}
config.HttpAuth = &HttpBasicAuth{
Username: username,
Password: password,
}
}
if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
enabled, err := strconv.ParseBool(ssl)
if err != nil {
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
}
if enabled {
config.Scheme = "https"
}
}
if v := os.Getenv(HTTPTLSServerName); v != "" {
config.TLSConfig.Address = v
}
if v := os.Getenv(HTTPCAFile); v != "" {
config.TLSConfig.CAFile = v
}
if v := os.Getenv(HTTPCAPath); v != "" {
config.TLSConfig.CAPath = v
}
if v := os.Getenv(HTTPClientCert); v != "" {
config.TLSConfig.CertFile = v
}
if v := os.Getenv(HTTPClientKey); v != "" {
config.TLSConfig.KeyFile = v
}
if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
doVerify, err := strconv.ParseBool(v)
if err != nil {
log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
}
if !doVerify {
config.TLSConfig.InsecureSkipVerify = true
}
}
return config
}
// SetupTLSConfig is used to generate a TLSClientConfig that's useful for talking to
// Consul using TLS.
func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
tlsClientConfig := &tls.Config{
InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
}
if tlsConfig.Address != "" {
server := tlsConfig.Address
hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
if hasPort {
var err error
server, _, err = net.SplitHostPort(server)
if err != nil {
return nil, err
}
}
tlsClientConfig.ServerName = server
}
if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
if err != nil {
return nil, err
}
tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
}
if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" {
rootConfig := &rootcerts.Config{
CAFile: tlsConfig.CAFile,
CAPath: tlsConfig.CAPath,
}
if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
return nil, err
}
}
return tlsClientConfig, nil
}
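// GenerateEnv returns the CONSUL_* environment variables corresponding
// to this configuration.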
func (c *Config) GenerateEnv() []string {
env := make([]string, 0, 10)
env = append(env,
fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile),
fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile),
fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address),
fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify))
if c.HttpAuth != nil {
env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password))
} else {
env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName))
}
return env
}
// Client provides a client to the Consul API
type Client struct {
config Config
}
// NewClient returns a new client
func NewClient(config *Config) (*Client, error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.Address) == 0 {
config.Address = defConfig.Address
}
if len(config.Scheme) == 0 {
config.Scheme = defConfig.Scheme
}
if config.Transport == nil {
config.Transport = defConfig.Transport
}
if config.TLSConfig.Address == "" {
config.TLSConfig.Address = defConfig.TLSConfig.Address
}
if config.TLSConfig.CAFile == "" {
config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
}
if config.TLSConfig.CAPath == "" {
config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
}
if config.TLSConfig.CertFile == "" {
config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
}
if config.TLSConfig.KeyFile == "" {
config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
}
if !config.TLSConfig.InsecureSkipVerify {
config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
}
if config.HttpClient == nil {
var err error
config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
if err != nil {
return nil, err
}
}
parts := strings.SplitN(config.Address, "://", 2)
if len(parts) == 2 {
switch parts[0] {
case "http":
config.Scheme = "http"
case "https":
config.Scheme = "https"
case "unix":
trans := cleanhttp.DefaultTransport()
trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", parts[1])
}
config.HttpClient = &http.Client{
Transport: trans,
}
default:
return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
}
config.Address = parts[1]
}
// If the TokenFile is set, always use that, even if a Token is configured.
// This is because when TokenFile is set it is read into the Token field.
// We want any derived clients to have to re-read the token file.
if config.TokenFile != "" {
data, err := ioutil.ReadFile(config.TokenFile)
if err != nil {
return nil, fmt.Errorf("Error loading token file: %s", err)
}
if token := strings.TrimSpace(string(data)); token != "" {
config.Token = token
}
}
if config.Token == "" {
config.Token = defConfig.Token
}
return &Client{config: *config}, nil
}
// NewHttpClient returns an http client configured with the given Transport and TLS
// config.
func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
client := &http.Client{
Transport: transport,
}
// TODO (slackpad) - Once we get some run time on the HTTP/2 support we
// should turn it on by default if TLS is enabled. We would basically
// just need to call http2.ConfigureTransport(transport) here. We also
// don't want to introduce another external dependency on
// golang.org/x/net/http2 at this time. For a complete recipe for how
// to enable HTTP/2 support on a transport suitable for the API client
// library see agent/http_test.go:TestHTTPServer_H2.
if transport.TLSClientConfig == nil {
tlsClientConfig, err := SetupTLSConfig(&tlsConf)
if err != nil {
return nil, err
}
transport.TLSClientConfig = tlsClientConfig
}
return client, nil
}
// request is used to help build up a request
type request struct {
config *Config
method string
url *url.URL
params url.Values
body io.Reader
header http.Header
obj interface{}
ctx context.Context
}
// setQueryOptions is used to annotate the request with
// additional query options
func (r *request) setQueryOptions(q *QueryOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.AllowStale {
r.params.Set("stale", "")
}
if q.RequireConsistent {
r.params.Set("consistent", "")
}
if q.WaitIndex != 0 {
r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
}
if q.WaitTime != 0 {
r.params.Set("wait", durToMsec(q.WaitTime))
}
if q.WaitHash != "" {
r.params.Set("hash", q.WaitHash)
}
if q.Token != "" {
r.header.Set("X-Consul-Token", q.Token)
}
if q.Near != "" {
r.params.Set("near", q.Near)
}
if q.Filter != "" {
r.params.Set("filter", q.Filter)
}
if len(q.NodeMeta) > 0 {
for key, value := range q.NodeMeta {
r.params.Add("node-meta", key+":"+value)
}
}
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
if q.LocalOnly {
r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
}
if q.Connect {
r.params.Set("connect", "true")
}
if q.UseCache && !q.RequireConsistent {
r.params.Set("cached", "")
cc := []string{}
if q.MaxAge > 0 {
cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
}
if q.StaleIfError > 0 {
cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
}
if len(cc) > 0 {
r.header.Set("Cache-Control", strings.Join(cc, ", "))
}
}
r.ctx = q.ctx
}
// durToMsec converts a duration to a millisecond specified string. If the
// user selected a positive value that rounds to 0 ms, then we will use 1 ms
// so they get a short delay, otherwise Consul will translate the 0 ms into
// a huge default delay.
func durToMsec(dur time.Duration) string {
ms := dur / time.Millisecond
if dur > 0 && ms == 0 {
ms = 1
}
return fmt.Sprintf("%dms", ms)
}
// serverError is a string we look for to detect 500 errors.
const serverError = "Unexpected response code: 500"
// IsRetryableError returns true for 500 errors from the Consul servers, and
// network connection errors. These are usually retryable at a later time.
// This applies to reads but NOT to writes. This may return true for errors
// on writes that may have still gone through, so do not use this to retry
// any write operations.
func IsRetryableError(err error) bool {
if err == nil {
return false
}
if _, ok := err.(net.Error); ok {
return true
}
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
return strings.Contains(err.Error(), serverError)
}
// setWriteOptions is used to annotate the request with
// additional write options
func (r *request) setWriteOptions(q *WriteOptions) {
if q == nil {
return
}
if q.Datacenter != "" {
r.params.Set("dc", q.Datacenter)
}
if q.Token != "" {
r.header.Set("X-Consul-Token", q.Token)
}
if q.RelayFactor != 0 {
r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
}
r.ctx = q.ctx
}
// toHTTP converts the request to an HTTP request
func (r *request) toHTTP() (*http.Request, error) {
// Encode the query parameters
r.url.RawQuery = r.params.Encode()
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP request
req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
if err != nil {
return nil, err
}
req.URL.Host = r.url.Host
req.URL.Scheme = r.url.Scheme
req.Host = r.url.Host
req.Header = r.header
// Setup auth
if r.config.HttpAuth != nil {
req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
}
if r.ctx != nil {
return req.WithContext(r.ctx), nil
}
return req, nil
}
// newRequest is used to create a new request
func (c *Client) newRequest(method, path string) *request {
r := &request{
config: &c.config,
method: method,
url: &url.URL{
Scheme: c.config.Scheme,
Host: c.config.Address,
Path: path,
},
params: make(map[string][]string),
header: make(http.Header),
}
if c.config.Datacenter != "" {
r.params.Set("dc", c.config.Datacenter)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
if c.config.Token != "" {
r.header.Set("X-Consul-Token", r.config.Token)
}
return r
}
// doRequest runs a request with our client
func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return 0, nil, err
}
start := time.Now()
resp, err := c.config.HttpClient.Do(req)
diff := time.Since(start)
return diff, resp, err
}
// Query is used to do a GET request against an endpoint
// and deserialize the response into an interface using
// standard Consul conventions.
func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
r := c.newRequest("GET", endpoint)
r.setQueryOptions(q)
rtt, resp, err := c.doRequest(r)
if err != nil {
return nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if err := decodeBody(resp, out); err != nil {
return nil, err
}
return qm, nil
}
// write is used to do a PUT request against an endpoint
// and serialize/deserialized using the standard Consul conventions.
func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
r := c.newRequest("PUT", endpoint)
r.setWriteOptions(q)
r.obj = in
rtt, resp, err := requireOK(c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if out != nil {
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
return nil, err
}
return wm, nil
}
// parseQueryMeta is used to help parse query meta-data
//
// TODO(rb): bug? the error from this function is never handled
func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
header := resp.Header
// Parse the X-Consul-Index (if it's set - hash based blocking queries don't
// set this)
if indexStr := header.Get("X-Consul-Index"); indexStr != "" {
index, err := strconv.ParseUint(indexStr, 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
}
q.LastIndex = index
}
q.LastContentHash = header.Get("X-Consul-ContentHash")
// Parse the X-Consul-LastContact
last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
}
q.LastContact = time.Duration(last) * time.Millisecond
// Parse the X-Consul-KnownLeader
switch header.Get("X-Consul-KnownLeader") {
case "true":
q.KnownLeader = true
default:
q.KnownLeader = false
}
// Parse X-Consul-Translate-Addresses
switch header.Get("X-Consul-Translate-Addresses") {
case "true":
q.AddressTranslationEnabled = true
default:
q.AddressTranslationEnabled = false
}
// Parse Cache info
if cacheStr := header.Get("X-Cache"); cacheStr != "" {
q.CacheHit = strings.EqualFold(cacheStr, "HIT")
}
if ageStr := header.Get("Age"); ageStr != "" {
age, err := strconv.ParseUint(ageStr, 10, 64)
if err != nil {
return fmt.Errorf("Failed to parse Age Header: %v", err)
}
q.CacheAge = time.Duration(age) * time.Second
}
return nil
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// requireOK is used to wrap doRequest and check for a 200
func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
resp.Body.Close()
}
return d, nil, e
}
if resp.StatusCode != 200 {
return d, nil, generateUnexpectedResponseCodeError(resp)
}
return d, resp, nil
}
func (req *request) filterQuery(filter string) {
if filter == "" {
return
}
req.params.Set("filter", filter)
}
// generateUnexpectedResponseCodeError consumes the rest of the body, closes
// the body stream and generates an error indicating the status code was
// unexpected.
func generateUnexpectedResponseCodeError(resp *http.Response) error {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
resp.Body.Close()
return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
}
func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
if e != nil {
if resp != nil {
resp.Body.Close()
}
return false, d, nil, e
}
switch resp.StatusCode {
case 200:
return true, d, resp, nil
case 404:
return false, d, resp, nil
default:
return false, d, nil, generateUnexpectedResponseCodeError(resp)
}
}
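
The QueryOptions/QueryMeta pair defined above is what drives blocking queries: pass the previous QueryMeta.LastIndex back as WaitIndex and the call long-polls until something changes. A hedged sketch of the usual loop, using the package's Health endpoint (not shown in this diff) with an illustrative service name:

package main

import (
    "log"
    "time"

    "github.com/hashicorp/consul/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    var waitIndex uint64
    for {
        // Blocks until the health result changes or WaitTime elapses.
        entries, meta, err := client.Health().Service("bottin", "", true, &api.QueryOptions{
            WaitIndex: waitIndex,
            WaitTime:  5 * time.Minute,
        })
        if err != nil {
            log.Println(err)
            time.Sleep(time.Second) // crude backoff on error
            continue
        }
        waitIndex = meta.LastIndex // resume from the last seen index
        log.Printf("%d healthy instances", len(entries))
    }
}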

View file

@ -1,262 +0,0 @@
package api
import (
"net"
"strconv"
)
type Weights struct {
Passing int
Warning int
}
type Node struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
Meta map[string]string
CreateIndex uint64
ModifyIndex uint64
}
type ServiceAddress struct {
Address string
Port int
}
type CatalogService struct {
ID string
Node string
Address string
Datacenter string
TaggedAddresses map[string]string
NodeMeta map[string]string
ServiceID string
ServiceName string
ServiceAddress string
ServiceTaggedAddresses map[string]ServiceAddress
ServiceTags []string
ServiceMeta map[string]string
ServicePort int
ServiceWeights Weights
ServiceEnableTagOverride bool
ServiceProxy *AgentServiceConnectProxyConfig
CreateIndex uint64
Checks HealthChecks
ModifyIndex uint64
}
type CatalogNode struct {
Node *Node
Services map[string]*AgentService
}
type CatalogRegistration struct {
ID string
Node string
Address string
TaggedAddresses map[string]string
NodeMeta map[string]string
Datacenter string
Service *AgentService
Check *AgentCheck
Checks HealthChecks
SkipNodeUpdate bool
}
type CatalogDeregistration struct {
Node string
Address string // Obsolete.
Datacenter string
ServiceID string
CheckID string
}
// Catalog can be used to query the Catalog endpoints
type Catalog struct {
c *Client
}
// Catalog returns a handle to the catalog endpoints
func (c *Client) Catalog() *Catalog {
return &Catalog{c}
}
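// Register is used to register a new catalog entry (a node with its
// service and/or checks).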
func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/register")
r.setWriteOptions(q)
r.obj = reg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
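// Deregister is used to remove a node, service, or check from the catalog.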
func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/catalog/deregister")
r.setWriteOptions(q)
r.obj = dereg
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Datacenters is used to query for all the known datacenters
func (c *Catalog) Datacenters() ([]string, error) {
r := c.c.newRequest("GET", "/v1/catalog/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []string
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to query all the known nodes
func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Node
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Services is used to query for all known services
func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/services")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]string
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query catalog entries for a given service
func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, false)
}
// ServiceMultipleTags is like Service but supports multiple tags for filtering
func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, false)
}
// Connect is used to query catalog entries for a given Connect-enabled service
func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return c.service(service, tags, q, true)
}
// ConnectMultipleTags is like Connect but supports multiple tags for filtering
func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
return c.service(service, tags, q, true)
}
func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
path := "/v1/catalog/service/" + service
if connect {
path = "/v1/catalog/connect/" + service
}
r := c.c.newRequest("GET", path)
r.setQueryOptions(q)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CatalogService
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Node is used to query for service information about a single node
func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out *CatalogNode
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
func ParseServiceAddr(addrPort string) (ServiceAddress, error) {
port := 0
host, portStr, err := net.SplitHostPort(addrPort)
if err == nil {
port, err = strconv.Atoi(portStr)
}
return ServiceAddress{Address: host, Port: port}, err
}
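// Editor's sketch, not part of the original file: ParseServiceAddr in use.
//
//	addr, err := ParseServiceAddr("10.0.0.5:8080")
//	// addr == ServiceAddress{Address: "10.0.0.5", Port: 8080}, err == nil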

View file

@ -1,350 +0,0 @@
package api
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/mitchellh/mapstructure"
)
const (
ServiceDefaults string = "service-defaults"
ProxyDefaults string = "proxy-defaults"
ServiceRouter string = "service-router"
ServiceSplitter string = "service-splitter"
ServiceResolver string = "service-resolver"
ProxyConfigGlobal string = "global"
)
type ConfigEntry interface {
GetKind() string
GetName() string
GetCreateIndex() uint64
GetModifyIndex() uint64
}
type MeshGatewayMode string
const (
// MeshGatewayModeDefault represents no specific mode and should
// be used to indicate that a different layer of the configuration
// chain should take precedence
MeshGatewayModeDefault MeshGatewayMode = ""
// MeshGatewayModeNone represents that the Upstream Connect connections
// should be direct and not flow through a mesh gateway.
MeshGatewayModeNone MeshGatewayMode = "none"
// MeshGatewayModeLocal represents that the Upstream Connect connections
// should be made to a mesh gateway in the local datacenter.
MeshGatewayModeLocal MeshGatewayMode = "local"
// MeshGatewayModeRemote represents that the Upstream Connect connections
// should be made to a mesh gateway in a remote datacenter.
MeshGatewayModeRemote MeshGatewayMode = "remote"
)
// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
// services
type MeshGatewayConfig struct {
// Mode is the mode that should be used for the upstream connection.
Mode MeshGatewayMode `json:",omitempty"`
}
// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect.
// Users can expose individual paths and/or all HTTP/GRPC paths for checks.
type ExposeConfig struct {
// Checks defines whether paths associated with Consul checks will be exposed.
// This flag triggers exposing all HTTP and GRPC check paths registered for the service.
Checks bool `json:",omitempty"`
// Paths is the list of paths exposed through the proxy.
Paths []ExposePath `json:",omitempty"`
}
type ExposePath struct {
// ListenerPort defines the port of the proxy's listener for exposed paths.
ListenerPort int `json:",omitempty"`
// Path is the path to expose through the proxy, e.g. "/metrics".
Path string `json:",omitempty"`
// LocalPathPort is the port that the service is listening on for the given path.
LocalPathPort int `json:",omitempty"`
// Protocol describes the upstream's service protocol.
// Valid values are "http" and "http2", defaults to "http"
Protocol string `json:",omitempty"`
// ParsedFromCheck is set if this path was parsed from a registered check
ParsedFromCheck bool
}
type ServiceConfigEntry struct {
Kind string
Name string
Protocol string `json:",omitempty"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
Expose ExposeConfig `json:",omitempty"`
ExternalSNI string `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
}
func (s *ServiceConfigEntry) GetKind() string {
return s.Kind
}
func (s *ServiceConfigEntry) GetName() string {
return s.Name
}
func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
return s.CreateIndex
}
func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
return s.ModifyIndex
}
type ProxyConfigEntry struct {
Kind string
Name string
Config map[string]interface{} `json:",omitempty"`
MeshGateway MeshGatewayConfig `json:",omitempty"`
Expose ExposeConfig `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
}
func (p *ProxyConfigEntry) GetKind() string {
return p.Kind
}
func (p *ProxyConfigEntry) GetName() string {
return p.Name
}
func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
return p.CreateIndex
}
func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
return p.ModifyIndex
}
type rawEntryListResponse struct {
kind string
Entries []map[string]interface{}
}
func makeConfigEntry(kind, name string) (ConfigEntry, error) {
switch kind {
case ServiceDefaults:
return &ServiceConfigEntry{Kind: kind, Name: name}, nil
case ProxyDefaults:
return &ProxyConfigEntry{Kind: kind, Name: name}, nil
case ServiceRouter:
return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil
case ServiceSplitter:
return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil
case ServiceResolver:
return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil
default:
return nil, fmt.Errorf("invalid config entry kind: %s", kind)
}
}
func MakeConfigEntry(kind, name string) (ConfigEntry, error) {
return makeConfigEntry(kind, name)
}
// DecodeConfigEntry will decode the result of using json.Unmarshal of a config
// entry into a map[string]interface{}.
//
// Important caveats:
//
// - This will NOT work if the map[string]interface{} was produced using HCL
// decoding as that requires more extensive parsing to work around the issues
// with map[string][]interface{} that arise.
//
// - This will only decode fields using their camel case json field
// representations.
func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
var entry ConfigEntry
kindVal, ok := raw["Kind"]
if !ok {
kindVal, ok = raw["kind"]
}
if !ok {
return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
}
if kindStr, ok := kindVal.(string); ok {
newEntry, err := makeConfigEntry(kindStr, "")
if err != nil {
return nil, err
}
entry = newEntry
} else {
return nil, fmt.Errorf("Kind value in payload is not a string")
}
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
Result: &entry,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
return entry, decoder.Decode(raw)
}
func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
var raw map[string]interface{}
if err := json.Unmarshal(data, &raw); err != nil {
return nil, err
}
return DecodeConfigEntry(raw)
}
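// Editor's sketch, not part of the original file: decoding a service-defaults
// entry from its camel-case JSON form. The payload is illustrative.
func exampleDecodeConfigEntry() (ConfigEntry, error) {
	payload := []byte(`{"Kind": "service-defaults", "Name": "web", "Protocol": "http"}`)
	entry, err := DecodeConfigEntryFromJSON(payload)
	if err != nil {
		return nil, err
	}
	// The concrete type is chosen from the Kind field.
	if svc, ok := entry.(*ServiceConfigEntry); ok {
		_ = svc.Protocol // "http"
	}
	return entry, nil
}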
func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) {
var entries []ConfigEntry
for _, rawEntry := range raw {
entry, err := DecodeConfigEntry(rawEntry)
if err != nil {
return nil, err
}
entries = append(entries, entry)
}
return entries, nil
}
// ConfigEntries can be used to query the Config endpoints
type ConfigEntries struct {
c *Client
}
// Config returns a handle to the Config endpoints
func (c *Client) ConfigEntries() *ConfigEntries {
return &ConfigEntries{c}
}
func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
if kind == "" || name == "" {
return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
entry, err := makeConfigEntry(kind, name)
if err != nil {
return nil, nil, err
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if err := decodeBody(resp, entry); err != nil {
return nil, nil, err
}
return entry, qm, nil
}
func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
if kind == "" {
return nil, nil, fmt.Errorf("The kind parameter must not be empty")
}
r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
r.setQueryOptions(q)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var raw []map[string]interface{}
if err := decodeBody(resp, &raw); err != nil {
return nil, nil, err
}
entries, err := decodeConfigEntrySlice(raw)
if err != nil {
return nil, nil, err
}
return entries, qm, nil
}
func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, nil, w)
}
func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
}
func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
r := conf.c.newRequest("PUT", "/v1/config")
r.setWriteOptions(w)
for param, value := range params {
r.params.Set(param, value)
}
r.obj = entry
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
wm := &WriteMeta{RequestTime: rtt}
return res, wm, nil
}
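// Editor's sketch, not part of the original file: writing a global
// proxy-defaults entry, then updating it with a check-and-set on the
// ModifyIndex read back from Get. The config key is illustrative.
func exampleConfigEntryCAS() error {
	client, err := NewClient(DefaultConfig())
	if err != nil {
		return err
	}
	entries := client.ConfigEntries()
	global := &ProxyConfigEntry{
		Kind:   ProxyDefaults,
		Name:   ProxyConfigGlobal,
		Config: map[string]interface{}{"local_connect_timeout_ms": 1000},
	}
	if _, _, err := entries.Set(global, nil); err != nil {
		return err
	}
	current, _, err := entries.Get(ProxyDefaults, ProxyConfigGlobal, nil)
	if err != nil {
		return err
	}
	// ok is false if the entry was modified since it was read.
	ok, _, err := entries.CAS(global, current.GetModifyIndex(), nil)
	_ = ok
	return err
}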
func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
if kind == "" || name == "" {
return nil, fmt.Errorf("Both kind and name parameters must not be empty")
}
r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
r.setWriteOptions(w)
rtt, resp, err := requireOK(conf.c.doRequest(r))
if err != nil {
return nil, err
}
resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
return wm, nil
}

View file

@ -1,200 +0,0 @@
package api
import (
"encoding/json"
"time"
)
type ServiceRouterConfigEntry struct {
Kind string
Name string
Routes []ServiceRoute `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
}
func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
type ServiceRoute struct {
Match *ServiceRouteMatch `json:",omitempty"`
Destination *ServiceRouteDestination `json:",omitempty"`
}
type ServiceRouteMatch struct {
HTTP *ServiceRouteHTTPMatch `json:",omitempty"`
}
type ServiceRouteHTTPMatch struct {
PathExact string `json:",omitempty"`
PathPrefix string `json:",omitempty"`
PathRegex string `json:",omitempty"`
Header []ServiceRouteHTTPMatchHeader `json:",omitempty"`
QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"`
Methods []string `json:",omitempty"`
}
type ServiceRouteHTTPMatchHeader struct {
Name string
Present bool `json:",omitempty"`
Exact string `json:",omitempty"`
Prefix string `json:",omitempty"`
Suffix string `json:",omitempty"`
Regex string `json:",omitempty"`
Invert bool `json:",omitempty"`
}
type ServiceRouteHTTPMatchQueryParam struct {
Name string
Present bool `json:",omitempty"`
Exact string `json:",omitempty"`
Regex string `json:",omitempty"`
}
type ServiceRouteDestination struct {
Service string `json:",omitempty"`
ServiceSubset string `json:",omitempty"`
Namespace string `json:",omitempty"`
PrefixRewrite string `json:",omitempty"`
RequestTimeout time.Duration `json:",omitempty"`
NumRetries uint32 `json:",omitempty"`
RetryOnConnectFailure bool `json:",omitempty"`
RetryOnStatusCodes []uint32 `json:",omitempty"`
}
func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) {
type Alias ServiceRouteDestination
exported := &struct {
RequestTimeout string `json:",omitempty"`
*Alias
}{
RequestTimeout: e.RequestTimeout.String(),
Alias: (*Alias)(e),
}
if e.RequestTimeout == 0 {
exported.RequestTimeout = ""
}
return json.Marshal(exported)
}
func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
type Alias ServiceRouteDestination
aux := &struct {
RequestTimeout string
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
var err error
if aux.RequestTimeout != "" {
if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil {
return err
}
}
return nil
}
type ServiceSplitterConfigEntry struct {
Kind string
Name string
Splits []ServiceSplit `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
}
func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
type ServiceSplit struct {
Weight float32
Service string `json:",omitempty"`
ServiceSubset string `json:",omitempty"`
Namespace string `json:",omitempty"`
}
type ServiceResolverConfigEntry struct {
Kind string
Name string
DefaultSubset string `json:",omitempty"`
Subsets map[string]ServiceResolverSubset `json:",omitempty"`
Redirect *ServiceResolverRedirect `json:",omitempty"`
Failover map[string]ServiceResolverFailover `json:",omitempty"`
ConnectTimeout time.Duration `json:",omitempty"`
CreateIndex uint64
ModifyIndex uint64
}
func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) {
type Alias ServiceResolverConfigEntry
exported := &struct {
ConnectTimeout string `json:",omitempty"`
*Alias
}{
ConnectTimeout: e.ConnectTimeout.String(),
Alias: (*Alias)(e),
}
if e.ConnectTimeout == 0 {
exported.ConnectTimeout = ""
}
return json.Marshal(exported)
}
func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
type Alias ServiceResolverConfigEntry
aux := &struct {
ConnectTimeout string
*Alias
}{
Alias: (*Alias)(e),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
var err error
if aux.ConnectTimeout != "" {
if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
return err
}
}
return nil
}
func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
type ServiceResolverSubset struct {
Filter string `json:",omitempty"`
OnlyPassing bool `json:",omitempty"`
}
type ServiceResolverRedirect struct {
Service string `json:",omitempty"`
ServiceSubset string `json:",omitempty"`
Namespace string `json:",omitempty"`
Datacenter string `json:",omitempty"`
}
type ServiceResolverFailover struct {
Service string `json:",omitempty"`
ServiceSubset string `json:",omitempty"`
Namespace string `json:",omitempty"`
Datacenters []string `json:",omitempty"`
}
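// Editor's sketch, not part of the original file: a resolver entry defining a
// "v2" subset and a catch-all failover to another datacenter. The service
// name, filter expression, and datacenter are illustrative.
func exampleServiceResolver() ConfigEntry {
	return &ServiceResolverConfigEntry{
		Kind:          ServiceResolver,
		Name:          "web",
		DefaultSubset: "v2",
		Subsets: map[string]ServiceResolverSubset{
			"v2": {Filter: "Service.Meta.version == v2", OnlyPassing: true},
		},
		Failover: map[string]ServiceResolverFailover{
			"*": {Datacenters: []string{"dc2"}},
		},
		ConnectTimeout: 15 * time.Second,
	}
}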

View file

@ -1,12 +0,0 @@
package api
// Connect can be used to work with endpoints related to Connect, the
// feature for securely connecting services within Consul.
type Connect struct {
c *Client
}
// Connect returns a handle to the connect-related endpoints
func (c *Client) Connect() *Connect {
return &Connect{c}
}

View file

@ -1,174 +0,0 @@
package api
import (
"fmt"
"time"
"github.com/mitchellh/mapstructure"
)
// CAConfig is the structure for the Connect CA configuration.
type CAConfig struct {
// Provider is the CA provider implementation to use.
Provider string
// Configuration is arbitrary configuration for the provider. This
// should only contain primitive values and containers (such as lists
// and maps).
Config map[string]interface{}
CreateIndex uint64
ModifyIndex uint64
}
// CommonCAProviderConfig is the common options available to all CA providers.
type CommonCAProviderConfig struct {
LeafCertTTL time.Duration
SkipValidate bool
CSRMaxPerSecond float32
CSRMaxConcurrent int
}
// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
type ConsulCAProviderConfig struct {
CommonCAProviderConfig `mapstructure:",squash"`
PrivateKey string
RootCert string
RotationPeriod time.Duration
}
// ParseConsulCAConfig takes a raw config map and returns a parsed
// ConsulCAProviderConfig.
func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
var config ConsulCAProviderConfig
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
Result: &config,
WeaklyTypedInput: true,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return nil, err
}
if err := decoder.Decode(raw); err != nil {
return nil, fmt.Errorf("error decoding config: %s", err)
}
return &config, nil
}
// CARootList is the structure for the results of listing roots.
type CARootList struct {
ActiveRootID string
TrustDomain string
Roots []*CARoot
}
// CARoot represents a root CA certificate that is trusted.
type CARoot struct {
// ID is a globally unique ID (UUID) representing this CA root.
ID string
// Name is a human-friendly name for this CA root. This value is
// opaque to Consul and is not used for anything internally.
Name string
// RootCertPEM is the PEM-encoded public certificate.
RootCertPEM string `json:"RootCert"`
// Active is true if this is the current active CA. This must only
// be true for exactly one CA. For any method that modifies roots in the
// state store, tests should be written to verify that multiple roots
// cannot be active.
Active bool
CreateIndex uint64
ModifyIndex uint64
}
// LeafCert is a certificate that has been issued by a Connect CA.
type LeafCert struct {
// SerialNumber is the unique serial number for this certificate.
// This is encoded in standard hex separated by :.
SerialNumber string
// CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private
// key for that cert, respectively. This should not be stored in the
// state store, but is present in the sign API response.
CertPEM string `json:",omitempty"`
PrivateKeyPEM string `json:",omitempty"`
// Service is the name of the service for which the cert was issued.
// ServiceURI is the cert URI value.
Service string
ServiceURI string
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
ValidAfter time.Time
ValidBefore time.Time
CreateIndex uint64
ModifyIndex uint64
}
// CARoots queries the list of available roots.
func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/ca/roots")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out CARootList
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// CAGetConfig returns the current CA configuration.
func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/ca/configuration")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out CAConfig
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// CASetConfig sets the current CA configuration.
func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) {
r := h.c.newRequest("PUT", "/v1/connect/ca/configuration")
r.setWriteOptions(q)
r.obj = conf
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
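// Editor's sketch, not part of the original file: listing trusted roots and
// reading the active CA configuration through the handles above.
func exampleConnectCA(client *Client) error {
	conn := client.Connect()
	roots, _, err := conn.CARoots(nil)
	if err != nil {
		return err
	}
	for _, root := range roots.Roots {
		_ = root.Active // exactly one root is the active CA
	}
	conf, _, err := conn.CAGetConfig(nil)
	if err != nil {
		return err
	}
	_ = conf.Provider // e.g. "consul" for the built-in provider
	return nil
}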

View file

@ -1,309 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"time"
)
// Intention defines an intention for the Connect Service Graph. This defines
// the allowed or denied behavior of a connection between two services using
// Connect.
type Intention struct {
// ID is the UUID-based ID for the intention, always generated by Consul.
ID string
// Description is a human-friendly description of this intention.
// It is opaque to Consul and is only stored and transferred in API
// requests.
Description string
// SourceNS, SourceName are the namespace and name, respectively, of
// the source service. Either of these may be the wildcard "*", but only
// the full value can be a wildcard. Partial wildcards are not allowed.
// The source may also be a non-Consul service, as specified by SourceType.
//
// DestinationNS, DestinationName is the same, but for the destination
// service. The same rules apply. The destination is always a Consul
// service.
SourceNS, SourceName string
DestinationNS, DestinationName string
// SourceType is the type of the value for the source.
SourceType IntentionSourceType
// Action is whether this is a whitelist or blacklist intention.
Action IntentionAction
// DefaultAddr, DefaultPort of the local listening proxy (if any) to
// make this connection.
DefaultAddr string
DefaultPort int
// Meta is arbitrary metadata associated with the intention. This is
// opaque to Consul but is served in API responses.
Meta map[string]string
// Precedence is the order that the intention will be applied, with
// larger numbers being applied first. This is a read-only field, on
// any intention update it is updated.
Precedence int
// CreatedAt and UpdatedAt keep track of when this record was created
// or modified.
CreatedAt, UpdatedAt time.Time
// Hash of the contents of the intention
//
// This is needed mainly for replication purposes. When replicating from
// one DC to another keeping the content Hash will allow us to detect
// content changes more efficiently than checking every single field
Hash []byte
CreateIndex uint64
ModifyIndex uint64
}
// String returns human-friendly output describing this intention.
func (i *Intention) String() string {
return fmt.Sprintf("%s => %s (%s)",
i.SourceString(),
i.DestinationString(),
i.Action)
}
// SourceString returns the namespace/name format for the source, or
// just "name" if the namespace is the default namespace.
func (i *Intention) SourceString() string {
return i.partString(i.SourceNS, i.SourceName)
}
// DestinationString returns the namespace/name format for the destination, or
// just "name" if the namespace is the default namespace.
func (i *Intention) DestinationString() string {
return i.partString(i.DestinationNS, i.DestinationName)
}
func (i *Intention) partString(ns, n string) string {
// For now we omit the default namespace from the output. In the future
// we might want to look at this and show this in a multi-namespace world.
if ns != "" && ns != IntentionDefaultNamespace {
n = ns + "/" + n
}
return n
}
// IntentionDefaultNamespace is the default namespace value.
const IntentionDefaultNamespace = "default"
// IntentionAction is the action that the intention represents. This
// can be "allow" or "deny" to whitelist or blacklist intentions.
type IntentionAction string
const (
IntentionActionAllow IntentionAction = "allow"
IntentionActionDeny IntentionAction = "deny"
)
// IntentionSourceType is the type of the source within an intention.
type IntentionSourceType string
const (
// IntentionSourceConsul is a service within the Consul catalog.
IntentionSourceConsul IntentionSourceType = "consul"
)
// IntentionMatch are the arguments for the intention match API.
type IntentionMatch struct {
By IntentionMatchType
Names []string
}
// IntentionMatchType is the target for a match request. For example,
// matching by source will look for all intentions that match the given
// source value.
type IntentionMatchType string
const (
IntentionMatchSource IntentionMatchType = "source"
IntentionMatchDestination IntentionMatchType = "destination"
)
// IntentionCheck are the arguments for the intention check API. For
// more documentation see the IntentionCheck function.
type IntentionCheck struct {
// Source and Destination are the source and destination values to
// check. The destination is always a Consul service, but the source
// may be other values as defined by the SourceType.
Source, Destination string
// SourceType is the type of the value for the source.
SourceType IntentionSourceType
}
// Intentions returns the list of intentions.
func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions")
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// IntentionGet retrieves a single intention.
func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/"+id)
r.setQueryOptions(q)
rtt, resp, err := h.c.doRequest(r)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
return nil, qm, nil
} else if resp.StatusCode != 200 {
var buf bytes.Buffer
io.Copy(&buf, resp.Body)
return nil, nil, fmt.Errorf(
"Unexpected response %d: %s", resp.StatusCode, buf.String())
}
var out Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
// IntentionDelete deletes a single intention.
func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) {
r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id)
r.setWriteOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
return qm, nil
}
// IntentionMatch returns the list of intentions that match a given source
// or destination. The returned intentions are ordered by precedence where
// result[0] is the highest precedence (if that matches, then that rule overrides
// all other rules).
//
// Matching can be done for multiple names at the same time. The resulting
// map is keyed by the given names. Casing is preserved.
func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/match")
r.setQueryOptions(q)
r.params.Set("by", string(args.By))
for _, name := range args.Names {
r.params.Add("name", name)
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out map[string][]*Intention
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// IntentionCheck returns whether a given source/destination would be allowed
// or not given the current set of intentions and the configuration of Consul.
func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/connect/intentions/check")
r.setQueryOptions(q)
r.params.Set("source", args.Source)
r.params.Set("destination", args.Destination)
if args.SourceType != "" {
r.params.Set("source-type", string(args.SourceType))
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out struct{ Allowed bool }
if err := decodeBody(resp, &out); err != nil {
return false, nil, err
}
return out.Allowed, qm, nil
}
// IntentionCreate will create a new intention. The ID in the given
// structure must be empty and a generated ID will be returned on
// success.
func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
r := c.c.newRequest("POST", "/v1/connect/intentions")
r.setWriteOptions(q)
r.obj = ixn
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// IntentionUpdate will update an existing intention. The ID in the given
// structure must be non-empty.
func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
r.setWriteOptions(q)
r.obj = ixn
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
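// Editor's sketch, not part of the original file: the create/get/delete
// lifecycle of an intention. Service names are illustrative; the ID is
// generated by Consul on create.
func exampleIntentionLifecycle(client *Client) error {
	conn := client.Connect()
	id, _, err := conn.IntentionCreate(&Intention{
		SourceName:      "web",
		DestinationName: "db",
		Action:          IntentionActionAllow,
	}, nil)
	if err != nil {
		return err
	}
	ixn, _, err := conn.IntentionGet(id, nil)
	if err != nil {
		return err
	}
	_ = ixn // nil with a non-nil QueryMeta indicates a 404
	_, err = conn.IntentionDelete(id, nil)
	return err
}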

View file

@ -1,106 +0,0 @@
package api
import (
"github.com/hashicorp/serf/coordinate"
)
// CoordinateEntry represents a node and its associated network coordinate.
type CoordinateEntry struct {
Node string
Segment string
Coord *coordinate.Coordinate
}
// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
// and area. Network coordinates are only compatible within the same area.
type CoordinateDatacenterMap struct {
Datacenter string
AreaID string
Coordinates []CoordinateEntry
}
// Coordinate can be used to query the coordinate endpoints
type Coordinate struct {
c *Client
}
// Coordinate returns a handle to the coordinate endpoints
func (c *Client) Coordinate() *Coordinate {
return &Coordinate{c}
}
// Datacenters is used to return the coordinates of all the servers in the WAN
// pool.
func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
_, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*CoordinateDatacenterMap
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// Nodes is used to return the coordinates of all the nodes in the LAN pool.
func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/coordinate/nodes")
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CoordinateEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Update inserts or updates the LAN coordinate of a node.
func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("PUT", "/v1/coordinate/update")
r.setWriteOptions(q)
r.obj = coord
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Node is used to return the coordinates of a single node in the LAN pool.
func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*CoordinateEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
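// Editor's sketch, not part of the original file: reading the LAN coordinates
// of a single node. The node name is illustrative.
//
//	entries, _, err := client.Coordinate().Node("node-1", nil)
//	if err == nil && len(entries) > 0 {
//		_ = entries[0].Coord // serf network coordinate for the node
//	}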

View file

@ -1,106 +0,0 @@
package api
import (
"fmt"
"io/ioutil"
"strconv"
)
// Debug can be used to query the /debug/pprof endpoints to gather
// profiling information about the target agent.
//
// The agent must have enable_debug set to true for profiling to be enabled
// and for these endpoints to function.
type Debug struct {
c *Client
}
// Debug returns a handle that exposes the internal debug endpoints.
func (c *Client) Debug() *Debug {
return &Debug{c}
}
// Heap returns a pprof heap dump
func (d *Debug) Heap() ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/heap")
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Profile returns a pprof CPU profile for the specified number of seconds
func (d *Debug) Profile(seconds int) ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/profile")
// Capture a profile for the specified number of seconds
r.params.Set("seconds", strconv.Itoa(seconds))
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Trace returns an execution trace
func (d *Debug) Trace(seconds int) ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/trace")
// Capture a trace for the specified number of seconds
r.params.Set("seconds", strconv.Itoa(seconds))
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
// Goroutine returns a pprof goroutine profile
func (d *Debug) Goroutine() ([]byte, error) {
r := d.c.newRequest("GET", "/debug/pprof/goroutine")
_, resp, err := d.c.doRequest(r)
if err != nil {
return nil, fmt.Errorf("error making request: %s", err)
}
defer resp.Body.Close()
// We return a raw response because we're just passing through a response
// from the pprof handlers
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error decoding body: %s", err)
}
return body, nil
}
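// Editor's sketch, not part of the original file: capturing a ten-second CPU
// profile and writing it to disk, assuming the agent runs with enable_debug
// set to true. The output path is illustrative.
func exampleDebugProfile(client *Client) error {
	profile, err := client.Debug().Profile(10) // seconds
	if err != nil {
		return err
	}
	return ioutil.WriteFile("consul.pprof", profile, 0644)
}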

View file

@ -1,230 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"time"
)
// DiscoveryChain can be used to query the discovery-chain endpoints
type DiscoveryChain struct {
c *Client
}
// DiscoveryChain returns a handle to the discovery-chain endpoints
func (c *Client) DiscoveryChain() *DiscoveryChain {
return &DiscoveryChain{c}
}
func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) {
if name == "" {
return nil, nil, fmt.Errorf("Name parameter must not be empty")
}
method := "GET"
if opts != nil && opts.requiresPOST() {
method = "POST"
}
r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name))
r.setQueryOptions(q)
if opts != nil {
if opts.EvaluateInDatacenter != "" {
r.params.Set("compile-dc", opts.EvaluateInDatacenter)
}
// TODO(namespaces): handle possible EvaluateInNamespace here
}
if method == "POST" {
r.obj = opts
}
rtt, resp, err := requireOK(d.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out DiscoveryChainResponse
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return &out, qm, nil
}
type DiscoveryChainOptions struct {
EvaluateInDatacenter string `json:"-"`
// OverrideMeshGateway allows for the mesh gateway setting to be overridden
// for any resolver in the compiled chain.
OverrideMeshGateway MeshGatewayConfig `json:",omitempty"`
// OverrideProtocol allows for the final protocol for the chain to be
// altered.
//
// - If the chain ordinarily would be TCP and an L7 protocol is passed here
// the chain will not include Routers or Splitters.
//
// - If the chain ordinarily would be L7 and TCP is passed here the chain
// will not include Routers or Splitters.
OverrideProtocol string `json:",omitempty"`
// OverrideConnectTimeout allows for the ConnectTimeout setting to be
// overridden for any resolver in the compiled chain.
OverrideConnectTimeout time.Duration `json:",omitempty"`
}
func (o *DiscoveryChainOptions) requiresPOST() bool {
if o == nil {
return false
}
return o.OverrideMeshGateway.Mode != "" ||
o.OverrideProtocol != "" ||
o.OverrideConnectTimeout != 0
}
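// Editor's sketch, not part of the original file: compiling the chain for a
// service while overriding the protocol, which makes requiresPOST return true
// and switches the request to POST. The service name is illustrative.
func exampleDiscoveryChain(client *Client) (*CompiledDiscoveryChain, error) {
	opts := &DiscoveryChainOptions{OverrideProtocol: "http"}
	resp, _, err := client.DiscoveryChain().Get("web", opts, nil)
	if err != nil {
		return nil, err
	}
	return resp.Chain, nil
}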
type DiscoveryChainResponse struct {
Chain *CompiledDiscoveryChain
}
type CompiledDiscoveryChain struct {
ServiceName string
Namespace string
Datacenter string
// CustomizationHash is a unique hash of any data that affects the
// compilation of the discovery chain other than config entries or the
// name/namespace/datacenter evaluation criteria.
//
// If set, this value should be used to prefix/suffix any generated load
// balancer data plane objects to avoid sharing customized and
// non-customized versions.
CustomizationHash string
// Protocol is the overall protocol shared by everything in the chain.
Protocol string
// StartNode is the first key into the Nodes map that should be followed
// when walking the discovery chain.
StartNode string
// Nodes contains all nodes available for traversal in the chain keyed by a
// unique name. You can walk this by starting with StartNode.
//
// NOTE: The names should be treated as opaque values and are only
// guaranteed to be consistent within a single compilation.
Nodes map[string]*DiscoveryGraphNode
// Targets is a list of all targets used in this chain.
//
// NOTE: The names should be treated as opaque values and are only
// guaranteed to be consistent within a single compilation.
Targets map[string]*DiscoveryTarget
}
const (
DiscoveryGraphNodeTypeRouter = "router"
DiscoveryGraphNodeTypeSplitter = "splitter"
DiscoveryGraphNodeTypeResolver = "resolver"
)
// DiscoveryGraphNode is a single node in the compiled discovery chain.
type DiscoveryGraphNode struct {
Type string
Name string // this is NOT necessarily a service
// fields for Type==router
Routes []*DiscoveryRoute
// fields for Type==splitter
Splits []*DiscoverySplit
// fields for Type==resolver
Resolver *DiscoveryResolver
}
// compiled form of ServiceRoute
type DiscoveryRoute struct {
Definition *ServiceRoute
NextNode string
}
// compiled form of ServiceSplit
type DiscoverySplit struct {
Weight float32
NextNode string
}
// compiled form of ServiceResolverConfigEntry
type DiscoveryResolver struct {
Default bool
ConnectTimeout time.Duration
Target string
Failover *DiscoveryFailover
}
func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
type Alias DiscoveryResolver
exported := &struct {
ConnectTimeout string `json:",omitempty"`
*Alias
}{
ConnectTimeout: r.ConnectTimeout.String(),
Alias: (*Alias)(r),
}
if r.ConnectTimeout == 0 {
exported.ConnectTimeout = ""
}
return json.Marshal(exported)
}
func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
type Alias DiscoveryResolver
aux := &struct {
ConnectTimeout string
*Alias
}{
Alias: (*Alias)(r),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
var err error
if aux.ConnectTimeout != "" {
if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
return err
}
}
return nil
}
// compiled form of ServiceResolverFailover
type DiscoveryFailover struct {
Targets []string
}
// DiscoveryTarget represents all of the inputs necessary to use a resolver
// config entry to execute a catalog query to generate a list of service
// instances during discovery.
type DiscoveryTarget struct {
ID string
Service string
ServiceSubset string
Namespace string
Datacenter string
MeshGateway MeshGatewayConfig
Subset ServiceResolverSubset
External bool
SNI string
Name string
}

View file

@ -1,104 +0,0 @@
package api
import (
"bytes"
"strconv"
)
// Event can be used to query the Event endpoints
type Event struct {
c *Client
}
// UserEvent represents an event that was fired by the user
type UserEvent struct {
ID string
Name string
Payload []byte
NodeFilter string
ServiceFilter string
TagFilter string
Version int
LTime uint64
}
// Event returns a handle to the event endpoints
func (c *Client) Event() *Event {
return &Event{c}
}
// Fire is used to fire a new user event. Only the Name, Payload and Filters
// are respected. This returns the ID or an associated error. Cross DC requests
// are supported.
func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
r.setWriteOptions(q)
if params.NodeFilter != "" {
r.params.Set("node", params.NodeFilter)
}
if params.ServiceFilter != "" {
r.params.Set("service", params.ServiceFilter)
}
if params.TagFilter != "" {
r.params.Set("tag", params.TagFilter)
}
if params.Payload != nil {
r.body = bytes.NewReader(params.Payload)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
var out UserEvent
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// List is used to get the most recent events an agent has received.
// This list can be optionally filtered by the name. This endpoint supports
// quasi-blocking queries. The index is not monotonic, nor does it provide
// LastContact or KnownLeader.
func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
r := e.c.newRequest("GET", "/v1/event/list")
r.setQueryOptions(q)
if name != "" {
r.params.Set("name", name)
}
rtt, resp, err := requireOK(e.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var entries []*UserEvent
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// IDToIndex is a bit of a hack. This simulates the index generation to
// convert an event ID into a WaitIndex.
func (e *Event) IDToIndex(uuid string) uint64 {
lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
upper := uuid[19:23] + uuid[24:36]
lowVal, err := strconv.ParseUint(lower, 16, 64)
if err != nil {
panic("Failed to convert " + lower)
}
highVal, err := strconv.ParseUint(upper, 16, 64)
if err != nil {
panic("Failed to convert " + upper)
}
return lowVal ^ highVal
}
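// Editor's sketch, not part of the original file: firing a user event and
// turning its ID into a wait index for a blocking List call. The event name
// and payload are illustrative.
func exampleEvent(client *Client) error {
	ev := client.Event()
	id, _, err := ev.Fire(&UserEvent{Name: "deploy", Payload: []byte("v1.2.3")}, nil)
	if err != nil {
		return err
	}
	opts := &QueryOptions{WaitIndex: ev.IDToIndex(id)}
	_, _, err = ev.List("deploy", opts)
	return err
}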

View file

@ -1,16 +0,0 @@
module github.com/hashicorp/consul/api
go 1.12
replace github.com/hashicorp/consul/sdk => ../sdk
require (
github.com/hashicorp/consul/sdk v0.3.0
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-rootcerts v1.0.0
github.com/hashicorp/go-uuid v1.0.1
github.com/hashicorp/serf v0.8.2
github.com/mitchellh/mapstructure v1.1.2
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
github.com/stretchr/testify v1.3.0
)

View file

@ -1,78 +0,0 @@
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View file

@ -1,354 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"strings"
"time"
)
const (
// HealthAny is special, and is used as a wild card,
// not as a specific state.
HealthAny = "any"
HealthPassing = "passing"
HealthWarning = "warning"
HealthCritical = "critical"
HealthMaint = "maintenance"
)
const (
// NodeMaint is the special key set by a node in maintenance mode.
NodeMaint = "_node_maintenance"
// ServiceMaintPrefix is the prefix for a service in maintenance mode.
ServiceMaintPrefix = "_service_maintenance:"
)
// HealthCheck is used to represent a single check
type HealthCheck struct {
Node string
CheckID string
Name string
Status string
Notes string
Output string
ServiceID string
ServiceName string
ServiceTags []string
Type string
Definition HealthCheckDefinition
CreateIndex uint64
ModifyIndex uint64
}
// HealthCheckDefinition is used to store the details about
// a health check's execution.
type HealthCheckDefinition struct {
HTTP string
Header map[string][]string
Method string
TLSSkipVerify bool
TCP string
IntervalDuration time.Duration `json:"-"`
TimeoutDuration time.Duration `json:"-"`
DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
// DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
Interval ReadableDuration
Timeout ReadableDuration
DeregisterCriticalServiceAfter ReadableDuration
}
func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
type Alias HealthCheckDefinition
out := &struct {
Interval string
Timeout string
DeregisterCriticalServiceAfter string
*Alias
}{
Interval: d.Interval.String(),
Timeout: d.Timeout.String(),
DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
Alias: (*Alias)(d),
}
if d.IntervalDuration != 0 {
out.Interval = d.IntervalDuration.String()
} else if d.Interval != 0 {
out.Interval = d.Interval.String()
}
if d.TimeoutDuration != 0 {
out.Timeout = d.TimeoutDuration.String()
} else if d.Timeout != 0 {
out.Timeout = d.Timeout.String()
}
if d.DeregisterCriticalServiceAfterDuration != 0 {
out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String()
} else if d.DeregisterCriticalServiceAfter != 0 {
out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String()
}
return json.Marshal(out)
}
func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
type Alias HealthCheckDefinition
aux := &struct {
IntervalDuration interface{}
TimeoutDuration interface{}
DeregisterCriticalServiceAfterDuration interface{}
*Alias
}{
Alias: (*Alias)(t),
}
if err := json.Unmarshal(data, &aux); err != nil {
return err
}
// Parse the values into both the time.Duration and old ReadableDuration fields.
if aux.IntervalDuration == nil {
t.IntervalDuration = time.Duration(t.Interval)
} else {
switch v := aux.IntervalDuration.(type) {
case string:
if t.IntervalDuration, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.IntervalDuration = time.Duration(v)
}
t.Interval = ReadableDuration(t.IntervalDuration)
}
if aux.TimeoutDuration == nil {
t.TimeoutDuration = time.Duration(t.Timeout)
} else {
switch v := aux.TimeoutDuration.(type) {
case string:
if t.TimeoutDuration, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.TimeoutDuration = time.Duration(v)
}
t.Timeout = ReadableDuration(t.TimeoutDuration)
}
if aux.DeregisterCriticalServiceAfterDuration == nil {
t.DeregisterCriticalServiceAfterDuration = time.Duration(t.DeregisterCriticalServiceAfter)
} else {
switch v := aux.DeregisterCriticalServiceAfterDuration.(type) {
case string:
if t.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(v); err != nil {
return err
}
case float64:
t.DeregisterCriticalServiceAfterDuration = time.Duration(v)
}
t.DeregisterCriticalServiceAfter = ReadableDuration(t.DeregisterCriticalServiceAfterDuration)
}
return nil
}
// HealthChecks is a collection of HealthCheck structs.
type HealthChecks []*HealthCheck
// AggregatedStatus returns the "best" status for the list of health checks.
// Because a given entry may have many service and node-level health checks
// attached, this function determines the best representative of the status as
// a single string using the following heuristic:
//
// maintenance > critical > warning > passing
//
func (c HealthChecks) AggregatedStatus() string {
var passing, warning, critical, maintenance bool
for _, check := range c {
id := string(check.CheckID)
if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
maintenance = true
continue
}
switch check.Status {
case HealthPassing:
passing = true
case HealthWarning:
warning = true
case HealthCritical:
critical = true
default:
return ""
}
}
switch {
case maintenance:
return HealthMaint
case critical:
return HealthCritical
case warning:
return HealthWarning
case passing:
return HealthPassing
default:
return HealthPassing
}
}
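// Editor's sketch, not part of the original file: one critical check outweighs
// a passing one under the heuristic above.
//
//	checks := HealthChecks{
//		{Status: HealthPassing},
//		{Status: HealthCritical},
//	}
//	checks.AggregatedStatus() // "critical"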
// ServiceEntry is used for the health service endpoint
type ServiceEntry struct {
Node *Node
Service *AgentService
Checks HealthChecks
}
// Health can be used to query the Health endpoints
type Health struct {
c *Client
}
// Health returns a handle to the health endpoints
func (c *Client) Health() *Health {
return &Health{c}
}
// Node is used to query for checks belonging to a given node
func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/node/"+node)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Checks is used to return the checks associated with a service
func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
r := h.c.newRequest("GET", "/v1/health/checks/"+service)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Service is used to query health information along with service info
// for a given service. It can optionally do server-side filtering on a tag
// or nodes with passing health checks only.
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, false)
}
func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, false)
}
// Connect is equivalent to Service except that it will only return services
// which are Connect-enabled and will return the connection address for Connect
// clients to use, which may be a proxy in front of the named service. If
// passingOnly is true only instances where both the service and any proxy are
// healthy will be returned.
func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
var tags []string
if tag != "" {
tags = []string{tag}
}
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
return h.service(service, tags, passingOnly, q, true)
}
func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
path := "/v1/health/service/" + service
if connect {
path = "/v1/health/connect/" + service
}
r := h.c.newRequest("GET", path)
r.setQueryOptions(q)
if len(tags) > 0 {
for _, tag := range tags {
r.params.Add("tag", tag)
}
}
if passingOnly {
r.params.Set(HealthPassing, "1")
}
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out []*ServiceEntry
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}
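// A minimal usage sketch of the health endpoints above: list healthy
// instances of a hypothetical "web" service. NewClient and DefaultConfig
// are assumed from this package's client setup.
func exampleHealthService() error {
	client, err := NewClient(DefaultConfig())
	if err != nil {
		return err
	}
	// passingOnly=true asks the server to filter out unhealthy instances.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		return err
	}
	for _, e := range entries {
		fmt.Printf("%s %s:%d %s\n",
			e.Service.ID, e.Node.Address, e.Service.Port, e.Checks.AggregatedStatus())
	}
	return nil
}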
// State is used to retrieve all the checks in a given state.
// The wildcard "any" state can also be used for all checks.
func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
switch state {
case HealthAny:
case HealthWarning:
case HealthCritical:
case HealthPassing:
default:
return nil, nil, fmt.Errorf("Unsupported state: %v", state)
}
r := h.c.newRequest("GET", "/v1/health/state/"+state)
r.setQueryOptions(q)
rtt, resp, err := requireOK(h.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
var out HealthChecks
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, qm, nil
}

View file

@ -1,286 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"strings"
)
// KVPair is used to represent a single K/V entry
type KVPair struct {
// Key is the name of the key. It is also part of the URL path when accessed
// via the API.
Key string
// CreateIndex holds the index corresponding to the creation of this KVPair. This
// is a read-only field.
CreateIndex uint64
// ModifyIndex is used for the Check-And-Set operations and can also be fed
// back into the WaitIndex of the QueryOptions in order to perform blocking
// queries.
ModifyIndex uint64
// LockIndex holds the index corresponding to a lock on this key, if any. This
// is a read-only field.
LockIndex uint64
// Flags are any user-defined flags on the key. It is up to the implementer
// to check these values, since Consul does not treat them specially.
Flags uint64
// Value is the value for the key. This can be any value, but it will be
// base64 encoded upon transport.
Value []byte
// Session is a string representing the ID of the session. Any other
// interactions with this key over the same session must specify the same
// session ID.
Session string
}
// KVPairs is a list of KVPair objects
type KVPairs []*KVPair
// KV is used to manipulate the K/V API
type KV struct {
c *Client
}
// KV is used to return a handle to the K/V apis
func (c *Client) KV() *KV {
return &KV{c}
}
// Get is used to lookup a single key. The returned pointer
// to the KVPair will be nil if the key does not exist.
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
resp, qm, err := k.getInternal(key, nil, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List is used to lookup all keys under a prefix
func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []*KVPair
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// Keys is used to list all the keys under a prefix. Optionally,
// a separator can be used to limit the responses.
func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
params := map[string]string{"keys": ""}
if separator != "" {
params["separator"] = separator
}
resp, qm, err := k.getInternal(prefix, params, q)
if err != nil {
return nil, nil, err
}
if resp == nil {
return nil, qm, nil
}
defer resp.Body.Close()
var entries []string
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, err
}
return entries, qm, nil
}
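// A minimal usage sketch of the read calls above against a hypothetical
// "config/" prefix: Get returns nil for a missing key, Keys lists names only.
func exampleKVRead(kv *KV) error {
	pair, _, err := kv.Get("config/app/name", nil)
	if err != nil {
		return err
	}
	if pair != nil {
		fmt.Printf("%s = %s\n", pair.Key, pair.Value)
	}
	names, _, err := kv.Keys("config/", "/", nil)
	if err != nil {
		return err
	}
	fmt.Println(names)
	return nil
}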
func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
r.setQueryOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := k.c.doRequest(r)
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == 404 {
resp.Body.Close()
return nil, qm, nil
} else if resp.StatusCode != 200 {
resp.Body.Close()
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
return resp, qm, nil
}
// Put is used to write a new value. Only the
// Key, Flags and Value are respected.
func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
params := make(map[string]string, 1)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
_, wm, err := k.put(p.Key, params, p.Value, q)
return wm, err
}
// CAS is used for a Check-And-Set operation. The Key,
// ModifyIndex, Flags and Value are respected. Returns true
// on success or false on failures.
func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
return k.put(p.Key, params, p.Value, q)
}
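// A minimal sketch of an optimistic read-modify-write loop built on CAS:
// re-read and retry whenever another writer updated the key first. The key
// name and mutation are hypothetical.
func exampleKVCASLoop(kv *KV) error {
	for {
		pair, _, err := kv.Get("counters/visits", nil)
		if err != nil {
			return err
		}
		if pair == nil {
			// ModifyIndex stays 0, so CAS only succeeds if the key is absent.
			pair = &KVPair{Key: "counters/visits"}
		}
		pair.Value = append(pair.Value, '.') // hypothetical mutation
		ok, _, err := kv.CAS(pair, nil)      // respects pair.ModifyIndex
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		// Lost the race: loop and retry with a fresh ModifyIndex.
	}
}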
// Acquire is used for a lock acquisition operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["acquire"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
// Release is used for a lock release operation. The Key,
// Flags, Value and Session are respected. Returns true
// on success or false on failures.
func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := make(map[string]string, 2)
if p.Flags != 0 {
params["flags"] = strconv.FormatUint(p.Flags, 10)
}
params["release"] = p.Session
return k.put(p.Key, params, p.Value, q)
}
func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
if len(key) > 0 && key[0] == '/' {
return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
}
r := k.c.newRequest("PUT", "/v1/kv/"+key)
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
r.body = bytes.NewReader(body)
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, qm, nil
}
// Delete is used to delete a single key
func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
_, qm, err := k.deleteInternal(key, nil, w)
return qm, err
}
// DeleteCAS is used for a Delete Check-And-Set operation. The Key
// and ModifyIndex are respected. Returns true on success or false on failures.
func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
params := map[string]string{
"cas": strconv.FormatUint(p.ModifyIndex, 10),
}
return k.deleteInternal(p.Key, params, q)
}
// DeleteTree is used to delete all keys under a prefix
func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
return qm, err
}
func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
r.setWriteOptions(q)
for param, val := range params {
r.params.Set(param, val)
}
rtt, resp, err := requireOK(k.c.doRequest(r))
if err != nil {
return false, nil, err
}
defer resp.Body.Close()
qm := &WriteMeta{}
qm.RequestTime = rtt
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, qm, nil
}
// The Txn function has been deprecated from the KV object; please see the Txn
// object for more information about Transactions.
func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
var ops TxnOps
for _, op := range txn {
ops = append(ops, &TxnOp{KV: op})
}
respOk, txnResp, qm, err := k.c.txn(ops, q)
if err != nil {
return false, nil, nil, err
}
// Convert from the internal format.
kvResp := KVTxnResponse{
Errors: txnResp.Errors,
}
for _, result := range txnResp.Results {
kvResp.Results = append(kvResp.Results, result.KV)
}
return respOk, &kvResp, qm, nil
}
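// A minimal sketch of the deprecated Txn helper, assuming the KVTxnOp type
// and the KVSet verb from this package's txn definitions: both writes
// commit atomically or not at all.
func exampleKVTxn(kv *KV) error {
	ops := KVTxnOps{
		&KVTxnOp{Verb: KVSet, Key: "app/primary", Value: []byte("node-1")},
		&KVTxnOp{Verb: KVSet, Key: "app/standby", Value: []byte("node-2")},
	}
	ok, resp, err := kv.Txn(ops, nil)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("txn rolled back: %v", resp.Errors)
	}
	return nil
}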

View file

@ -1,386 +0,0 @@
package api
import (
"fmt"
"sync"
"time"
)
const (
// DefaultLockSessionName is the Session Name we assign if none is provided
DefaultLockSessionName = "Consul API Lock"
// DefaultLockSessionTTL is the default session TTL if no Session is provided
// when creating a new Lock. This is used because we do not have another
// other check to depend upon.
DefaultLockSessionTTL = "15s"
// DefaultLockWaitTime is how long we block for at a time to check if lock
// acquisition is possible. This affects the minimum time it takes to cancel
// a Lock acquisition.
DefaultLockWaitTime = 15 * time.Second
// DefaultLockRetryTime is how long we wait after a failed lock acquisition
// before attempting to do the lock again. This is so that once a lock-delay
// is in effect, we do not hot loop retrying the acquisition.
DefaultLockRetryTime = 5 * time.Second
// DefaultMonitorRetryTime is how long we wait after a failed monitor check
// of a lock (500 response code). This allows the monitor to ride out brief
// periods of unavailability, subject to the MonitorRetries setting in the
// lock options which is by default set to 0, disabling this feature. This
// affects locks and semaphores.
DefaultMonitorRetryTime = 2 * time.Second
// LockFlagValue is a magic flag we set to indicate a key
// is being used for a lock. It is used to detect a potential
// conflict with a semaphore.
LockFlagValue = 0x2ddccbc058a50c18
)
var (
// ErrLockHeld is returned if we attempt to double lock
ErrLockHeld = fmt.Errorf("Lock already held")
// ErrLockNotHeld is returned if we attempt to unlock a lock
// that we do not hold.
ErrLockNotHeld = fmt.Errorf("Lock not held")
// ErrLockInUse is returned if we attempt to destroy a lock
// that is in use.
ErrLockInUse = fmt.Errorf("Lock in use")
// ErrLockConflict is returned if the flags on a key
// used for a lock do not match expectation
ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
)
// Lock is used to implement client-side leader election. It follows the
// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
type Lock struct {
c *Client
opts *LockOptions
isHeld bool
sessionRenew chan struct{}
lockSession string
l sync.Mutex
}
// LockOptions is used to parameterize the Lock behavior.
type LockOptions struct {
Key string // Must be set and have write permissions
Value []byte // Optional, value to associate with the lock
Session string // Optional, created if not specified
SessionOpts *SessionEntry // Optional, options to use when creating a session
SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
LockTryOnce bool // Optional, defaults to false which means try forever
}
// LockKey returns a handle to a lock struct which can be used
// to acquire and release the mutex. The key used must have
// write permissions.
func (c *Client) LockKey(key string) (*Lock, error) {
opts := &LockOptions{
Key: key,
}
return c.LockOpts(opts)
}
// LockOpts returns a handle to a lock struct which can be used
// to acquire and release the mutex. The key used must have
// write permissions.
func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
if opts.Key == "" {
return nil, fmt.Errorf("missing key")
}
if opts.SessionName == "" {
opts.SessionName = DefaultLockSessionName
}
if opts.SessionTTL == "" {
opts.SessionTTL = DefaultLockSessionTTL
} else {
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.LockWaitTime == 0 {
opts.LockWaitTime = DefaultLockWaitTime
}
l := &Lock{
c: c,
opts: opts,
}
return l, nil
}
// Lock attempts to acquire the lock and blocks while doing so.
// Providing a non-nil stopCh can be used to abort the lock attempt.
// Returns a channel that is closed if our lock is lost or an error.
// This channel could be closed at any time due to session invalidation,
// communication errors, operator intervention, etc. It is NOT safe to
// assume that the lock is held until Unlock() unless the Session is specifically
// created without any associated health checks. By default Consul sessions
// prefer liveness over safety and an application must be able to handle
// the lock being lost.
func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Hold the lock as we try to acquire
l.l.Lock()
defer l.l.Unlock()
// Check if we already hold the lock
if l.isHeld {
return nil, ErrLockHeld
}
// Check if we need to create a session first
l.lockSession = l.opts.Session
if l.lockSession == "" {
s, err := l.createSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %v", err)
}
l.sessionRenew = make(chan struct{})
l.lockSession = s
session := l.c.Session()
go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
// If we fail to acquire the lock, cleanup the session
defer func() {
if !l.isHeld {
close(l.sessionRenew)
l.sessionRenew = nil
}
}()
}
// Setup the query options
kv := l.c.KV()
qOpts := &QueryOptions{
WaitTime: l.opts.LockWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
case <-stopCh:
return nil, nil
default:
}
// Handle the one-shot mode.
if l.opts.LockTryOnce && attempts > 0 {
elapsed := time.Since(start)
if elapsed > l.opts.LockWaitTime {
return nil, nil
}
// Query wait time should not exceed the lock wait time
qOpts.WaitTime = l.opts.LockWaitTime - elapsed
}
attempts++
// Look for an existing lock, blocking until not taken
pair, meta, err := kv.Get(l.opts.Key, qOpts)
if err != nil {
return nil, fmt.Errorf("failed to read lock: %v", err)
}
if pair != nil && pair.Flags != LockFlagValue {
return nil, ErrLockConflict
}
locked := false
if pair != nil && pair.Session == l.lockSession {
goto HELD
}
if pair != nil && pair.Session != "" {
qOpts.WaitIndex = meta.LastIndex
goto WAIT
}
// Try to acquire the lock
pair = l.lockEntry(l.lockSession)
locked, _, err = kv.Acquire(pair, nil)
if err != nil {
return nil, fmt.Errorf("failed to acquire lock: %v", err)
}
// Handle the case of not getting the lock
if !locked {
// Determine why the lock failed
qOpts.WaitIndex = 0
pair, meta, err = kv.Get(l.opts.Key, qOpts)
if err != nil {
return nil, fmt.Errorf("failed to read lock: %v", err)
}
if pair != nil && pair.Session != "" {
// If the session is not null, this means that a wait can safely happen
// using a long poll
qOpts.WaitIndex = meta.LastIndex
goto WAIT
} else {
// If the session is empty and the lock failed to acquire, then it means
// a lock-delay is in effect and a timed wait must be used
select {
case <-time.After(DefaultLockRetryTime):
goto WAIT
case <-stopCh:
return nil, nil
}
}
}
HELD:
// Watch to ensure we maintain leadership
leaderCh := make(chan struct{})
go l.monitorLock(l.lockSession, leaderCh)
// Set that we own the lock
l.isHeld = true
// Locked! All done
return leaderCh, nil
}
// Unlock releases the lock. It is an error to call this
// if the lock is not currently held.
func (l *Lock) Unlock() error {
// Hold the lock as we try to release
l.l.Lock()
defer l.l.Unlock()
// Ensure the lock is actually held
if !l.isHeld {
return ErrLockNotHeld
}
// Set that we no longer own the lock
l.isHeld = false
// Stop the session renew
if l.sessionRenew != nil {
defer func() {
close(l.sessionRenew)
l.sessionRenew = nil
}()
}
// Get the lock entry, and clear the lock session
lockEnt := l.lockEntry(l.lockSession)
l.lockSession = ""
// Release the lock explicitly
kv := l.c.KV()
_, _, err := kv.Release(lockEnt, nil)
if err != nil {
return fmt.Errorf("failed to release lock: %v", err)
}
return nil
}
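// A minimal sketch of the leader-election flow documented on Lock: acquire,
// hold until the returned channel closes, then release. The key name is
// hypothetical.
func exampleLeaderElection(c *Client) error {
	lock, err := c.LockKey("service/web/leader")
	if err != nil {
		return err
	}
	lostCh, err := lock.Lock(nil) // blocks until acquired
	if err != nil {
		return err
	}
	if lostCh == nil {
		return nil // lock attempt was aborted
	}
	defer lock.Unlock()
	// Do leader-only work here; lostCh closes if the session is invalidated
	// or the lock is otherwise lost.
	<-lostCh
	return fmt.Errorf("leadership lost")
}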
// Destroy is used to cleanup the lock entry. It is not necessary
// to invoke it. It will fail if the lock is in use.
func (l *Lock) Destroy() error {
// Hold the lock as we try to release
l.l.Lock()
defer l.l.Unlock()
// Check if we already hold the lock
if l.isHeld {
return ErrLockHeld
}
// Look for an existing lock
kv := l.c.KV()
pair, _, err := kv.Get(l.opts.Key, nil)
if err != nil {
return fmt.Errorf("failed to read lock: %v", err)
}
// Nothing to do if the lock does not exist
if pair == nil {
return nil
}
// Check for possible flag conflict
if pair.Flags != LockFlagValue {
return ErrLockConflict
}
// Check if it is in use
if pair.Session != "" {
return ErrLockInUse
}
// Attempt the delete
didRemove, _, err := kv.DeleteCAS(pair, nil)
if err != nil {
return fmt.Errorf("failed to remove lock: %v", err)
}
if !didRemove {
return ErrLockInUse
}
return nil
}
// createSession is used to create a new managed session
func (l *Lock) createSession() (string, error) {
session := l.c.Session()
se := l.opts.SessionOpts
if se == nil {
se = &SessionEntry{
Name: l.opts.SessionName,
TTL: l.opts.SessionTTL,
}
}
id, _, err := session.Create(se, nil)
if err != nil {
return "", err
}
return id, nil
}
// lockEntry returns a formatted KVPair for the lock
func (l *Lock) lockEntry(session string) *KVPair {
return &KVPair{
Key: l.opts.Key,
Value: l.opts.Value,
Session: session,
Flags: LockFlagValue,
}
}
// monitorLock is a long-running routine to monitor lock ownership.
// It closes the stopCh if we lose our leadership.
func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
defer close(stopCh)
kv := l.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := l.opts.MonitorRetries
RETRY:
pair, meta, err := kv.Get(l.opts.Key, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsRetryableError(err) {
time.Sleep(l.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
if pair != nil && pair.Session == session {
opts.WaitIndex = meta.LastIndex
goto WAIT
}
}

View file

@ -1,11 +0,0 @@
package api
// Operator can be used to perform low-level operator tasks for Consul.
type Operator struct {
c *Client
}
// Operator returns a handle to the operator endpoints.
func (c *Client) Operator() *Operator {
return &Operator{c}
}

View file

@ -1,194 +0,0 @@
package api
// The /v1/operator/area endpoints are available only in Consul Enterprise and
// interact with its network area subsystem. Network areas are used to link
// together Consul servers in different Consul datacenters. With network areas,
// Consul datacenters can be linked together in ways other than a fully-connected
// mesh, as is required for Consul's WAN.
import (
"net"
"time"
)
// Area defines a network area.
type Area struct {
// ID is the identifier for an area (a UUID). This must be left empty
// when creating a new area.
ID string
// PeerDatacenter is the peer Consul datacenter that will make up the
// other side of this network area. Network areas always involve a pair
// of datacenters: the datacenter where the area was created, and the
// peer datacenter. This is required.
PeerDatacenter string
// RetryJoin specifies the addresses of Consul servers to join, such as
// IPs or hostnames with an optional port number. This is optional.
RetryJoin []string
// UseTLS specifies whether gossip over this area should be encrypted with TLS
// if possible.
UseTLS bool
}
// AreaJoinResponse is returned when a join occurs and gives the result for each
// address.
type AreaJoinResponse struct {
// The address that was joined.
Address string
// Whether or not the join was a success.
Joined bool
// If we couldn't join, this is the message with information.
Error string
}
// SerfMember is a generic structure for reporting information about members in
// a Serf cluster. This is only used by the area endpoints right now, but this
// could be expanded to other endpoints in the future.
type SerfMember struct {
// ID is the node identifier (a UUID).
ID string
// Name is the node name.
Name string
// Addr has the IP address.
Addr net.IP
// Port is the RPC port.
Port uint16
// Datacenter is the DC name.
Datacenter string
// Role is "client", "server", or "unknown".
Role string
// Build has the version of the Consul agent.
Build string
// Protocol is the protocol of the Consul agent.
Protocol int
// Status is the Serf health status "none", "alive", "leaving", "left",
// or "failed".
Status string
// RTT is the estimated round trip time from the server handling the
// request to this member. This will be negative if no RTT estimate
// is available.
RTT time.Duration
}
// AreaCreate will create a new network area. The ID in the given structure must
// be empty and a generated ID will be returned on success.
func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("POST", "/v1/operator/area")
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaUpdate will update the configuration of the network area with the given ID.
func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
r.obj = area
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// AreaGet returns a single network area.
func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaList returns all the available network areas.
func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
var out []*Area
qm, err := op.c.query("/v1/operator/area", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// AreaDelete deletes the given network area.
func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// AreaJoin attempts to join the given set of join addresses to the given
// network area. See the Area structure for details about join addresses.
func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
r.setWriteOptions(q)
r.obj = addresses
rtt, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out []*AreaJoinResponse
if err := decodeBody(resp, &out); err != nil {
return nil, nil, err
}
return out, wm, nil
}
// AreaMembers lists the Serf information about the members in the given area.
func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
var out []*SerfMember
qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}

View file

@ -1,232 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"time"
)
// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
// Autopilot helps manage operator tasks related to Consul servers like removing
// failed servers from the Raft quorum.
type AutopilotConfiguration struct {
// CleanupDeadServers controls whether to remove dead servers from the Raft
// peer list when a new server joins
CleanupDeadServers bool
// LastContactThreshold is the limit on the amount of time a server can go
// without leader contact before being considered unhealthy.
LastContactThreshold *ReadableDuration
// MaxTrailingLogs is the number of entries in the Raft Log that a server can
// be behind before being considered unhealthy.
MaxTrailingLogs uint64
// MinQuorum sets the minimum number of servers allowed in a cluster before
// autopilot can prune dead servers.
MinQuorum uint
// ServerStabilizationTime is the minimum amount of time a server must be
// in a stable, healthy state before it can be added to the cluster. Only
// applicable with Raft protocol version 3 or higher.
ServerStabilizationTime *ReadableDuration
// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
// servers into zones for redundancy. If left blank, this feature will be disabled.
RedundancyZoneTag string
// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
// strategy of waiting until enough newer-versioned servers have been added to the
// cluster before promoting them to voters.
DisableUpgradeMigration bool
// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
// performing upgrade migrations. If left blank, the Consul version will be used.
UpgradeVersionTag string
// CreateIndex holds the index corresponding to the creation of this configuration.
// This is a read-only field.
CreateIndex uint64
// ModifyIndex will be set to the index of the last update when retrieving the
// Autopilot configuration. Resubmitting a configuration with
// AutopilotCASConfiguration will perform a check-and-set operation which ensures
// there hasn't been a subsequent update since the configuration was retrieved.
ModifyIndex uint64
}
// ServerHealth is the health (from the leader's point of view) of a server.
type ServerHealth struct {
// ID is the raft ID of the server.
ID string
// Name is the node name of the server.
Name string
// Address is the address of the server.
Address string
// The status of the SerfHealth check for the server.
SerfStatus string
// Version is the Consul version of the server.
Version string
// Leader is whether this server is currently the leader.
Leader bool
// LastContact is the time since this node's last contact with the leader.
LastContact *ReadableDuration
// LastTerm is the highest leader term this server has a record of in its Raft log.
LastTerm uint64
// LastIndex is the last log index this server has a record of in its Raft log.
LastIndex uint64
// Healthy is whether or not the server is healthy according to the current
// Autopilot config.
Healthy bool
// Voter is whether this is a voting server.
Voter bool
// StableSince is the last time this server's Healthy value changed.
StableSince time.Time
}
// OperatorHealthReply is a representation of the overall health of the cluster
type OperatorHealthReply struct {
// Healthy is true if all the servers in the cluster are healthy.
Healthy bool
// FailureTolerance is the number of healthy servers that could be lost without
// an outage occurring.
FailureTolerance int
// Servers holds the health of each server.
Servers []ServerHealth
}
// ReadableDuration is a duration type that is serialized to JSON in human readable format.
type ReadableDuration time.Duration
func NewReadableDuration(dur time.Duration) *ReadableDuration {
d := ReadableDuration(dur)
return &d
}
func (d *ReadableDuration) String() string {
return d.Duration().String()
}
func (d *ReadableDuration) Duration() time.Duration {
if d == nil {
return time.Duration(0)
}
return time.Duration(*d)
}
func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
}
func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) {
if d == nil {
return fmt.Errorf("cannot unmarshal to nil pointer")
}
var dur time.Duration
str := string(raw)
if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' {
// quoted string
dur, err = time.ParseDuration(str[1 : len(str)-1])
if err != nil {
return err
}
} else {
// no quotes: parse as a plain number of nanoseconds
v, err := strconv.ParseFloat(str, 64)
if err != nil {
return err
}
dur = time.Duration(v)
}
*d = ReadableDuration(dur)
return nil
}
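// A minimal sketch of the two accepted JSON encodings: a quoted Go duration
// string, or a bare number interpreted as nanoseconds.
func exampleReadableDuration() error {
	var d ReadableDuration
	if err := d.UnmarshalJSON([]byte(`"1m30s"`)); err != nil {
		return err
	}
	fmt.Println(d.Duration()) // 1m30s
	if err := d.UnmarshalJSON([]byte(`90000000000`)); err != nil {
		return err
	}
	fmt.Println(d.Duration()) // 1m30s (90e9 nanoseconds)
	return nil
}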
// AutopilotGetConfiguration is used to query the current Autopilot configuration.
func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out AutopilotConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// AutopilotSetConfiguration is used to set the current Autopilot configuration.
func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
// Autopilot configuration. The ModifyIndex value will be respected. Returns
// true on success or false on failures.
func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
r.setWriteOptions(q)
r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
r.obj = conf
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return false, err
}
defer resp.Body.Close()
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, fmt.Errorf("Failed to read response: %v", err)
}
res := strings.Contains(buf.String(), "true")
return res, nil
}
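// A minimal sketch of a read-modify-CAS update of the Autopilot
// configuration, retrying when another writer updated it in between.
func exampleAutopilotCAS(op *Operator) error {
	for {
		conf, err := op.AutopilotGetConfiguration(nil)
		if err != nil {
			return err
		}
		conf.CleanupDeadServers = true
		ok, err := op.AutopilotCASConfiguration(conf, nil) // checks conf.ModifyIndex
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
	}
}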
// AutopilotServerHealth is used to query the health of the servers in the cluster.
func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out OperatorHealthReply
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}

View file

@ -1,89 +0,0 @@
package api
// keyringRequest is used for performing Keyring operations
type keyringRequest struct {
Key string
}
// KeyringResponse is returned when listing the gossip encryption keys
type KeyringResponse struct {
// Whether this response is for a WAN ring
WAN bool
// The datacenter name this request corresponds to
Datacenter string
// Segment has the network segment this request corresponds to.
Segment string
// Messages has information or errors from serf
Messages map[string]string `json:",omitempty"`
// A map of the encryption keys to the number of nodes they're installed on
Keys map[string]int
// The total number of nodes in this ring
NumNodes int
}
// KeyringInstall is used to install a new gossip encryption key into the cluster
func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
r := op.c.newRequest("POST", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringList is used to list the gossip keys installed in the cluster
func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
r := op.c.newRequest("GET", "/v1/operator/keyring")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out []*KeyringResponse
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return out, nil
}
// KeyringRemove is used to remove a gossip encryption key from the cluster
func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// KeyringUse is used to change the active gossip encryption key
func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
r := op.c.newRequest("PUT", "/v1/operator/keyring")
r.setWriteOptions(q)
r.obj = keyringRequest{
Key: key,
}
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
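// A minimal sketch of a gossip key rotation built from the four calls
// above: install the new key everywhere, make it primary, then remove the
// old one. The base64 key values are placeholders.
func exampleKeyringRotation(op *Operator) error {
	const newKey, oldKey = "NEW-BASE64-KEY=", "OLD-BASE64-KEY="
	if err := op.KeyringInstall(newKey, nil); err != nil {
		return err
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		return err
	}
	return op.KeyringRemove(oldKey, nil)
}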

View file

@ -1,111 +0,0 @@
package api
import (
"io/ioutil"
"strings"
"time"
)
type License struct {
// The unique identifier of the license
LicenseID string `json:"license_id"`
// The customer ID associated with the license
CustomerID string `json:"customer_id"`
// If set, an identifier that should be used to lock the license to a
// particular site, cluster, etc.
InstallationID string `json:"installation_id"`
// The time at which the license was issued
IssueTime time.Time `json:"issue_time"`
// The time at which the license starts being valid
StartTime time.Time `json:"start_time"`
// The time after which the license expires
ExpirationTime time.Time `json:"expiration_time"`
// The time at which the license ceases to function and can
// no longer be used in any capacity
TerminationTime time.Time `json:"termination_time"`
// The product the license is valid for
Product string `json:"product"`
// License Specific Flags
Flags map[string]interface{} `json:"flags"`
// List of features enabled by the license
Features []string `json:"features"`
}
type LicenseReply struct {
Valid bool
License *License
Warnings []string
}
func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) {
var reply LicenseReply
if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil {
return nil, err
} else {
return &reply, nil
}
}
func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
r := op.c.newRequest("GET", "/v1/operator/license")
r.params.Set("signed", "1")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return string(data), nil
}
// LicenseReset will reset the license to the builtin one if it is still valid.
// If the builtin license is invalid, the current license stays active.
func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
var reply LicenseReply
r := op.c.newRequest("DELETE", "/v1/operator/license")
r.setWriteOptions(opts)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := decodeBody(resp, &reply); err != nil {
return nil, err
}
return &reply, nil
}
func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
var reply LicenseReply
r := op.c.newRequest("PUT", "/v1/operator/license")
r.setWriteOptions(opts)
r.body = strings.NewReader(license)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := decodeBody(resp, &reply); err != nil {
return nil, err
}
return &reply, nil
}

View file

@ -1,89 +0,0 @@
package api
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Consul.
ID string
// Node is the node name of the server, as known by Consul, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address string
// Leader is true if this server is the current cluster leader.
Leader bool
// ProtocolVersion is the Raft protocol version used by the server.
ProtocolVersion string
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Consul.
Voter bool
}
// RaftConfiguration is returned when querying for the current Raft configuration.
type RaftConfiguration struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftGetConfiguration is used to query the current Raft peer set.
func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
r.setQueryOptions(q)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var out RaftConfiguration
if err := decodeBody(resp, &out); err != nil {
return nil, err
}
return &out, nil
}
// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by address in the form of
// "IP:port".
func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("address", string(address))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
// quorum but no longer known to Serf or the catalog) by ID.
func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
r.setWriteOptions(q)
r.params.Set("id", string(id))
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
return err
}
resp.Body.Close()
return nil
}
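// A minimal sketch using RaftGetConfiguration above: describe each peer and
// flag the leader and any non-voters. Plain string concatenation is used
// since this file imports nothing.
func exampleRaftPeers(op *Operator) ([]string, error) {
	conf, err := op.RaftGetConfiguration(nil)
	if err != nil {
		return nil, err
	}
	var peers []string
	for _, s := range conf.Servers {
		desc := s.Node + " @ " + s.Address
		if s.Leader {
			desc += " (leader)"
		}
		if !s.Voter {
			desc += " (non-voter)"
		}
		peers = append(peers, desc)
	}
	return peers, nil
}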

View file

@ -1,11 +0,0 @@
package api
// SegmentList returns all the available LAN segments.
func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
var out []string
qm, err := op.c.query("/v1/operator/segment", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}

View file

@ -1,217 +0,0 @@
package api
// QueryDatacenterOptions sets options about how we fail over if there are no
// healthy nodes in the local datacenter.
type QueryDatacenterOptions struct {
// NearestN is set to the number of remote datacenters to try, based on
// network coordinates.
NearestN int
// Datacenters is a fixed list of datacenters to try after NearestN. We
// never try a datacenter multiple times, so those are subtracted from
// this list before proceeding.
Datacenters []string
}
// QueryDNSOptions controls settings when query results are served over DNS.
type QueryDNSOptions struct {
// TTL is the time to live for the served DNS results.
TTL string
}
// ServiceQuery is used to query for a set of healthy nodes offering a specific
// service.
type ServiceQuery struct {
// Service is the service to query.
Service string
// Near allows baking in the name of a node to automatically distance-
// sort from. The magic "_agent" value is supported, which sorts near
// the agent which initiated the request by default.
Near string
// Failover controls what we do if there are no healthy nodes in the
// local datacenter.
Failover QueryDatacenterOptions
// IgnoreCheckIDs is an optional list of health check IDs to ignore when
// considering which nodes are healthy. It is useful as an emergency measure
// to temporarily override some health check that is producing false negatives
// for example.
IgnoreCheckIDs []string
// If OnlyPassing is true then we will only include nodes with passing
// health checks (critical AND warning checks will cause a node to be
// discarded)
OnlyPassing bool
// Tags are a set of required and/or disallowed tags. If a tag is in
// this list it must be present. If the tag is preceded with "!" then
// it is disallowed.
Tags []string
// NodeMeta is a map of required node metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
NodeMeta map[string]string
// ServiceMeta is a map of required service metadata fields. If a key/value
// pair is in this map it must be present on the node in order for the
// service entry to be returned.
ServiceMeta map[string]string
// Connect if true will filter the prepared query results to only
// include Connect-capable services. These include both native services
// and proxies for matching services. Note that if a proxy matches,
// the constraints in the query above (Near, OnlyPassing, etc.) apply
// to the _proxy_ and not the service being proxied. In practice, proxies
// should be directly next to their services so this isn't an issue.
Connect bool
}
// QueryTemplate carries the arguments for creating a templated query.
type QueryTemplate struct {
// Type specifies the type of the query template. Currently only
// "name_prefix_match" is supported. This field is required.
Type string
// Regexp allows specifying a regex pattern to match against the name
// of the query being executed.
Regexp string
}
// PreparedQueryDefinition defines a complete prepared query.
type PreparedQueryDefinition struct {
// ID is the UUID-based ID for the query, always generated by Consul.
ID string
// Name is an optional friendly name for the query supplied by the
// user. NOTE - if this feature is used then it will reduce the security
// of any read ACL associated with this query/service since this name
// can be used to locate nodes without supplying any ACL.
Name string
// Session is an optional session to tie this query's lifetime to. If
// this is omitted then the query will not expire.
Session string
// Token is the ACL token used when the query was created, and it is
// used when a query is subsequently executed. This token, or a token
// with management privileges, must be used to change the query later.
Token string
// Service defines a service query (leaving things open for other types
// later).
Service ServiceQuery
// DNS has options that control how the results of this query are
// served over DNS.
DNS QueryDNSOptions
// Template is used to pass through the arguments for creating a
// prepared query with an attached template. If a template is given,
// interpolations are possible in other struct fields.
Template QueryTemplate
}
// PreparedQueryExecuteResponse has the results of executing a query.
type PreparedQueryExecuteResponse struct {
// Service is the service that was queried.
Service string
// Nodes has the nodes that were output by the query.
Nodes []ServiceEntry
// DNS has the options for serving these results over DNS.
DNS QueryDNSOptions
// Datacenter is the datacenter that these results came from.
Datacenter string
// Failovers is a count of how many times we had to query a remote
// datacenter.
Failovers int
}
// PreparedQuery can be used to query the prepared query endpoints.
type PreparedQuery struct {
c *Client
}
// PreparedQuery returns a handle to the prepared query endpoints.
func (c *Client) PreparedQuery() *PreparedQuery {
return &PreparedQuery{c}
}
// Create makes a new prepared query. The ID of the new query is returned.
func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
r := c.c.newRequest("POST", "/v1/query")
r.setWriteOptions(q)
r.obj = query
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return "", nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
var out struct{ ID string }
if err := decodeBody(resp, &out); err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Update makes updates to an existing prepared query.
func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
return c.c.write("/v1/query/"+query.ID, query, nil, q)
}
// List is used to fetch all the prepared queries (always requires a management
// token).
func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Get is used to fetch a specific prepared query.
func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
var out []*PreparedQueryDefinition
qm, err := c.c.query("/v1/query/"+queryID, &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
// Delete is used to delete a specific prepared query.
func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
r.setWriteOptions(q)
rtt, resp, err := requireOK(c.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{}
wm.RequestTime = rtt
return wm, nil
}
// Execute is used to execute a specific prepared query. You can execute using
// a query ID or name.
func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
var out *PreparedQueryExecuteResponse
qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
if err != nil {
return nil, nil, err
}
return out, qm, nil
}
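// A minimal sketch tying the calls above together: create a query with a
// nearest-datacenter failover policy, execute it, and clean it up. The
// service name and failover fan-out are hypothetical.
func examplePreparedQuery(c *Client) error {
	pq := c.PreparedQuery()
	id, _, err := pq.Create(&PreparedQueryDefinition{
		Name: "web-nearest",
		Service: ServiceQuery{
			Service:     "web",
			OnlyPassing: true,
			Failover:    QueryDatacenterOptions{NearestN: 2},
		},
	}, nil)
	if err != nil {
		return err
	}
	defer pq.Delete(id, nil)
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		return err
	}
	for _, entry := range resp.Nodes {
		_ = entry // e.g. entry.Node.Address, entry.Service.Port
	}
	return nil
}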

View file

@ -1,24 +0,0 @@
package api
// Raw can be used to do raw queries against custom endpoints
type Raw struct {
c *Client
}
// Raw returns a handle to query endpoints
func (c *Client) Raw() *Raw {
return &Raw{c}
}
// Query is used to do a GET request against an endpoint
// and deserialize the response into an interface using
// standard Consul conventions.
func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
return raw.c.query(endpoint, out, q)
}
// Write is used to do a PUT request against an endpoint
// and serialize/deserialized using the standard Consul conventions.
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
return raw.c.write(endpoint, in, out, q)
}

View file

@ -1,514 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"path"
"sync"
"time"
)
const (
// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
DefaultSemaphoreSessionName = "Consul API Semaphore"
// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
// when creating a new Semaphore. This is used because we do not have another
// other check to depend upon.
DefaultSemaphoreSessionTTL = "15s"
// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
// acquisition is possible. This affects the minimum time it takes to cancel
// a Semaphore acquisition.
DefaultSemaphoreWaitTime = 15 * time.Second
// DefaultSemaphoreKey is the key used within the prefix for
// coordination between all the contenders.
DefaultSemaphoreKey = ".lock"
// SemaphoreFlagValue is a magic flag we set to indicate a key
// is being used for a semaphore. It is used to detect a potential
// conflict with a lock.
SemaphoreFlagValue = 0xe0f69a2baa414de0
)
var (
// ErrSemaphoreHeld is returned if we attempt to double lock
ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
// that we do not hold.
ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
// that is in use.
ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
// ErrSemaphoreConflict is returned if the flags on a key
// used for a semaphore do not match expectation
ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
)
// Semaphore is used to implement a distributed semaphore
// using the Consul KV primitives.
type Semaphore struct {
c *Client
opts *SemaphoreOptions
isHeld bool
sessionRenew chan struct{}
lockSession string
l sync.Mutex
}
// SemaphoreOptions is used to parameterize the Semaphore
type SemaphoreOptions struct {
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultSemaphoreSessionName
SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
}
// semaphoreLock is written under the DefaultSemaphoreKey and
// is used to coordinate between all the contenders.
type semaphoreLock struct {
// Limit is the integer limit of holders. This is used to
// verify that all the holders agree on the value.
Limit int
// Holders is a list of all the semaphore holders.
// It maps each session ID to true and is effectively used as a set.
Holders map[string]bool
}
// SemaphorePrefix is used to create a Semaphore which will operate
// at the given KV prefix and uses the given limit for the semaphore.
// The prefix must have write privileges, and the limit must be agreed
// upon by all contenders.
func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
opts := &SemaphoreOptions{
Prefix: prefix,
Limit: limit,
}
return c.SemaphoreOpts(opts)
}
// SemaphoreOpts is used to create a Semaphore with the given options.
// The prefix must have write privileges, and the limit must be agreed
// upon by all contenders. If a Session is not provided, one will be created.
func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
if opts.Prefix == "" {
return nil, fmt.Errorf("missing prefix")
}
if opts.Limit <= 0 {
return nil, fmt.Errorf("semaphore limit must be positive")
}
if opts.SessionName == "" {
opts.SessionName = DefaultSemaphoreSessionName
}
if opts.SessionTTL == "" {
opts.SessionTTL = DefaultSemaphoreSessionTTL
} else {
if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.SemaphoreWaitTime == 0 {
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
}
s := &Semaphore{
c: c,
opts: opts,
}
return s, nil
}
// Acquire attempts to reserve a slot in the semaphore, blocking until
// success, interruption via the stopCh, or an error is encountered.
// Providing a non-nil stopCh can be used to abort the attempt.
// On success, a channel is returned that represents our slot.
// This channel could be closed at any time due to session invalidation,
// communication errors, operator intervention, etc. It is NOT safe to
// assume that the slot is held until Release() unless the Session is specifically
// created without any associated health checks. By default Consul sessions
// prefer liveness over safety and an application must be able to handle
// the session being lost.
func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Hold the lock as we try to acquire
s.l.Lock()
defer s.l.Unlock()
// Check if we already hold the semaphore
if s.isHeld {
return nil, ErrSemaphoreHeld
}
// Check if we need to create a session first
s.lockSession = s.opts.Session
if s.lockSession == "" {
sess, err := s.createSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %v", err)
}
s.sessionRenew = make(chan struct{})
s.lockSession = sess
session := s.c.Session()
go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
// If we fail to acquire the lock, cleanup the session
defer func() {
if !s.isHeld {
close(s.sessionRenew)
s.sessionRenew = nil
}
}()
}
// Create the contender entry
kv := s.c.KV()
made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
if err != nil || !made {
return nil, fmt.Errorf("failed to make contender entry: %v", err)
}
// Setup the query options
qOpts := &QueryOptions{
WaitTime: s.opts.SemaphoreWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
case <-stopCh:
return nil, nil
default:
}
// Handle the one-shot mode.
if s.opts.SemaphoreTryOnce && attempts > 0 {
elapsed := time.Since(start)
if elapsed > s.opts.SemaphoreWaitTime {
return nil, nil
}
// Query wait time should not exceed the semaphore wait time
qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
}
attempts++
// Read the prefix
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
if err != nil {
return nil, fmt.Errorf("failed to read prefix: %v", err)
}
// Decode the lock
lockPair := s.findLock(pairs)
if lockPair.Flags != SemaphoreFlagValue {
return nil, ErrSemaphoreConflict
}
lock, err := s.decodeLock(lockPair)
if err != nil {
return nil, err
}
// Verify we agree with the limit
if lock.Limit != s.opts.Limit {
return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
lock.Limit, s.opts.Limit)
}
// Prune the dead holders
s.pruneDeadHolders(lock, pairs)
// Check if the lock is held
if len(lock.Holders) >= lock.Limit {
qOpts.WaitIndex = meta.LastIndex
goto WAIT
}
// Create a new lock with us as a holder
lock.Holders[s.lockSession] = true
newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
if err != nil {
return nil, err
}
// Attempt the acquisition
didSet, _, err := kv.CAS(newLock, nil)
if err != nil {
return nil, fmt.Errorf("failed to update lock: %v", err)
}
if !didSet {
// Update failed, could have been a race with another contender,
// retry the operation
goto WAIT
}
// Watch to ensure we maintain ownership of the slot
lockCh := make(chan struct{})
go s.monitorLock(s.lockSession, lockCh)
// Set that we own the lock
s.isHeld = true
// Acquired! All done
return lockCh, nil
}
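// A minimal usage sketch: allow at most three concurrent holders of the
// hypothetical "service/web/workers" prefix.
func exampleSemaphore(c *Client) error {
	sema, err := c.SemaphorePrefix("service/web/workers", 3)
	if err != nil {
		return err
	}
	lostCh, err := sema.Acquire(nil) // blocks until a slot is free
	if err != nil {
		return err
	}
	if lostCh == nil {
		return nil // attempt was aborted
	}
	defer sema.Release()
	// Do rate-limited work here; lostCh closes if the slot is lost.
	<-lostCh
	return fmt.Errorf("semaphore slot lost")
}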
// Release is used to voluntarily give up our semaphore slot. It is
// an error to call this if the semaphore has not been acquired.
func (s *Semaphore) Release() error {
// Hold the lock as we try to release
s.l.Lock()
defer s.l.Unlock()
// Ensure the lock is actually held
if !s.isHeld {
return ErrSemaphoreNotHeld
}
// Set that we no longer own the lock
s.isHeld = false
// Stop the session renew
if s.sessionRenew != nil {
defer func() {
close(s.sessionRenew)
s.sessionRenew = nil
}()
}
// Get and clear the lock session
lockSession := s.lockSession
s.lockSession = ""
// Remove ourselves as a lock holder
kv := s.c.KV()
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
READ:
pair, _, err := kv.Get(key, nil)
if err != nil {
return err
}
if pair == nil {
pair = &KVPair{}
}
lock, err := s.decodeLock(pair)
if err != nil {
return err
}
// Create a new lock without us as a holder
if _, ok := lock.Holders[lockSession]; ok {
delete(lock.Holders, lockSession)
newLock, err := s.encodeLock(lock, pair.ModifyIndex)
if err != nil {
return err
}
// Swap the locks
didSet, _, err := kv.CAS(newLock, nil)
if err != nil {
return fmt.Errorf("failed to update lock: %v", err)
}
if !didSet {
goto READ
}
}
// Destroy the contender entry
contenderKey := path.Join(s.opts.Prefix, lockSession)
if _, err := kv.Delete(contenderKey, nil); err != nil {
return err
}
return nil
}
// Destroy is used to clean up the semaphore entry. It is not necessary
// to invoke it, and it will fail if the semaphore is still in use.
func (s *Semaphore) Destroy() error {
// Hold the lock as we try to acquire
s.l.Lock()
defer s.l.Unlock()
// Check if we already hold the semaphore
if s.isHeld {
return ErrSemaphoreHeld
}
// List for the semaphore
kv := s.c.KV()
pairs, _, err := kv.List(s.opts.Prefix, nil)
if err != nil {
return fmt.Errorf("failed to read prefix: %v", err)
}
// Find the lock pair, bail if it doesn't exist
lockPair := s.findLock(pairs)
if lockPair.ModifyIndex == 0 {
return nil
}
if lockPair.Flags != SemaphoreFlagValue {
return ErrSemaphoreConflict
}
// Decode the lock
lock, err := s.decodeLock(lockPair)
if err != nil {
return err
}
// Prune the dead holders
s.pruneDeadHolders(lock, pairs)
// Check if there are any holders
if len(lock.Holders) > 0 {
return ErrSemaphoreInUse
}
// Attempt the delete
didRemove, _, err := kv.DeleteCAS(lockPair, nil)
if err != nil {
return fmt.Errorf("failed to remove semaphore: %v", err)
}
if !didRemove {
return ErrSemaphoreInUse
}
return nil
}
// createSession is used to create a new managed session
func (s *Semaphore) createSession() (string, error) {
session := s.c.Session()
se := &SessionEntry{
Name: s.opts.SessionName,
TTL: s.opts.SessionTTL,
Behavior: SessionBehaviorDelete,
}
id, _, err := session.Create(se, nil)
if err != nil {
return "", err
}
return id, nil
}
// contenderEntry returns a formatted KVPair for the contender
func (s *Semaphore) contenderEntry(session string) *KVPair {
return &KVPair{
Key: path.Join(s.opts.Prefix, session),
Value: s.opts.Value,
Session: session,
Flags: SemaphoreFlagValue,
}
}
// findLock is used to find the KV Pair which is used for coordination
func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
for _, pair := range pairs {
if pair.Key == key {
return pair
}
}
return &KVPair{Flags: SemaphoreFlagValue}
}
// decodeLock is used to decode a semaphoreLock from an
// entry in Consul
func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
// Handle if there is no lock
if pair == nil || pair.Value == nil {
return &semaphoreLock{
Limit: s.opts.Limit,
Holders: make(map[string]bool),
}, nil
}
l := &semaphoreLock{}
if err := json.Unmarshal(pair.Value, l); err != nil {
return nil, fmt.Errorf("lock decoding failed: %v", err)
}
return l, nil
}
// encodeLock is used to encode a semaphoreLock into a KVPair
// that can be PUT
func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
enc, err := json.Marshal(l)
if err != nil {
return nil, fmt.Errorf("lock encoding failed: %v", err)
}
pair := &KVPair{
Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
Value: enc,
Flags: SemaphoreFlagValue,
ModifyIndex: oldIndex,
}
return pair, nil
}
// pruneDeadHolders is used to remove all the dead lock holders
func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
// Gather all the live holders
alive := make(map[string]struct{}, len(pairs))
for _, pair := range pairs {
if pair.Session != "" {
alive[pair.Session] = struct{}{}
}
}
// Remove any holders that are dead
for holder := range lock.Holders {
if _, ok := alive[holder]; !ok {
delete(lock.Holders, holder)
}
}
}
// monitorLock is a long-running routine that monitors semaphore ownership.
// It closes stopCh if we lose our slot.
func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
defer close(stopCh)
kv := s.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := s.opts.MonitorRetries
RETRY:
pairs, meta, err := kv.List(s.opts.Prefix, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsRetryableError(err) {
time.Sleep(s.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
lockPair := s.findLock(pairs)
lock, err := s.decodeLock(lockPair)
if err != nil {
return
}
s.pruneDeadHolders(lock, pairs)
if _, ok := lock.Holders[session]; ok {
opts.WaitIndex = meta.LastIndex
goto WAIT
}
}


@ -1,224 +0,0 @@
package api
import (
"errors"
"fmt"
"time"
)
const (
// SessionBehaviorRelease is the default behavior and causes
// all associated locks to be released on session invalidation.
SessionBehaviorRelease = "release"
// SessionBehaviorDelete is new in Consul 0.5 and changes the
// behavior to delete all associated locks on session invalidation.
// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
SessionBehaviorDelete = "delete"
)
var ErrSessionExpired = errors.New("session expired")
// SessionEntry represents a session in Consul
type SessionEntry struct {
CreateIndex uint64
ID string
Name string
Node string
Checks []string
LockDelay time.Duration
Behavior string
TTL string
}
// Session can be used to query the Session endpoints
type Session struct {
c *Client
}
// Session returns a handle to the session endpoints
func (c *Client) Session() *Session {
return &Session{c}
}
// CreateNoChecks is like Create but is used specifically to create
// a session with no associated health checks.
func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
body := make(map[string]interface{})
body["Checks"] = []string{}
if se != nil {
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(body, q)
}
// Create makes a new session. A session entry can be provided to
// customize the session; it can also be nil to use the defaults.
func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
var obj interface{}
if se != nil {
body := make(map[string]interface{})
obj = body
if se.Name != "" {
body["Name"] = se.Name
}
if se.Node != "" {
body["Node"] = se.Node
}
if se.LockDelay != 0 {
body["LockDelay"] = durToMsec(se.LockDelay)
}
if len(se.Checks) > 0 {
body["Checks"] = se.Checks
}
if se.Behavior != "" {
body["Behavior"] = se.Behavior
}
if se.TTL != "" {
body["TTL"] = se.TTL
}
}
return s.create(obj, q)
}
func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
var out struct{ ID string }
wm, err := s.c.write("/v1/session/create", obj, &out, q)
if err != nil {
return "", nil, err
}
return out.ID, wm, nil
}
// Destroy invalidates a given session
func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
if err != nil {
return nil, err
}
return wm, nil
}
// Renew renews the TTL on a given session
func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
r.setWriteOptions(q)
rtt, resp, err := s.c.doRequest(r)
if err != nil {
return nil, nil, err
}
defer resp.Body.Close()
wm := &WriteMeta{RequestTime: rtt}
if resp.StatusCode == 404 {
return nil, wm, nil
} else if resp.StatusCode != 200 {
return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
}
var entries []*SessionEntry
if err := decodeBody(resp, &entries); err != nil {
return nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
if len(entries) > 0 {
return entries[0], wm, nil
}
return nil, wm, nil
}
// RenewPeriodic is used to periodically invoke Session.Renew on a
// session until a doneCh is closed. This is meant to be used in a long-running
// goroutine to ensure a session stays valid.
func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
ctx := q.Context()
ttl, err := time.ParseDuration(initialTTL)
if err != nil {
return err
}
waitDur := ttl / 2
lastRenewTime := time.Now()
var lastErr error
for {
if time.Since(lastRenewTime) > ttl {
return lastErr
}
select {
case <-time.After(waitDur):
entry, _, err := s.Renew(id, q)
if err != nil {
waitDur = time.Second
lastErr = err
continue
}
if entry == nil {
return ErrSessionExpired
}
// Handle the server updating the TTL
ttl, _ = time.ParseDuration(entry.TTL)
waitDur = ttl / 2
lastRenewTime = time.Now()
case <-doneCh:
// Attempt a session destroy
s.Destroy(id, q)
return nil
case <-ctx.Done():
// Bail immediately since attempting the destroy would
// use the canceled context in q, which would just bail.
return ctx.Err()
}
}
}
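A hedged sketch of how RenewPeriodic is typically driven from a long-running goroutine; the session name, TTL, and workload are illustrative assumptions:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a TTL session whose locks are deleted on invalidation.
	id, _, err := client.Session().Create(&api.SessionEntry{
		Name:     "my-service", // illustrative name
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the session alive until doneCh closes; RenewPeriodic renews at
	// roughly TTL/2 and destroys the session once doneCh is closed.
	doneCh := make(chan struct{})
	go func() {
		if err := client.Session().RenewPeriodic("15s", id, nil, doneCh); err != nil {
			log.Printf("session expired: %v", err)
		}
	}()

	time.Sleep(time.Minute) // illustrative workload
	close(doneCh)           // done: let RenewPeriodic destroy the session
}
```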
// Info looks up a single session
func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
if err != nil {
return nil, nil, err
}
if len(entries) > 0 {
return entries[0], qm, nil
}
return nil, qm, nil
}
// List gets sessions for a node
func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
if err != nil {
return nil, nil, err
}
return entries, qm, nil
}
// List gets all active sessions
func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
var entries []*SessionEntry
qm, err := s.c.query("/v1/session/list", &entries, q)
if err != nil {
return nil, nil, err
}
return entries, qm, nil
}


@ -1,47 +0,0 @@
package api
import (
"io"
)
// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
// Consul's internal state and restore snapshots for disaster recovery.
type Snapshot struct {
c *Client
}
// Snapshot returns a handle that exposes the snapshot endpoints.
func (c *Client) Snapshot() *Snapshot {
return &Snapshot{c}
}
// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
// data to save. If this doesn't return an error, then it's the responsibility
// of the caller to close it. Only a subset of the QueryOptions are supported:
// Datacenter, AllowStale, and Token.
func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
r := s.c.newRequest("GET", "/v1/snapshot")
r.setQueryOptions(q)
rtt, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, nil, err
}
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
return resp.Body, qm, nil
}
// Restore streams in an existing snapshot and attempts to restore it.
func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
r := s.c.newRequest("PUT", "/v1/snapshot")
r.body = in
r.setWriteOptions(q)
_, _, err := requireOK(s.c.doRequest(r))
if err != nil {
return err
}
return nil
}
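As a brief illustration of Save's contract (the caller must close the returned stream), here is a sketch that writes a snapshot to a local file; the file path is an illustrative assumption:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	rc, _, err := client.Snapshot().Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // the caller must close the snapshot stream

	f, err := os.Create("consul.snap") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}
```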


@ -1,43 +0,0 @@
package api
// Status can be used to query the Status endpoints
type Status struct {
c *Client
}
// Status returns a handle to the status endpoints
func (c *Client) Status() *Status {
return &Status{c}
}
// Leader is used to query for a known leader
func (s *Status) Leader() (string, error) {
r := s.c.newRequest("GET", "/v1/status/leader")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return "", err
}
defer resp.Body.Close()
var leader string
if err := decodeBody(resp, &leader); err != nil {
return "", err
}
return leader, nil
}
// Peers is used to query for the known raft peers
func (s *Status) Peers() ([]string, error) {
r := s.c.newRequest("GET", "/v1/status/peers")
_, resp, err := requireOK(s.c.doRequest(r))
if err != nil {
return nil, err
}
defer resp.Body.Close()
var peers []string
if err := decodeBody(resp, &peers); err != nil {
return nil, err
}
return peers, nil
}
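For illustration, a small sketch that exercises both status endpoints, assuming a reachable local agent:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	leader, err := client.Status().Leader()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader:", leader)

	peers, err := client.Status().Peers()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("raft peers:", peers)
}
```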


@ -1,230 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
)
// Txn is used to manipulate the Txn API
type Txn struct {
c *Client
}
// Txn returns a handle to the transaction endpoints
func (c *Client) Txn() *Txn {
return &Txn{c}
}
// TxnOp is the internal format we send to Consul. Currently K/V, node,
// service, and check operations are supported.
type TxnOp struct {
KV *KVTxnOp
Node *NodeTxnOp
Service *ServiceTxnOp
Check *CheckTxnOp
}
// TxnOps is a list of transaction operations.
type TxnOps []*TxnOp
// TxnResult is the internal format we receive from Consul.
type TxnResult struct {
KV *KVPair
Node *Node
Service *CatalogService
Check *HealthCheck
}
// TxnResults is a list of TxnResult objects.
type TxnResults []*TxnResult
// TxnError is used to return information about an operation in a transaction.
type TxnError struct {
OpIndex int
What string
}
// TxnErrors is a list of TxnError objects.
type TxnErrors []*TxnError
// TxnResponse is the internal format we receive from Consul.
type TxnResponse struct {
Results TxnResults
Errors TxnErrors
}
// KVOp constants give possible operations available in a transaction.
type KVOp string
const (
KVSet KVOp = "set"
KVDelete KVOp = "delete"
KVDeleteCAS KVOp = "delete-cas"
KVDeleteTree KVOp = "delete-tree"
KVCAS KVOp = "cas"
KVLock KVOp = "lock"
KVUnlock KVOp = "unlock"
KVGet KVOp = "get"
KVGetTree KVOp = "get-tree"
KVCheckSession KVOp = "check-session"
KVCheckIndex KVOp = "check-index"
KVCheckNotExists KVOp = "check-not-exists"
)
// KVTxnOp defines a single operation inside a transaction.
type KVTxnOp struct {
Verb KVOp
Key string
Value []byte
Flags uint64
Index uint64
Session string
}
// KVTxnOps defines a set of operations to be performed inside a single
// transaction.
type KVTxnOps []*KVTxnOp
// KVTxnResponse has the outcome of a transaction.
type KVTxnResponse struct {
Results []*KVPair
Errors TxnErrors
}
// NodeOp constants give possible operations available in a transaction.
type NodeOp string
const (
NodeGet NodeOp = "get"
NodeSet NodeOp = "set"
NodeCAS NodeOp = "cas"
NodeDelete NodeOp = "delete"
NodeDeleteCAS NodeOp = "delete-cas"
)
// NodeTxnOp defines a single operation inside a transaction.
type NodeTxnOp struct {
Verb NodeOp
Node Node
}
// ServiceOp constants give possible operations available in a transaction.
type ServiceOp string
const (
ServiceGet ServiceOp = "get"
ServiceSet ServiceOp = "set"
ServiceCAS ServiceOp = "cas"
ServiceDelete ServiceOp = "delete"
ServiceDeleteCAS ServiceOp = "delete-cas"
)
// ServiceTxnOp defines a single operation inside a transaction.
type ServiceTxnOp struct {
Verb ServiceOp
Node string
Service AgentService
}
// CheckOp constants give possible operations available in a transaction.
type CheckOp string
const (
CheckGet CheckOp = "get"
CheckSet CheckOp = "set"
CheckCAS CheckOp = "cas"
CheckDelete CheckOp = "delete"
CheckDeleteCAS CheckOp = "delete-cas"
)
// CheckTxnOp defines a single operation inside a transaction.
type CheckTxnOp struct {
Verb CheckOp
Check HealthCheck
}
// Txn is used to apply multiple Consul operations in a single, atomic transaction.
//
// Note that Go will perform the required base64 encoding on the values
// automatically because the type is a byte slice. Transactions are defined as a
// list of operations to perform, using the different fields in the TxnOp structure
// to define operations. If any operation fails, none of the changes are applied
// to the state store.
//
// Even though this is generally a write operation, we take a QueryOptions input
// and return a QueryMeta output. If the transaction contains only read ops, then
// Consul will fast-path it to a different endpoint internally which supports
// consistency controls, but not blocking. If there are write operations then
// the request will always be routed through raft and any consistency settings
// will be ignored.
//
// Here's an example:
//
// ops := TxnOps{
//     &TxnOp{
//         KV: &KVTxnOp{
//             Verb:    KVLock,
//             Key:     "test/lock",
//             Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
//             Value:   []byte("hello"),
//         },
//     },
//     &TxnOp{
//         KV: &KVTxnOp{
//             Verb: KVGet,
//             Key:  "another/key",
//         },
//     },
//     &TxnOp{
//         Check: &CheckTxnOp{
//             Verb: CheckSet,
//             Check: HealthCheck{
//                 Node:    "foo",
//                 CheckID: "redis:a",
//                 Name:    "Redis Health Check",
//                 Status:  "passing",
//             },
//         },
//     },
// }
// ok, response, _, err := client.Txn().Txn(ops, nil)
//
// If there is a problem making the transaction request then an error will be
// returned. Otherwise, the ok value will be true if the transaction succeeded
// or false if it was rolled back. The response is a structured return value which
// will have the outcome of the transaction. Its Results member will have entries
// for each operation. For KV operations, Deleted keys will have a nil entry in the
// results, and to save space, the Value of each key in the Results will be nil
// unless the operation is a KVGet. If the transaction was rolled back, the Errors
// member will have entries referencing the index of the operation that failed
// along with an error message.
func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
return t.c.txn(txn, q)
}
func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
r := c.newRequest("PUT", "/v1/txn")
r.setQueryOptions(q)
r.obj = txn
rtt, resp, err := c.doRequest(r)
if err != nil {
return false, nil, nil, err
}
defer resp.Body.Close()
qm := &QueryMeta{}
parseQueryMeta(resp, qm)
qm.RequestTime = rtt
if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
var txnResp TxnResponse
if err := decodeBody(resp, &txnResp); err != nil {
return false, nil, nil, err
}
return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
}
var buf bytes.Buffer
if _, err := io.Copy(&buf, resp.Body); err != nil {
return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
}
return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
}
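As a concrete sketch of the transaction API, the K/V-only wrapper KV().Txn elsewhere in this package can run a small atomic update; the keys and values here are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Atomically set one key while checking that another does not exist.
	ops := api.KVTxnOps{
		&api.KVTxnOp{Verb: api.KVSet, Key: "app/config", Value: []byte("v1")},
		&api.KVTxnOp{Verb: api.KVCheckNotExists, Key: "app/lock"},
	}

	// ok is false (and resp.Errors is populated) if the transaction rolled back.
	ok, resp, _, err := client.KV().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		for _, e := range resp.Errors {
			log.Printf("op %d failed: %s", e.OpIndex, e.What)
		}
	}
}
```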


@ -1,363 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.


@ -1,30 +0,0 @@
# cleanhttp
Functions for accessing "clean" Go http.Client values
-------------
The Go standard library contains a default `http.Client` called
`http.DefaultClient`. It is a common idiom in Go code to start with
`http.DefaultClient` and tweak it as necessary, and in fact, this is
encouraged; from the `http` package documentation:
> The Client's Transport typically has internal state (cached TCP connections),
so Clients should be reused instead of created as needed. Clients are safe for
concurrent use by multiple goroutines.
Unfortunately, this is a shared value, and it is not uncommon for libraries to
assume that they are free to modify it at will. With enough dependencies, it
can be very easy to encounter strange problems and race conditions due to
manipulation of this shared value across libraries and goroutines (clients are
safe for concurrent use, but writing values to the client struct itself is not
protected).
Making things worse is the fact that a bare `http.Client` will use a default
`http.Transport` called `http.DefaultTransport`, which is another global value
that behaves the same way. So it is not simply enough to replace
`http.DefaultClient` with `&http.Client{}`.
This repository provides some simple functions to get a "clean" `http.Client`
-- one that uses the same default values as the Go standard library, but
returns a client that does not share any state with other clients.
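A minimal sketch of the intended usage, assuming repeated calls to the same host; the URL is illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	// A pooled client reuses connections and is appropriate when the same
	// host(s) are contacted repeatedly; it shares no state with other clients.
	client := cleanhttp.DefaultPooledClient()

	resp, err := client.Get("https://example.com/health") // illustrative URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```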


@ -1,57 +0,0 @@
package cleanhttp
import (
"net"
"net/http"
"runtime"
"time"
)
// DefaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func DefaultTransport() *http.Transport {
transport := DefaultPooledTransport()
transport.DisableKeepAlives = true
transport.MaxIdleConnsPerHost = -1
return transport
}
// DefaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func DefaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
}
return transport
}
// DefaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func DefaultClient() *http.Client {
return &http.Client{
Transport: DefaultTransport(),
}
}
// DefaultPooledClient returns a new http.Client with similar default values to
// http.Client, but with a shared Transport. Do not use this function for
// transient clients as it can leak file descriptors over time. Only use this
// for clients that will be re-used for the same host(s).
func DefaultPooledClient() *http.Client {
return &http.Client{
Transport: DefaultPooledTransport(),
}
}


@ -1,20 +0,0 @@
// Package cleanhttp offers convenience utilities for acquiring "clean"
// http.Transport and http.Client structs.
//
// Values set on http.DefaultClient and http.DefaultTransport affect all
// callers. This can have detrimental effects, especially in TLS contexts,
// where client or root certificates set to talk to multiple endpoints can end
// up displacing each other, leading to hard-to-debug issues. This package
// provides non-shared http.Client and http.Transport structs to ensure that
// the configuration will not be overwritten by other parts of the application
// or dependencies.
//
// The DefaultClient and DefaultTransport functions disable idle connections
// and keepalives. Without ensuring that idle connections are closed before
// garbage collection, short-term clients/transports can leak file descriptors,
// eventually leading to "too many open files" errors. If you will be
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp


@ -1 +0,0 @@
module github.com/hashicorp/go-cleanhttp


@ -1,48 +0,0 @@
package cleanhttp
import (
"net/http"
"strings"
"unicode"
)
// HandlerInput provides input options to cleanhttp's handlers
type HandlerInput struct {
ErrStatus int
}
// PrintablePathCheckHandler is a middleware that ensures the request path
// contains only printable runes.
func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
// Nil-check on input to make it optional
if input == nil {
input = &HandlerInput{
ErrStatus: http.StatusBadRequest,
}
}
// Default to http.StatusBadRequest on error
if input.ErrStatus == 0 {
input.ErrStatus = http.StatusBadRequest
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r != nil {
// Check URL path for non-printable characters
idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
return !unicode.IsPrint(c)
})
if idx != -1 {
w.WriteHeader(input.ErrStatus)
return
}
if next != nil {
next.ServeHTTP(w, r)
}
}
return
})
}
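A short, hedged sketch of wiring the middleware into a server; the handler body and listen address are illustrative assumptions:

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Requests whose path contains non-printable runes get a 400 by default.
	srv := &http.Server{
		Addr:    ":8080", // illustrative address
		Handler: cleanhttp.PrintablePathCheckHandler(mux, nil),
	}
	log.Fatal(srv.ListenAndServe())
}
```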


@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof


@ -1,3 +0,0 @@
language: go
go:
- tip


@ -1,41 +0,0 @@
go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
=========
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.
As a radix tree, it provides the following:
* O(k) operations. In many cases, this can be faster than a hash table since
the hash function is an O(k) operation, and hash tables have very poor cache locality.
* Minimum / Maximum value lookups
* Ordered iteration
A tree supports using a transaction to batch multiple updates (insert, delete)
in a more efficient manner than performing each operation one at a time.
For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
Example
=======
Below is a simple example of usage
```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("foo"), 1)
r, _, _ = r.Insert([]byte("bar"), 2)
r, _, _ = r.Insert([]byte("foobar"), 2)
// Find the longest prefix match
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
if string(m) != "foo" {
panic("should be foo")
}
```
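The transaction batching mentioned above can be sketched as follows, continuing from the example (a hedged sketch using this package's Txn, Insert, Delete, and Commit):

```go
// Batch several updates in one transaction; nothing is visible until Commit
txn := r.Txn()
txn.Insert([]byte("baz"), 3)
txn.Delete([]byte("bar"))
r = txn.Commit()
```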

View file

@ -1,21 +0,0 @@
package iradix
import "sort"
type edges []edge
func (e edges) Len() int {
return len(e)
}
func (e edges) Less(i, j int) bool {
return e[i].label < e[j].label
}
func (e edges) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
func (e edges) Sort() {
sort.Sort(e)
}


@ -1,6 +0,0 @@
module github.com/hashicorp/go-immutable-radix
require (
github.com/hashicorp/go-uuid v1.0.0
github.com/hashicorp/golang-lru v0.5.0
)


@ -1,4 +0,0 @@
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=


@ -1,662 +0,0 @@
package iradix
import (
"bytes"
"strings"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// defaultModifiedCache is the default size of the modified node
// cache used per transaction. This is used to cache the updates
// to the nodes near the root, while the leaves do not need to be
// cached. This is important for very large transactions to prevent
// the modified cache from growing to be enormous. This is also used
// to set the max size of the mutation notify maps since those should
// also be bounded in a similar way.
defaultModifiedCache = 8192
)
// Tree implements an immutable radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over a standard
// hash map is prefix-based lookups and ordered iteration. The immutability
// means that it is safe to concurrently read from a Tree without any
// coordination.
type Tree struct {
root *Node
size int
}
// New returns an empty Tree
func New() *Tree {
t := &Tree{
root: &Node{
mutateCh: make(chan struct{}),
},
}
return t
}
// Len is used to return the number of elements in the tree
func (t *Tree) Len() int {
return t.size
}
// Txn is a transaction on the tree. This transaction is applied
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
// root is the modified root for the transaction.
root *Node
// snap is a snapshot of the root node for use if we have to run the
// slow notify algorithm.
snap *Node
// size tracks the size of the tree as it is modified during the
// transaction.
size int
// writable is a cache of writable nodes that have been created during
// the course of the transaction. This allows us to re-use the same
// nodes for further writes and avoid unnecessary copies of nodes that
// have never been exposed outside the transaction. This will only hold
// up to defaultModifiedCache number of entries.
writable *simplelru.LRU
// trackChannels is used to hold channels that need to be notified to
// signal mutation of the tree. This will only hold up to
// defaultModifiedCache number of entries, after which we will set the
// trackOverflow flag, which will cause us to use a more expensive
// algorithm to perform the notifications. Mutation tracking is only
// performed if trackMutate is true.
trackChannels map[chan struct{}]struct{}
trackOverflow bool
trackMutate bool
}
// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
txn := &Txn{
root: t.root,
snap: t.root,
size: t.size,
}
return txn
}
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
t.trackMutate = track
}
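// Illustrative watch flow (a sketch, not part of the original source;
// "tree" is assumed to be an existing *Tree holding a "key" entry):
//
//	ch, _, _ := tree.Root().GetWatch([]byte("key"))
//	txn := tree.Txn()
//	txn.TrackMutate(true)
//	txn.Insert([]byte("key"), "new")
//	tree = txn.Commit() // Commit calls Notify, closing tracked channels
//	<-ch                // unblocks: the watched entry was mutated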
// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
// In overflow, make sure we don't store any more objects.
if t.trackOverflow {
return
}
// If this would overflow the state we reject it and set the flag (since
// we aren't tracking everything that's required any longer).
if len(t.trackChannels) >= defaultModifiedCache {
// Mark that we are in the overflow state
t.trackOverflow = true
// Clear the map so that the channels can be garbage collected. It is
// safe to do this since we have already overflowed and will be using
// the slow notify algorithm.
t.trackChannels = nil
return
}
// Create the map on the fly when we need it.
if t.trackChannels == nil {
t.trackChannels = make(map[chan struct{}]struct{})
}
// Otherwise we are good to track it.
t.trackChannels[ch] = struct{}{}
}
// writeNode returns a node to be modified, if the current node has already been
// modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
// Ensure the writable set exists.
if t.writable == nil {
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
if err != nil {
panic(err)
}
t.writable = lru
}
// If this node has already been modified, we can continue to use it
// during this transaction. We know that we don't need to track it for
// a node update since the node is writable, but if this is for a leaf
// update we track it, in case the initial write to this node didn't
// update the leaf.
if _, ok := t.writable.Get(n); ok {
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
return n
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Copy the existing node. If you have set forLeafUpdate it will be
// safe to replace this leaf with another after you get your node for
// writing. You MUST replace it, because the channel associated with
// this leaf will be closed when this transaction is committed.
nc := &Node{
mutateCh: make(chan struct{}),
leaf: n.leaf,
}
if n.prefix != nil {
nc.prefix = make([]byte, len(n.prefix))
copy(nc.prefix, n.prefix)
}
if len(n.edges) != 0 {
nc.edges = make([]edge, len(n.edges))
copy(nc.edges, n.edges)
}
// Mark this node as writable.
t.writable.Add(nc, nil)
return nc
}
// trackChannelsAndCount visits every node in the subtree under n and adds
// their mutate channels to the transaction. It returns the number of leaf
// nodes visited.
func (t *Txn) trackChannelsAndCount(n *Node) int {
// Count only leaf nodes
leaves := 0
if n.leaf != nil {
leaves = 1
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Recurse on the children
for _, e := range n.edges {
leaves += t.trackChannelsAndCount(e.node)
}
return leaves
}
// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
// Mark the child node as being mutated since we are about to abandon
// it. We don't need to mark the leaf since we are retaining it if it
// is there.
e := n.edges[0]
child := e.node
if t.trackMutate {
t.trackChannel(child.mutateCh)
}
// Merge the nodes.
n.prefix = concat(n.prefix, child.prefix)
n.leaf = child.leaf
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
// Handle key exhaustion
if len(search) == 0 {
var oldVal interface{}
didUpdate := false
if n.isLeaf() {
oldVal = n.leaf.val
didUpdate = true
}
nc := t.writeNode(n, true)
nc.leaf = &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
return nc, oldVal, didUpdate
}
// Look for the edge
idx, child := n.getEdge(search[0])
// No edge, create one
if child == nil {
e := edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
},
prefix: search,
},
}
nc := t.writeNode(n, false)
nc.addEdge(e)
return nc, nil, false
}
// Determine longest prefix of the search key on match
commonPrefix := longestPrefix(search, child.prefix)
if commonPrefix == len(child.prefix) {
search = search[commonPrefix:]
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
if newChild != nil {
nc := t.writeNode(n, false)
nc.edges[idx].node = newChild
return nc, oldVal, didUpdate
}
return nil, oldVal, didUpdate
}
// Split the node
nc := t.writeNode(n, false)
splitNode := &Node{
mutateCh: make(chan struct{}),
prefix: search[:commonPrefix],
}
nc.replaceEdge(edge{
label: search[0],
node: splitNode,
})
// Restore the existing child node
modChild := t.writeNode(child, false)
splitNode.addEdge(edge{
label: modChild.prefix[commonPrefix],
node: modChild,
})
modChild.prefix = modChild.prefix[commonPrefix:]
// Create a new leaf node
leaf := &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
// If the new key is a subset, add it to this node
search = search[commonPrefix:]
if len(search) == 0 {
splitNode.leaf = leaf
return nc, nil, false
}
// Create a new edge for the node
splitNode.addEdge(edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: leaf,
prefix: search,
},
})
return nc, nil, false
}
// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
return nil, nil
}
// Copy the pointer in case we are in a transaction that already
// modified this node since the node will be reused. Any changes
// made to the node will not affect returning the original leaf
// value.
oldLeaf := n.leaf
// Remove the leaf node
nc := t.writeNode(n, true)
nc.leaf = nil
// Check if this node should be merged
if n != t.root && len(nc.edges) == 1 {
t.mergeChild(nc)
}
return nc, oldLeaf
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
if child == nil || !bytes.HasPrefix(search, child.prefix) {
return nil, nil
}
// Consume the search prefix
search = search[len(child.prefix):]
newChild, leaf := t.delete(n, child, search)
if newChild == nil {
return nil, nil
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, leaf
}
// deletePrefix does a recursive deletion of the subtree under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
// Check for key exhaustion
if len(search) == 0 {
nc := t.writeNode(n, true)
if n.isLeaf() {
nc.leaf = nil
}
nc.edges = nil
return nc, t.trackChannelsAndCount(n)
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
return nil, 0
}
// Consume the search prefix
if len(child.prefix) > len(search) {
search = []byte("")
} else {
search = search[len(child.prefix):]
}
newChild, numDeletions := t.deletePrefix(n, child, search)
if newChild == nil {
return nil, 0
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, numDeletions
}
// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
if newRoot != nil {
t.root = newRoot
}
if !didUpdate {
t.size++
}
return oldVal, didUpdate
}
// Delete is used to delete a given key. Returns the old value if any,
// and a bool indicating if the key was set.
func (t *Txn) Delete(k []byte) (interface{}, bool) {
newRoot, leaf := t.delete(nil, t.root, k)
if newRoot != nil {
t.root = newRoot
}
if leaf != nil {
t.size--
return leaf.val, true
}
return nil, false
}
// DeletePrefix is used to delete an entire subtree that matches the prefix.
// This will delete all nodes under that prefix.
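// For example (illustrative): on a tree holding "foo", "foobar" and "bar",
// txn.DeletePrefix([]byte("foo")) removes the first two keys and returns true.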
func (t *Txn) DeletePrefix(prefix []byte) bool {
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
if newRoot != nil {
t.root = newRoot
t.size = t.size - numDeletions
return true
}
return false
}
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
func (t *Txn) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Txn) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
return t.root.GetWatch(k)
}
// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
nt := t.CommitOnly()
if t.trackMutate {
t.Notify()
}
return nt
}
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
nt := &Tree{t.root, t.size}
t.writable = nil
return nt
}
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
snapIter := t.snap.rawIterator()
rootIter := t.root.rawIterator()
for snapIter.Front() != nil || rootIter.Front() != nil {
// If we've exhausted the nodes in the old snapshot, we know
// there's nothing remaining to notify.
if snapIter.Front() == nil {
return
}
snapElem := snapIter.Front()
// If we've exhausted the nodes in the new root, we know we need
// to invalidate everything that remains in the old snapshot. We
// know from the loop condition there's something in the old
// snapshot.
if rootIter.Front() == nil {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// Do one string compare so we can check the various conditions
// below without repeating the compare.
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
// If the snapshot is behind the root, then we must have deleted
// this node during the transaction.
if cmp < 0 {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// If the snapshot is ahead of the root, then we must have added
// this node during the transaction.
if cmp > 0 {
rootIter.Next()
continue
}
// If we have the same path, then we need to see if we mutated a
// node and possibly the leaf.
rootElem := rootIter.Front()
if snapElem != rootElem {
close(snapElem.mutateCh)
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
close(snapElem.leaf.mutateCh)
}
}
snapIter.Next()
rootIter.Next()
}
}
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
if !t.trackMutate {
return
}
// If we've overflowed the tracking state we can't use it in any way and
// need to do a full tree compare.
if t.trackOverflow {
t.slowNotify()
} else {
for ch := range t.trackChannels {
close(ch)
}
}
// Clean up the tracking state so that a re-notify is safe (will trigger
// the else clause above which will be a no-op).
t.trackChannels = nil
t.trackOverflow = false
}
// Insert is used to add or update a given key. The return provides
// the new tree, previous value and a bool indicating if any was set.
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Insert(k, v)
return txn.Commit(), old, ok
}
// Delete is used to delete a given key. Returns the new tree,
// old value if any, and a bool indicating if the key was set.
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Delete(k)
return txn.Commit(), old, ok
}
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
txn := t.Txn()
ok := txn.DeletePrefix(k)
return txn.Commit(), ok
}
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// longestPrefix finds the length of the shared prefix
// of two byte slices
func longestPrefix(k1, k2 []byte) int {
max := len(k1)
if l := len(k2); l < max {
max = l
}
var i int
for i = 0; i < max; i++ {
if k1[i] != k2[i] {
break
}
}
return i
}
// concat concatenates two byte slices, returning a new third copy
func concat(a, b []byte) []byte {
c := make([]byte, len(a)+len(b))
copy(c, a)
copy(c[len(a):], b)
return c
}

View file

@@ -1,91 +0,0 @@
package iradix
import "bytes"
// Iterator is used to iterate over a set of nodes
// in pre-order
type Iterator struct {
node *Node
stack []edges
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
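// For example (illustrative): after it.SeekPrefix([]byte("foo")), successive
// it.Next() calls yield "foo", "foobar", ... in lexicographic order.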
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
// Wipe the stack
i.stack = nil
n := i.node
watch = n.mutateCh
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
i.node = n
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
i.node = nil
return
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
i.node = n
return
} else {
i.node = nil
return
}
}
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if i.stack == nil && i.node != nil {
i.stack = []edges{
edges{
edge{node: i.node},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack
n := len(i.stack)
last := i.stack[n-1]
elem := last[0].node
// Update the stack
if len(last) > 1 {
i.stack[n-1] = last[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier
if len(elem.edges) > 0 {
i.stack = append(i.stack, elem.edges)
}
// Return the leaf values if any
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
}
return nil, nil, false
}

View file

@@ -1,292 +0,0 @@
package iradix
import (
"bytes"
"sort"
)
// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(k []byte, v interface{}) bool
// leafNode is used to represent a value
type leafNode struct {
mutateCh chan struct{}
key []byte
val interface{}
}
// edge is used to represent an edge node
type edge struct {
label byte
node *Node
}
// Node is an immutable node in the radix tree
type Node struct {
// mutateCh is closed if this node is modified
mutateCh chan struct{}
// leaf is used to store possible leaf
leaf *leafNode
// prefix is the common prefix we ignore
prefix []byte
// Edges should be stored in-order for iteration.
// We avoid a fully materialized slice to save memory,
// since in most cases we expect to be sparse
edges edges
}
func (n *Node) isLeaf() bool {
return n.leaf != nil
}
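// addEdge inserts e into the node's edge list, keeping the slice sorted by label.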
func (n *Node) addEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
n.edges = append(n.edges, e)
if idx != num {
copy(n.edges[idx+1:], n.edges[idx:num])
n.edges[idx] = e
}
}
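// replaceEdge replaces the node of an existing edge with the same label;
// it panics if no such edge exists.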
func (n *Node) replaceEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
if idx < num && n.edges[idx].label == e.label {
n.edges[idx].node = e.node
return
}
panic("replacing missing edge")
}
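// getEdge binary-searches the edge list for label, returning its index and
// node, or (-1, nil) if absent.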
func (n *Node) getEdge(label byte) (int, *Node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
return idx, n.edges[idx].node
}
return -1, nil
}
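// delEdge removes the edge with the given label, if present.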
func (n *Node) delEdge(label byte) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
copy(n.edges[idx:], n.edges[idx+1:])
n.edges[len(n.edges)-1] = edge{}
n.edges = n.edges[:len(n.edges)-1]
}
}
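// GetWatch looks up k, returning the closest watch channel on the path,
// the value, and whether the key was found.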
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
search := k
watch := n.mutateCh
for {
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.mutateCh, n.leaf.val, true
}
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
return watch, nil, false
}
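// Get looks up k, returning the value and whether it was found.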
func (n *Node) Get(k []byte) (interface{}, bool) {
_, val, ok := n.GetWatch(k)
return val, ok
}
// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
var last *leafNode
search := k
for {
// Look for a leaf node
if n.isLeaf() {
last = n.leaf
}
// Check for key exhaustion
if len(search) == 0 {
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
if last != nil {
return last.key, last.val, true
}
return nil, nil, false
}
// Minimum is used to return the minimum value in the tree
func (n *Node) Minimum() ([]byte, interface{}, bool) {
for {
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
}
if len(n.edges) > 0 {
n = n.edges[0].node
} else {
break
}
}
return nil, nil, false
}
// Maximum is used to return the maximum value in the tree
func (n *Node) Maximum() ([]byte, interface{}, bool) {
for {
if num := len(n.edges); num > 0 {
n = n.edges[num-1].node
continue
}
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
} else {
break
}
}
return nil, nil, false
}
// Iterator is used to return an iterator at
// the given node to walk the tree
func (n *Node) Iterator() *Iterator {
return &Iterator{node: n}
}
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
iter := &rawIterator{node: n}
iter.Next()
return iter
}
// Walk is used to walk the tree
func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn)
}
// WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
recursiveWalk(n, fn)
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
// Child may be under our search prefix
recursiveWalk(n, fn)
return
} else {
break
}
}
}
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
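// For example (illustrative): with keys "f", "fo" and "foo" in the tree,
// n.WalkPath([]byte("foo"), fn) visits "f", "fo", then "foo".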
func (n *Node) WalkPath(path []byte, fn WalkFn) {
search := path
for {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return
}
// Check for key exhaustion
if len(search) == 0 {
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
return
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
}
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children
for _, e := range n.edges {
if recursiveWalk(e.node, fn) {
return true
}
}
return false
}

View file

@@ -1,78 +0,0 @@
package iradix
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
// node is the starting node in the tree for the iterator.
node *Node
// stack keeps track of edges in the frontier.
stack []rawStackEntry
// pos is the current position of the iterator.
pos *Node
// path is the effective path of the current iterator position,
// regardless of whether the current node is a leaf.
path string
}
// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
path string
edges edges
}
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
return i.pos
}
// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
return i.path
}
// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
// Initialize our stack if needed.
if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{
rawStackEntry{
edges: edges{
edge{node: i.node},
},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack.
n := len(i.stack)
last := i.stack[n-1]
elem := last.edges[0].node
// Update the stack.
if len(last.edges) > 1 {
i.stack[n-1].edges = last.edges[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier.
if len(elem.edges) > 0 {
path := last.path + string(elem.prefix)
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
}
i.pos = elem
i.path = last.path + string(elem.prefix)
return
}
i.pos = nil
i.path = ""
}

Some files were not shown because too many files have changed in this diff.