mirror of https://github.com/minio/minio.git
Migrate to go1.12 to simplify our cmd/http package (#7302)
Simplify the cmd/http package by removing the custom plain-text vs. TLS connection detection, which becomes possible after migrating to Go 1.12 and raising the minimum supported Go version to 1.12. Also remove all vendored dependencies, since they are no longer needed.
This commit is contained in:
parent
4c23e6fa55
commit
313a3a286a
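
The heart of the simplification shows up in the cmd/http diff further down: the bufio-backed `BufConn`, which peeked at the first byte of every accepted connection to tell plain-text HTTP from TLS, is replaced by a thin `DeadlineConn` that only refreshes read/write deadlines and leaves the rest to the Go 1.12 standard library. The sketch below illustrates that wrapper pattern in isolation; it is a minimal, assumption-laden example, not MinIO's actual type, which also embeds `QuirkConn` and reports bytes read/written through callbacks.

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// deadlineConn is a minimal sketch (not MinIO's exact type) of the pattern:
// wrap net.Conn directly, with no bufio layer and no plain-text peeking, and
// refresh the read/write deadlines before every I/O call.
type deadlineConn struct {
	net.Conn
	readTimeout  time.Duration
	writeTimeout time.Duration
}

// Read refreshes the read deadline, if configured, then delegates to the
// underlying connection.
func (c *deadlineConn) Read(b []byte) (int, error) {
	if c.readTimeout != 0 {
		c.Conn.SetReadDeadline(time.Now().UTC().Add(c.readTimeout))
	}
	return c.Conn.Read(b)
}

// Write refreshes the write deadline, if configured, then delegates to the
// underlying connection.
func (c *deadlineConn) Write(b []byte) (int, error) {
	if c.writeTimeout != 0 {
		c.Conn.SetWriteDeadline(time.Now().UTC().Add(c.writeTimeout))
	}
	return c.Conn.Write(b)
}

func main() {
	// net.Pipe gives an in-memory connection pair, enough to exercise the wrapper.
	client, server := net.Pipe()
	wrapped := &deadlineConn{Conn: server, readTimeout: time.Second, writeTimeout: time.Second}

	go func() {
		client.Write([]byte("hello"))
		client.Close()
	}()

	buf := make([]byte, 5)
	n, err := wrapped.Read(buf)
	fmt.Println(n, err, string(buf[:n]))
	wrapped.Close()
}
```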
@@ -18,7 +18,7 @@ matrix:
       - ARCH=x86_64
       - CGO_ENABLED=0
       - GO111MODULE=on
-    go: 1.11.4
+    go: 1.12.1
     script:
       - make
       - diff -au <(gofmt -s -d cmd) <(printf "")
@@ -33,7 +33,8 @@ matrix:
     env:
       - ARCH=x86_64
       - CGO_ENABLED=0
-    go: 1.11.4
+      - GO111MODULE=on
+    go: 1.12.1
     script:
       - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
       - for d in $(go list ./... | grep -v browser); do CGO_ENABLED=1 go test -v -race --timeout 20m "$d"; done
@@ -58,14 +58,12 @@ Pull requests can be created via GitHub. Refer to [this document](https://help.g

 ## FAQs
 ### How does ``Minio`` manages dependencies?
-``Minio`` manages its dependencies using [govendor](https://github.com/kardianos/govendor). To add a dependency
-- Run `go get foo/bar`
-- Edit your code to import foo/bar
-- Run `make pkg-add PKG=foo/bar` from top-level directory
+``Minio`` uses `go mod` to manage its dependencies.
+- Run `go get foo/bar` in the source folder to add the dependency to `go.mod` file.

 To remove a dependency
-- Edit your code to not import foo/bar
-- Run `make pkg-remove PKG=foo/bar` from top-level directory
+- Edit your code and remove the import reference.
+- Run `go mod tidy` in the source folder to remove dependency from `go.mod` file.

 ### What are the coding guidelines for Minio?
 ``Minio`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.minio.io).
@@ -1,4 +1,4 @@
-FROM golang:1.11.4-alpine3.7
+FROM golang:1.12-alpine3.7

 LABEL maintainer="Minio Inc <dev@minio.io>"

@@ -1,4 +1,4 @@
-FROM golang:1.11.4-alpine3.7
+FROM golang:1.12-alpine3.7

 ENV GOPATH /go
 ENV CGO_ENABLED 0
@@ -1,7 +1,7 @@
 #-------------------------------------------------------------
 # Stage 1: Build and Unit tests
 #-------------------------------------------------------------
-FROM golang:1.11.4
+FROM golang:1.12

 COPY . /go/src/github.com/minio/minio
 WORKDIR /go/src/github.com/minio/minio
@@ -11,10 +11,6 @@ ENV GO111MODULE=on

 RUN git config --global http.cookiefile /gitcookie/.gitcookie

-RUN make
-RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
-RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
-
 RUN apt-get update && \
     apt-get -y install sudo
 RUN touch /etc/sudoers
@@ -27,18 +23,19 @@ RUN echo 'Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr
 RUN mkdir -p /home/ci/.cache

 RUN groupadd -g 999 ci && \
-    useradd -r -u 999 -g ci ci
-
-RUN chown -R ci:ci /go
-RUN chown -R ci:ci /home/ci
-RUN chmod -R a+rw /go
+    useradd -r -u 999 -g ci ci && \
+    chown -R ci:ci /go /home/ci && \
+    chmod -R a+rw /go

 USER ci

 # -- tests --
+RUN make
+RUN bash -c 'diff -au <(gofmt -s -d cmd) <(printf "")'
+RUN bash -c 'diff -au <(gofmt -s -d pkg) <(printf "")'
 RUN for d in $(go list ./... | grep -v browser); do go test -v -race --timeout 15m "$d"; done
 RUN make verifiers
 RUN make crosscompile
 RUN make coverage
 RUN make verify

 #-------------------------------------------------------------
@@ -55,19 +52,21 @@ RUN yarn test
 #-------------------------------------------------------------
 # Stage 3: Run Gateway Tests
 #-------------------------------------------------------------

 FROM ubuntu:16.04

-COPY --from=0 /go/src/github.com/minio/minio/minio ./minio
-COPY buildscripts/gateway-tests.sh ./gateway-tests.sh
-RUN apt-get update && apt-get install -y git wget jq curl dnsmasq
+COPY --from=0 /go/src/github.com/minio/minio/minio /usr/bin/minio
+COPY buildscripts/gateway-tests.sh /usr/bin/gateway-tests.sh

 ENV DEBIAN_FRONTEND noninteractive
 ENV LANG C.UTF-8
 ENV GOROOT /usr/local/go

-RUN mkdir -p /go
-ENV GOPATH /go
+ENV GOPATH /usr/local
 ENV PATH $GOPATH/bin:$GOROOT/bin:$PATH

-RUN ./gateway-tests.sh
+RUN apt-get --yes update && apt-get --yes upgrade && apt-get --yes --quiet install wget jq curl git dnsmasq && \
+    git clone https://github.com/minio/mint.git /mint && \
+    cd /mint && /mint/release.sh
+
+WORKDIR /mint
+
+RUN /usr/bin/gateway-tests.sh
Makefile (32 changes)

@@ -2,6 +2,7 @@ PWD := $(shell pwd)
 GOPATH := $(shell go env GOPATH)
 LDFLAGS := $(shell go run buildscripts/gen-ldflags.go)

+TAG ?= $(USER)
 BUILD_LDFLAGS := '$(LDFLAGS)'

 all: build
@@ -9,8 +10,6 @@ all: build
 checks:
 	@echo "Checking dependencies"
 	@(env bash $(PWD)/buildscripts/checkdeps.sh)
-	@echo "Checking for project in GOPATH"
-	@(env bash $(PWD)/buildscripts/checkgopath.sh)

 getdeps:
 	@mkdir -p ${GOPATH}/bin
@@ -25,12 +24,12 @@ verifiers: getdeps vet fmt lint staticcheck spelling

 vet:
 	@echo "Running $@"
-	@go vet github.com/minio/minio/...
+	@GO111MODULE=on go vet github.com/minio/minio/...

 fmt:
 	@echo "Running $@"
-	@gofmt -d cmd/
-	@gofmt -d pkg/
+	@GO111MODULE=on gofmt -d cmd/
+	@GO111MODULE=on gofmt -d pkg/

 lint:
 	@echo "Running $@"
@@ -53,7 +52,7 @@ spelling:
 check: test
 test: verifiers build
 	@echo "Running unit tests"
-	@CGO_ENABLED=0 go test -tags kqueue ./...
+	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

 verify: build
 	@echo "Verifying build"
@@ -66,31 +65,16 @@ coverage: build
 # Builds minio locally.
 build: checks
 	@echo "Building minio binary to './minio'"
-	@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio
-	@GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags="-s -w" -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go
+	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null

 docker: build
 	@docker build -t $(TAG) . -f Dockerfile.dev

-pkg-add:
-	@echo "Adding new package $(PKG)"
-	@${GOPATH}/bin/govendor add $(PKG)
-
-pkg-update:
-	@echo "Updating new package $(PKG)"
-	@${GOPATH}/bin/govendor update $(PKG)
-
-pkg-remove:
-	@echo "Remove new package $(PKG)"
-	@${GOPATH}/bin/govendor remove $(PKG)
-
-pkg-list:
-	@$(GOPATH)/bin/govendor list
-
 # Builds minio and installs it to $GOPATH/bin.
 install: build
 	@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
-	@mkdir -p $(GOPATH)/bin && cp $(PWD)/minio $(GOPATH)/bin/minio
+	@mkdir -p $(GOPATH)/bin && cp -uf $(PWD)/minio $(GOPATH)/bin/minio
 	@echo "Installation successful. To learn more, try \"minio --help\"."

 clean:
@@ -21,7 +21,7 @@ _init() {

 ## Minimum required versions for build dependencies
 GIT_VERSION="1.0"
-GO_VERSION="1.10.1"
+GO_VERSION="1.12"
 OSX_VERSION="10.8"
 KNAME=$(uname -s)
 ARCH=$(uname -m)
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-#
-# Minio Cloud Storage, (C) 2015-2018 Minio, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-main() {
-    gopath=$(go env GOPATH)
-    IFS=':' read -r -a paths <<< "$gopath"
-    for path in "${paths[@]}"; do
-        minio_path="$path/src/github.com/minio/minio"
-        if [ -d "$minio_path" ]; then
-            if [ "$minio_path" -ef "$PWD" ]; then
-                exit 0
-            fi
-        fi
-    done
-
-    echo "ERROR"
-    echo "Project not found in ${gopath}."
-    echo "Follow instructions at https://github.com/minio/minio/blob/master/CONTRIBUTING.md#setup-your-minio-github-repository"
-    exit 1
-}
-
-main
@@ -11,7 +11,7 @@ function _init() {
     SUPPORTED_OSARCH="linux/ppc64le linux/arm64 linux/s390x darwin/amd64 freebsd/amd64"
 }

-function _build_and_sign() {
+function _build() {
     local osarch=$1
     IFS=/ read -r -a arr <<<"$osarch"
     os="${arr[0]}"
@@ -22,13 +22,14 @@ function _build_and_sign() {
     # Go build to build the binary.
     export GOOS=$os
     export GOARCH=$arch
+    export GO111MODULE=on
     go build -tags kqueue -o /dev/null
 }

 function main() {
     echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}"
     for each_osarch in ${SUPPORTED_OSARCH}; do
-        _build_and_sign "${each_osarch}"
+        _build "${each_osarch}"
     done
 }

|
@ -19,41 +19,50 @@ set -e
|
|||
set -E
|
||||
set -o pipefail
|
||||
|
||||
export SERVER_ENDPOINT=127.0.0.1:24240
|
||||
export ENABLE_HTTPS=0
|
||||
export ACCESS_KEY=minio
|
||||
export SECRET_KEY=minio123
|
||||
export MINIO_ACCESS_KEY=minio
|
||||
export MINIO_SECRET_KEY=minio123
|
||||
export AWS_ACCESS_KEY_ID=minio
|
||||
export AWS_SECRET_ACCESS_KEY=minio123
|
||||
function start_minio_server()
|
||||
{
|
||||
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
|
||||
minio --quiet --json server data --address 127.0.0.1:24242 > server.log 2>&1 &
|
||||
server_pid=$!
|
||||
sleep 3
|
||||
|
||||
trap "cat server.log;cat gateway.log" SIGHUP SIGINT SIGTERM
|
||||
echo "$server_pid"
|
||||
}
|
||||
|
||||
./minio --quiet --json server data --address 127.0.0.1:24242 > server.log &
|
||||
sleep 3
|
||||
./minio --quiet --json gateway s3 http://127.0.0.1:24242 --address 127.0.0.1:24240 > gateway.log &
|
||||
sleep 3
|
||||
function start_minio_gateway_s3()
|
||||
{
|
||||
MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
|
||||
minio --quiet --json gateway s3 http://127.0.0.1:24242 \
|
||||
--address 127.0.0.1:24240 > gateway.log 2>&1 &
|
||||
gw_pid=$!
|
||||
sleep 3
|
||||
|
||||
mkdir -p /mint
|
||||
git clone https://github.com/minio/mint /mint
|
||||
cd /mint
|
||||
echo "$gw_pid"
|
||||
}
|
||||
|
||||
export MINT_ROOT_DIR=${MINT_ROOT_DIR:-/mint}
|
||||
export MINT_RUN_CORE_DIR="$MINT_ROOT_DIR/run/core"
|
||||
export MINT_RUN_SECURITY_DIR="$MINT_ROOT_DIR/run/security"
|
||||
export MINT_MODE="full"
|
||||
export WGET="wget --quiet --no-check-certificate"
|
||||
function main()
|
||||
{
|
||||
sr_pid="$(start_minio_server)"
|
||||
gw_pid="$(start_minio_gateway_s3)"
|
||||
|
||||
./create-data-files.sh
|
||||
./preinstall.sh
|
||||
SERVER_ENDPOINT=127.0.0.1:24240 ENABLE_HTTPS=0 ACCESS_KEY=minio \
|
||||
SECRET_KEY=minio123 MINT_MODE="full" /mint/entrypoint.sh aws-sdk-go \
|
||||
aws-sdk-java aws-sdk-php aws-sdk-ruby awscli healthcheck minio-dotnet \
|
||||
minio-go minio-java minio-js minio-py
|
||||
rv=$?
|
||||
|
||||
# install mint app packages
|
||||
for pkg in "build"/*/install.sh; do
|
||||
echo "Running $pkg"
|
||||
$pkg
|
||||
done
|
||||
kill "$sr_pid"
|
||||
kill "$gw_pid"
|
||||
sleep 3
|
||||
|
||||
./postinstall.sh
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
echo "=========== Gateway ==========="
|
||||
cat "gateway.log"
|
||||
echo "=========== Server ==========="
|
||||
cat "server.log"
|
||||
fi
|
||||
|
||||
/mint/entrypoint.sh || cat server.log gateway.log fail
|
||||
rm -f gateway.log server.log
|
||||
}
|
||||
|
||||
main "$@"
|
||||
|
|
@@ -2,5 +2,4 @@

 set -e

-
-CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
+GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
|
@ -32,6 +32,7 @@ export SERVER_ENDPOINT="127.0.0.1:9000"
|
|||
export ACCESS_KEY="minio"
|
||||
export SECRET_KEY="minio123"
|
||||
export ENABLE_HTTPS=0
|
||||
export GO111MODULE=on
|
||||
|
||||
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
|
||||
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )
|
||||
|
@ -146,16 +147,6 @@ function start_minio_dist_erasure()
|
|||
echo "${minio_pids[@]}"
|
||||
}
|
||||
|
||||
function start_minio_gateway_s3()
|
||||
{
|
||||
MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG \
|
||||
"${MINIO[@]}" gateway s3 https://play.minio.io:9000 >"$WORK_DIR/minio-gateway-s3.log" 2>&1 &
|
||||
minio_pid=$!
|
||||
sleep 3
|
||||
|
||||
echo "$minio_pid"
|
||||
}
|
||||
|
||||
function run_test_fs()
|
||||
{
|
||||
minio_pid="$(start_minio_fs)"
|
||||
|
@ -291,25 +282,9 @@ function run_test_dist_erasure()
|
|||
return "$rv"
|
||||
}
|
||||
|
||||
function run_test_gateway_s3()
|
||||
function purge()
|
||||
{
|
||||
minio_pid="$(start_minio_gateway_s3)"
|
||||
|
||||
export SERVER_ENDPOINT="127.0.0.1:9000"
|
||||
export ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
|
||||
export SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
|
||||
(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
|
||||
rv=$?
|
||||
|
||||
kill "$minio_pid"
|
||||
sleep 3
|
||||
|
||||
if [ "$rv" -ne 0 ]; then
|
||||
cat "$WORK_DIR/minio-gateway-s3.log"
|
||||
fi
|
||||
rm -f "$WORK_DIR/minio-gateway-s3.log"
|
||||
|
||||
return "$rv"
|
||||
rm -rf "$1"
|
||||
}
|
||||
|
||||
function __init__()
|
||||
|
@ -319,12 +294,17 @@ function __init__()
|
|||
mkdir -p "$MINIO_CONFIG_DIR"
|
||||
mkdir -p "$MINT_DATA_DIR"
|
||||
|
||||
git clone https://github.com/minio/mc
|
||||
cd mc
|
||||
go build
|
||||
/bin/cp -a ./mc "$WORK_DIR/mc"
|
||||
MC_BUILD_DIR="mc-$RANDOM"
|
||||
if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then
|
||||
echo "failed to download https://github.com/minio/mc"
|
||||
purge "${MC_BUILD_DIR}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
chmod a+x "$WORK_DIR/mc"
|
||||
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
|
||||
|
||||
# remove mc source.
|
||||
purge "${MC_BUILD_DIR}"
|
||||
|
||||
shred -n 1 -s 1M - 1>"$FILE_1_MB" 2>/dev/null
|
||||
shred -n 1 -s 65M - 1>"$FILE_65_MB" 2>/dev/null
|
||||
|
@ -346,56 +326,49 @@ function main()
|
|||
echo "Testing in FS setup"
|
||||
if ! run_test_fs; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Erasure setup"
|
||||
if ! run_test_erasure; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup"
|
||||
if ! run_test_dist_erasure; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Erasure setup as sets"
|
||||
if ! run_test_erasure_sets; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup as sets"
|
||||
if ! run_test_dist_erasure_sets; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Distributed Erasure setup as sets with ipv6"
|
||||
if ! run_test_dist_erasure_sets_ipv6; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing in Gateway S3 setup"
|
||||
if ! run_test_gateway_s3; then
|
||||
echo "FAILED"
|
||||
rm -fr "$WORK_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
}
|
||||
|
||||
( __init__ "$@" && main "$@" )
|
||||
rv=$?
|
||||
rm -fr "$WORK_DIR"
|
||||
purge "$WORK_DIR"
|
||||
exit "$rv"
|
||||
|
|
|
@@ -17,15 +17,13 @@
 package http

 import (
-	"bufio"
 	"net"
 	"time"
 )

-// BufConn - is a generic stream-oriented network connection supporting buffered reader and read/write timeout.
-type BufConn struct {
+// DeadlineConn - is a generic stream-oriented network connection supporting buffered reader and read/write timeout.
+type DeadlineConn struct {
 	QuirkConn
-	bufReader           *bufio.Reader // buffered reader wraps reader in net.Conn.
 	readTimeout         time.Duration // sets the read timeout in the connection.
 	writeTimeout        time.Duration // sets the write timeout in the connection.
 	updateBytesReadFunc func(int)     // function to be called to update bytes read.
@@ -33,42 +31,22 @@ type BufConn struct {
 }

 // Sets read timeout
-func (c *BufConn) setReadTimeout() {
+func (c *DeadlineConn) setReadTimeout() {
 	if c.readTimeout != 0 && c.canSetReadDeadline() {
 		c.SetReadDeadline(time.Now().UTC().Add(c.readTimeout))
 	}
 }

-func (c *BufConn) setWriteTimeout() {
+func (c *DeadlineConn) setWriteTimeout() {
 	if c.writeTimeout != 0 {
 		c.SetWriteDeadline(time.Now().UTC().Add(c.writeTimeout))
 	}
 }

-// RemoveTimeout - removes all configured read and write
-// timeouts. Used by callers which control net.Conn behavior
-// themselves.
-func (c *BufConn) RemoveTimeout() {
-	c.readTimeout = 0
-	c.writeTimeout = 0
-	// Unset read/write timeouts, since we use **bufio** it is not
-	// guaranteed that the underlying Peek/Read operation in-fact
-	// indeed performed a Read() operation on the network. With
-	// that in mind we need to unset any timeouts currently set to
-	// avoid any pre-mature timeouts.
-	c.SetDeadline(time.Time{})
-}
-
-// Peek - returns the next n bytes without advancing the reader. It just wraps bufio.Reader.Peek().
-func (c *BufConn) Peek(n int) ([]byte, error) {
-	c.setReadTimeout()
-	return c.bufReader.Peek(n)
-}
-
 // Read - reads data from the connection using wrapped buffered reader.
-func (c *BufConn) Read(b []byte) (n int, err error) {
+func (c *DeadlineConn) Read(b []byte) (n int, err error) {
 	c.setReadTimeout()
-	n, err = c.bufReader.Read(b)
+	n, err = c.Conn.Read(b)
 	if err == nil && c.updateBytesReadFunc != nil {
 		c.updateBytesReadFunc(n)
 	}
@@ -77,7 +55,7 @@ func (c *BufConn) Read(b []byte) (n int, err error) {
 }

 // Write - writes data to the connection.
-func (c *BufConn) Write(b []byte) (n int, err error) {
+func (c *DeadlineConn) Write(b []byte) (n int, err error) {
 	c.setWriteTimeout()
 	n, err = c.Conn.Write(b)
 	if err == nil && c.updateBytesWrittenFunc != nil {
@@ -87,11 +65,10 @@ func (c *BufConn) Write(b []byte) (n int, err error) {
 	return n, err
 }

-// newBufConn - creates a new connection object wrapping net.Conn.
-func newBufConn(c net.Conn, readTimeout, writeTimeout time.Duration, maxHeaderBytes int, updateBytesReadFunc, updateBytesWrittenFunc func(int)) *BufConn {
-	return &BufConn{
+// newDeadlineConn - creates a new connection object wrapping net.Conn with deadlines.
+func newDeadlineConn(c net.Conn, readTimeout, writeTimeout time.Duration, updateBytesReadFunc, updateBytesWrittenFunc func(int)) *DeadlineConn {
+	return &DeadlineConn{
 		QuirkConn:           QuirkConn{Conn: c},
-		bufReader:           bufio.NewReaderSize(c, maxHeaderBytes),
 		readTimeout:         readTimeout,
 		writeTimeout:        writeTimeout,
 		updateBytesReadFunc: updateBytesReadFunc,
@@ -25,7 +25,7 @@ import (
 	"time"
 )

-// Test bufconn handles read timeout properly by reading two messages beyond deadline.
+// Test deadlineconn handles read timeout properly by reading two messages beyond deadline.
 func TestBuffConnReadTimeout(t *testing.T) {
 	l, err := net.Listen("tcp", "localhost:0")
 	if err != nil {
@@ -49,12 +49,12 @@ func TestBuffConnReadTimeout(t *testing.T) {
 			t.Errorf("failed to accept new connection. %v", terr)
 			return
 		}
-		bufconn := newBufConn(tcpConn, 1*time.Second, 1*time.Second, 4096, nil, nil)
-		defer bufconn.Close()
+		deadlineconn := newDeadlineConn(tcpConn, 1*time.Second, 1*time.Second, nil, nil)
+		defer deadlineconn.Close()

 		// Read a line
 		var b = make([]byte, 12)
-		_, terr = bufconn.Read(b)
+		_, terr = deadlineconn.Read(b)
 		if terr != nil {
 			t.Errorf("failed to read from client. %v", terr)
 			return
@@ -68,7 +68,7 @@ func TestBuffConnReadTimeout(t *testing.T) {
 		// Wait for more than read timeout to simulate processing.
 		time.Sleep(3 * time.Second)

-		_, terr = bufconn.Read(b)
+		_, terr = deadlineconn.Read(b)
 		if terr != nil {
 			t.Errorf("failed to read from client. %v", terr)
 			return
@@ -80,14 +80,11 @@ func TestBuffConnReadTimeout(t *testing.T) {
 		}

 		// Send a response.
-		_, terr = io.WriteString(bufconn, "messages received\n")
+		_, terr = io.WriteString(deadlineconn, "messages received\n")
 		if terr != nil {
 			t.Errorf("failed to write to client. %v", terr)
 			return
 		}
-
-		// Removes all deadlines if any.
-		bufconn.RemoveTimeout()
 	}()

 	c, err := net.Dial("tcp", serverAddr)
@@ -17,56 +17,15 @@
 package http

 import (
-	"context"
-	"crypto/tls"
-	"fmt"
 	"io"
 	"net"
-	"net/http"
 	"os"
 	"sync"
 	"syscall"
 	"time"
-
-	"github.com/minio/minio/cmd/logger"
 )

-var sslRequiredErrMsg = []byte("HTTP/1.1 403 Forbidden\r\n\r\nSSL required")
-
-// HTTP methods.
-var methods = []string{
-	http.MethodGet,
-	http.MethodHead,
-	http.MethodPost,
-	http.MethodPut,
-	http.MethodPatch,
-	http.MethodDelete,
-	http.MethodConnect,
-	http.MethodOptions,
-	http.MethodTrace,
-	"PRI", // HTTP 2 method
-}
-
-func getPlainText(bufConn *BufConn) (bool, error) {
-	defer bufConn.setReadTimeout()
-
-	if bufConn.canSetReadDeadline() {
-		// Set deadline such that we close the connection quickly
-		// of no data was received from the Peek()
-		bufConn.SetReadDeadline(time.Now().UTC().Add(time.Second * 3))
-	}
-	b, err := bufConn.Peek(1)
-	if err != nil {
-		return false, err
-	}
-	for _, method := range methods {
-		if b[0] == method[0] {
-			return true, nil
-		}
-	}
-	return false, nil
-}
-
 type acceptResult struct {
 	conn net.Conn
 	err  error
@@ -78,13 +37,11 @@ type httpListener struct {
 	tcpListeners []*net.TCPListener // underlaying TCP listeners.
 	acceptCh     chan acceptResult  // channel where all TCP listeners write accepted connection.
 	doneCh       chan struct{}      // done channel for TCP listener goroutines.
-	tlsConfig    *tls.Config        // TLS configuration
 	tcpKeepAliveTimeout time.Duration
 	readTimeout         time.Duration
 	writeTimeout        time.Duration
-	maxHeaderBytes      int
-	updateBytesReadFunc    func(int) // function to be called to update bytes read in BufConn.
-	updateBytesWrittenFunc func(int) // function to be called to update bytes written in BufConn.
+	updateBytesReadFunc    func(int) // function to be called to update bytes read in Deadlineconn.
+	updateBytesWrittenFunc func(int) // function to be called to update bytes written in Deadlineconn.
 }

 // isRoutineNetErr returns true if error is due to a network timeout,
@@ -107,7 +64,7 @@ func isRoutineNetErr(err error) bool {
 	return err == io.EOF || err.Error() == "EOF"
 }

-// start - starts separate goroutine for each TCP listener. A valid insecure/TLS HTTP new connection is passed to httpListener.acceptCh.
+// start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh.
 func (listener *httpListener) start() {
 	listener.acceptCh = make(chan acceptResult)
 	listener.doneCh = make(chan struct{})
@@ -134,36 +91,10 @@ func (listener *httpListener) start() {
 		tcpConn.SetKeepAlive(true)
 		tcpConn.SetKeepAlivePeriod(listener.tcpKeepAliveTimeout)

-		bufconn := newBufConn(tcpConn, listener.readTimeout, listener.writeTimeout, listener.maxHeaderBytes,
-			listener.updateBytesReadFunc, listener.updateBytesWrittenFunc)
-		if listener.tlsConfig != nil {
-			ok, err := getPlainText(bufconn)
-			if err != nil {
-				// Peek could fail legitimately when clients abruptly close
-				// connection. E.g. Chrome browser opens connections speculatively to
-				// speed up loading of a web page. Peek may also fail due to network
-				// saturation on a transport with read timeout set. All other kind of
-				// errors should be logged for further investigation. Thanks @brendanashworth.
-				if !isRoutineNetErr(err) {
-					reqInfo := (&logger.ReqInfo{}).AppendTags("remoteAddr", bufconn.RemoteAddr().String())
-					reqInfo.AppendTags("localAddr", bufconn.LocalAddr().String())
-					ctx := logger.SetReqInfo(context.Background(), reqInfo)
-					logger.LogIf(ctx, err)
-				}
-				bufconn.Close()
-				return
-			}
+		deadlineConn := newDeadlineConn(tcpConn, listener.readTimeout,
+			listener.writeTimeout, listener.updateBytesReadFunc, listener.updateBytesWrittenFunc)

-			if ok {
-				// As TLS is configured and we got plain text HTTP request,
-				// return 403 (forbidden) error.
-				bufconn.Write(sslRequiredErrMsg)
-				bufconn.Close()
-				return
-			}
-		}
-
-		send(acceptResult{bufconn, nil}, doneCh)
+		send(acceptResult{deadlineConn, nil}, doneCh)
 	}

 	// Closure to handle TCPListener until done channel is closed.
@@ -244,11 +175,9 @@ func (listener *httpListener) Addrs() (addrs []net.Addr) {
 // * listen to multiple addresses
 // * controls incoming connections only doing HTTP protocol
 func newHTTPListener(serverAddrs []string,
-	tlsConfig *tls.Config,
 	tcpKeepAliveTimeout time.Duration,
 	readTimeout time.Duration,
 	writeTimeout time.Duration,
-	maxHeaderBytes int,
 	updateBytesReadFunc func(int),
 	updateBytesWrittenFunc func(int)) (listener *httpListener, err error) {

@@ -284,11 +213,9 @@ func newHTTPListener(serverAddrs []string,

 	listener = &httpListener{
 		tcpListeners:        tcpListeners,
-		tlsConfig:           tlsConfig,
 		tcpKeepAliveTimeout: tcpKeepAliveTimeout,
 		readTimeout:         readTimeout,
 		writeTimeout:        writeTimeout,
-		maxHeaderBytes:      maxHeaderBytes,
 		updateBytesReadFunc: updateBytesReadFunc,
 		updateBytesWrittenFunc: updateBytesWrittenFunc,
 	}
|
|
@ -17,7 +17,6 @@
|
|||
package http
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -34,17 +33,6 @@ import (
|
|||
|
||||
var serverPort uint32 = 60000
|
||||
|
||||
// fail - as t.Fatalf() is not goroutine safe, this function behaves like t.Fatalf().
|
||||
func fail(t *testing.T, template string, args ...interface{}) {
|
||||
fmt.Printf(template, args...)
|
||||
fmt.Println()
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
func getNextPort() string {
|
||||
return strconv.Itoa(int(atomic.AddUint32(&serverPort, 1)))
|
||||
}
|
||||
|
||||
var getCert = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
certificate, err := getTLSCert()
|
||||
if err != nil {
|
||||
|
@ -106,19 +94,8 @@ rr3DRiUP6V/10CZ/ImeSJ72k69VuTw9vq2HzB4x6pqxF2X7JQSLUCS2wfNN13N0d
|
|||
return tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
}
|
||||
|
||||
func getTLSConfig(t *testing.T) *tls.Config {
|
||||
tlsCert, err := getTLSCert()
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to parse private/certificate data. %v\n", err)
|
||||
}
|
||||
tlsConfig := &tls.Config{
|
||||
PreferServerCipherSuites: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
NextProtos: []string{"http/1.1"},
|
||||
}
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, tlsCert)
|
||||
|
||||
return tlsConfig
|
||||
func getNextPort() string {
|
||||
return strconv.Itoa(int(atomic.AddUint32(&serverPort, 1)))
|
||||
}
|
||||
|
||||
func getNonLoopBackIP(t *testing.T) string {
|
||||
|
@ -154,11 +131,8 @@ func getNonLoopBackIP(t *testing.T) string {
|
|||
}
|
||||
|
||||
func TestNewHTTPListener(t *testing.T) {
|
||||
tlsConfig := getTLSConfig(t)
|
||||
|
||||
testCases := []struct {
|
||||
serverAddrs []string
|
||||
tlsConfig *tls.Config
|
||||
tcpKeepAliveTimeout time.Duration
|
||||
readTimeout time.Duration
|
||||
writeTimeout time.Duration
|
||||
|
@ -166,24 +140,22 @@ func TestNewHTTPListener(t *testing.T) {
|
|||
updateBytesWrittenFunc func(int)
|
||||
expectedErr bool
|
||||
}{
|
||||
{[]string{"93.184.216.34:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"example.org:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"unknown-host"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"unknown-host:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:65432", "93.184.216.34:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:65432", "unknown-host:65432"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:0"}, nil, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false},
|
||||
{[]string{"localhost:0"}, tlsConfig, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false},
|
||||
{[]string{"93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"example.org:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"unknown-host"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:65432", "93.184.216.34:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:65432", "unknown-host:65432"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, true},
|
||||
{[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false},
|
||||
{[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), nil, nil, false},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
listener, err := newHTTPListener(
|
||||
testCase.serverAddrs,
|
||||
testCase.tlsConfig,
|
||||
testCase.tcpKeepAliveTimeout,
|
||||
testCase.readTimeout,
|
||||
testCase.writeTimeout,
|
||||
DefaultMaxHeaderBytes,
|
||||
testCase.updateBytesReadFunc,
|
||||
testCase.updateBytesWrittenFunc,
|
||||
)
|
||||
|
@ -203,29 +175,25 @@ func TestNewHTTPListener(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTPListenerStartClose(t *testing.T) {
|
||||
tlsConfig := getTLSConfig(t)
|
||||
nonLoopBackIP := getNonLoopBackIP(t)
|
||||
|
||||
testCases := []struct {
|
||||
serverAddrs []string
|
||||
tlsConfig *tls.Config
|
||||
}{
|
||||
{[]string{"localhost:0"}, nil},
|
||||
{[]string{nonLoopBackIP + ":0"}, nil},
|
||||
{[]string{"127.0.0.1:0", nonLoopBackIP + ":0"}, nil},
|
||||
{[]string{"localhost:0"}, tlsConfig},
|
||||
{[]string{nonLoopBackIP + ":0"}, tlsConfig},
|
||||
{[]string{"127.0.0.1:0", nonLoopBackIP + ":0"}, tlsConfig},
|
||||
{[]string{"localhost:0"}},
|
||||
{[]string{nonLoopBackIP + ":0"}},
|
||||
{[]string{"127.0.0.1:0", nonLoopBackIP + ":0"}},
|
||||
{[]string{"localhost:0"}},
|
||||
{[]string{nonLoopBackIP + ":0"}},
|
||||
{[]string{"127.0.0.1:0", nonLoopBackIP + ":0"}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
listener, err := newHTTPListener(
|
||||
testCase.serverAddrs,
|
||||
testCase.tlsConfig,
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
DefaultMaxHeaderBytes,
|
||||
nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -245,7 +213,6 @@ func TestHTTPListenerStartClose(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTPListenerAddr(t *testing.T) {
|
||||
tlsConfig := getTLSConfig(t)
|
||||
nonLoopBackIP := getNonLoopBackIP(t)
|
||||
var casePorts []string
|
||||
for i := 0; i < 6; i++ {
|
||||
|
@ -254,25 +221,22 @@ func TestHTTPListenerAddr(t *testing.T) {
|
|||
|
||||
testCases := []struct {
|
||||
serverAddrs []string
|
||||
tlsConfig *tls.Config
|
||||
expectedAddr string
|
||||
}{
|
||||
{[]string{"localhost:" + casePorts[0]}, nil, "127.0.0.1:" + casePorts[0]},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[1]}, nil, nonLoopBackIP + ":" + casePorts[1]},
|
||||
{[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil, "0.0.0.0:" + casePorts[2]},
|
||||
{[]string{"localhost:" + casePorts[3]}, tlsConfig, "127.0.0.1:" + casePorts[3]},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig, nonLoopBackIP + ":" + casePorts[4]},
|
||||
{[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig, "0.0.0.0:" + casePorts[5]},
|
||||
{[]string{"localhost:" + casePorts[0]}, "127.0.0.1:" + casePorts[0]},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[1]}, nonLoopBackIP + ":" + casePorts[1]},
|
||||
{[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, "0.0.0.0:" + casePorts[2]},
|
||||
{[]string{"localhost:" + casePorts[3]}, "127.0.0.1:" + casePorts[3]},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[4]}, nonLoopBackIP + ":" + casePorts[4]},
|
||||
{[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, "0.0.0.0:" + casePorts[5]},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
listener, err := newHTTPListener(
|
||||
testCase.serverAddrs,
|
||||
testCase.tlsConfig,
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
DefaultMaxHeaderBytes,
|
||||
nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -289,7 +253,6 @@ func TestHTTPListenerAddr(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTPListenerAddrs(t *testing.T) {
|
||||
tlsConfig := getTLSConfig(t)
|
||||
nonLoopBackIP := getNonLoopBackIP(t)
|
||||
var casePorts []string
|
||||
for i := 0; i < 6; i++ {
|
||||
|
@ -298,25 +261,22 @@ func TestHTTPListenerAddrs(t *testing.T) {
|
|||
|
||||
testCases := []struct {
|
||||
serverAddrs []string
|
||||
tlsConfig *tls.Config
|
||||
expectedAddrs set.StringSet
|
||||
}{
|
||||
{[]string{"localhost:" + casePorts[0]}, nil, set.CreateStringSet("127.0.0.1:" + casePorts[0])},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[1]}, nil, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[1])},
|
||||
{[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, nil, set.CreateStringSet("127.0.0.1:"+casePorts[2], nonLoopBackIP+":"+casePorts[2])},
|
||||
{[]string{"localhost:" + casePorts[3]}, tlsConfig, set.CreateStringSet("127.0.0.1:" + casePorts[3])},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[4]}, tlsConfig, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[4])},
|
||||
{[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, tlsConfig, set.CreateStringSet("127.0.0.1:"+casePorts[5], nonLoopBackIP+":"+casePorts[5])},
|
||||
{[]string{"localhost:" + casePorts[0]}, set.CreateStringSet("127.0.0.1:" + casePorts[0])},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[1]}, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[1])},
|
||||
{[]string{"127.0.0.1:" + casePorts[2], nonLoopBackIP + ":" + casePorts[2]}, set.CreateStringSet("127.0.0.1:"+casePorts[2], nonLoopBackIP+":"+casePorts[2])},
|
||||
{[]string{"localhost:" + casePorts[3]}, set.CreateStringSet("127.0.0.1:" + casePorts[3])},
|
||||
{[]string{nonLoopBackIP + ":" + casePorts[4]}, set.CreateStringSet(nonLoopBackIP + ":" + casePorts[4])},
|
||||
{[]string{"127.0.0.1:" + casePorts[5], nonLoopBackIP + ":" + casePorts[5]}, set.CreateStringSet("127.0.0.1:"+casePorts[5], nonLoopBackIP+":"+casePorts[5])},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
listener, err := newHTTPListener(
|
||||
testCase.serverAddrs,
|
||||
testCase.tlsConfig,
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
DefaultMaxHeaderBytes,
|
||||
nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -337,69 +297,6 @@ func TestHTTPListenerAddrs(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHTTPListenerAcceptTLSError(t *testing.T) {
|
||||
tlsConfig := getTLSConfig(t)
|
||||
nonLoopBackIP := getNonLoopBackIP(t)
|
||||
|
||||
testCases := []struct {
|
||||
serverAddrs []string
|
||||
tlsConfig *tls.Config
|
||||
request string
|
||||
}{
|
||||
{[]string{"127.0.0.1:0", nonLoopBackIP + ":0"}, tlsConfig, "GET / HTTP/1.0\n"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
listener, err := newHTTPListener(
|
||||
testCase.serverAddrs,
|
||||
testCase.tlsConfig,
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
time.Duration(0),
|
||||
DefaultMaxHeaderBytes,
|
||||
nil, nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
|
||||
}
|
||||
|
||||
for _, serverAddr := range listener.Addrs() {
|
||||
conn, err := net.Dial("tcp", serverAddr.String())
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: error: expected = <nil>, got = %v", i+1, err)
|
||||
}
|
||||
|
||||
if _, err = io.WriteString(conn, testCase.request); err != nil {
|
||||
t.Fatalf("Test %d: request send: expected = <nil>, got = %v", i+1, err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
serverConn, aerr := listener.Accept()
|
||||
if aerr == nil {
|
||||
fail(t, "Test %d: accept: expected = <error>, got = <nil>", i+1)
|
||||
}
|
||||
if serverConn != nil {
|
||||
fail(t, "Test %d: accept: server expected = <nil>, got = %v", i+1, serverConn)
|
||||
}
|
||||
}()
|
||||
|
||||
buf := make([]byte, len(sslRequiredErrMsg))
|
||||
n, err := io.ReadFull(conn, buf)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: reply read: expected = <nil> got = %v", i+1, err)
|
||||
} else if n != len(buf) {
|
||||
t.Fatalf("Test %d: reply length: expected = %v got = %v", i+1, len(buf), n)
|
||||
} else if !bytes.Equal(buf, sslRequiredErrMsg) {
|
||||
t.Fatalf("Test %d: reply: expected = %v got = %v", i+1, string(sslRequiredErrMsg), string(buf))
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
listener.Close()
|
||||
}
|
||||
}
|
||||
|
||||
type myTimeoutErr struct {
|
||||
timeout bool
|
||||
}
|
||||
|
|
@@ -92,11 +92,9 @@ func (srv *Server) Start() (err error) {
 	var listener *httpListener
 	listener, err = newHTTPListener(
 		addrs,
-		tlsConfig,
 		tcpKeepAliveTimeout,
 		readTimeout,
 		writeTimeout,
-		srv.MaxHeaderBytes,
 		srv.UpdateBytesReadFunc,
 		srv.UpdateBytesWrittenFunc,
 	)
@@ -37,29 +37,29 @@ func TestPrepareUpdateMessage(t *testing.T) {
 		{-72 * time.Hour, "another_update_url", ""},
 		{0, "another_update_url", ""},
 		{time.Hour, "", ""},
-		{1 * time.Second, "my_download_url", "now"},
-		{2 * time.Second, "my_download_url", "1 second ago"},
+		{0 * time.Second, "my_download_url", "now"},
+		{1 * time.Second, "my_download_url", "1 second ago"},
 		{37 * time.Second, "my_download_url", "37 seconds ago"},
-		{60 * time.Second, "my_download_url", "60 seconds ago"},
+		{60 * time.Second, "my_download_url", "1 minute ago"},
 		{61 * time.Second, "my_download_url", "1 minute ago"},

 		// Testcase index 10
 		{37 * time.Minute, "my_download_url", "37 minutes ago"},
-		{1 * time.Hour, "my_download_url", "60 minutes ago"},
+		{1 * time.Hour, "my_download_url", "1 hour ago"},
 		{61 * time.Minute, "my_download_url", "1 hour ago"},
 		{122 * time.Minute, "my_download_url", "2 hours ago"},
-		{24 * time.Hour, "my_download_url", "24 hours ago"},
+		{24 * time.Hour, "my_download_url", "1 day ago"},
 		{25 * time.Hour, "my_download_url", "1 day ago"},
 		{49 * time.Hour, "my_download_url", "2 days ago"},
-		{7 * 24 * time.Hour, "my_download_url", "7 days ago"},
+		{7 * 24 * time.Hour, "my_download_url", "1 week ago"},
 		{8 * 24 * time.Hour, "my_download_url", "1 week ago"},
 		{15 * 24 * time.Hour, "my_download_url", "2 weeks ago"},

 		// Testcase index 20
-		{30 * 24 * time.Hour, "my_download_url", "4 weeks ago"},
+		{30 * 24 * time.Hour, "my_download_url", "1 month ago"},
 		{31 * 24 * time.Hour, "my_download_url", "1 month ago"},
 		{61 * 24 * time.Hour, "my_download_url", "2 months ago"},
-		{360 * 24 * time.Hour, "my_download_url", "12 months ago"},
+		{360 * 24 * time.Hour, "my_download_url", "1 year ago"},
 		{361 * 24 * time.Hour, "my_download_url", "1 year ago"},
 		{2 * 365 * 24 * time.Hour, "my_download_url", "2 years ago"},
 	}
@@ -74,17 +74,17 @@ func TestPrepareUpdateMessage(t *testing.T) {
 		// fmt.Println(output)
 		switch {
 		case testCase.dlURL == "" && output != "":
-			t.Errorf("Testcase %d: no newer release available but got an update message: %s", i, output)
+			t.Errorf("Testcase %d: no newer release available but got an update message: %s", i+1, output)
 		case output == "" && testCase.dlURL != "" && testCase.older > 0:
-			t.Errorf("Testcase %d: newer release is available but got empty update message!", i)
+			t.Errorf("Testcase %d: newer release is available but got empty update message!", i+1)
 		case output == "" && (testCase.dlURL == "" || testCase.older <= 0):
 			// Valid no update message case. No further
 			// validation needed.
 			continue
 		case !strings.Contains(output, line1):
-			t.Errorf("Testcase %d: output '%s' did not contain line 1: '%s'", i, output, line1)
+			t.Errorf("Testcase %d: output '%s' did not contain line 1: '%s'", i+1, output, line1)
 		case !strings.Contains(output, line2):
-			t.Errorf("Testcase %d: output '%s' did not contain line 2: '%s'", i, output, line2)
+			t.Errorf("Testcase %d: output '%s' did not contain line 2: '%s'", i+1, output, line2)
 		}
 	}
 }
go.mod (258 changes)
@ -1,192 +1,104 @@
|
|||
module github.com/minio/minio
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.26.0
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.7 // indirect
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.0.0-20180919222851-d1e19f5c23e9 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v26.4.0+incompatible
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Azure/go-autorest v11.5.2+incompatible
|
||||
github.com/Microsoft/go-winio v0.4.12 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/SAP/go-hdb v0.14.0 // indirect
|
||||
github.com/SermoDigital/jose v0.9.1 // indirect
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect
|
||||
github.com/alecthomas/participle v0.0.0-20190103085315-bf8340a459bd
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190315122603-6f9e54af456e // indirect
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170925032315-6fe16293d6b7
|
||||
github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf // indirect
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect
|
||||
cloud.google.com/go v0.37.2
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.9 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v27.0.0+incompatible
|
||||
github.com/Azure/go-autorest v11.7.0+incompatible
|
||||
github.com/DataDog/zstd v1.3.5 // indirect
|
||||
github.com/alecthomas/participle v0.2.1
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5
|
||||
github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/boombuler/barcode v1.0.0 // indirect
|
||||
github.com/briankassouf/jose v0.9.1 // indirect
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
||||
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f // indirect
|
||||
github.com/cheggaaa/pb v0.0.0-20160713104425-73ae1d68fe0b
|
||||
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 // indirect
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 // indirect
|
||||
github.com/coredns/coredns v0.0.0-20180121192821-d4bf076ccf4e
|
||||
github.com/coreos/etcd v0.0.0-20180703215944-e4425ee79f2f
|
||||
github.com/coreos/go-oidc v2.0.0+incompatible // indirect
|
||||
github.com/dancannon/gorethink v4.0.0+incompatible // indirect
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20180921172315-3af4c746e1c2
|
||||
github.com/dimchansky/utfbom v1.1.0 // indirect
|
||||
github.com/djherbis/atime v0.0.0-20170215084934-89517e96e10b
|
||||
github.com/dnaeon/go-vcr v1.0.1 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.3.3 // indirect
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 // indirect
|
||||
github.com/dustin/go-humanize v0.0.0-20170228161531-259d2a102b87
|
||||
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 // indirect
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 // indirect
|
||||
github.com/eapache/queue v1.1.0 // indirect
|
||||
github.com/eclipse/paho.mqtt.golang v0.0.0-20181129145454-379fd9f99ba5
|
||||
github.com/elazarl/go-bindata-assetfs v0.0.0-20151224045452-57eb5e1fc594
|
||||
github.com/cheggaaa/pb v1.0.28
|
||||
github.com/coredns/coredns v1.4.0
|
||||
github.com/coreos/bbolt v1.3.2 // indirect
|
||||
github.com/coreos/etcd v3.3.12+incompatible
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/djherbis/atime v1.0.0
|
||||
github.com/dustin/go-humanize v1.0.0
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.2-0.20190322152051-20337d8c3947
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0
|
||||
github.com/fatih/color v1.7.0
|
||||
github.com/fatih/structs v1.1.0
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20180613152042-8306686428a5 // indirect
|
||||
github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 // indirect
|
||||
github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect
|
||||
github.com/garyburd/redigo v1.6.0 // indirect
|
||||
github.com/go-errors/errors v1.0.1 // indirect
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible // indirect
|
||||
github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f
|
||||
github.com/go-stomp/stomp v2.0.2+incompatible // indirect
|
||||
github.com/go-test/deep v1.0.1 // indirect
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4 // indirect
|
||||
github.com/gogo/protobuf v1.2.1 // indirect
|
||||
github.com/go-ini/ini v1.42.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
|
||||
github.com/golang/snappy v0.0.1
|
||||
github.com/gomodule/redigo v0.0.0-20190205135352-43fe51054af5
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
|
||||
github.com/google/martian v2.1.0+incompatible // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9 // indirect
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 // indirect
|
||||
github.com/gorilla/context v0.0.0-20160525203319-aed02d124ae4 // indirect
|
||||
github.com/gorilla/handlers v0.0.0-20160410185317-66e6c6f01d8d
|
||||
github.com/gorilla/mux v0.0.0-20160605233521-9fa818a44c2b
|
||||
github.com/gorilla/rpc v0.0.0-20160517062331-bd3317b8f670
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
|
||||
github.com/hashicorp/consul v1.4.3 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 // indirect
|
||||
github.com/hashicorp/go-gcp-common v0.0.0-20180425173946-763e39302965 // indirect
|
||||
github.com/hashicorp/go-hclog v0.8.0 // indirect
|
||||
github.com/hashicorp/go-memdb v0.0.0-20190306140544-eea0b16292ad // indirect
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 // indirect
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/go-version v0.0.0-20160214002439-2e7f5ea8e27b
|
||||
github.com/hashicorp/golang-lru v0.5.1 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hashicorp/nomad v0.8.7 // indirect
|
||||
github.com/hashicorp/raft v1.0.0 // indirect
|
||||
github.com/hashicorp/serf v0.8.2 // indirect
|
||||
github.com/hashicorp/vault v0.0.0-20181121181053-d4367e581fe1
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.0.0-20190311155555-98628998247d // indirect
|
||||
github.com/hashicorp/vault-plugin-auth-azure v0.0.0-20190201222632-0af1d040b5b3 // indirect
|
||||
github.com/hashicorp/vault-plugin-auth-centrify v0.0.0-20180816201131-66b0a34a58bf // indirect
|
||||
github.com/hashicorp/vault-plugin-auth-gcp v0.0.0-20190201215414-7d4c2101e7d0 // indirect
|
||||
github.com/hashicorp/vault-plugin-auth-jwt v0.0.0-20190314211503-86b44673ce1e // indirect
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.0.0-20190201222209-db96aa4ab438 // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-ad v0.0.0-20190131222416-4796d9980125 // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-alicloud v0.0.0-20190131211812-b0abe36195cb // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.0.0-20181207232500-0087bdef705a // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-gcp v0.0.0-20190311200649-621231cb86fe // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.0.0-20190116164938-d6b25b0b4a39 // indirect
|
||||
github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190315192709-dccffee64925 // indirect
|
||||
github.com/gomodule/redigo v2.0.0+incompatible
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190328170749-bb2674552d8f // indirect
|
||||
github.com/gorilla/handlers v1.4.0
|
||||
github.com/gorilla/mux v1.7.0
|
||||
github.com/gorilla/rpc v1.2.0+incompatible
|
||||
github.com/gorilla/websocket v1.4.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 // indirect
github.com/hashicorp/vault v1.1.0
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c // indirect
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect
github.com/jefferai/jsonx v1.0.0 // indirect
github.com/json-iterator/go v1.1.6 // indirect
github.com/keybase/go-crypto v0.0.0-20190312101036-b475f2ecc1fe // indirect
github.com/klauspost/compress v1.3.0 // indirect
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817 // indirect
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect
github.com/klauspost/pgzip v0.0.0-20180606150939-90b2c57fba35
github.com/klauspost/reedsolomon v0.0.0-20190210214925-2b210cf0866d
github.com/lib/pq v0.0.0-20181016162627-9eb73efc1fcc
github.com/marstr/guid v1.1.0 // indirect
github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b // indirect
github.com/mattn/go-isatty v0.0.4
github.com/michaelklishin/rabbit-hole v1.5.0 // indirect
github.com/miekg/dns v1.0.14
github.com/jonboulle/clockwork v0.1.0 // indirect
github.com/klauspost/compress v1.4.1 // indirect
github.com/klauspost/cpuid v1.2.0 // indirect
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/reedsolomon v1.9.1
github.com/lib/pq v1.0.0
github.com/mattn/go-isatty v0.0.7
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/miekg/dns v1.1.8
github.com/minio/blazer v0.0.0-20171126203752-2081f5bf0465
github.com/minio/cli v0.0.0-20170227073228-b8ae5507c0ce
github.com/minio/dsync v0.0.0-20190104003057-61c41ffdeea2
github.com/minio/highwayhash v0.0.0-20181220011308-93ed73d64169
github.com/minio/cli v1.3.0
github.com/minio/dsync v0.0.0-20190131060523-fb604afd87b2
github.com/minio/highwayhash v0.0.0-20190131021015-02ca4b43caa3
github.com/minio/lsync v0.0.0-20190207022115-a4e43e3d0887
github.com/minio/mc v0.0.0-20190311071728-2e612b23d665
github.com/minio/minio-go v0.0.0-20190227180923-59af836a7e6d
github.com/minio/mc v0.0.0-20190401030144-a1355e50e2e8
github.com/minio/minio-go v0.0.0-20190327203652-5325257a208f
github.com/minio/parquet-go v0.0.0-20190318185229-9d767baf1679
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16
github.com/minio/sio v0.0.0-20180327104954-6a41828a60f0
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5
github.com/minio/sio v0.0.0-20190118043801-035b4ef8c449
github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/hashstructure v1.0.0 // indirect
github.com/mitchellh/pointerstructure v0.0.0-20170205204203-f2329fcfa9e2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nats-io/gnatsd v1.4.1 // indirect
github.com/nats-io/go-nats v0.0.0-20161120202126-6b6bf392d34d // indirect
github.com/nats-io/go-nats-streaming v0.0.0-20161216191029-077898146bfb
github.com/nats-io/nats v0.0.0-20160916181735-70b70be17b77
github.com/nats-io/nats-streaming-server v0.12.2 // indirect
github.com/nats-io/nuid v1.0.0 // indirect
github.com/nsqio/go-nsq v0.0.0-20181028195256-0527e80f3ba5
github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/ory-am/common v0.4.0 // indirect
github.com/ory/dockertest v3.3.4+incompatible // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pborman/uuid v1.2.0 // indirect
github.com/pkg/profile v1.2.1
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/pquerna/otp v1.1.0 // indirect
github.com/prometheus/client_golang v0.9.2
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/nats-io/go-nats v1.7.2 // indirect
github.com/nats-io/go-nats-streaming v0.4.2
github.com/nats-io/nats v1.7.2
github.com/nats-io/nkeys v0.0.2 // indirect
github.com/nsqio/go-nsq v1.0.7
github.com/pascaldekloe/goe v0.1.0 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/pkg/profile v1.3.0
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
github.com/rjeczalik/notify v0.9.2
github.com/rs/cors v0.0.0-20190116175910-76f58f330d76
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/segmentio/go-prompt v0.0.0-20161017233205-f0d19b6901ad
github.com/skyrings/skyring-common v0.0.0-20160324141443-762fd2bfc12e
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff // indirect
github.com/streadway/amqp v0.0.0-20160311215503-2e25825abdbd
github.com/tidwall/gjson v1.1.4
github.com/rs/cors v1.6.0
github.com/segmentio/go-prompt v1.2.1-0.20161017233205-f0d19b6901ad
github.com/sirupsen/logrus v1.3.0 // indirect
github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e
github.com/smartystreets/assertions v0.0.0-20190401200700-3f99fa72afbb // indirect
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
github.com/soheilhy/cmux v0.1.4 // indirect
github.com/streadway/amqp v0.0.0-20190312223743-14f78b41ce6d
github.com/tidwall/gjson v1.2.1
github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65 // indirect
github.com/tidwall/sjson v1.0.4
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.2 // indirect
go.uber.org/atomic v1.3.2
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.9.1 // indirect
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 // indirect
golang.org/x/net v0.0.0-20190327214358-63eda1eb0650
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf // indirect
google.golang.org/api v0.0.0-20180916000451-19ff8768a5c0
google.golang.org/genproto v0.0.0-20180918203901-c3f76f3b92d1 // indirect
gopkg.in/Shopify/sarama.v1 v1.10.1
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/gorethink/gorethink.v4 v4.1.0 // indirect
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
gopkg.in/olivere/elastic.v5 v5.0.31
gopkg.in/ory-am/dockertest.v2 v2.2.3 // indirect
gopkg.in/square/go-jose.v2 v2.3.0 // indirect
golang.org/x/net v0.0.0-20190328230028-74de082e2cca
golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/tools v0.0.0-20190401201229-1bac838f5b88 // indirect
google.golang.org/api v0.3.0
gopkg.in/Shopify/sarama.v1 v1.20.0
gopkg.in/olivere/elastic.v5 v5.0.80
gopkg.in/yaml.v2 v2.2.2
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.0.0-20190313115550-3c12c96769cc // indirect
k8s.io/apimachinery v0.0.0-20190313115320-c9defaaddf6f // indirect
k8s.io/klog v0.2.0 // indirect
layeh.com/radius v0.0.0-20190118135028-0f678f039617 // indirect
sigs.k8s.io/yaml v1.1.0 // indirect
)
505	go.sum
@@ -1,50 +1,52 @@
cloud.google.com/go v0.23.0 h1:w1svupRqvZnfjN9+KksMiggoIRQuMzWkVzpxcR96xDs=
|
||||
cloud.google.com/go v0.23.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.7 h1:7NiGV38nUxXevUGX5rJdG6QBRRXRDRb6ABwdYHjhiXI=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0=
|
||||
cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.7/go.mod h1:+KkYrcvvEN0E5ls626sqMv8PdMx2931feKtzIwP01qI=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.0.0-20180919222851-d1e19f5c23e9 h1:qGDL/as+reGch2TnQl053h96loeNRKASdiRqnKQDLko=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.9 h1:8ZbMXpyd04/3LILa/9Tzr8N4HzZNj6Vb2xsaSuR4nQI=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.0.0-20180919222851-d1e19f5c23e9/go.mod h1:hNe5qQofPbg6bLQY5wHCvQ7o+2E5P8PkegEuQ+MyRw0=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
git.apache.org/thrift.git v0.12.0 h1:CMxsZlAmxKs+VAZMlDDL0wXciMblJcutQbEe3A9CYUM=
|
||||
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/Azure/azure-sdk-for-go v26.4.0+incompatible h1:ISw3xYFYPGBmcwP7CQjzQDoYhkywcIVfYzo4CHgQzOw=
|
||||
github.com/Azure/azure-sdk-for-go v26.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/azure-sdk-for-go v27.0.0+incompatible h1:JknnG+RYTnwzpi+YuQ04/dAWIssbubSRD8arN78I+Qo=
|
||||
github.com/Azure/azure-sdk-for-go v27.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v11.5.2+incompatible h1:NTIEargbhAGNWuT7QEXJ2fqLMFvatupHIscb9FYwVOg=
|
||||
github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
|
||||
github.com/Azure/go-autorest v11.7.0+incompatible h1:gzma19dc9ejB75D90E5S+/wXouzpZyA+CV+/MJPSD/k=
|
||||
github.com/Azure/go-autorest v11.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
|
||||
github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
|
||||
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
|
||||
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/SAP/go-hdb v0.14.0 h1:e6lvHlwRcjjDGeiv8PtKZHadTWsEGVMr9Zqc/iolwgM=
|
||||
github.com/SAP/go-hdb v0.14.0/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
|
||||
github.com/SermoDigital/jose v0.9.1 h1:atYaHPD3lPICcbK1owly3aPm0iaJGSGPi0WD4vLznv8=
|
||||
github.com/SermoDigital/jose v0.9.1/go.mod h1:ARgCUhI1MHQH+ONky/PAtmVHQrP5JlGY0F3poXOp/fA=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/participle v0.0.0-20190103085315-bf8340a459bd h1:Fx8zE0HVKjLkrRpyds1EK9HJeXWTVlO2dqWC5UNR4Wc=
|
||||
github.com/alecthomas/participle v0.0.0-20190103085315-bf8340a459bd/go.mod h1:SW6HZGeZgSIpcUWX3fXpfZhuaWHnmoD5KCVaqSaNTkk=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190315122603-6f9e54af456e h1:YzotKd+gUHEK6v3WEVQUT/UbZdqJJ4Nlqtk/xyhd/q4=
|
||||
github.com/alecthomas/participle v0.2.1 h1:4AVLj1viSGa4LG5HDXKXrm5xRx19SB/rS/skPQB1Grw=
|
||||
github.com/alecthomas/participle v0.2.1/go.mod h1:SW6HZGeZgSIpcUWX3fXpfZhuaWHnmoD5KCVaqSaNTkk=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190315122603-6f9e54af456e/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170925032315-6fe16293d6b7 h1:uHceyjNJ6fFv1Vv2zOrB7Qm8VnVjQ/TuJ8mwV7pHPKk=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170925032315-6fe16293d6b7/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61 h1:Xz25cuW4REGC5W5UtpMU3QItMIImag615HiQcRbxqKQ=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61/go.mod h1:ikc1XA58M+Rx7SEbf0bLJCfBkwayZ8T5jBo5FXK8Uz8=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco=
|
||||
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.15.31 h1:ExgD8W8QDeD8Y4CPVlcP/laumxvikDbkVWB+VCHgXxA=
|
||||
github.com/aws/aws-sdk-go v1.15.31/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
@@ -53,111 +55,109 @@ github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2/go.mod h1:RDu/qcrnp
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc=
|
||||
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/briankassouf/jose v0.9.1 h1:HMeR+qpz2PBP7RgR+64nJPA9qcZI74tdkBv5NyD0Mzk=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/briankassouf/jose v0.9.1/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y=
|
||||
github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
|
||||
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU=
|
||||
github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE=
|
||||
github.com/cheggaaa/pb v0.0.0-20160713104425-73ae1d68fe0b h1:byvZ+gwmaExJc7n2oYzpN3VCnUDOUwqzo/JrunZmvGk=
|
||||
github.com/cheggaaa/pb v0.0.0-20160713104425-73ae1d68fe0b/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
||||
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ=
|
||||
github.com/cheggaaa/pb v1.0.28 h1:kWGpdAcSp3MxMU9CCHOwz/8V0kCHN4+9yQm2MzWuI98=
|
||||
github.com/cheggaaa/pb v1.0.28/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
|
||||
github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808 h1:4BX8f882bXEDKfWIf0wa8HRvpnBoPszJJXL+TVbBw4M=
|
||||
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/coredns/coredns v0.0.0-20180121192821-d4bf076ccf4e h1:AExHcQe3cJZQk3wgIs9UmW0lR9Bp1hk+X8YK5Bbn6/4=
|
||||
github.com/coredns/coredns v0.0.0-20180121192821-d4bf076ccf4e/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0=
|
||||
github.com/coredns/coredns v1.4.0 h1:RubBkYmkByUqZWWkjRHvNLnUHgkRVqAWgSMmRFvpE1A=
|
||||
github.com/coredns/coredns v1.4.0/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0=
|
||||
github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v0.0.0-20180622175804-8f6348a97d48/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v0.0.0-20180703215944-e4425ee79f2f h1:YcMpzEkbh12yE3+PngDUFe69GCbqvNwBiUJJQZV7vGY=
|
||||
github.com/coreos/etcd v0.0.0-20180703215944-e4425ee79f2f/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg=
|
||||
github.com/coreos/etcd v3.3.12+incompatible h1:pAWNwdf7QiT1zfaWyqCtNZQWCLByQyA3JrSQyuYAqnQ=
|
||||
github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a h1:WqY2Kv7eI1jeoU3pC05YYK/kK4tdXyLzzaBzCR51r9M=
|
||||
github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk=
|
||||
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/dancannon/gorethink v4.0.0+incompatible h1:KFV7Gha3AuqT+gr0B/eKvGhbjmUv0qGF43aKCIKVE9A=
|
||||
github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 h1:tTngnoO/B6HQnJ+pK8tN7kEAhmhIfaJOutqq/A4/JTM=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20180921172315-3af4c746e1c2 h1:xhptajUY6xeFJmfsrpVPxXxH0i2JxobVCBDn0iShbkU=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20180921172315-3af4c746e1c2/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/djherbis/atime v0.0.0-20170215084934-89517e96e10b h1:e0AH6GKm2gddyuEilb3+K3LmRUb2dK23JDDrZJ7dmD8=
|
||||
github.com/djherbis/atime v0.0.0-20170215084934-89517e96e10b/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
|
||||
github.com/djherbis/atime v1.0.0 h1:ySLvBAM0EvOGaX7TI4dAM5lWj+RdJUCKtGSEHN8SGBg=
|
||||
github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
|
||||
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
|
||||
github.com/dustin/go-humanize v0.0.0-20150809201405-1c212aae1d02/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v0.0.0-20170228161531-259d2a102b87 h1:uPzP/9GIqYKvZAmz4IayKMMZiWRWNtGynUREBtTXPXA=
|
||||
github.com/dustin/go-humanize v0.0.0-20170228161531-259d2a102b87/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42 h1:f8ERmXYuaC+kCSv2w+y3rBK/oVu6If4DEm3jywJJ0hc=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934 h1:oGLoaVIefp3tiOgi7+KInR/nNPvEpPM6GFo+El7fd14=
|
||||
github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20160609142408-bb955e01b934/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/eclipse/paho.mqtt.golang v0.0.0-20181129145454-379fd9f99ba5 h1:2AEPnz/cyD83iOHC1HaW06xSW6/0xCpKkuGEBHvWvYs=
|
||||
github.com/eclipse/paho.mqtt.golang v0.0.0-20181129145454-379fd9f99ba5/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/elazarl/go-bindata-assetfs v0.0.0-20151224045452-57eb5e1fc594 h1:McZ/pt/pP/XAbLMDQGzm/iQUwW6OXmKVbFtmH9klWmc=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.1 h1:iPJYXJLaViCshRTW/PSqImSS6HJ2Rf671WR0bXZ2GIU=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.1/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.2-0.20190322152051-20337d8c3947 h1:sgqVE7PzNVkD3Fkb2XPXMmZD4+tRRIpMVTkVryEeG6M=
|
||||
github.com/eclipse/paho.mqtt.golang v1.1.2-0.20190322152051-20337d8c3947/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
|
||||
github.com/elazarl/go-bindata-assetfs v0.0.0-20151224045452-57eb5e1fc594/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/structs v0.0.0-20181010231757-878a968ab225/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
|
||||
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20180613152042-8306686428a5 h1:v+vxrd9XS8uWIXG2RK0BHCnXc30qLVQXVqbK+IOmpXk=
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20180613152042-8306686428a5/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
||||
github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8=
|
||||
github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI=
|
||||
github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 h1:EipXK6U05IQ2wtuFRn4k3h0+2lXypzItoXGVyf4r9Io=
|
||||
github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w=
|
||||
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
|
||||
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ini/ini v1.42.0 h1:TWr1wGj35+UiWHlBA8er89seFXxzwFn11spilrrj+38=
|
||||
github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
|
||||
github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f h1:fbIzwEaXt5b2bl9mm+PIufKTSGKk6ZuwSSTQ7iZj7Lo=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stomp/stomp v2.0.2+incompatible h1:0yPknMJh32lE2xiCFGW5t/KgamhUC4OgCv10wIjx5aw=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stomp/stomp v2.0.2+incompatible/go.mod h1:VqCtqNZv1226A1/79yh+rMiFUcfY3R109np+7ke4n0c=
|
||||
github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
|
||||
github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4 h1:vF83LI8tAakwEwvWZtrIEx7pOySacl2TOxx6eXk4ePo=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -167,84 +167,91 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
|
||||
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v0.0.0-20190205135352-43fe51054af5 h1:HVDzcZHqJUlNfCqPdTMidgAPVeDZR32Vuam64ikElJA=
|
||||
github.com/gomodule/redigo v0.0.0-20190205135352-43fe51054af5/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9 h1:Z0f701LpR4dqO92bP6TnIe3ZURClzJtBhds8R8u1HBE=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20190328170749-bb2674552d8f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
|
||||
github.com/gorilla/context v0.0.0-20160525203319-aed02d124ae4 h1:3nOfQt8sRPYbXORD5tJ8YyQ3HlL2Jt3LJ2U17CbNh6I=
|
||||
github.com/gorilla/context v0.0.0-20160525203319-aed02d124ae4/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20160410185317-66e6c6f01d8d h1:wroUBCGyWwKzb/MvvcfuO91P7c0cR8oNoFVvNd3B2kU=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20160410185317-66e6c6f01d8d/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v0.0.0-20160605233521-9fa818a44c2b h1:OFvZV3a+25cGJH9dETHw0nk0wV6hLZI7IJijOkXEFS0=
|
||||
github.com/gorilla/handlers v1.4.0 h1:XulKRWSQK5uChr4pEgSE4Tc/OcmnU9GJuSwdog/tZsA=
|
||||
github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v0.0.0-20160605233521-9fa818a44c2b/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/rpc v0.0.0-20160517062331-bd3317b8f670 h1:YzxOFT0J9DX8G4WfjA0j5A9Pih7ED5ztSE/zU/TnBK8=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U=
|
||||
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/rpc v0.0.0-20160517062331-bd3317b8f670/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
|
||||
github.com/gorilla/rpc v1.2.0+incompatible h1:V3Dz9mWwCvHKm0N+mVM2A/hShV+hLUMUdzoyHQjr1NA=
|
||||
github.com/gorilla/rpc v1.2.0+incompatible/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.7.0 h1:tPFY/SM+d656aSgLWO2Eckc3ExwpwwybwdN5Ph20h1A=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.7.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/consul v1.4.3 h1:mNHondWmijBVwPKK94hlkUOgIwXiMQLrnBMsQdKrhDE=
|
||||
github.com/hashicorp/consul v1.4.3/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 h1:URgjUo+bs1KwatoNbwG0uCO4dHN4r1jsp4a5AGgHRjo=
|
||||
github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-gcp-common v0.0.0-20180425173946-763e39302965 h1:CRWAJ7atEOq9OfAa5rBKvL4wFNbUrPil8hN1knLuEG0=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-gcp-common v0.0.0-20180425173946-763e39302965/go.mod h1:LNbios2fdMAuLA1dsYUvUcoCYIfywcCEK8/ooaWjoOA=
|
||||
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
|
||||
github.com/hashicorp/go-hclog v0.8.0 h1:z3ollgGRg8RjfJH6UVBaG54R70GFd++QOkvnJH3VSBY=
|
||||
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-memdb v0.0.0-20190306140544-eea0b16292ad h1:Z8ldPkr3NLLacRTNmpAug4Uz09349gcnOB0b7PDD7/0=
|
||||
github.com/hashicorp/go-memdb v0.0.0-20190306140544-eea0b16292ad/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE=
|
||||
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104 h1:9iQ/zrTOJqzP+kH37s6xNb6T1RysiT7fnDD3DJbspVw=
|
||||
github.com/hashicorp/go-plugin v0.0.0-20190220160451-3f118e8ee104/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
|
||||
github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 h1:qCv4319q2q7XKn0MQbi8p37hsJ+9Xo8e6yojA73JVxk=
|
||||
github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
@@ -252,8 +259,8 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v0.0.0-20160214002439-2e7f5ea8e27b h1:j5hhoMJXMZPck42/fNTe6EWsKmkgr8bZyCIGZKUoW2o=
|
||||
github.com/hashicorp/go-version v0.0.0-20160214002439-2e7f5ea8e27b/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
@@ -262,420 +269,482 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/nomad v0.8.7 h1:jOrmJdAoWcyhKgoG4OxHQhG5SU6RniXFjfwKg6a492U=
|
||||
github.com/hashicorp/nomad v0.8.7/go.mod h1:WRaKjdO1G2iqi86TvTjIYtKTyxg4pl7NLr9InxtWaI0=
|
||||
github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y=
|
||||
github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
|
||||
github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/vault v0.0.0-20181121181053-d4367e581fe1 h1:Z9HQ6wYfPZvwDMPFBMxnAO7SI80iGMq+Hhd2+dOsp84=
|
||||
github.com/hashicorp/vault v0.0.0-20181121181053-d4367e581fe1/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.0.0-20190311155555-98628998247d h1:c6V5Sn72jhmkJlEsfGUhfcWtRYml/iIxx5FI3Q2NvhE=
|
||||
github.com/hashicorp/vault v1.1.0 h1:v79NUgO5xCZnXVzUkIqFOXtP8YhpnHAi1fk3eo9cuOE=
|
||||
github.com/hashicorp/vault v1.1.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
|
||||
github.com/hashicorp/vault-plugin-auth-alicloud v0.0.0-20190311155555-98628998247d/go.mod h1:o3i5QQWgV5+SYouIn++L9D0kbhLYB3FjxNRHNf6KS+Q=
|
||||
github.com/hashicorp/vault-plugin-auth-azure v0.0.0-20190201222632-0af1d040b5b3 h1:A8VkhfjR3B7PkwQCwIj+0S76xBQMpetKPq12Ve7y6KQ=
|
||||
github.com/hashicorp/vault-plugin-auth-azure v0.0.0-20190201222632-0af1d040b5b3/go.mod h1:f+VmjSQIxxO+YTeO3FbPWRPCPbd3f3lwpP6jaO/YduQ=
|
||||
github.com/hashicorp/vault-plugin-auth-centrify v0.0.0-20180816201131-66b0a34a58bf h1:AizyQbOM+h/ZjV0ao65aJ/kQpOxlOKgZORLP/XnJlTA=
|
||||
github.com/hashicorp/vault-plugin-auth-centrify v0.0.0-20180816201131-66b0a34a58bf/go.mod h1:IIz+CMBKBEFyjeBeFUlpoUuMOyFb7mybOUNP6GX1xuk=
|
||||
github.com/hashicorp/vault-plugin-auth-gcp v0.0.0-20190201215414-7d4c2101e7d0 h1:xP4Iiq/X4is8ZCVM/A5rrXjHMmEzqZ9+0Smj0Ft8KpQ=
|
||||
github.com/hashicorp/vault-plugin-auth-gcp v0.0.0-20190201215414-7d4c2101e7d0/go.mod h1:E/E+5CuQCjOn/YGCmZ/tA7GwLey/lN1PwwJOOa9Iqy0=
|
||||
github.com/hashicorp/vault-plugin-auth-jwt v0.0.0-20190314211503-86b44673ce1e h1:cezAuunlgcIDrKL9L0I72CTL2jctqAIkUzo4oqHJb+0=
|
||||
github.com/hashicorp/vault-plugin-auth-jwt v0.0.0-20190314211503-86b44673ce1e/go.mod h1:j6Xmkj3dzuC63mivquwVVTlxjwDndwNxi4cJUku40J8=
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.0.0-20190201222209-db96aa4ab438 h1:ylEjxR+l2UwEPNiSNYnrpbnqYkmyhGYs3gutLLopbgc=
|
||||
github.com/hashicorp/vault-plugin-auth-kubernetes v0.0.0-20190201222209-db96aa4ab438/go.mod h1:PqRUU5TaQ6FwVTsHPLrJs1F+T5IjbSzlfTy9cTyGeHM=
|
||||
github.com/hashicorp/vault-plugin-secrets-ad v0.0.0-20190131222416-4796d9980125 h1:EWUv105dSegjxZacnQ4+DKXHaVnECielqHtw5eOhGNs=
|
||||
github.com/hashicorp/vault-plugin-secrets-ad v0.0.0-20190131222416-4796d9980125/go.mod h1:4vRQzvp3JI+g4oUqzcklIEj2UKyhQnpIo+BDbh2uzYM=
|
||||
github.com/hashicorp/vault-plugin-secrets-alicloud v0.0.0-20190131211812-b0abe36195cb h1:mWY3OMYmQqxlbGiu/ffG7UGua3eWUqe2LjyiajyNYLE=
|
||||
github.com/hashicorp/vault-plugin-secrets-alicloud v0.0.0-20190131211812-b0abe36195cb/go.mod h1:rl8WzY7++fZMLXd6Z/k/o9wUmMbOqpTLhbtKs1loMU0=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.0.0-20181207232500-0087bdef705a h1:1sAzFWTeuHSBT+IzQmmWCw2oN9ZBNyPGbG2y30TYjK8=
|
||||
github.com/hashicorp/vault-plugin-secrets-azure v0.0.0-20181207232500-0087bdef705a/go.mod h1:/DhLpYuRP2o00gkj6S0Gy7NvKk5AaAtP6p3f+OmxDUI=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcp v0.0.0-20190311200649-621231cb86fe h1:MheAGVIKlMmR4m5rslSCeylNszrOqY/bRhWXI3oUdgU=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcp v0.0.0-20190311200649-621231cb86fe/go.mod h1:IV2OZZZ9FCtSYeKDLsnO5JipMdjwachVISz9pNuQjhs=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.0.0-20190116164938-d6b25b0b4a39 h1:Tg7YyHyK9P/Z/2aA3NTngjLhZN/1CoeWtbUutoX5A6E=
|
||||
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.0.0-20190116164938-d6b25b0b4a39/go.mod h1:2n62quNV4DvfMY5Lxx82NJmx+9pYtv4RltLIFKxEO4E=
|
||||
github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190315192709-dccffee64925 h1:QpryTbZYYtc5zE5qZghxzUsBLmT4YtW3LBZMWRmFxXY=
|
||||
github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190315192709-dccffee64925/go.mod h1:VJHHT2SC1tAPrfENQeBhLlb5FbZoKZM+oC/ROmEftz0=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
|
||||
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0=
|
||||
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=
|
||||
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg=
|
||||
github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=
|
||||
github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4=
|
||||
github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=
|
||||
github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/keybase/go-crypto v0.0.0-20190312101036-b475f2ecc1fe h1:1uflwka8xAWVB2vfZgn+EzemfgGi7WA/XHDnECEeSCM=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/keybase/go-crypto v0.0.0-20190312101036-b475f2ecc1fe/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.3.0 h1:kKeUSEWOEaY7o6tEo81HGkvA90Q+qCd1WIkOjr7HgvQ=
|
||||
github.com/klauspost/compress v1.3.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817 h1:/7pPahIC+GoCm/euDCi2Pm29bAj9tc6TcK4Zcc8D3WI=
|
||||
github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
|
||||
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v0.0.0-20180606150939-90b2c57fba35 h1:V1xjy9E/9RjLJs+cSLIY0LxUtqc2q1Ufq5O7MhOqfz8=
|
||||
github.com/klauspost/pgzip v0.0.0-20180606150939-90b2c57fba35/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/reedsolomon v0.0.0-20190210214925-2b210cf0866d h1:alKbTsh5QbtSTSX09Xoi13lfftojLoznxBowyexhpkQ=
|
||||
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
|
||||
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/klauspost/reedsolomon v0.0.0-20190210214925-2b210cf0866d/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
|
||||
github.com/klauspost/reedsolomon v1.9.1 h1:kYrT1MlR4JH6PqOpC+okdb9CDTcwEC/BqpzK4WFyXL8=
|
||||
github.com/klauspost/reedsolomon v1.9.1/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v0.0.0-20181016162627-9eb73efc1fcc h1:0pifi8wVV/YuUKBDmlH3koJgRVnUJ2RiJQ8ly/1/aJ8=
|
||||
github.com/lib/pq v0.0.0-20181016162627-9eb73efc1fcc/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mailru/easyjson v0.0.0-20180730094502-03f2033d19d5 h1:0x4qcEHDpruK6ML/m/YSlFUUu0UpRD3I2PHsNCuGnyA=
|
||||
github.com/mailru/easyjson v0.0.0-20180730094502-03f2033d19d5/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b h1:v29yPGHhOqw7VHEnTeQFAth3SsBrmwc8JfuhNY0G34k=
|
||||
github.com/mattbaird/elastigo v0.0.0-20170123220020-2fe47fd29e4b/go.mod h1:5MWrJXKRQyhQdUCF+vu6U5c4nQpg70vW3eHaU0/AYbU=
|
||||
github.com/mattn/go-colorable v0.0.0-20160210001857-9fdad7c47650/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
|
||||
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/michaelklishin/rabbit-hole v1.5.0 h1:Bex27BiFDsijCM9D0ezSHqyy0kehpYHuNKaPqq/a4RM=
|
||||
github.com/michaelklishin/rabbit-hole v1.5.0/go.mod h1:vvI1uOitYZi0O5HEGXhaWC1XT80Gy+HvFheJ+5Krlhk=
|
||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.8 h1:1QYRAKU3lN5cRfLCkPU08hwvLJFhvjP6MqNMmQz6ZVI=
|
||||
github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/minio/blazer v0.0.0-20171126203752-2081f5bf0465 h1:rV63TOug3ph7F/cpkDSXdZNxyH3LCta+Lk5A/IYkt/0=
|
||||
github.com/minio/blazer v0.0.0-20171126203752-2081f5bf0465/go.mod h1:ChYiRE5crAmaOo0dQth8FJCtLTiPH9QEMVlDcMp+938=
|
||||
github.com/minio/cli v0.0.0-20170227073228-b8ae5507c0ce h1:cMutC6C4ElU0TVblNfXx6us/E5J54zgxMCoELhQ+Dj4=
|
||||
github.com/minio/cli v0.0.0-20170227073228-b8ae5507c0ce/go.mod h1:hLsWNQy2wIf3FKFnMlH69f4RdEyn8nbRA2shaulTjGY=
|
||||
github.com/minio/dsync v0.0.0-20190104003057-61c41ffdeea2 h1:d244DGqthdKVVB7fKUN/oVK0w1GEP3Da5YUAJKZivU4=
|
||||
github.com/minio/cli v1.3.0 h1:vB0iUpmyaH54+1jJJj62Aa0qFF3xO3i0J3IcKiM6bHM=
|
||||
github.com/minio/cli v1.3.0/go.mod h1:hLsWNQy2wIf3FKFnMlH69f4RdEyn8nbRA2shaulTjGY=
|
||||
github.com/minio/dsync v0.0.0-20190104003057-61c41ffdeea2/go.mod h1:eLQe3mXL0h02kNpPtBJiLr1fIEIJftgXRAjncjQbxJo=
|
||||
github.com/minio/highwayhash v0.0.0-20181220011308-93ed73d64169 h1:ysJt+W4oIB0YVMckoJt6SjJN/eF57XQdAlWZw786smU=
|
||||
github.com/minio/dsync v0.0.0-20190131060523-fb604afd87b2 h1:5Aq4Aro/PSNVgoWWTLPX+zcfDY87VhtKPv+7x1ERJ1w=
|
||||
github.com/minio/dsync v0.0.0-20190131060523-fb604afd87b2/go.mod h1:eLQe3mXL0h02kNpPtBJiLr1fIEIJftgXRAjncjQbxJo=
|
||||
github.com/minio/highwayhash v0.0.0-20181220011308-93ed73d64169/go.mod h1:NL8wme5P5MoscwAkXfGroz3VgpCdhBw3KYOu5mEsvpU=
|
||||
github.com/minio/highwayhash v0.0.0-20190131021015-02ca4b43caa3 h1:qt0ysgOZnWt6g6WA/gDpLfl+iV77hEYWLu2dleBw3i4=
|
||||
github.com/minio/highwayhash v0.0.0-20190131021015-02ca4b43caa3/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc=
|
||||
github.com/minio/lsync v0.0.0-20190207022115-a4e43e3d0887 h1:MIpCDz3d2FR2a+FjdizuFdjsoeHuLlSkl3YNQJ55jV8=
|
||||
github.com/minio/lsync v0.0.0-20190207022115-a4e43e3d0887/go.mod h1:ni10+iSX7FO8N2rv41XM444V6w4rYO0dZo5KIkbn/YA=
|
||||
github.com/minio/mc v0.0.0-20190311071728-2e612b23d665 h1:fL4R87wJA+Y0RjGMD8gUnI0AkBc5q1+APDDVx3Loz6g=
|
||||
github.com/minio/mc v0.0.0-20190311071728-2e612b23d665/go.mod h1:7qLZXNjCD55DJ3iqe1uWoUh1MASRVd1M6wnqSdyhx7Y=
|
||||
github.com/minio/mc v0.0.0-20190401030144-a1355e50e2e8 h1:npKHywsxVECDu+YHfR8Sswm3giEdRbCcLRSYudE3UxQ=
|
||||
github.com/minio/mc v0.0.0-20190401030144-a1355e50e2e8/go.mod h1:rnJByweU1h98rGmAcWcKen1sCAlekF38kbSrq6OLmAg=
|
||||
github.com/minio/minio v0.0.0-20190206103305-fd4e15c11641/go.mod h1:lXcp05uxYaW99ebgI6ZKIGYU7tqZkM5xSsG0xRt4VIU=
|
||||
github.com/minio/minio-go v0.0.0-20190227180923-59af836a7e6d h1:gptD0/Hnam7h4Iq9D/33fscRpHfzOOUqUbH2nPw9HcU=
|
||||
github.com/minio/minio v0.0.0-20190325204105-0250f7de678b/go.mod h1:6ODmvb06uOpNy0IM+3pJRTHaauOMpLJ51jLhipbcifI=
|
||||
github.com/minio/minio-go v0.0.0-20190227180923-59af836a7e6d/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM=
|
||||
github.com/minio/parquet-go v0.0.0-20190304214338-8747cd6359d3 h1:jICxM8jOUfk4GRAgy5x1KAkTBdaQuZSc5H67nB5BMEw=
|
||||
github.com/minio/parquet-go v0.0.0-20190304214338-8747cd6359d3/go.mod h1:J+goXSuzlte5imWMqb6cUWC/tbYYysUHctwmKXomYzM=
|
||||
github.com/minio/minio-go v0.0.0-20190313212832-5d20267d970d/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM=
|
||||
github.com/minio/minio-go v0.0.0-20190327203652-5325257a208f h1:u+iNxfkLrfyWp7KxSTV+ZhO4SMHT6qUFxSZ6yhYMQ0Q=
|
||||
github.com/minio/minio-go v0.0.0-20190327203652-5325257a208f/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM=
|
||||
github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
|
||||
github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
|
||||
github.com/minio/parquet-go v0.0.0-20190318185229-9d767baf1679 h1:OMKaN/82sBHUZPvjYNBFituHExa1OGY63eACDGtetKs=
|
||||
github.com/minio/parquet-go v0.0.0-20190318185229-9d767baf1679/go.mod h1:J+goXSuzlte5imWMqb6cUWC/tbYYysUHctwmKXomYzM=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ=
|
||||
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sio v0.0.0-20180327104954-6a41828a60f0 h1:ys4bbOlPvaUBlA0byjm6TqydsXZu614ZIUTfF+4MRY0=
|
||||
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5 h1:l16XLUUJ34wIz+RIvLhSwGvLvKyy+W598b135bJN6mg=
|
||||
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
|
||||
github.com/minio/sio v0.0.0-20180327104954-6a41828a60f0/go.mod h1:PDJGYr8GXjiOTIst0hQMOSK5FdXLwObr2cGbiMddDPc=
|
||||
github.com/minio/sio v0.0.0-20190118043801-035b4ef8c449 h1:p7L1eKiloAwHpDkurkmzaLuRYTReh0aWNxj0rrVVsF8=
|
||||
github.com/minio/sio v0.0.0-20190118043801-035b4ef8c449/go.mod h1:nKM5GIWSrqbOZp0uhyj6M1iA0X6xQzSGtYSaTKSCut0=
|
||||
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
|
||||
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
|
||||
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
|
||||
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee h1:kK7VuFVykgt0LfMSloWYjDOt4TnOcL0AxF0/rDq2VkM=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/pointerstructure v0.0.0-20170205204203-f2329fcfa9e2 h1:KkcG7Ppt1wn2lmtjNr55d1tG4kbDA+TDaWtuezT5WS4=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/pointerstructure v0.0.0-20170205204203-f2329fcfa9e2/go.mod h1:KMNPMpc0BU/kZEgyDhBplsDn/mjnJMhyMjq4MWboN20=
|
||||
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
|
||||
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44=
|
||||
github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ=
|
||||
github.com/nats-io/go-nats v0.0.0-20161120202126-6b6bf392d34d h1:WB/xFGGdAliPnwnKOUIXp9qLCYQtbh09j2nDgtzI948=
|
||||
github.com/nats-io/go-nats v0.0.0-20161120202126-6b6bf392d34d/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
|
||||
github.com/nats-io/go-nats-streaming v0.0.0-20161216191029-077898146bfb h1:jUIMu/j8MvC05arCap7hVlhNStawzBrQaZ2CU+IHrSE=
|
||||
github.com/nats-io/go-nats v1.7.2 h1:cJujlwCYR8iMz5ofZSD/p2WLW8FabhkQ2lIEVbSvNSA=
|
||||
github.com/nats-io/go-nats v1.7.2/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
|
||||
github.com/nats-io/go-nats-streaming v0.0.0-20161216191029-077898146bfb/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
||||
github.com/nats-io/nats v0.0.0-20160916181735-70b70be17b77 h1:3Uk8Cn5GxarM118Ya1pkd7p8Y0DQgNlbhaJaqBOvDwY=
|
||||
github.com/nats-io/go-nats-streaming v0.4.2 h1:e7Fs4yxvFTs8N5xKFoJyw0sVW2heJwYvrUWfdf9VQlE=
|
||||
github.com/nats-io/go-nats-streaming v0.4.2/go.mod h1:gfq4R3c9sKAINOpelo0gn/b9QDMBZnmrttcsNF+lqyo=
|
||||
github.com/nats-io/nats v0.0.0-20160916181735-70b70be17b77/go.mod h1:PpmYZwlgTfBI56QypJLfIMOfLnMRuVs+VL6r8mQ2SoQ=
|
||||
github.com/nats-io/nats v1.7.2 h1:8hasuHgNFiDGc7MTkGpPFc0v1Jz1nEoQwIHKAH2hFuo=
|
||||
github.com/nats-io/nats v1.7.2/go.mod h1:PpmYZwlgTfBI56QypJLfIMOfLnMRuVs+VL6r8mQ2SoQ=
|
||||
github.com/nats-io/nats-streaming-server v0.12.2 h1:EpyLfUBZgwu5c0mdSSytQsapm615AyitPssq7jgafdw=
|
||||
github.com/nats-io/nats-streaming-server v0.12.2/go.mod h1:RyqtDJZvMZO66YmyjIYdIvS69zu/wDAkyNWa8PIUa5c=
|
||||
github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=
|
||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||
github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs=
|
||||
github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nsqio/go-nsq v0.0.0-20181028195256-0527e80f3ba5 h1:pQydq98llDs8dmbPtEtgFT8gsKwGnRs811DDKHqWWLA=
|
||||
github.com/nsqio/go-nsq v0.0.0-20181028195256-0527e80f3ba5/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=
|
||||
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/ory-am/common v0.4.0 h1:edGPoxYX4hno0IJHXh9TCMUPR6ZcJp+y6aClFYxeuUE=
|
||||
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/ory-am/common v0.4.0/go.mod h1:oCYGuwwM8FyYMKqh9vrhBaeUoyz/edx0bgJN6uS6/+k=
|
||||
github.com/ory/dockertest v3.3.4+incompatible h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8=
|
||||
github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v0.0.0-20160315012113-19396ecfda37/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/xattr v0.0.0-20170808190211-56ed87199eba h1:WIqO7Q7EUftmER0kakrexuyEWGq+W8ToUfPM/9oZG28=
|
||||
github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=
|
||||
github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pkg/xattr v0.0.0-20170808190211-56ed87199eba/go.mod h1:wuo6utqb0b/WNJYm0fQyg57cKpORNfpX2lY6Ew6+Grg=
|
||||
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=
|
||||
github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/pquerna/otp v1.1.0 h1:q2gMsMuMl3JzneUaAX1MRGxLvOG6bzXV51hivBaStf0=
|
||||
github.com/pquerna/otp v1.1.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
|
||||
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
|
||||
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
|
||||
github.com/rs/cors v0.0.0-20190116175910-76f58f330d76 h1:kz+slcZ3xepXoLw24pyf3+fnc3WJITZ91IEa+PJTv2g=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rs/cors v0.0.0-20190116175910-76f58f330d76/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI=
|
||||
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
|
||||
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY=
|
||||
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/segmentio/go-prompt v0.0.0-20161017233205-f0d19b6901ad h1:2ZO86bDscwpNrDfu3R0YlSF1igDzsk0EdrLzwgJqqUQ=
|
||||
github.com/segmentio/go-prompt v0.0.0-20161017233205-f0d19b6901ad/go.mod h1:B3ehdD1xPoWDKgrQgUaGk+m8H1xb1J5TyYDfKpKNeEE=
|
||||
github.com/segmentio/go-prompt v1.2.1-0.20161017233205-f0d19b6901ad h1:EqOdoSJGI7CsBQczPcIgmpm3hJE7X8Hj3jrgI002whs=
|
||||
github.com/segmentio/go-prompt v1.2.1-0.20161017233205-f0d19b6901ad/go.mod h1:B3ehdD1xPoWDKgrQgUaGk+m8H1xb1J5TyYDfKpKNeEE=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
|
||||
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/skyrings/skyring-common v0.0.0-20160324141443-762fd2bfc12e h1:oZ1fpzB0OtG3e9d1NbnERBTvQGx5NDZ1pr2kWPZbGUI=
|
||||
github.com/skyrings/skyring-common v0.0.0-20160324141443-762fd2bfc12e/go.mod h1:d8hQseuYt4rJoOo21lFzYJdhMjmDqLY++ayArbgYjWI=
|
||||
github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e h1:jrZSSgPUDtBeJbGXqgGUeupQH8I+ZvGXfhpIahye2Bc=
|
||||
github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e/go.mod h1:d8hQseuYt4rJoOo21lFzYJdhMjmDqLY++ayArbgYjWI=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac h1:wbW+Bybf9pXxnCFAOWZTqkRjAc7rAIwo2e1ArUhiHxg=
|
||||
github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v0.0.0-20190401200700-3f99fa72afbb/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/streadway/amqp v0.0.0-20160311215503-2e25825abdbd h1:625/bJvSNfQrzzK5ttnUqMqnVe8M5MILmf5ZRGgeeDY=
|
||||
github.com/streadway/amqp v0.0.0-20160311215503-2e25825abdbd/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY=
|
||||
github.com/streadway/amqp v0.0.0-20190312223743-14f78b41ce6d h1:ToACqFOOYVdz7PswtVcAawttvtdGlLhoAsXdhYFQeEI=
|
||||
github.com/streadway/amqp v0.0.0-20190312223743-14f78b41ce6d/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tidwall/gjson v1.1.2/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
|
||||
github.com/tidwall/gjson v1.1.4 h1:lonRDhK9sFzw7ogkERBgx5wF6lRP2bpjr6jiwVzYjYc=
|
||||
github.com/tidwall/gjson v1.1.4/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
|
||||
github.com/tidwall/match v1.0.0 h1:Ym1EcFkp+UQ4ptxfWlW+iMdq5cPH5nEuGzdf/Pb7VmI=
|
||||
github.com/tidwall/gjson v1.2.1 h1:j0efZLrZUvNerEf6xqoi0NjWMK5YlLrR7Guo/dxY174=
|
||||
github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA=
|
||||
github.com/tidwall/match v1.0.0/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65 h1:rQ229MBgvW68s1/g6f1/63TgYwYxfF4E+bi/KC19P8g=
|
||||
github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg=
|
||||
github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/ugorji/go v0.0.0-20180628102755-7d51bbe6161d h1:vqHumCiddnEi8M6ztdQ9QZkiAJn5fASghAY2A7Jeyic=
|
||||
github.com/ugorji/go v0.0.0-20180628102755-7d51bbe6161d/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs=
|
||||
github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780 h1:vG/gY/PxA3v3l04qxe3tDjXyu3bozii8ulSlIPOYKhI=
|
||||
github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.15.0 h1:r1SzcjSm4ybA0qZs3B4QYX072f8gK61Kh0qtwyFpfdk=
|
||||
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b h1:6ayHMBPtdP3jNuk+Sfhso+PTB7ZJQ5E1FBo403m2H8w=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
go.opencensus.io v0.19.2 h1:ZZpq6xI6kv/LuE/5s5UQvBU5vMjvRnPb8PvJrIntAnc=
|
||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4=
|
||||
golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a h1:YX8ljsm6wXlHZO+aRz9Exqr0evNhKRNe5K/gi+zKh4U=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
|
||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311175648-12eef18f757f h1:5XQlNmCmC6QCwdP9jITSaPHwflzXW/v2wDg4wSiPscs=
|
||||
golang.org/x/net v0.0.0-20190311175648-12eef18f757f/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b h1:ZWpVMTsK0ey5WJCu+vVdfMldWq7/ezaOcjnKWIHWVkE=
|
||||
golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190327214358-63eda1eb0650 h1:XCbwcsP09zrBt1aYht0fASw+ynbEpYr8NnCkIN9nMM0=
|
||||
golang.org/x/net v0.0.0-20190327214358-63eda1eb0650/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190324223953-e3b2ff56ed87/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190328230028-74de082e2cca h1:hyA6yiAgbUwuWqtscNvWAI7U1CtlaD1KilQ6iudt1aI=
|
||||
golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190304154630-e844e0132e93/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190311152110-c8c8c57fd1e1 h1:FQNj2xvjQ1lgFyzbSybGZr792Y8Dy95D7uuqnZAzNaA=
|
||||
golang.org/x/sys v0.0.0-20190311152110-c8c8c57fd1e1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca h1:o2TLx1bGN3W+Ei0EMU5fShLupLmTOU95KvJJmfYhAzM=
|
||||
golang.org/x/sys v0.0.0-20190318195719-6c81ef8f67ca/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc h1:4gbWbmmPFp4ySWICouJl6emP0MyS31yy9SrTlAGFT+g=
|
||||
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65 h1:hOY+O8MxdkPV10pNf7/XEHaySCiPKxixMKUshfHsGn0=
|
||||
golang.org/x/sys v0.0.0-20190329044733-9eb1bfa1ce65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190318200714-bb1270c20edf h1:OVQ7iQXiQQT4WuYg+7S/bOVVlASHvL1Chsc15Qtkogo=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190318200714-bb1270c20edf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf h1:++r/Kj1CfG42p6XntDItK1TfB5V6Vq/baDeKvV1q5gY=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190401201229-1bac838f5b88/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/api v0.0.0-20180603000442-8e296ef26005/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20180916000451-19ff8768a5c0 h1:AJOCn+ScmtWxp6yySbxsiNXi+RrZaHqWgYbZUzl6oLc=
|
||||
google.golang.org/api v0.0.0-20180916000451-19ff8768a5c0/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
|
||||
google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA=
|
||||
google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
|
||||
google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180601223552-81158efcc9f2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180918203901-c3f76f3b92d1 h1:9JN/x8g8h4eMemKiniLwss/+Avp9VmS+Es8eMU4Ifog=
|
||||
google.golang.org/genproto v0.0.0-20180918203901-c3f76f3b92d1/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
gopkg.in/Shopify/sarama.v1 v1.10.1 h1:SoBkWk2qrgFuiAVEGOYlw63FZcS6YLZYqo/z7wFOW2A=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/Shopify/sarama.v1 v1.10.1/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
|
||||
gopkg.in/Shopify/sarama.v1 v1.20.0 h1:DrCuMOhmuaUwb5o4aL9JJnW+whbEnuuL6AZ99ySMoQA=
|
||||
gopkg.in/Shopify/sarama.v1 v1.20.0/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc=
|
||||
gopkg.in/VividCortex/ewma.v1 v1.1.1 h1:tWHEKkKq802K/JT9RiqGCBU5fW3raAPnJGTE9ostZvg=
|
||||
gopkg.in/VividCortex/ewma.v1 v1.1.1/go.mod h1:TekXuFipeiHWiAlO1+wSS23vTcyFau5u3rxXUSXj710=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
|
||||
gopkg.in/cheggaaa/pb.v2 v2.0.6 h1:L2KAo2l2ZQTzxmh8b9RdQpzgLpK2mX3paGCMJSUugBk=
|
||||
gopkg.in/cheggaaa/pb.v2 v2.0.6/go.mod h1:0CiZ1p8pvtxBlQpLXkHuUTpdJ1shm3OqCF1QugkjHL4=
|
||||
gopkg.in/fatih/color.v1 v1.7.0 h1:bYGjb+HezBM6j/QmgBfgm1adxHpzzrss6bj4r9ROppk=
|
||||
gopkg.in/fatih/color.v1 v1.7.0/go.mod h1:P7yosIhqIl/sX8J8UypY5M+dDpD2KmyfP5IRs5v/fo0=
|
||||
gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gorethink/gorethink.v4 v4.1.0 h1:xoE9qJ9Ae9KdKEsiQGCF44u2JdnjyohrMBRDtts3Gjw=
|
||||
gopkg.in/gorethink/gorethink.v4 v4.1.0/go.mod h1:M7JgwrUAmshJ3iUbEK0Pt049MPyPK+CYDGGaEjdZb/c=
|
||||
gopkg.in/h2non/filetype.v1 v1.0.3 h1:EhZ9p3H8eDdFHiKljxJ59EeQ9Pu88wrgY7/B1WRK/VE=
|
||||
gopkg.in/h2non/filetype.v1 v1.0.3/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/h2non/filetype.v1 v1.0.5/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
|
||||
gopkg.in/mattn/go-colorable.v0 v0.1.0 h1:WYuADWvfvYC07fm8ygYB3LMcsc5CunpxfMGKawHkAos=
|
||||
gopkg.in/mattn/go-colorable.v0 v0.1.0/go.mod h1:BVJlBXzARQxdi3nZo6f6bnl5yR20/tOL6p+V0KejgSY=
|
||||
gopkg.in/mattn/go-isatty.v0 v0.0.4 h1:NtS1rQGQr4IaFWBGz4Cz4BhB///gyys4gDVtKA7hIsc=
|
||||
gopkg.in/mattn/go-isatty.v0 v0.0.4/go.mod h1:wt691ab7g0X4ilKZNmMII3egK0bTxl37fEn/Fwbd8gc=
|
||||
gopkg.in/mattn/go-runewidth.v0 v0.0.4 h1:r0P71TnzQDlNIcizCqvPSSANoFa3WVGtcNJf3TWurcY=
|
||||
gopkg.in/mattn/go-runewidth.v0 v0.0.4/go.mod h1:BmXejnxvhwdaATwiJbB1vZ2dtXkQKZGu9yLFCZb4msQ=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/olivere/elastic.v5 v5.0.31 h1:MNyEvLWhMubs7rJBkhxwloHJ9VoXMf12G+CPH6e+ZfM=
|
||||
gopkg.in/olivere/elastic.v5 v5.0.31/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk=
|
||||
gopkg.in/ory-am/dockertest.v2 v2.2.3 h1:vSYvP7tvyfAm9merq0gHmcI4yk5nkPpfXmoBCnSP3/4=
|
||||
gopkg.in/olivere/elastic.v5 v5.0.80 h1:AKjfcq3ZIAAqO4m8h/vJ3GP6nY8n9ft5mgf54fEqC60=
|
||||
gopkg.in/olivere/elastic.v5 v5.0.80/go.mod h1:uhHoB4o3bvX5sorxBU29rPcmBQdV2Qfg0FBrx5D6pV0=
|
||||
gopkg.in/ory-am/dockertest.v2 v2.2.3/go.mod h1:kDHEsan1UcKFYH1c28sDmqnmeqIpB4Nj682gSNhYDYM=
|
||||
gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.0.0-20190313115550-3c12c96769cc h1:m/JS6kQd00rICnXLWlnJzMFQB4AplcURUopS8dKiWmI=
|
||||
honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.0.0-20190313115550-3c12c96769cc/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/apimachinery v0.0.0-20190313115320-c9defaaddf6f h1:6ojhffWUv9DZ8i4L2LIvSjbWH3fXfP6PmrTNwXHHMhM=
|
||||
k8s.io/apimachinery v0.0.0-20190313115320-c9defaaddf6f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c=
|
||||
k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
layeh.com/radius v0.0.0-20190118135028-0f678f039617 h1:UfoQTGVcI2tUZdQxp4kyh07KW0RB+HrUez9LeHurAvs=
|
||||
layeh.com/radius v0.0.0-20190118135028-0f678f039617/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
main.go (37 lines changed)
@@ -1,3 +1,5 @@
// +build go1.12

/*
 * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
 *

@@ -23,49 +25,14 @@
package main // import "github.com/minio/minio"

import (
    "fmt"
    "os"
    "runtime"

    version "github.com/hashicorp/go-version"
    "github.com/minio/mc/pkg/console"
    minio "github.com/minio/minio/cmd"

    // Import gateway
    _ "github.com/minio/minio/cmd/gateway"
)

const (
    // Minio requires at least Go v1.9.4
    minGoVersion        = "1.9.4"
    goVersionConstraint = ">= " + minGoVersion
)

// Check if this binary is compiled with at least minimum Go version.
func checkGoVersion(goVersionStr string) error {
    constraint, err := version.NewConstraint(goVersionConstraint)
    if err != nil {
        return fmt.Errorf("'%s': %s", goVersionConstraint, err)
    }

    goVersion, err := version.NewVersion(goVersionStr)
    if err != nil {
        return err
    }

    if !constraint.Check(goVersion) {
        return fmt.Errorf("Minio is not compiled by go %s. Minimum required version is %s, go %s release is known to have security issues. Please recompile accordingly", goVersionConstraint, minGoVersion, runtime.Version()[2:])
    }

    return nil
}

func main() {
    // When `go get` is used minimum Go version check is not triggered but it would have compiled it successfully.
    // However such binary will fail at runtime, hence we also check Go version at runtime.
    if err := checkGoVersion(runtime.Version()[2:]); err != nil {
        console.Errorln(err)
    }

    minio.Main(os.Args)
}
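The deleted checkGoVersion helper parsed runtime.Version() and evaluated it against a hashicorp/go-version constraint on every startup. The `// +build go1.12` tag added at the top of main.go moves that guarantee to compile time: an older toolchain excludes the file and the build typically fails with a message like "build constraints exclude all Go files", instead of producing a binary that only errors at runtime. The following is a minimal standalone sketch of the pattern; the file and output below are illustrative and not part of this commit:

// +build go1.12

// Illustrative only: the build constraint above means a toolchain older
// than go1.12 skips this file entirely, so no runtime version probing
// (and no hashicorp/go-version dependency) is needed.
package main

import "runtime"

func main() {
    // runtime.Version() reports the toolchain that built the binary, e.g. "go1.12.1".
    println("built with", runtime.Version())
}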
main_test.go (46 lines removed)
@@ -1,46 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
    "testing"
)

func TestCheckGoVersion(t *testing.T) {
    // Test success cases.
    testCases := []struct {
        version string
        success bool
    }{
        {minGoVersion, true},
        {"1.6.8", false},
        {"1.5", false},
        {"0.1", false},
        {".1", false},
        {"somejunk", false},
    }

    for _, testCase := range testCases {
        err := checkGoVersion(testCase.version)
        if err != nil && testCase.success {
            t.Fatalf("Test %v, expected: success, got: %v", testCase, err)
        }
        if err == nil && !testCase.success {
            t.Fatalf("Test %v, expected: failure, got: success", testCase)
        }
    }
}
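The deleted table-driven test exercised the constraint parsing behind checkGoVersion. For reference, the same check can still be reproduced directly with the hashicorp/go-version calls used in the removed code (NewConstraint, NewVersion, Check); the snippet below is a standalone sketch and not code from this repository:

package main

import (
    "fmt"

    version "github.com/hashicorp/go-version"
)

func main() {
    constraint, err := version.NewConstraint(">= 1.9.4")
    if err != nil {
        panic(err)
    }
    for _, v := range []string{"1.9.4", "1.6.8", "somejunk"} {
        candidate, err := version.NewVersion(v)
        if err != nil {
            // "somejunk" does not parse, matching the test's failure cases.
            fmt.Println(v, "-> invalid version:", err)
            continue
        }
        fmt.Println(v, "-> satisfies constraint:", constraint.Check(candidate))
    }
}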
@ -1,202 +0,0 @@
(Deleted vendored LICENSE file: the verbatim Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/; 202 lines of standard license text removed together with the vendored dependency.)
@ -1,503 +0,0 @@
|
|||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package metadata provides access to Google Compute Engine (GCE)
|
||||
// metadata and API service accounts.
|
||||
//
|
||||
// This package is a wrapper around the GCE metadata service,
|
||||
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||
package metadata // import "cloud.google.com/go/compute/metadata"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
const (
|
||||
// metadataIP is the documented metadata server IP address.
|
||||
metadataIP = "169.254.169.254"
|
||||
|
||||
// metadataHostEnv is the environment variable specifying the
|
||||
// GCE metadata hostname. If empty, the default value of
|
||||
// metadataIP ("169.254.169.254") is used instead.
|
||||
// This variable name is not defined by any spec, as far as
|
||||
// I know; it was made up for the Go package.
|
||||
metadataHostEnv = "GCE_METADATA_HOST"
|
||||
|
||||
userAgent = "gcloud-golang/0.1"
|
||||
)
|
||||
|
||||
type cachedValue struct {
|
||||
k string
|
||||
trim bool
|
||||
mu sync.Mutex
|
||||
v string
|
||||
}
|
||||
|
||||
var (
|
||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||
instID = &cachedValue{k: "instance/id", trim: true}
|
||||
)
|
||||
|
||||
var (
|
||||
defaultClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
ResponseHeaderTimeout: 2 * time.Second,
|
||||
},
|
||||
}}
|
||||
subscribeClient = &Client{hc: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 2 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
},
|
||||
}}
|
||||
)
|
||||
|
||||
// NotDefinedError is returned when requested metadata is not defined.
|
||||
//
|
||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||
//
|
||||
// This error is not returned if the value is defined to be the empty
|
||||
// string.
|
||||
type NotDefinedError string
|
||||
|
||||
func (suffix NotDefinedError) Error() string {
|
||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||
}
|
||||
|
||||
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||
defer c.mu.Unlock()
|
||||
c.mu.Lock()
|
||||
if c.v != "" {
|
||||
return c.v, nil
|
||||
}
|
||||
if c.trim {
|
||||
v, err = cl.getTrimmed(c.k)
|
||||
} else {
|
||||
v, err = cl.Get(c.k)
|
||||
}
|
||||
if err == nil {
|
||||
c.v = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
onGCEOnce sync.Once
|
||||
onGCE bool
|
||||
)
|
||||
|
||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||
func OnGCE() bool {
|
||||
onGCEOnce.Do(initOnGCE)
|
||||
return onGCE
|
||||
}
|
||||
|
||||
func initOnGCE() {
|
||||
onGCE = testOnGCE()
|
||||
}
|
||||
|
||||
func testOnGCE() bool {
|
||||
// The user explicitly said they're on GCE, so trust them.
|
||||
if os.Getenv(metadataHostEnv) != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
resc := make(chan bool, 2)
|
||||
|
||||
// Try two strategies in parallel.
|
||||
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||
go func() {
|
||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
|
||||
if err != nil {
|
||||
resc <- false
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||
}()
|
||||
|
||||
go func() {
|
||||
addrs, err := net.LookupHost("metadata.google.internal")
|
||||
if err != nil || len(addrs) == 0 {
|
||||
resc <- false
|
||||
return
|
||||
}
|
||||
resc <- strsContains(addrs, metadataIP)
|
||||
}()
|
||||
|
||||
tryHarder := systemInfoSuggestsGCE()
|
||||
if tryHarder {
|
||||
res := <-resc
|
||||
if res {
|
||||
// The first strategy succeeded, so let's use it.
|
||||
return true
|
||||
}
|
||||
// Wait for either the DNS or metadata server probe to
|
||||
// contradict the other one and say we are running on
|
||||
// GCE. Give it a lot of time to do so, since the system
|
||||
// info already suggests we're running on a GCE BIOS.
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case res = <-resc:
|
||||
return res
|
||||
case <-timer.C:
|
||||
// Too slow. Who knows what this system is.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// There's no hint from the system info that we're running on
|
||||
// GCE, so use the first probe's result as truth, whether it's
|
||||
// true or false. The goal here is to optimize for speed for
|
||||
// users who are NOT running on GCE. We can't assume that
|
||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||
// address is fast. Worst case this should return when the
|
||||
// metaClient's Transport.ResponseHeaderTimeout or
|
||||
// Transport.Dial.Timeout fires (in two seconds).
|
||||
return <-resc
|
||||
}
|
||||
|
||||
// systemInfoSuggestsGCE reports whether the local system (without
|
||||
// doing network requests) suggests that we're running on GCE. If this
|
||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||
// server.
|
||||
func systemInfoSuggestsGCE() bool {
|
||||
if runtime.GOOS != "linux" {
|
||||
// We don't have any non-Linux clues available, at least yet.
|
||||
return false
|
||||
}
|
||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||
name := strings.TrimSpace(string(slurp))
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
}
|
||||
|
||||
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||
// ResponseHeaderTimeout).
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
return subscribeClient.Subscribe(suffix, fn)
|
||||
}
|
||||
|
||||
// Get calls Client.Get on the default client.
|
||||
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) { return defaultClient.Zone() }
|
||||
|
||||
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||
|
||||
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||
|
||||
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return defaultClient.InstanceAttributeValue(attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return defaultClient.ProjectAttributeValue(attr)
|
||||
}
|
||||
|
||||
// Scopes calls Client.Scopes on the default client.
|
||||
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||
|
||||
func strsContains(ss []string, s string) bool {
|
||||
for _, v := range ss {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// A Client provides metadata.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
}
|
||||
|
||||
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||
// will use the given http.Client instead of the default client.
|
||||
func NewClient(c *http.Client) *Client {
|
||||
return &Client{hc: c}
|
||||
}
|
||||
|
||||
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||
// This func is otherwise equivalent to Get.
|
||||
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||
// a container, which is an important use-case for local testing of cloud
|
||||
// deployments. To enable spoofing of the metadata service, the environment
|
||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||
// requests shall go.
|
||||
host := os.Getenv(metadataHostEnv)
|
||||
if host == "" {
|
||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||
// binaries built with the "netgo" tag and without cgo won't
|
||||
// know the search suffix for "metadata" is
|
||||
// ".google.internal", and this IP address is documented as
|
||||
// being stable anyway.
|
||||
host = metadataIP
|
||||
}
|
||||
url := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Metadata-Flavor", "Google")
|
||||
req.Header.Set("User-Agent", userAgent)
|
||||
res, err := c.hc.Do(req)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return "", "", NotDefinedError(suffix)
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
|
||||
}
|
||||
all, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return string(all), res.Header.Get("Etag"), nil
|
||||
}
|
||||
|
||||
// Get returns a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
//
|
||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||
// 169.254.169.254 will be used instead.
|
||||
//
|
||||
// If the requested metadata is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
func (c *Client) Get(suffix string) (string, error) {
|
||||
val, _, err := c.getETag(suffix)
|
||||
return val, err
|
||||
}
|
||||
|
||||
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
||||
s, err = c.Get(suffix)
|
||||
s = strings.TrimSpace(s)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) lines(suffix string) ([]string, error) {
|
||||
j, err := c.Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func (c *Client) InternalIP() (string, error) {
|
||||
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func (c *Client) ExternalIP() (string, error) {
|
||||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func (c *Client) Hostname() (string, error) {
|
||||
return c.getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func (c *Client) InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := c.Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func (c *Client) InstanceName() (string, error) {
|
||||
host, err := c.Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func (c *Client) Zone() (string, error) {
|
||||
zone, err := c.getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
||||
return c.Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
||||
return c.Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
||||
|
||||
// Subscribe subscribes to a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
// The suffix may contain query parameters.
|
||||
//
|
||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||
// is deleted. Subscribe returns the error value returned from the last call to
|
||||
// fn, which may be nil when ok == false.
|
||||
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
const failedSubscribeSleep = time.Second * 5
|
||||
|
||||
// First check to see if the metadata value exists at all.
|
||||
val, lastETag, err := c.getETag(suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fn(val, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok := true
|
||||
if strings.ContainsRune(suffix, '?') {
|
||||
suffix += "&wait_for_change=true&last_etag="
|
||||
} else {
|
||||
suffix += "?wait_for_change=true&last_etag="
|
||||
}
|
||||
for {
|
||||
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
||||
if err != nil {
|
||||
if _, deleted := err.(NotDefinedError); !deleted {
|
||||
time.Sleep(failedSubscribeSleep)
|
||||
continue // Retry on other errors.
|
||||
}
|
||||
ok = false
|
||||
}
|
||||
lastETag = etag
|
||||
|
||||
if err := fn(val, ok); err != nil || !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
|
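The deleted metadata package above is a thin client for the GCE metadata server. A minimal usage sketch follows, built only from the exported helpers shown in the file (OnGCE, ProjectID, Zone) and assuming the vendored import path from the package's own import comment; it is illustrative, not code from the repository.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata" // import path taken from the package's import comment above
)

func main() {
	// OnGCE probes the metadata server (and its DNS name) to decide whether we run on GCE.
	if !metadata.OnGCE() {
		log.Fatal("not running on Google Compute Engine")
	}

	projID, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("project ID lookup failed: %v", err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatalf("zone lookup failed: %v", err)
	}
	fmt.Printf("project %s, zone %s\n", projID, zone)
}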
@ -1,292 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package iam supports the resource-specific operations of Google Cloud
|
||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
||||
// See https://cloud.google.com/iam for more about IAM.
|
||||
//
|
||||
// Users of the Google Cloud Libraries will typically not use this package
|
||||
// directly. Instead they will begin with some resource that supports IAM, like
|
||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
||||
package iam
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
||||
type client interface {
|
||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
||||
}
|
||||
|
||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
||||
type grpcClient struct {
|
||||
c pb.IAMPolicyClient
|
||||
}
|
||||
|
||||
var withRetry = gax.WithRetry(func() gax.Retryer {
|
||||
return gax.OnCodes([]codes.Code{
|
||||
codes.DeadlineExceeded,
|
||||
codes.Unavailable,
|
||||
}, gax.Backoff{
|
||||
Initial: 100 * time.Millisecond,
|
||||
Max: 60 * time.Second,
|
||||
Multiplier: 1.3,
|
||||
})
|
||||
})
|
||||
|
||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
||||
var proto *pb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
var err error
|
||||
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
|
||||
return err
|
||||
}, withRetry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto, nil
|
||||
}
|
||||
|
||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
||||
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
||||
Resource: resource,
|
||||
Policy: p,
|
||||
})
|
||||
return err
|
||||
}, withRetry)
|
||||
}
|
||||
|
||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
||||
var res *pb.TestIamPermissionsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
||||
var err error
|
||||
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
||||
Resource: resource,
|
||||
Permissions: perms,
|
||||
})
|
||||
return err
|
||||
}, withRetry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
// A Handle provides IAM operations for a resource.
|
||||
type Handle struct {
|
||||
c client
|
||||
resource string
|
||||
}
|
||||
|
||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandle returns a Handle for resource.
|
||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
||||
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
|
||||
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleGRPCClient returns a Handle for resource using the given
|
||||
// grpc service that implements IAM as a mixin
|
||||
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
|
||||
return InternalNewHandleClient(&grpcClient{c: c}, resource)
|
||||
}
|
||||
|
||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
||||
//
|
||||
// InternalNewHandleClient returns a Handle for resource using the given
|
||||
// client implementation.
|
||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
||||
return &Handle{
|
||||
c: c,
|
||||
resource: resource,
|
||||
}
|
||||
}
|
||||
|
||||
// Policy retrieves the IAM policy for the resource.
|
||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
||||
proto, err := h.c.Get(ctx, h.resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Policy{InternalProto: proto}, nil
|
||||
}
|
||||
|
||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
||||
//
|
||||
// If policy was created from a prior call to Get, then the modification will
|
||||
// only succeed if the policy has not changed since the Get.
|
||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
||||
}
|
||||
|
||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
||||
return h.c.Test(ctx, h.resource, permissions)
|
||||
}
|
||||
|
||||
// A RoleName is a name representing a collection of permissions.
|
||||
type RoleName string
|
||||
|
||||
// Common role names.
|
||||
const (
|
||||
Owner RoleName = "roles/owner"
|
||||
Editor RoleName = "roles/editor"
|
||||
Viewer RoleName = "roles/viewer"
|
||||
)
|
||||
|
||||
const (
|
||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
||||
AllUsers = "allUsers"
|
||||
|
||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// A Policy is a list of Bindings representing roles
|
||||
// granted to members.
|
||||
//
|
||||
// The zero Policy is a valid policy with no bindings.
|
||||
type Policy struct {
|
||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
||||
// and provide an exported alias here.
|
||||
|
||||
// This field is exported for use by the Google Cloud Libraries only.
|
||||
// It may become unexported in a future release.
|
||||
InternalProto *pb.Policy
|
||||
}
|
||||
|
||||
// Members returns the list of members with the supplied role.
|
||||
// The return value should not be modified. Use Add and Remove
|
||||
// to modify the members of a role.
|
||||
func (p *Policy) Members(r RoleName) []string {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.Members
|
||||
}
|
||||
|
||||
// HasRole reports whether member has role r.
|
||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
||||
return memberIndex(member, p.binding(r)) >= 0
|
||||
}
|
||||
|
||||
// Add adds member to role r if it is not already present.
|
||||
// A new binding is created if there is no binding for the role.
|
||||
func (p *Policy) Add(member string, r RoleName) {
|
||||
b := p.binding(r)
|
||||
if b == nil {
|
||||
if p.InternalProto == nil {
|
||||
p.InternalProto = &pb.Policy{}
|
||||
}
|
||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
||||
Role: string(r),
|
||||
Members: []string{member},
|
||||
})
|
||||
return
|
||||
}
|
||||
if memberIndex(member, b) < 0 {
|
||||
b.Members = append(b.Members, member)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove removes member from role r if it is present.
|
||||
func (p *Policy) Remove(member string, r RoleName) {
|
||||
bi := p.bindingIndex(r)
|
||||
if bi < 0 {
|
||||
return
|
||||
}
|
||||
bindings := p.InternalProto.Bindings
|
||||
b := bindings[bi]
|
||||
mi := memberIndex(member, b)
|
||||
if mi < 0 {
|
||||
return
|
||||
}
|
||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
||||
// into the removed spot and shrink the slice.
|
||||
if len(b.Members) == 1 {
|
||||
// Remove binding.
|
||||
last := len(bindings) - 1
|
||||
bindings[bi] = bindings[last]
|
||||
bindings[last] = nil
|
||||
p.InternalProto.Bindings = bindings[:last]
|
||||
return
|
||||
}
|
||||
// Remove member.
|
||||
// TODO(jba): worry about multiple copies of m?
|
||||
last := len(b.Members) - 1
|
||||
b.Members[mi] = b.Members[last]
|
||||
b.Members[last] = ""
|
||||
b.Members = b.Members[:last]
|
||||
}
|
||||
|
||||
// Roles returns the names of all the roles that appear in the Policy.
|
||||
func (p *Policy) Roles() []RoleName {
|
||||
if p.InternalProto == nil {
|
||||
return nil
|
||||
}
|
||||
var rns []RoleName
|
||||
for _, b := range p.InternalProto.Bindings {
|
||||
rns = append(rns, RoleName(b.Role))
|
||||
}
|
||||
return rns
|
||||
}
|
||||
|
||||
// binding returns the Binding for the supplied role, or nil if there isn't one.
|
||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
||||
i := p.bindingIndex(r)
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
return p.InternalProto.Bindings[i]
|
||||
}
|
||||
|
||||
func (p *Policy) bindingIndex(r RoleName) int {
|
||||
if p.InternalProto == nil {
|
||||
return -1
|
||||
}
|
||||
for i, b := range p.InternalProto.Bindings {
|
||||
if b.Role == string(r) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
||||
func memberIndex(m string, b *pb.Binding) int {
|
||||
if b == nil {
|
||||
return -1
|
||||
}
|
||||
for i, mm := range b.Members {
|
||||
if mm == m {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
|
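In normal use a Handle comes from a resource's IAM method and SetPolicy writes the edited policy back; the local Policy edits themselves need nothing but the methods shown in the file above. A short illustrative sketch, assuming the package is importable as cloud.google.com/go/iam (the import path is not stated in the file):

package main

import (
	"fmt"

	"cloud.google.com/go/iam" // assumed import path for the package above
)

func main() {
	// The zero Policy is valid and has no bindings; Add creates bindings as needed.
	// Member strings follow the IAM convention, e.g. "user:<email>".
	var p iam.Policy
	p.Add("user:jane@example.com", iam.Editor)
	p.Add(iam.AllUsers, iam.Viewer)

	fmt.Println(p.HasRole(iam.AllUsers, iam.Viewer)) // true
	fmt.Println(p.Roles())                           // [roles/editor roles/viewer]

	p.Remove(iam.AllUsers, iam.Viewer) // the binding held a single member, so it is dropped entirely
	fmt.Println(p.HasRole(iam.AllUsers, iam.Viewer)) // false
}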
@ -1,54 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Annotate prepends msg to the error message in err, attempting
|
||||
// to preserve other information in err, like an error code.
|
||||
//
|
||||
// Annotate panics if err is nil.
|
||||
//
|
||||
// Annotate knows about these error types:
|
||||
// - "google.golang.org/grpc/status".Status
|
||||
// - "google.golang.org/api/googleapi".Error
|
||||
// If the error is not one of these types, Annotate behaves
|
||||
// like
|
||||
// fmt.Errorf("%s: %v", msg, err)
|
||||
func Annotate(err error, msg string) error {
|
||||
if err == nil {
|
||||
panic("Annotate called with nil")
|
||||
}
|
||||
if s, ok := status.FromError(err); ok {
|
||||
p := s.Proto()
|
||||
p.Message = msg + ": " + p.Message
|
||||
return status.ErrorProto(p)
|
||||
}
|
||||
if g, ok := err.(*googleapi.Error); ok {
|
||||
g.Message = msg + ": " + g.Message
|
||||
return g
|
||||
}
|
||||
return fmt.Errorf("%s: %v", msg, err)
|
||||
}
|
||||
|
||||
// Annotatef uses format and args to format a string, then calls Annotate.
|
||||
func Annotatef(err error, format string, args ...interface{}) error {
|
||||
return Annotate(err, fmt.Sprintf(format, args...))
|
||||
}
|
|
@ -1,108 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package optional provides versions of primitive types that can
|
||||
// be nil. These are useful in methods that update some of an API object's
|
||||
// fields.
|
||||
package optional
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
|
||||
// Bool is either a bool or nil.
|
||||
Bool interface{}
|
||||
|
||||
// String is either a string or nil.
|
||||
String interface{}
|
||||
|
||||
// Int is either an int or nil.
|
||||
Int interface{}
|
||||
|
||||
// Uint is either a uint or nil.
|
||||
Uint interface{}
|
||||
|
||||
// Float64 is either a float64 or nil.
|
||||
Float64 interface{}
|
||||
|
||||
// Duration is either a time.Duration or nil.
|
||||
Duration interface{}
|
||||
)
|
||||
|
||||
// ToBool returns its argument as a bool.
|
||||
// It panics if its argument is nil or not a bool.
|
||||
func ToBool(v Bool) bool {
|
||||
x, ok := v.(bool)
|
||||
if !ok {
|
||||
doPanic("Bool", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToString returns its argument as a string.
|
||||
// It panics if its argument is nil or not a string.
|
||||
func ToString(v String) string {
|
||||
x, ok := v.(string)
|
||||
if !ok {
|
||||
doPanic("String", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToInt returns its argument as an int.
|
||||
// It panics if its argument is nil or not an int.
|
||||
func ToInt(v Int) int {
|
||||
x, ok := v.(int)
|
||||
if !ok {
|
||||
doPanic("Int", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToUint returns its argument as a uint.
|
||||
// It panics if its argument is nil or not a uint.
|
||||
func ToUint(v Uint) uint {
|
||||
x, ok := v.(uint)
|
||||
if !ok {
|
||||
doPanic("Uint", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToFloat64 returns its argument as a float64.
|
||||
// It panics if its argument is nil or not a float64.
|
||||
func ToFloat64(v Float64) float64 {
|
||||
x, ok := v.(float64)
|
||||
if !ok {
|
||||
doPanic("Float64", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// ToDuration returns its argument as a time.Duration.
|
||||
// It panics if its argument is nil or not a time.Duration.
|
||||
func ToDuration(v Duration) time.Duration {
|
||||
x, ok := v.(time.Duration)
|
||||
if !ok {
|
||||
doPanic("Duration", v)
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func doPanic(capType string, v interface{}) {
|
||||
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
|
||||
}
|
|
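The optional package above exists so that update-style API calls can distinguish "field not set" (nil) from an explicit zero value. A tiny illustration using only ToBool from the file, with the import path assumed to be cloud.google.com/go/internal/optional:

package main

import (
	"fmt"

	"cloud.google.com/go/internal/optional" // assumed vendored path of the package above
)

// describe shows the nil-vs-set distinction these interface{} aliases provide.
// ToBool panics on nil or a non-bool, so the nil case is handled first.
func describe(versioning optional.Bool) string {
	if versioning == nil {
		return "versioning: leave unchanged"
	}
	return fmt.Sprintf("versioning: set to %v", optional.ToBool(versioning))
}

func main() {
	fmt.Println(describe(nil))   // field omitted from the update
	fmt.Println(describe(true))  // field explicitly set to true
	fmt.Println(describe(false)) // field explicitly set to false
}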
@ -1,55 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"time"

	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
)

// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Error() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	return retry(ctx, bo, f, gax.Sleep)
}

func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
	sleep func(context.Context, time.Duration) error) error {
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		// Remember the last "real" error from f.
		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
			lastErr = err
		}
		p := bo.Pause()
		if cerr := sleep(ctx, p); cerr != nil {
			if lastErr != nil {
				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
			}
			return cerr
		}
	}
}
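A minimal sketch of how a caller drives this Retry helper, assuming the vendored path cloud.google.com/go/internal for the package above and a fake transient operation; note that, being a Go internal package, it is only importable from within cloud.google.com/go itself, so this is purely illustrative.

package main

import (
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"

	"cloud.google.com/go/internal" // assumed vendored path of the package above
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	attempts := 0
	err := internal.Retry(ctx, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        5 * time.Second,
		Multiplier: 1.3,
	}, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, fmt.Errorf("transient failure %d", attempts) // keep retrying
		}
		return true, nil // stop: the operation finally succeeded
	})
	fmt.Println(attempts, err)
}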
@ -1,83 +0,0 @@
|
|||
// Copyright 2018 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package trace
|
||||
|
||||
import (
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/genproto/googleapis/rpc/code"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func StartSpan(ctx context.Context, name string) context.Context {
|
||||
ctx, _ = trace.StartSpan(ctx, name)
|
||||
return ctx
|
||||
}
|
||||
|
||||
func EndSpan(ctx context.Context, err error) {
|
||||
span := trace.FromContext(ctx)
|
||||
if err != nil {
|
||||
span.SetStatus(toStatus(err))
|
||||
}
|
||||
span.End()
|
||||
}
|
||||
|
||||
// toStatus interrogates an error and converts it to an appropriate
|
||||
// OpenCensus status.
|
||||
func toStatus(err error) trace.Status {
|
||||
if err2, ok := err.(*googleapi.Error); ok {
|
||||
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
|
||||
} else if s, ok := status.FromError(err); ok {
|
||||
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
|
||||
} else {
|
||||
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
|
||||
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
|
||||
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
|
||||
switch httpStatusCode {
|
||||
case 200:
|
||||
return int32(code.Code_OK)
|
||||
case 499:
|
||||
return int32(code.Code_CANCELLED)
|
||||
case 500:
|
||||
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
|
||||
case 400:
|
||||
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
|
||||
case 504:
|
||||
return int32(code.Code_DEADLINE_EXCEEDED)
|
||||
case 404:
|
||||
return int32(code.Code_NOT_FOUND)
|
||||
case 409:
|
||||
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
|
||||
case 403:
|
||||
return int32(code.Code_PERMISSION_DENIED)
|
||||
case 401:
|
||||
return int32(code.Code_UNAUTHENTICATED)
|
||||
case 429:
|
||||
return int32(code.Code_RESOURCE_EXHAUSTED)
|
||||
case 501:
|
||||
return int32(code.Code_UNIMPLEMENTED)
|
||||
case 503:
|
||||
return int32(code.Code_UNAVAILABLE)
|
||||
default:
|
||||
return int32(code.Code_UNKNOWN)
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.8

package trace

import (
	"golang.org/x/net/context"
)

// OpenCensus only supports go 1.8 and higher.

func StartSpan(ctx context.Context, _ string) context.Context {
	return ctx
}

func EndSpan(context.Context, error) {
}
@ -1,6 +0,0 @@
#!/bin/bash

today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
@ -1,71 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate ./update_version.sh
|
||||
|
||||
// Package version contains version information for Google Cloud Client
|
||||
// Libraries for Go, as reported in request headers.
|
||||
package version
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// Repo is the current version of the client libraries in this
|
||||
// repo. It should be a date in YYYYMMDD format.
|
||||
const Repo = "20180226"
|
||||
|
||||
// Go returns the Go runtime version. The returned string
|
||||
// has no whitespace.
|
||||
func Go() string {
|
||||
return goVersion
|
||||
}
|
||||
|
||||
var goVersion = goVer(runtime.Version())
|
||||
|
||||
const develPrefix = "devel +"
|
||||
|
||||
func goVer(s string) string {
|
||||
if strings.HasPrefix(s, develPrefix) {
|
||||
s = s[len(develPrefix):]
|
||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
||||
s = s[:p]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "go1") {
|
||||
s = s[2:]
|
||||
var prerelease string
|
||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
||||
s, prerelease = s[:p], s[p:]
|
||||
}
|
||||
if strings.HasSuffix(s, ".") {
|
||||
s += "0"
|
||||
} else if strings.Count(s, ".") < 2 {
|
||||
s += ".0"
|
||||
}
|
||||
if prerelease != "" {
|
||||
s += "-" + prerelease
|
||||
}
|
||||
return s
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func notSemverRune(r rune) bool {
|
||||
return strings.IndexRune("0123456789.", r) < 0
|
||||
}
|
|
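The goVer helper above normalizes runtime.Version() strings into a semver-like form. A small illustrative test of its behavior follows; it is not part of the original package, and the expected values are derived directly from the branches shown above (it would live in the same version package since goVer is unexported).

package version

import "testing"

// TestGoVerExamples is an illustrative test of the deleted goVer helper.
func TestGoVerExamples(t *testing.T) {
	cases := map[string]string{
		"go1.12.1":                "1.12.1",       // already full semver, "go" prefix stripped
		"go1.12":                  "1.12.0",       // missing patch level padded with ".0"
		"go1.12beta1":             "1.12.0-beta1", // pre-release suffix moved behind a "-"
		"devel +abc123 Mon Jan 1": "abc123",       // devel builds keep only the commit token
	}
	for in, want := range cases {
		if got := goVer(in); got != want {
			t.Errorf("goVer(%q) = %q, want %q", in, got, want)
		}
	}
}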
@ -1,335 +0,0 @@
|
|||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
type ACLRole string
|
||||
|
||||
const (
|
||||
RoleOwner ACLRole = "OWNER"
|
||||
RoleReader ACLRole = "READER"
|
||||
RoleWriter ACLRole = "WRITER"
|
||||
)
|
||||
|
||||
// ACLEntity refers to a user or group.
|
||||
// They are sometimes referred to as grantees.
|
||||
//
|
||||
// It could be in the form of:
|
||||
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
|
||||
// "domain-<domain>" and "project-team-<projectId>".
|
||||
//
|
||||
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
|
||||
type ACLEntity string
|
||||
|
||||
const (
|
||||
AllUsers ACLEntity = "allUsers"
|
||||
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
|
||||
)
|
||||
|
||||
// ACLRule represents a grant for a role to an entity (user, group or team) for a
|
||||
// Google Cloud Storage object or bucket.
|
||||
type ACLRule struct {
|
||||
Entity ACLEntity
|
||||
EntityID string
|
||||
Role ACLRole
|
||||
Domain string
|
||||
Email string
|
||||
ProjectTeam *ProjectTeam
|
||||
}
|
||||
|
||||
// ProjectTeam is the project team associated with the entity, if any.
|
||||
type ProjectTeam struct {
|
||||
ProjectNumber string
|
||||
Team string
|
||||
}
|
||||
|
||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
||||
type ACLHandle struct {
|
||||
c *Client
|
||||
bucket string
|
||||
object string
|
||||
isDefault bool
|
||||
userProject string // for requester-pays buckets
|
||||
}
|
||||
|
||||
// Delete permanently deletes the ACL entry for the given entity.
|
||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectDelete(ctx, entity)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultDelete(ctx, entity)
|
||||
}
|
||||
return a.bucketDelete(ctx, entity)
|
||||
}
|
||||
|
||||
// Set sets the role for the given entity.
|
||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectSet(ctx, entity, role, false)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.objectSet(ctx, entity, role, true)
|
||||
}
|
||||
return a.bucketSet(ctx, entity, role)
|
||||
}
|
||||
|
||||
// List retrieves ACL entries.
|
||||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if a.object != "" {
|
||||
return a.objectList(ctx)
|
||||
}
|
||||
if a.isDefault {
|
||||
return a.bucketDefaultList(ctx)
|
||||
}
|
||||
return a.bucketList(ctx)
|
||||
}
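
A minimal usage sketch of the ACLHandle methods above (editor's addition, not part of the removed vendored file). It assumes a client built with storage.NewClient as in this package's doc.go; the bucket and object names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses default application credentials
	if err != nil {
		log.Fatal(err)
	}
	obj := client.Bucket("my-bucket").Object("my-object") // placeholder names

	// Grant, list, then revoke public read access on the object.
	if err := obj.ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}
	rules, err := obj.ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Printf("%s => %s\n", r.Entity, r.Role)
	}
	if err := obj.ACL().Delete(ctx, storage.AllUsers); err != nil {
		log.Fatal(err)
	}
}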
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
return runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toBucketACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
err := runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||
a.configureCall(req, ctx)
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
return runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
err = runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||
a.configureCall(req, ctx)
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
}
|
||||
a.configureCall(req, ctx)
|
||||
return runWithRetry(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
return runWithRetry(ctx, func() error {
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||
a.configureCall(req, ctx)
|
||||
return req.Do()
|
||||
})
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
|
||||
vc := reflect.ValueOf(call)
|
||||
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
||||
if a.userProject != "" {
|
||||
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
}
|
||||
|
||||
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toObjectACLRule(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
|
||||
var rs []ACLRule
|
||||
for _, item := range items {
|
||||
rs = append(rs, toBucketACLRule(item))
|
||||
}
|
||||
return rs
|
||||
}
|
||||
|
||||
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.Entity),
|
||||
EntityID: a.EntityId,
|
||||
Role: ACLRole(a.Role),
|
||||
Domain: a.Domain,
|
||||
Email: a.Email,
|
||||
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
|
||||
return ACLRule{
|
||||
Entity: ACLEntity(a.Entity),
|
||||
EntityID: a.EntityId,
|
||||
Role: ACLRole(a.Role),
|
||||
Domain: a.Domain,
|
||||
Email: a.Email,
|
||||
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
|
||||
}
|
||||
}
|
||||
|
||||
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*raw.ObjectAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
|
||||
if len(rules) == 0 {
|
||||
return nil
|
||||
}
|
||||
r := make([]*raw.BucketAccessControl, 0, len(rules))
|
||||
for _, rule := range rules {
|
||||
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
|
||||
return &raw.BucketAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
|
||||
return &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(r.Entity),
|
||||
Role: string(r.Role),
|
||||
// The other fields are not settable.
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.ProjectNumber,
|
||||
Team: p.Team,
|
||||
}
|
||||
}
|
||||
|
||||
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
return &ProjectTeam{
|
||||
ProjectNumber: p.ProjectNumber,
|
||||
Team: p.Team,
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,228 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
// You can immediately call Run on the returned Copier, or
|
||||
// you can configure it first.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
||||
// in which case the user project of src is billed.
|
||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
||||
return &Copier{dst: dst, src: src}
|
||||
}
|
||||
|
||||
// A Copier copies a source object to a destination.
|
||||
type Copier struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Copier. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// RewriteToken can be set before calling Run to resume a copy
|
||||
// operation. After Run returns a non-nil error, RewriteToken will
|
||||
// have been updated to contain the value needed to resume the copy.
|
||||
RewriteToken string
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
||||
// operation. If ProgressFunc is not nil and copying requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
||||
// ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far and the total size in bytes of the source object.
|
||||
//
|
||||
// ProgressFunc is intended to make copy progress available to the
|
||||
// application. For example, the implementation of ProgressFunc may update
|
||||
// a progress bar in the application's UI, or log the result of
|
||||
// float64(copiedBytes)/float64(totalBytes).
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(copiedBytes, totalBytes uint64)
|
||||
|
||||
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
|
||||
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
|
||||
// any.
|
||||
//
|
||||
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
|
||||
// (via ObjectHandle.Key) on the destination object will result in an error when
|
||||
// Run is called.
|
||||
DestinationKMSKeyName string
|
||||
|
||||
dst, src *ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the copy.
|
||||
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := c.src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
rawObject := c.ObjectAttrs.toRawObject("")
|
||||
for {
|
||||
res, err := c.callRewrite(ctx, rawObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||
}
|
||||
if res.Done { // Finished successfully.
|
||||
return newObject(res.Resource), nil
|
||||
}
|
||||
}
|
||||
}
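
A minimal sketch of driving the Copier defined above, including the ProgressFunc hook (editor's addition; handles and names are placeholders, error handling abbreviated).

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	src := client.Bucket("my-bucket").Object("source-object")      // placeholder
	dst := client.Bucket("my-bucket").Object("destination-object") // placeholder

	copier := dst.CopierFrom(src)
	copier.ContentType = "text/plain" // destination attributes via the embedded ObjectAttrs
	copier.ProgressFunc = func(copied, total uint64) {
		fmt.Printf("copied %d of %d bytes\n", copied, total)
	}
	attrs, err := copier.Run(ctx)
	if err != nil {
		// copier.RewriteToken now holds the value needed to resume this copy later.
		log.Fatal(err)
	}
	fmt.Println("destination generation:", attrs.Generation)
}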
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" {
|
||||
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
//
|
||||
// The encryption key for the destination object will be used to decrypt all
|
||||
// source objects and encrypt the destination object. It is an error
|
||||
// to specify an encryption key for any of the source objects.
|
||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
||||
return &Composer{dst: dst, srcs: srcs}
|
||||
}
|
||||
|
||||
// A Composer composes source objects into a destination object.
|
||||
//
|
||||
// For Requester Pays buckets, the user project of dst is billed.
|
||||
type Composer struct {
|
||||
// ObjectAttrs are optional attributes to set on the destination object.
|
||||
// Any attributes must be initialized before any calls on the Composer. Nil
|
||||
// or zero-valued attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
dst *ObjectHandle
|
||||
srcs []*ObjectHandle
|
||||
}
|
||||
|
||||
// Run performs the compose operation.
|
||||
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if src.bucket != c.dst.bucket {
|
||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
||||
}
|
||||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
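
A minimal sketch of composing objects with the Composer above; all sources must live in the destination bucket, as Run enforces (editor's addition, placeholder names).

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bkt := client.Bucket("my-bucket") // placeholder

	composer := bkt.Object("combined.log").ComposerFrom(
		bkt.Object("part-1.log"),
		bkt.Object("part-2.log"),
	)
	composer.ContentType = "text/plain" // destination attributes come from the embedded ObjectAttrs
	attrs, err := composer.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("composed object size:", attrs.Size)
}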
|
|
@ -1,176 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package storage provides an easy way to work with Google Cloud Storage.
|
||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
||||
|
||||
More information about Google Cloud Storage is available at
|
||||
https://cloud.google.com/storage/docs.
|
||||
|
||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
All of the methods of this package use exponential backoff to retry calls that fail
|
||||
with certain errors, as described in
|
||||
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
|
||||
indefinitely unless the controlling context is canceled or the client is closed. See
|
||||
context.WithTimeout and context.WithCancel.
|
||||
|
||||
|
||||
Creating a Client
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
The client will use your default application credentials.
|
||||
|
||||
If you only wish to access public data, you can create
|
||||
an unauthenticated client with
|
||||
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
|
||||
Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
||||
Each bucket has associated metadata, represented in this package by
|
||||
BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
||||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||
Attrs:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||
you don't explicitly create an object. Instead, the first time you write
|
||||
to an object it will be created. You can use the standard Go io.Reader
|
||||
and io.Writer interfaces to read and write object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
||||
access at the project level (see
|
||||
https://cloud.google.com/storage/docs/access-control/iam).
|
||||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
metadata changes. Conditions let you check these values before an operation;
|
||||
the operation only executes if the conditions match. You can use conditions to
|
||||
prevent race conditions in read-modify-write operations.
|
||||
|
||||
For example, say you've read an object's metadata into objAttrs. Now
|
||||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
You don't need to create a client to do this. See the documentation of
|
||||
SignedURL for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Errors
|
||||
|
||||
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
|
||||
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
|
||||
|
||||
if e, ok := err.(*googleapi.Error); ok {
|
||||
if e.Code == 409 { ... }
|
||||
}
|
||||
*/
|
||||
package storage // import "cloud.google.com/go/storage"
|
|
@ -1,32 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import "google.golang.org/api/googleapi"
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case interface{ Temporary() bool }:
|
||||
return e.Temporary()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
|
@ -1,30 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func withContext(r *http.Request, ctx context.Context) *http.Request {
|
||||
return r.WithContext(ctx)
|
||||
}
|
||||
|
||||
func goHTTPUncompressed(res *http.Response) bool {
|
||||
return res.Uncompressed
|
||||
}
|
|
@ -1,129 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/iam"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
)
|
||||
|
||||
// IAM provides access to IAM access control for the bucket.
|
||||
func (b *BucketHandle) IAM() *iam.Handle {
|
||||
return iam.InternalNewHandleClient(&iamClient{
|
||||
raw: b.c.raw,
|
||||
userProject: b.userProject,
|
||||
}, b.name)
|
||||
}
|
||||
|
||||
// iamClient implements the iam.client interface.
|
||||
type iamClient struct {
|
||||
raw *raw.Service
|
||||
userProject string
|
||||
}
|
||||
|
||||
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.GetIamPolicy(resource)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var rp *raw.Policy
|
||||
err = runWithRetry(ctx, func() error {
|
||||
rp, err = call.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iamFromStoragePolicy(rp), nil
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
rp := iamToStoragePolicy(p)
|
||||
call := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
return runWithRetry(ctx, func() error {
|
||||
_, err := call.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var res *raw.TestIamPermissionsResponse
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
}
|
||||
|
||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
||||
return &raw.Policy{
|
||||
Bindings: iamToStorageBindings(ip.Bindings),
|
||||
Etag: string(ip.Etag),
|
||||
}
|
||||
}
|
||||
|
||||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
||||
var rbs []*raw.PolicyBindings
|
||||
for _, ib := range ibs {
|
||||
rbs = append(rbs, &raw.PolicyBindings{
|
||||
Role: ib.Role,
|
||||
Members: ib.Members,
|
||||
})
|
||||
}
|
||||
return rbs
|
||||
}
|
||||
|
||||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
|
||||
return &iampb.Policy{
|
||||
Bindings: iamFromStorageBindings(rp.Bindings),
|
||||
Etag: []byte(rp.Etag),
|
||||
}
|
||||
}
|
||||
|
||||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
||||
var ibs []*iampb.Binding
|
||||
for _, rb := range rbs {
|
||||
ibs = append(ibs, &iampb.Binding{
|
||||
Role: rb.Role,
|
||||
Members: rb.Members,
|
||||
})
|
||||
}
|
||||
return ibs
|
||||
}
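
The iamClient above plugs the bucket into the generic cloud.google.com/go/iam handle. The sketch below is an editor's addition: the Policy, Add and SetPolicy calls are that iam package's API rather than anything defined in this file, and the member and role strings are placeholders.

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bkt := client.Bucket("my-bucket") // placeholder

	// Read-modify-write the bucket's IAM policy.
	policy, err := bkt.IAM().Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	policy.Add("group:auditors@example.com", "roles/storage.objectViewer") // placeholder member/role
	if err := bkt.IAM().SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}
}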
|
|
@ -1,36 +0,0 @@
|
|||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// runWithRetry calls the function until it returns nil or a non-retryable error, or
|
||||
// the context is done.
|
||||
func runWithRetry(ctx context.Context, call func() error) error {
|
||||
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
|
||||
err = call()
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if shouldRetry(err) {
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
})
|
||||
}
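
runWithRetry and shouldRetry are unexported, so callers never see them directly; the sketch below is only an editor's illustration of the same stop/continue contract using a plain exponential-backoff loop, not code from this package.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry keeps calling fn until it succeeds, returns a non-retryable error,
// or the context is done, doubling the pause between attempts.
func retry(ctx context.Context, retryable func(error) bool, fn func() error) error {
	pause := 100 * time.Millisecond
	for {
		err := fn()
		if err == nil || !retryable(err) {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(pause):
			pause *= 2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	attempts := 0
	err := retry(ctx, func(error) bool { return true }, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // pretend the first calls fail
		}
		return nil
	})
	fmt.Println(attempts, err)
}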
|
|
@ -1,42 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.10
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
switch e := err.(type) {
|
||||
case *googleapi.Error:
|
||||
// Retry on 429 and 5xx, according to
|
||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
||||
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
||||
case *url.Error:
|
||||
// Retry on REFUSED_STREAM.
|
||||
// Unfortunately the error type is unexported, so we resort to string
|
||||
// matching.
|
||||
return strings.Contains(e.Error(), "REFUSED_STREAM")
|
||||
case interface{ Temporary() bool }:
|
||||
return e.Temporary()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !go1.7
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func withContext(r *http.Request, _ interface{}) *http.Request {
|
||||
// In Go 1.6 and below, ignore the context.
|
||||
return r
|
||||
}
|
||||
|
||||
// Go 1.6 doesn't have http.Response.Uncompressed, so we can't know whether the Go
|
||||
// HTTP stack uncompressed a gzip file. As a good approximation, assume that
|
||||
// the lack of a Content-Length header means that it did uncompress.
|
||||
func goHTTPUncompressed(res *http.Response) bool {
|
||||
return res.Header.Get("Content-Length") == ""
|
||||
}
|
|
@ -1,188 +0,0 @@
|
|||
// Copyright 2017 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/net/context"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// A Notification describes how to send Cloud PubSub messages when certain
|
||||
// events occur in a bucket.
|
||||
type Notification struct {
|
||||
// The ID of the notification.
|
||||
ID string
|
||||
|
||||
// The ID of the topic to which this subscription publishes.
|
||||
TopicID string
|
||||
|
||||
// The ID of the project to which the topic belongs.
|
||||
TopicProjectID string
|
||||
|
||||
// Only send notifications about listed event types. If empty, send notifications
|
||||
// for all event types.
|
||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
|
||||
EventTypes []string
|
||||
|
||||
// If present, only apply this notification configuration to object names that
|
||||
// begin with this prefix.
|
||||
ObjectNamePrefix string
|
||||
|
||||
// An optional list of additional attributes to attach to each Cloud PubSub
|
||||
// message published for this notification subscription.
|
||||
CustomAttributes map[string]string
|
||||
|
||||
// The contents of the message payload.
|
||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
|
||||
PayloadFormat string
|
||||
}
|
||||
|
||||
// Values for Notification.PayloadFormat.
|
||||
const (
|
||||
// Send no payload with notification messages.
|
||||
NoPayload = "NONE"
|
||||
|
||||
// Send object metadata as JSON with notification messages.
|
||||
JSONPayload = "JSON_API_V1"
|
||||
)
|
||||
|
||||
// Values for Notification.EventTypes.
|
||||
const (
|
||||
// Event that occurs when an object is successfully created.
|
||||
ObjectFinalizeEvent = "OBJECT_FINALIZE"
|
||||
|
||||
// Event that occurs when the metadata of an existing object changes.
|
||||
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
|
||||
|
||||
// Event that occurs when an object is permanently deleted.
|
||||
ObjectDeleteEvent = "OBJECT_DELETE"
|
||||
|
||||
// Event that occurs when the live version of an object becomes an
|
||||
// archived version.
|
||||
ObjectArchiveEvent = "OBJECT_ARCHIVE"
|
||||
)
|
||||
|
||||
func toNotification(rn *raw.Notification) *Notification {
|
||||
n := &Notification{
|
||||
ID: rn.Id,
|
||||
EventTypes: rn.EventTypes,
|
||||
ObjectNamePrefix: rn.ObjectNamePrefix,
|
||||
CustomAttributes: rn.CustomAttributes,
|
||||
PayloadFormat: rn.PayloadFormat,
|
||||
}
|
||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
|
||||
return n
|
||||
}
|
||||
|
||||
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
|
||||
|
||||
// parseNotificationTopic extracts the project and topic IDs from the full
|
||||
// resource name returned by the service. If the name is malformed, it returns
|
||||
// "?" for both IDs.
|
||||
func parseNotificationTopic(nt string) (projectID, topicID string) {
|
||||
matches := topicRE.FindStringSubmatch(nt)
|
||||
if matches == nil {
|
||||
return "?", "?"
|
||||
}
|
||||
return matches[1], matches[2]
|
||||
}
|
||||
|
||||
func toRawNotification(n *Notification) *raw.Notification {
|
||||
return &raw.Notification{
|
||||
Id: n.ID,
|
||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
||||
n.TopicProjectID, n.TopicID),
|
||||
EventTypes: n.EventTypes,
|
||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
||||
CustomAttributes: n.CustomAttributes,
|
||||
PayloadFormat: string(n.PayloadFormat),
|
||||
}
|
||||
}
|
||||
|
||||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
||||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
||||
// returned Notification's ID can be used to refer to it.
|
||||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if n.ID != "" {
|
||||
return nil, errors.New("storage: AddNotification: ID must not be set")
|
||||
}
|
||||
if n.TopicProjectID == "" {
|
||||
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
|
||||
}
|
||||
if n.TopicID == "" {
|
||||
return nil, errors.New("storage: AddNotification: missing TopicID")
|
||||
}
|
||||
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
rn, err := call.Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toNotification(rn), nil
|
||||
}
|
||||
|
||||
// Notifications returns all the Notifications configured for this bucket, as a map
|
||||
// indexed by notification ID.
|
||||
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := b.c.raw.Notifications.List(b.name)
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
var res *raw.Notifications
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return notificationsToMap(res.Items), nil
|
||||
}
|
||||
|
||||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
||||
m := map[string]*Notification{}
|
||||
for _, rn := range rns {
|
||||
m[rn.Id] = toNotification(rn)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// DeleteNotification deletes the notification with the given ID.
|
||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := b.c.raw.Notifications.Delete(b.name, id)
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
return call.Context(ctx).Do()
|
||||
}
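
A minimal round-trip sketch for the notification helpers above: add a Pub/Sub notification, list what is configured, then remove it (editor's addition; project, bucket and topic names are placeholders).

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	bkt := client.Bucket("my-bucket") // placeholder

	added, err := bkt.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",     // placeholder
		TopicID:        "storage-events", // placeholder
		PayloadFormat:  storage.JSONPayload,
		EventTypes:     []string{storage.ObjectFinalizeEvent},
	})
	if err != nil {
		log.Fatal(err)
	}

	all, err := bkt.Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for id, n := range all {
		fmt.Printf("%s -> projects/%s/topics/%s\n", id, n.TopicProjectID, n.TopicID)
	}

	if err := bkt.DeleteNotification(ctx, added.ID); err != nil {
		log.Fatal(err)
	}
}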
|
|
@ -1,386 +0,0 @@
|
|||
// Copyright 2016 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// ReaderObjectAttrs are attributes about the object being read. These are populated
|
||||
// during the New call. This struct only holds a subset of object attributes: to
|
||||
// get the full set of attributes, use ObjectHandle.Attrs.
|
||||
//
|
||||
// Each field is read-only.
|
||||
type ReaderObjectAttrs struct {
|
||||
// Size is the length of the object's content.
|
||||
Size int64
|
||||
|
||||
// ContentType is the MIME type of the object's content.
|
||||
ContentType string
|
||||
|
||||
// ContentEncoding is the encoding of the object's content.
|
||||
ContentEncoding string
|
||||
|
||||
// CacheControl specifies whether and for how long browser and Internet
|
||||
// caches are allowed to cache your objects.
|
||||
CacheControl string
|
||||
|
||||
// LastModified is the time that the object was last modified.
|
||||
LastModified time.Time
|
||||
|
||||
// Generation is the generation number of the object's content.
|
||||
Generation int64
|
||||
|
||||
// Metageneration is the version of the metadata for this object at
|
||||
// this generation. This field is used for preconditions and for
|
||||
// detecting changes in metadata. A metageneration number is only
|
||||
// meaningful in the context of a particular generation of a
|
||||
// particular object.
|
||||
Metageneration int64
|
||||
}
|
||||
|
||||
// NewReader creates a new Reader to read the contents of the
|
||||
// object.
|
||||
// ErrObjectNotExist will be returned if the object is not found.
|
||||
//
|
||||
// The caller must call Close on the returned Reader when done reading.
|
||||
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
|
||||
return o.NewRangeReader(ctx, 0, -1)
|
||||
}
|
||||
|
||||
// NewRangeReader reads part of an object, reading at most length bytes
|
||||
// starting at the given offset. If length is negative, the object is read
|
||||
// until the end.
|
||||
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if offset < 0 {
|
||||
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
|
||||
}
|
||||
if o.conds != nil {
|
||||
if err := o.conds.validate("NewRangeReader"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
u := &url.URL{
|
||||
Scheme: "https",
|
||||
Host: "storage.googleapis.com",
|
||||
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
|
||||
}
|
||||
verb := "GET"
|
||||
if length == 0 {
|
||||
verb = "HEAD"
|
||||
}
|
||||
req, err := http.NewRequest(verb, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = withContext(req, ctx)
|
||||
if o.userProject != "" {
|
||||
req.Header.Set("X-Goog-User-Project", o.userProject)
|
||||
}
|
||||
if o.readCompressed {
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
|
||||
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gen := o.gen
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming we
|
||||
// have already read seen bytes.
|
||||
reopen := func(seen int64) (*http.Response, error) {
|
||||
start := offset + seen
|
||||
if length < 0 && start > 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
|
||||
} else if length > 0 {
|
||||
// The end character isn't affected by how many bytes we've seen.
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
|
||||
}
|
||||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
||||
req.URL.RawQuery = conditionsQuery(gen, o.conds)
|
||||
var res *http.Response
|
||||
err = runWithRetry(ctx, func() error {
|
||||
res, err = o.c.hc.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
res.Body.Close()
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return &googleapi.Error{
|
||||
Code: res.StatusCode,
|
||||
Header: res.Header,
|
||||
Body: string(body),
|
||||
}
|
||||
}
|
||||
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
|
||||
res.Body.Close()
|
||||
return errors.New("storage: partial request not satisfied")
|
||||
}
|
||||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
||||
// generation. In future requests we'll use this generation as a precondition to avoid data races.
|
||||
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
|
||||
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gen = gen64
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
res, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
size int64 // total size of object, even if a range was requested.
|
||||
checkCRC bool
|
||||
crc uint32
|
||||
)
|
||||
if res.StatusCode == http.StatusPartialContent {
|
||||
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
|
||||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
|
||||
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
} else {
|
||||
size = res.ContentLength
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
if length != 0 && !goHTTPUncompressed(res) && !uncompressedByServer(res) {
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
}
|
||||
}
|
||||
|
||||
remain := res.ContentLength
|
||||
body := res.Body
|
||||
if length == 0 {
|
||||
remain = 0
|
||||
body.Close()
|
||||
body = emptyBody
|
||||
}
|
||||
var metaGen int64
|
||||
if res.Header.Get("X-Goog-Generation") != "" {
|
||||
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var lm time.Time
|
||||
if res.Header.Get("Last-Modified") != "" {
|
||||
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
attrs := ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: res.Header.Get("Content-Type"),
|
||||
ContentEncoding: res.Header.Get("Content-Encoding"),
|
||||
CacheControl: res.Header.Get("Cache-Control"),
|
||||
LastModified: lm,
|
||||
Generation: gen,
|
||||
Metageneration: metaGen,
|
||||
}
|
||||
return &Reader{
|
||||
Attrs: attrs,
|
||||
body: body,
|
||||
size: size,
|
||||
remain: remain,
|
||||
wantCRC: crc,
|
||||
checkCRC: checkCRC,
|
||||
reopen: reopen,
|
||||
}, nil
|
||||
}
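
A minimal sketch of the range-reading path above: fetch only the first kilobyte of an object and inspect the attributes recorded during NewRangeReader (editor's addition, placeholder names).

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	obj := client.Bucket("my-bucket").Object("my-object") // placeholder

	// Read at most the first 1024 bytes.
	r, err := obj.NewRangeReader(ctx, 0, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d of %d total bytes (content type %s)\n",
		len(data), r.Attrs.Size, r.Attrs.ContentType)
}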
|
||||
|
||||
func uncompressedByServer(res *http.Response) bool {
|
||||
// If the data is stored as gzip but is not encoded as gzip, then it
|
||||
// was uncompressed by the server.
|
||||
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
|
||||
res.Header.Get("Content-Encoding") != "gzip"
|
||||
}
|
||||
|
||||
func parseCRC32c(res *http.Response) (uint32, bool) {
|
||||
const prefix = "crc32c="
|
||||
for _, spec := range res.Header["X-Goog-Hash"] {
|
||||
if strings.HasPrefix(spec, prefix) {
|
||||
c, err := decodeUint32(spec[len(prefix):])
|
||||
if err == nil {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
||||
|
||||
// Reader reads a Cloud Storage object.
|
||||
// It implements io.Reader.
|
||||
//
|
||||
// Typically, a Reader computes the CRC of the downloaded content and compares it to
|
||||
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
|
||||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
||||
type Reader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
body io.ReadCloser
|
||||
seen, remain, size int64
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
reopen func(seen int64) (*http.Response, error)
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
return r.body.Close()
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.readWithRetry(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
if r.checkCRC {
|
||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
||||
// Check CRC here. It would be natural to check it in Close, but
|
||||
// everybody defers Close on the assumption that it doesn't return
|
||||
// anything worth looking at.
|
||||
if err == io.EOF {
|
||||
if r.gotCRC != r.wantCRC {
|
||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
||||
r.gotCRC, r.wantCRC)
|
||||
}
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
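
Because the CRC comparison in Read only fires when the body reaches io.EOF, callers should drain the reader fully (for example with io.Copy) rather than rely on Close to report corruption. A short sketch, with placeholder names (editor's addition).

package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}

	r, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) // placeholder names
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// io.Copy reads to EOF, so a CRC mismatch surfaces here as a non-nil error.
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}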
|
||||
|
||||
func (r *Reader) readWithRetry(p []byte) (int, error) {
|
||||
n := 0
|
||||
for len(p[n:]) > 0 {
|
||||
m, err := r.body.Read(p[n:])
|
||||
n += m
|
||||
r.seen += int64(m)
|
||||
if !shouldRetryRead(err) {
|
||||
return n, err
|
||||
}
|
||||
// Read failed, but we will try again. Send a ranged read request that takes
|
||||
// into account the number of bytes we've already seen.
|
||||
res, err := r.reopen(r.seen)
|
||||
if err != nil {
|
||||
// reopen already retries
|
||||
return n, err
|
||||
}
|
||||
r.body.Close()
|
||||
r.body = res.Body
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func shouldRetryRead(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.Size.
|
||||
func (r *Reader) Size() int64 {
|
||||
return r.Attrs.Size
|
||||
}
|
||||
|
||||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
||||
func (r *Reader) Remain() int64 {
|
||||
return r.remain
|
||||
}
|
||||
|
||||
// ContentType returns the content type of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.ContentType.
|
||||
func (r *Reader) ContentType() string {
|
||||
return r.Attrs.ContentType
|
||||
}
|
||||
|
||||
// ContentEncoding returns the content encoding of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.ContentEncoding.
|
||||
func (r *Reader) ContentEncoding() string {
|
||||
return r.Attrs.ContentEncoding
|
||||
}
|
||||
|
||||
// CacheControl returns the cache control of the object.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.CacheControl.
|
||||
func (r *Reader) CacheControl() string {
|
||||
return r.Attrs.CacheControl
|
||||
}
|
||||
|
||||
// LastModified returns the value of the Last-Modified header.
|
||||
//
|
||||
// Deprecated: use Reader.Attrs.LastModified.
|
||||
func (r *Reader) LastModified() (time.Time, error) {
|
||||
return r.Attrs.LastModified, nil
|
||||
}
|
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
|
@ -1,227 +0,0 @@
|
|||
// Copyright 2014 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// A Writer writes a Cloud Storage object.
|
||||
type Writer struct {
|
||||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
||||
// must be initialized before the first Write call. Nil or zero-valued
|
||||
// attributes are ignored.
|
||||
ObjectAttrs
|
||||
|
||||
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
|
||||
// to true in addition to setting the Writer's CRC32C field, because zero
|
||||
// is a valid CRC and normally a zero would not be transmitted.
|
||||
// If a CRC32C is sent, and the data written does not match the checksum,
|
||||
// the write will be rejected.
|
||||
SendCRC32C bool
|
||||
|
||||
// ChunkSize controls the maximum number of bytes of the object that the
|
||||
// Writer will attempt to send to the server in a single request. Objects
|
||||
// smaller than the size will be sent in a single request, while larger
|
||||
// objects will be split over multiple requests. The size will be rounded up
|
||||
// to the nearest multiple of 256K. If zero, chunking will be disabled and
|
||||
// the object will be uploaded in a single request.
|
||||
//
|
||||
// ChunkSize will default to a reasonable value. If you perform many concurrent
|
||||
// writes of small objects, you may wish set ChunkSize to a value that matches
|
||||
// your objects' sizes to avoid consuming large amounts of memory.
|
||||
//
|
||||
// ChunkSize must be set before the first Write call.
|
||||
ChunkSize int
|
||||
|
||||
// ProgressFunc can be used to monitor the progress of a large write.
|
||||
// operation. If ProgressFunc is not nil and writing requires multiple
|
||||
// calls to the underlying service (see
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
|
||||
// then ProgressFunc will be invoked after each call with the number of bytes of
|
||||
// content copied so far.
|
||||
//
|
||||
// ProgressFunc should return quickly without blocking.
|
||||
ProgressFunc func(int64)
|
||||
|
||||
ctx context.Context
|
||||
o *ObjectHandle
|
||||
|
||||
opened bool
|
||||
pw *io.PipeWriter
|
||||
|
||||
donec chan struct{} // closed after err and obj are set.
|
||||
obj *ObjectAttrs
|
||||
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (w *Writer) open() error {
|
||||
attrs := w.ObjectAttrs
|
||||
// Check the developer didn't change the object Name (this is unfortunate, but
|
||||
// we don't want to store an object under the wrong name).
|
||||
if attrs.Name != w.o.object {
|
||||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
|
||||
}
|
||||
if !utf8.ValidString(attrs.Name) {
|
||||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
|
||||
}
|
||||
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
|
||||
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
w.pw = pw
|
||||
w.opened = true
|
||||
|
||||
if w.ChunkSize < 0 {
|
||||
return errors.New("storage: Writer.ChunkSize must be non-negative")
|
||||
}
|
||||
mediaOpts := []googleapi.MediaOption{
|
||||
googleapi.ChunkSize(w.ChunkSize),
|
||||
}
|
||||
if c := attrs.ContentType; c != "" {
|
||||
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(w.donec)
|
||||
|
||||
rawObj := attrs.toRawObject(w.o.bucket)
|
||||
if w.SendCRC32C {
|
||||
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
|
||||
}
|
||||
if w.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
|
||||
}
|
||||
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
Context(w.ctx)
|
||||
if w.ProgressFunc != nil {
|
||||
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
|
||||
}
|
||||
if attrs.KMSKeyName != "" {
|
||||
call.KmsKeyName(attrs.KMSKeyName)
|
||||
}
|
||||
if attrs.PredefinedACL != "" {
|
||||
call.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
|
||||
w.mu.Lock()
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
|
||||
if err == nil {
|
||||
if w.o.userProject != "" {
|
||||
call.UserProject(w.o.userProject)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
// If the chunk size is zero, then no chunking is done on the Reader,
|
||||
// which means we cannot retry: the first call will read the data, and if
|
||||
// it fails, there is no way to re-read.
|
||||
if w.ChunkSize == 0 {
|
||||
resp, err = call.Do()
|
||||
} else {
|
||||
// We will only retry here if the initial POST, which obtains a URI for
|
||||
// the resumable upload, fails with a retryable error. The upload itself
|
||||
// has its own retry logic.
|
||||
err = runWithRetry(w.ctx, func() error {
|
||||
var err2 error
|
||||
resp, err2 = call.Do()
|
||||
return err2
|
||||
})
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
w.mu.Lock()
|
||||
w.err = err
|
||||
w.mu.Unlock()
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
w.obj = newObject(resp)
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write appends to w. It implements the io.Writer interface.
|
||||
//
|
||||
// Since writes happen asynchronously, Write may return a nil
|
||||
// error even though the write failed (or will fail). Always
|
||||
// use the error returned from Writer.Close to determine if
|
||||
// the upload was successful.
|
||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
||||
w.mu.Lock()
|
||||
werr := w.err
|
||||
w.mu.Unlock()
|
||||
if werr != nil {
|
||||
return 0, werr
|
||||
}
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return w.pw.Write(p)
|
||||
}
|
||||
|
||||
// Close completes the write operation and flushes any buffered data.
|
||||
// If Close doesn't return an error, metadata about the written object
|
||||
// can be retrieved by calling Attrs.
|
||||
func (w *Writer) Close() error {
|
||||
if !w.opened {
|
||||
if err := w.open(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := w.pw.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
<-w.donec
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.err
|
||||
}
|
||||
|
||||
// CloseWithError aborts the write operation with the provided error.
|
||||
// CloseWithError always returns nil.
|
||||
//
|
||||
// Deprecated: cancel the context passed to NewWriter instead.
|
||||
func (w *Writer) CloseWithError(err error) error {
|
||||
if !w.opened {
|
||||
return nil
|
||||
}
|
||||
return w.pw.CloseWithError(err)
|
||||
}
|
||||
|
||||
// Attrs returns metadata about a successfully-written object.
|
||||
// It's only valid to call it after Close returns nil.
|
||||
func (w *Writer) Attrs() *ObjectAttrs {
|
||||
return w.obj
|
||||
}
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -1,94 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package propagation implement X-Cloud-Trace-Context header propagation used
// by Google Cloud products.
package propagation // import "contrib.go.opencensus.io/exporter/stackdriver/propagation"

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"go.opencensus.io/trace"
	"go.opencensus.io/trace/propagation"
)

const (
	httpHeaderMaxSize = 200
	httpHeader        = `X-Cloud-Trace-Context`
)

var _ propagation.HTTPFormat = (*HTTPFormat)(nil)

// HTTPFormat implements propagation.HTTPFormat to propagate
// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
type HTTPFormat struct{}

// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
	h := req.Header.Get(httpHeader)
	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
	// Return if the header is empty or missing, or if the header is unreasonably
	// large, to avoid making unnecessary copies of a large string.
	if h == "" || len(h) > httpHeaderMaxSize {
		return trace.SpanContext{}, false
	}

	// Parse the trace id field.
	slash := strings.Index(h, `/`)
	if slash == -1 {
		return trace.SpanContext{}, false
	}
	tid, h := h[:slash], h[slash+1:]

	buf, err := hex.DecodeString(tid)
	if err != nil {
		return trace.SpanContext{}, false
	}
	copy(sc.TraceID[:], buf)

	// Parse the span id field.
	spanstr := h
	semicolon := strings.Index(h, `;`)
	if semicolon != -1 {
		spanstr, h = h[:semicolon], h[semicolon+1:]
	}
	sid, err := strconv.ParseUint(spanstr, 10, 64)
	if err != nil {
		return trace.SpanContext{}, false
	}
	binary.BigEndian.PutUint64(sc.SpanID[:], sid)

	// Parse the options field, options field is optional.
	if !strings.HasPrefix(h, "o=") {
		return sc, true
	}
	o, err := strconv.ParseUint(h[2:], 10, 64)
	if err != nil {
		return trace.SpanContext{}, false
	}
	sc.TraceOptions = trace.TraceOptions(o)
	return sc, true
}

// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
	sid := binary.BigEndian.Uint64(sc.SpanID[:])
	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
	req.Header.Set(httpHeader, header)
}
@ -1,164 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

const (
	UNKNOWN_APPLICATION_EXCEPTION  = 0
	UNKNOWN_METHOD                 = 1
	INVALID_MESSAGE_TYPE_EXCEPTION = 2
	WRONG_METHOD_NAME              = 3
	BAD_SEQUENCE_ID                = 4
	MISSING_RESULT                 = 5
	INTERNAL_ERROR                 = 6
	PROTOCOL_ERROR                 = 7
)

var defaultApplicationExceptionMessage = map[int32]string{
	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
	UNKNOWN_METHOD:                 "unknown method",
	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
	WRONG_METHOD_NAME:              "wrong method name",
	BAD_SEQUENCE_ID:                "bad sequence ID",
	MISSING_RESULT:                 "missing result",
	INTERNAL_ERROR:                 "unknown internal error",
	PROTOCOL_ERROR:                 "unknown protocol error",
}

// Application level Thrift exception
type TApplicationException interface {
	TException
	TypeId() int32
	Read(iprot TProtocol) error
	Write(oprot TProtocol) error
}

type tApplicationException struct {
	message string
	type_   int32
}

func (e tApplicationException) Error() string {
	if e.message != "" {
		return e.message
	}
	return defaultApplicationExceptionMessage[e.type_]
}

func NewTApplicationException(type_ int32, message string) TApplicationException {
	return &tApplicationException{message, type_}
}

func (p *tApplicationException) TypeId() int32 {
	return p.type_
}

func (p *tApplicationException) Read(iprot TProtocol) error {
	// TODO: this should really be generated by the compiler
	_, err := iprot.ReadStructBegin()
	if err != nil {
		return err
	}

	message := ""
	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)

	for {
		_, ttype, id, err := iprot.ReadFieldBegin()
		if err != nil {
			return err
		}
		if ttype == STOP {
			break
		}
		switch id {
		case 1:
			if ttype == STRING {
				if message, err = iprot.ReadString(); err != nil {
					return err
				}
			} else {
				if err = SkipDefaultDepth(iprot, ttype); err != nil {
					return err
				}
			}
		case 2:
			if ttype == I32 {
				if type_, err = iprot.ReadI32(); err != nil {
					return err
				}
			} else {
				if err = SkipDefaultDepth(iprot, ttype); err != nil {
					return err
				}
			}
		default:
			if err = SkipDefaultDepth(iprot, ttype); err != nil {
				return err
			}
		}
		if err = iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	if err := iprot.ReadStructEnd(); err != nil {
		return err
	}

	p.message = message
	p.type_ = type_

	return nil
}

func (p *tApplicationException) Write(oprot TProtocol) (err error) {
	err = oprot.WriteStructBegin("TApplicationException")
	if len(p.Error()) > 0 {
		err = oprot.WriteFieldBegin("message", STRING, 1)
		if err != nil {
			return
		}
		err = oprot.WriteString(p.Error())
		if err != nil {
			return
		}
		err = oprot.WriteFieldEnd()
		if err != nil {
			return
		}
	}
	err = oprot.WriteFieldBegin("type", I32, 2)
	if err != nil {
		return
	}
	err = oprot.WriteI32(p.type_)
	if err != nil {
		return
	}
	err = oprot.WriteFieldEnd()
	if err != nil {
		return
	}
	err = oprot.WriteFieldStop()
	if err != nil {
		return
	}
	err = oprot.WriteStructEnd()
	return
}
@ -1,509 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
type TBinaryProtocol struct {
|
||||
trans TRichTransport
|
||||
origTransport TTransport
|
||||
reader io.Reader
|
||||
writer io.Writer
|
||||
strictRead bool
|
||||
strictWrite bool
|
||||
buffer [64]byte
|
||||
}
|
||||
|
||||
type TBinaryProtocolFactory struct {
|
||||
strictRead bool
|
||||
strictWrite bool
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
|
||||
return NewTBinaryProtocol(t, false, true)
|
||||
}
|
||||
|
||||
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
|
||||
p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
|
||||
if et, ok := t.(TRichTransport); ok {
|
||||
p.trans = et
|
||||
} else {
|
||||
p.trans = NewTRichTransport(t)
|
||||
}
|
||||
p.reader = p.trans
|
||||
p.writer = p.trans
|
||||
return p
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
|
||||
return NewTBinaryProtocolFactory(false, true)
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
|
||||
return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
|
||||
return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
|
||||
}
|
||||
|
||||
/**
|
||||
* Writing Methods
|
||||
*/
|
||||
|
||||
func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
|
||||
if p.strictWrite {
|
||||
version := uint32(VERSION_1) | uint32(typeId)
|
||||
e := p.WriteI32(int32(version))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteString(name)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(seqId)
|
||||
return e
|
||||
} else {
|
||||
e := p.WriteString(name)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteByte(int8(typeId))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(seqId)
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMessageEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteStructBegin(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteStructEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
e := p.WriteByte(int8(typeId))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI16(id)
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldStop() error {
|
||||
e := p.WriteByte(STOP)
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
e := p.WriteByte(int8(keyType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteByte(int8(valueType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMapEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
e := p.WriteByte(int8(elemType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteListEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
e := p.WriteByte(int8(elemType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteSetEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteBool(value bool) error {
|
||||
if value {
|
||||
return p.WriteByte(1)
|
||||
}
|
||||
return p.WriteByte(0)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteByte(value int8) error {
|
||||
e := p.trans.WriteByte(byte(value))
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI16(value int16) error {
|
||||
v := p.buffer[0:2]
|
||||
binary.BigEndian.PutUint16(v, uint16(value))
|
||||
_, e := p.writer.Write(v)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI32(value int32) error {
|
||||
v := p.buffer[0:4]
|
||||
binary.BigEndian.PutUint32(v, uint32(value))
|
||||
_, e := p.writer.Write(v)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI64(value int64) error {
|
||||
v := p.buffer[0:8]
|
||||
binary.BigEndian.PutUint64(v, uint64(value))
|
||||
_, err := p.writer.Write(v)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteDouble(value float64) error {
|
||||
return p.WriteI64(int64(math.Float64bits(value)))
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteString(value string) error {
|
||||
e := p.WriteI32(int32(len(value)))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
_, err := p.trans.WriteString(value)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteBinary(value []byte) error {
|
||||
e := p.WriteI32(int32(len(value)))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
_, err := p.writer.Write(value)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
/**
|
||||
* Reading methods
|
||||
*/
|
||||
|
||||
func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return "", typeId, 0, NewTProtocolException(e)
|
||||
}
|
||||
if size < 0 {
|
||||
typeId = TMessageType(size & 0x0ff)
|
||||
version := int64(int64(size) & VERSION_MASK)
|
||||
if version != VERSION_1 {
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
|
||||
}
|
||||
name, e = p.ReadString()
|
||||
if e != nil {
|
||||
return name, typeId, seqId, NewTProtocolException(e)
|
||||
}
|
||||
seqId, e = p.ReadI32()
|
||||
if e != nil {
|
||||
return name, typeId, seqId, NewTProtocolException(e)
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
if p.strictRead {
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
|
||||
}
|
||||
name, e2 := p.readStringBody(size)
|
||||
if e2 != nil {
|
||||
return name, typeId, seqId, e2
|
||||
}
|
||||
b, e3 := p.ReadByte()
|
||||
if e3 != nil {
|
||||
return name, typeId, seqId, e3
|
||||
}
|
||||
typeId = TMessageType(b)
|
||||
seqId, e4 := p.ReadI32()
|
||||
if e4 != nil {
|
||||
return name, typeId, seqId, e4
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadMessageEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadStructEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
|
||||
t, err := p.ReadByte()
|
||||
typeId = TType(t)
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if t != STOP {
|
||||
seqId, err = p.ReadI16()
|
||||
}
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadFieldEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
|
||||
|
||||
func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
|
||||
k, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
kType = TType(k)
|
||||
v, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
vType = TType(v)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
return kType, vType, size, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadMapEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
b, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
elemType = TType(b)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadListEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
b, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
elemType = TType(b)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
return elemType, size, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadSetEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadBool() (bool, error) {
|
||||
b, e := p.ReadByte()
|
||||
v := true
|
||||
if b != 1 {
|
||||
v = false
|
||||
}
|
||||
return v, e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.trans.ReadByte()
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
|
||||
buf := p.buffer[0:2]
|
||||
err = p.readAll(buf)
|
||||
value = int16(binary.BigEndian.Uint16(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
|
||||
buf := p.buffer[0:4]
|
||||
err = p.readAll(buf)
|
||||
value = int32(binary.BigEndian.Uint32(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
|
||||
buf := p.buffer[0:8]
|
||||
err = p.readAll(buf)
|
||||
value = int64(binary.BigEndian.Uint64(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
|
||||
buf := p.buffer[0:8]
|
||||
err = p.readAll(buf)
|
||||
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadString() (value string, err error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
if size < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
|
||||
return p.readStringBody(size)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
if size < 0 {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
|
||||
isize := int(size)
|
||||
buf := make([]byte, isize)
|
||||
_, err := io.ReadFull(p.trans, buf)
|
||||
return buf, NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
|
||||
return NewTProtocolException(p.trans.Flush(ctx))
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Transport() TTransport {
|
||||
return p.origTransport
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) readAll(buf []byte) error {
|
||||
_, err := io.ReadFull(p.reader, buf)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
const readLimit = 32768
|
||||
|
||||
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
|
||||
if size < 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
e error
|
||||
b []byte
|
||||
)
|
||||
|
||||
switch {
|
||||
case int(size) <= len(p.buffer):
|
||||
b = p.buffer[:size] // avoids allocation for small reads
|
||||
case int(size) < readLimit:
|
||||
b = make([]byte, size)
|
||||
default:
|
||||
b = make([]byte, readLimit)
|
||||
}
|
||||
|
||||
for size > 0 {
|
||||
_, e = io.ReadFull(p.trans, b)
|
||||
buf.Write(b)
|
||||
if e != nil {
|
||||
break
|
||||
}
|
||||
size -= readLimit
|
||||
if size < readLimit && size > 0 {
|
||||
b = b[:size]
|
||||
}
|
||||
}
|
||||
return buf.String(), NewTProtocolException(e)
|
||||
}
|
@ -1,92 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

import (
	"bufio"
	"context"
)

type TBufferedTransportFactory struct {
	size int
}

type TBufferedTransport struct {
	bufio.ReadWriter
	tp TTransport
}

func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
	return NewTBufferedTransport(trans, p.size), nil
}

func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
	return &TBufferedTransportFactory{size: bufferSize}
}

func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
	return &TBufferedTransport{
		ReadWriter: bufio.ReadWriter{
			Reader: bufio.NewReaderSize(trans, bufferSize),
			Writer: bufio.NewWriterSize(trans, bufferSize),
		},
		tp: trans,
	}
}

func (p *TBufferedTransport) IsOpen() bool {
	return p.tp.IsOpen()
}

func (p *TBufferedTransport) Open() (err error) {
	return p.tp.Open()
}

func (p *TBufferedTransport) Close() (err error) {
	return p.tp.Close()
}

func (p *TBufferedTransport) Read(b []byte) (int, error) {
	n, err := p.ReadWriter.Read(b)
	if err != nil {
		p.ReadWriter.Reader.Reset(p.tp)
	}
	return n, err
}

func (p *TBufferedTransport) Write(b []byte) (int, error) {
	n, err := p.ReadWriter.Write(b)
	if err != nil {
		p.ReadWriter.Writer.Reset(p.tp)
	}
	return n, err
}

func (p *TBufferedTransport) Flush(ctx context.Context) error {
	if err := p.ReadWriter.Flush(); err != nil {
		p.ReadWriter.Writer.Reset(p.tp)
		return err
	}
	return p.tp.Flush(ctx)
}

func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
	return p.tp.RemainingBytes()
}
@ -1,85 +0,0 @@
package thrift

import (
	"context"
	"fmt"
)

type TClient interface {
	Call(ctx context.Context, method string, args, result TStruct) error
}

type TStandardClient struct {
	seqId        int32
	iprot, oprot TProtocol
}

// TStandardClient implements TClient, and uses the standard message format for Thrift.
// It is not safe for concurrent use.
func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
	return &TStandardClient{
		iprot: inputProtocol,
		oprot: outputProtocol,
	}
}

func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
	if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil {
		return err
	}
	if err := args.Write(oprot); err != nil {
		return err
	}
	if err := oprot.WriteMessageEnd(); err != nil {
		return err
	}
	return oprot.Flush(ctx)
}

func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error {
	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin()
	if err != nil {
		return err
	}

	if method != rMethod {
		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
	} else if seqId != rSeqId {
		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
	} else if rTypeId == EXCEPTION {
		var exception tApplicationException
		if err := exception.Read(iprot); err != nil {
			return err
		}

		if err := iprot.ReadMessageEnd(); err != nil {
			return err
		}

		return &exception
	} else if rTypeId != REPLY {
		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
	}

	if err := result.Read(iprot); err != nil {
		return err
	}

	return iprot.ReadMessageEnd()
}

func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error {
	p.seqId++
	seqId := p.seqId

	if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
		return err
	}

	// method is oneway
	if result == nil {
		return nil
	}

	return p.Recv(p.iprot, seqId, method, result)
}
@ -1,810 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
COMPACT_PROTOCOL_ID = 0x082
|
||||
COMPACT_VERSION = 1
|
||||
COMPACT_VERSION_MASK = 0x1f
|
||||
COMPACT_TYPE_MASK = 0x0E0
|
||||
COMPACT_TYPE_BITS = 0x07
|
||||
COMPACT_TYPE_SHIFT_AMOUNT = 5
|
||||
)
|
||||
|
||||
type tCompactType byte
|
||||
|
||||
const (
|
||||
COMPACT_BOOLEAN_TRUE = 0x01
|
||||
COMPACT_BOOLEAN_FALSE = 0x02
|
||||
COMPACT_BYTE = 0x03
|
||||
COMPACT_I16 = 0x04
|
||||
COMPACT_I32 = 0x05
|
||||
COMPACT_I64 = 0x06
|
||||
COMPACT_DOUBLE = 0x07
|
||||
COMPACT_BINARY = 0x08
|
||||
COMPACT_LIST = 0x09
|
||||
COMPACT_SET = 0x0A
|
||||
COMPACT_MAP = 0x0B
|
||||
COMPACT_STRUCT = 0x0C
|
||||
)
|
||||
|
||||
var (
|
||||
ttypeToCompactType map[TType]tCompactType
|
||||
)
|
||||
|
||||
func init() {
|
||||
ttypeToCompactType = map[TType]tCompactType{
|
||||
STOP: STOP,
|
||||
BOOL: COMPACT_BOOLEAN_TRUE,
|
||||
BYTE: COMPACT_BYTE,
|
||||
I16: COMPACT_I16,
|
||||
I32: COMPACT_I32,
|
||||
I64: COMPACT_I64,
|
||||
DOUBLE: COMPACT_DOUBLE,
|
||||
STRING: COMPACT_BINARY,
|
||||
LIST: COMPACT_LIST,
|
||||
SET: COMPACT_SET,
|
||||
MAP: COMPACT_MAP,
|
||||
STRUCT: COMPACT_STRUCT,
|
||||
}
|
||||
}
|
||||
|
||||
type TCompactProtocolFactory struct{}
|
||||
|
||||
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
|
||||
return &TCompactProtocolFactory{}
|
||||
}
|
||||
|
||||
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return NewTCompactProtocol(trans)
|
||||
}
|
||||
|
||||
type TCompactProtocol struct {
|
||||
trans TRichTransport
|
||||
origTransport TTransport
|
||||
|
||||
// Used to keep track of the last field for the current and previous structs,
|
||||
// so we can do the delta stuff.
|
||||
lastField []int
|
||||
lastFieldId int
|
||||
|
||||
// If we encounter a boolean field begin, save the TField here so it can
|
||||
// have the value incorporated.
|
||||
booleanFieldName string
|
||||
booleanFieldId int16
|
||||
booleanFieldPending bool
|
||||
|
||||
// If we read a field header, and it's a boolean field, save the boolean
|
||||
// value here so that readBool can use it.
|
||||
boolValue bool
|
||||
boolValueIsNotNull bool
|
||||
buffer [64]byte
|
||||
}
|
||||
|
||||
// Create a TCompactProtocol given a TTransport
|
||||
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
|
||||
p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
|
||||
if et, ok := trans.(TRichTransport); ok {
|
||||
p.trans = et
|
||||
} else {
|
||||
p.trans = NewTRichTransport(trans)
|
||||
}
|
||||
|
||||
return p
|
||||
|
||||
}
|
||||
|
||||
//
|
||||
// Public Writing methods.
|
||||
//
|
||||
|
||||
// Write a message header to the wire. Compact Protocol messages contain the
|
||||
// protocol version so we can migrate forwards in the future if need be.
|
||||
func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
_, err = p.writeVarint32(seqid)
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
e := p.WriteString(name)
|
||||
return e
|
||||
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
|
||||
|
||||
// Write a struct begin. This doesn't actually put anything on the wire. We
|
||||
// use it as an opportunity to put special placeholder markers on the field
|
||||
// stack so we can get the field id deltas correct.
|
||||
func (p *TCompactProtocol) WriteStructBegin(name string) error {
|
||||
p.lastField = append(p.lastField, p.lastFieldId)
|
||||
p.lastFieldId = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write a struct end. This doesn't actually put anything on the wire. We use
|
||||
// this as an opportunity to pop the last field from the current struct off
|
||||
// of the field stack.
|
||||
func (p *TCompactProtocol) WriteStructEnd() error {
|
||||
p.lastFieldId = p.lastField[len(p.lastField)-1]
|
||||
p.lastField = p.lastField[:len(p.lastField)-1]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
if typeId == BOOL {
|
||||
// we want to possibly include the value, so we'll wait.
|
||||
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
|
||||
return nil
|
||||
}
|
||||
_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// The workhorse of writeFieldBegin. It has the option of doing a
|
||||
// 'type override' of the type header. This is used specifically in the
|
||||
// boolean field case.
|
||||
func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
|
||||
// short lastField = lastField_.pop();
|
||||
|
||||
// if there's a type override, use that.
|
||||
var typeToWrite byte
|
||||
if typeOverride == 0xFF {
|
||||
typeToWrite = byte(p.getCompactType(typeId))
|
||||
} else {
|
||||
typeToWrite = typeOverride
|
||||
}
|
||||
// check if we can use delta encoding for the field id
|
||||
fieldId := int(id)
|
||||
written := 0
|
||||
if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
|
||||
// write them together
|
||||
err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
// write them separate
|
||||
err := p.writeByteDirect(typeToWrite)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = p.WriteI16(id)
|
||||
written = 1 + 2
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
p.lastFieldId = fieldId
|
||||
// p.lastField.Push(field.id);
|
||||
return written, nil
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldStop() error {
|
||||
err := p.writeByteDirect(STOP)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
if size == 0 {
|
||||
err := p.writeByteDirect(0)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
_, err := p.writeVarint32(int32(size))
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMapEnd() error { return nil }
|
||||
|
||||
// Write a list header.
|
||||
func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
_, err := p.writeCollectionBegin(elemType, size)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteListEnd() error { return nil }
|
||||
|
||||
// Write a set header.
|
||||
func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
_, err := p.writeCollectionBegin(elemType, size)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteSetEnd() error { return nil }
|
||||
|
||||
func (p *TCompactProtocol) WriteBool(value bool) error {
|
||||
v := byte(COMPACT_BOOLEAN_FALSE)
|
||||
if value {
|
||||
v = byte(COMPACT_BOOLEAN_TRUE)
|
||||
}
|
||||
if p.booleanFieldPending {
|
||||
// we haven't written the field header yet
|
||||
_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
|
||||
p.booleanFieldPending = false
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
// we're not part of a field, so just write the value.
|
||||
err := p.writeByteDirect(v)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a byte. Nothing to see here!
|
||||
func (p *TCompactProtocol) WriteByte(value int8) error {
|
||||
err := p.writeByteDirect(byte(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an I16 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI16(value int16) error {
|
||||
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an i32 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI32(value int32) error {
|
||||
_, err := p.writeVarint32(p.int32ToZigzag(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an i64 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI64(value int64) error {
|
||||
_, err := p.writeVarint64(p.int64ToZigzag(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a double to the wire as 8 bytes.
|
||||
func (p *TCompactProtocol) WriteDouble(value float64) error {
|
||||
buf := p.buffer[0:8]
|
||||
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
|
||||
_, err := p.trans.Write(buf)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a string to the wire with a varint size preceding.
|
||||
func (p *TCompactProtocol) WriteString(value string) error {
|
||||
_, e := p.writeVarint32(int32(len(value)))
|
||||
if e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if len(value) > 0 {
|
||||
}
|
||||
_, e = p.trans.WriteString(value)
|
||||
return e
|
||||
}
|
||||
|
||||
// Write a byte array, using a varint for the size.
|
||||
func (p *TCompactProtocol) WriteBinary(bin []byte) error {
|
||||
_, e := p.writeVarint32(int32(len(bin)))
|
||||
if e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if len(bin) > 0 {
|
||||
_, e = p.trans.Write(bin)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
// Reading methods.
|
||||
//
|
||||
|
||||
// Read a message header.
|
||||
func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
|
||||
protocolId, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if protocolId != COMPACT_PROTOCOL_ID {
|
||||
e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
|
||||
return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
|
||||
}
|
||||
|
||||
versionAndType, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version := versionAndType & COMPACT_VERSION_MASK
|
||||
typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
|
||||
if version != COMPACT_VERSION {
|
||||
e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
|
||||
err = NewTProtocolExceptionWithType(BAD_VERSION, e)
|
||||
return
|
||||
}
|
||||
seqId, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
name, err = p.ReadString()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
|
||||
|
||||
// Read a struct begin. There's nothing on the wire for this, but it is our
|
||||
// opportunity to push a new struct begin marker onto the field stack.
|
||||
func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
|
||||
p.lastField = append(p.lastField, p.lastFieldId)
|
||||
p.lastFieldId = 0
|
||||
return
|
||||
}
|
||||
|
||||
// Doesn't actually consume any wire data, just removes the last field for
|
||||
// this struct from the field stack.
|
||||
func (p *TCompactProtocol) ReadStructEnd() error {
|
||||
// consume the last field we read off the wire.
|
||||
p.lastFieldId = p.lastField[len(p.lastField)-1]
|
||||
p.lastField = p.lastField[:len(p.lastField)-1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read a field header off the wire.
|
||||
func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
|
||||
t, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// if it's a stop, then we can return immediately, as the struct is over.
|
||||
if (t & 0x0f) == STOP {
|
||||
return "", STOP, 0, nil
|
||||
}
|
||||
|
||||
// mask off the 4 MSB of the type header. it could contain a field id delta.
|
||||
modifier := int16((t & 0xf0) >> 4)
|
||||
if modifier == 0 {
|
||||
// not a delta. look ahead for the zigzag varint field id.
|
||||
id, err = p.ReadI16()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// has a delta. add the delta to the last read field id.
|
||||
id = int16(p.lastFieldId) + modifier
|
||||
}
|
||||
typeId, e := p.getTType(tCompactType(t & 0x0f))
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
|
||||
// if this happens to be a boolean field, the value is encoded in the type
|
||||
if p.isBoolType(t) {
|
||||
// save the boolean value in a special instance variable.
|
||||
p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
|
||||
p.boolValueIsNotNull = true
|
||||
}
|
||||
|
||||
// push the new field onto the field stack so we can keep the deltas going.
|
||||
p.lastFieldId = int(id)
|
||||
return
|
||||
}
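For reference, a small sketch (not part of the vendored file) of how the short-form field header above is packed: the high nibble carries the field-id delta and the low nibble the compact type; a zero delta means the field id follows separately as a zigzag varint.
// Hypothetical helper, for illustration only.
func decodeShortFieldHeader(header byte, lastFieldID int16) (fieldID int16, rawType byte) {
	delta := int16(header&0xf0) >> 4 // 1-15; 0 means the field id follows as a zigzag varint
	return lastFieldID + delta, header & 0x0f
}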
|
||||
|
||||
func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
|
||||
|
||||
// Read a map header off the wire. If the size is zero, skip reading the key
|
||||
// and value type. This means that 0-length maps will yield TMaps without the
|
||||
// "correct" types.
|
||||
func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
|
||||
size32, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
|
||||
keyAndValueType := byte(STOP)
|
||||
if size != 0 {
|
||||
keyAndValueType, err = p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
|
||||
valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
|
||||
return
|
||||
}
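For reference, a minimal sketch (not part of the vendored file) of the map header parsed above: a size varint, then, for non-empty maps, a single byte with the key type in the high nibble and the value type in the low nibble.
// Hypothetical helper, for illustration only.
func splitMapTypes(keyAndValueType byte) (keyNibble, valueNibble byte) {
	return keyAndValueType >> 4, keyAndValueType & 0x0f
}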
|
||||
|
||||
func (p *TCompactProtocol) ReadMapEnd() error { return nil }
|
||||
|
||||
// Read a list header off the wire. If the list size is 0-14, the size will
|
||||
// be packed into the element type header. If it's a longer list, the 4 MSB
|
||||
// of the element type header will be 0xF, and a varint will follow with the
|
||||
// true size.
|
||||
func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
size_and_type, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
size = int((size_and_type >> 4) & 0x0f)
|
||||
if size == 15 {
|
||||
size2, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size2 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size2)
|
||||
}
|
||||
elemType, e := p.getTType(tCompactType(size_and_type))
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
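For reference, a minimal sketch (not part of the vendored file) of the packed list/set header byte handled above: sizes 0-14 ride in the high nibble, 0x0f there means the real size follows as a varint, and the low nibble is the element type.
// Hypothetical helper, for illustration only.
func splitListHeader(sizeAndType byte) (packedSize int, rawType byte, sizeIsVarint bool) {
	packedSize = int(sizeAndType>>4) & 0x0f
	return packedSize, sizeAndType & 0x0f, packedSize == 15
}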
|
||||
|
||||
func (p *TCompactProtocol) ReadListEnd() error { return nil }
|
||||
|
||||
// Read a set header off the wire. If the set size is 0-14, the size will
|
||||
// be packed into the element type header. If it's a longer set, the 4 MSB
|
||||
// of the element type header will be 0xF, and a varint will follow with the
|
||||
// true size.
|
||||
func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
return p.ReadListBegin()
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadSetEnd() error { return nil }
|
||||
|
||||
// Read a boolean off the wire. If this is a boolean field, the value should
|
||||
// already have been read during readFieldBegin, so we'll just consume the
|
||||
// pre-stored value. Otherwise, read a byte.
|
||||
func (p *TCompactProtocol) ReadBool() (value bool, err error) {
|
||||
if p.boolValueIsNotNull {
|
||||
p.boolValueIsNotNull = false
|
||||
return p.boolValue, nil
|
||||
}
|
||||
v, err := p.readByteDirect()
|
||||
return v == COMPACT_BOOLEAN_TRUE, err
|
||||
}
|
||||
|
||||
// Read a single byte off the wire. Nothing interesting here.
|
||||
func (p *TCompactProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return 0, NewTProtocolException(err)
|
||||
}
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
// Read an i16 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI16() (value int16, err error) {
|
||||
v, err := p.ReadI32()
|
||||
return int16(v), err
|
||||
}
|
||||
|
||||
// Read an i32 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI32() (value int32, err error) {
|
||||
v, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return 0, NewTProtocolException(e)
|
||||
}
|
||||
value = p.zigzagToInt32(v)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Read an i64 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI64() (value int64, err error) {
|
||||
v, e := p.readVarint64()
|
||||
if e != nil {
|
||||
return 0, NewTProtocolException(e)
|
||||
}
|
||||
value = p.zigzagToInt64(v)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// No magic here - just read a double off the wire.
|
||||
func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
|
||||
longBits := p.buffer[0:8]
|
||||
_, e := io.ReadFull(p.trans, longBits)
|
||||
if e != nil {
|
||||
return 0.0, NewTProtocolException(e)
|
||||
}
|
||||
return math.Float64frombits(p.bytesToUint64(longBits)), nil
|
||||
}
|
||||
|
||||
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
|
||||
func (p *TCompactProtocol) ReadString() (value string, err error) {
|
||||
length, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return "", NewTProtocolException(e)
|
||||
}
|
||||
if length < 0 {
|
||||
return "", invalidDataLength
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
return "", nil
|
||||
}
|
||||
var buf []byte
|
||||
if length <= int32(len(p.buffer)) {
|
||||
buf = p.buffer[0:length]
|
||||
} else {
|
||||
buf = make([]byte, length)
|
||||
}
|
||||
_, e = io.ReadFull(p.trans, buf)
|
||||
return string(buf), NewTProtocolException(e)
|
||||
}
|
||||
|
||||
// Read a []byte from the wire.
|
||||
func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
|
||||
length, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return nil, NewTProtocolException(e)
|
||||
}
|
||||
if length == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
if length < 0 {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
|
||||
buf := make([]byte, length)
|
||||
_, e = io.ReadFull(p.trans, buf)
|
||||
return buf, NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
|
||||
return NewTProtocolException(p.trans.Flush(ctx))
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Transport() TTransport {
|
||||
return p.origTransport
|
||||
}
|
||||
|
||||
//
|
||||
// Internal writing methods
|
||||
//
|
||||
|
||||
// Abstract method for writing the start of lists and sets. List and sets on
|
||||
// the wire differ only by the type indicator.
|
||||
func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
|
||||
if size <= 14 {
|
||||
return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
|
||||
}
|
||||
err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
m, err := p.writeVarint32(int32(size))
|
||||
return 1 + m, err
|
||||
}
|
||||
|
||||
// Write an i32 as a varint. Results in 1-5 bytes on the wire.
|
||||
// TODO(pomack): make a permanent buffer like writeVarint64?
|
||||
func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
|
||||
i32buf := p.buffer[0:5]
|
||||
idx := 0
|
||||
for {
|
||||
if (n & ^0x7F) == 0 {
|
||||
i32buf[idx] = byte(n)
|
||||
idx++
|
||||
// p.writeByteDirect(byte(n));
|
||||
break
|
||||
// return;
|
||||
} else {
|
||||
i32buf[idx] = byte((n & 0x7F) | 0x80)
|
||||
idx++
|
||||
// p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
|
||||
u := uint32(n)
|
||||
n = int32(u >> 7)
|
||||
}
|
||||
}
|
||||
return p.trans.Write(i32buf[0:idx])
|
||||
}
|
||||
|
||||
// Write an i64 as a varint. Results in 1-10 bytes on the wire.
|
||||
func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
|
||||
varint64out := p.buffer[0:10]
|
||||
idx := 0
|
||||
for {
|
||||
if (n & ^0x7F) == 0 {
|
||||
varint64out[idx] = byte(n)
|
||||
idx++
|
||||
break
|
||||
} else {
|
||||
varint64out[idx] = byte((n & 0x7F) | 0x80)
|
||||
idx++
|
||||
u := uint64(n)
|
||||
n = int64(u >> 7)
|
||||
}
|
||||
}
|
||||
return p.trans.Write(varint64out[0:idx])
|
||||
}
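A worked example of the varint encoding above, added for reference only (not part of the vendored file):
// 300 = 0x2C | (0x02 << 7), so writeVarint64(300) emits two bytes: 0xAC
// (the low 7 bits with the continuation bit set in the MSB) followed by 0x02.
// Every byte except the last has its high bit set.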
|
||||
|
||||
// Convert l into a zigzag long. This allows negative numbers to be
|
||||
// represented compactly as a varint.
|
||||
func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
|
||||
return (l << 1) ^ (l >> 63)
|
||||
}
|
||||
|
||||
// Convert n into a zigzag int. This allows negative numbers to be
|
||||
// represented compactly as a varint.
|
||||
func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
|
||||
return (n << 1) ^ (n >> 31)
|
||||
}
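For reference (not part of the vendored file), the zigzag mapping above interleaves signed values so that small magnitudes stay small once varint-encoded:
//   0 -> 0,  -1 -> 1,  1 -> 2,  -2 -> 3,  2 -> 4, ...
// e.g. int32ToZigzag(-1) == 1 and int32ToZigzag(2) == 4.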
|
||||
|
||||
func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
|
||||
binary.LittleEndian.PutUint64(buf, n)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
|
||||
binary.LittleEndian.PutUint64(buf, uint64(n))
|
||||
}
|
||||
|
||||
// Writes a byte without any possibility of all that field header nonsense.
|
||||
// Used internally by other writing methods that know they need to write a byte.
|
||||
func (p *TCompactProtocol) writeByteDirect(b byte) error {
|
||||
return p.trans.WriteByte(b)
|
||||
}
|
||||
|
||||
// Writes a byte without any possibility of all that field header nonsense.
|
||||
func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
|
||||
return 1, p.writeByteDirect(byte(n))
|
||||
}
|
||||
|
||||
//
|
||||
// Internal reading methods
|
||||
//
|
||||
|
||||
// Read an i32 from the wire as a varint. The MSB of each byte is set
|
||||
// if there is another byte to follow. This can read up to 5 bytes.
|
||||
func (p *TCompactProtocol) readVarint32() (int32, error) {
|
||||
// if the wire contains the right stuff, this will just truncate the i64 we
|
||||
// read and get us the right sign.
|
||||
v, err := p.readVarint64()
|
||||
return int32(v), err
|
||||
}
|
||||
|
||||
// Read an i64 from the wire as a proper varint. The MSB of each byte is set
|
||||
// if there is another byte to follow. This can read up to 10 bytes.
|
||||
func (p *TCompactProtocol) readVarint64() (int64, error) {
|
||||
shift := uint(0)
|
||||
result := int64(0)
|
||||
for {
|
||||
b, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
result |= int64(b&0x7f) << shift
|
||||
if (b & 0x80) != 0x80 {
|
||||
break
|
||||
}
|
||||
shift += 7
|
||||
}
|
||||
return result, nil
|
||||
}
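For reference (not part of the vendored file): this is the same unsigned base-128 varint scheme used by Go's encoding/binary, so bytes written by writeVarint64 can also be decoded with the standard library:
//   v, n := binary.Uvarint([]byte{0xAC, 0x02}) // v == 300, n == 2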
|
||||
|
||||
// Read a single byte directly from the transport, unlike ReadByte, which returns a Thrift byte (i8).
|
||||
func (p *TCompactProtocol) readByteDirect() (byte, error) {
|
||||
return p.trans.ReadByte()
|
||||
}
|
||||
|
||||
//
|
||||
// encoding helpers
|
||||
//
|
||||
|
||||
// Convert from zigzag int to int.
|
||||
func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
|
||||
u := uint32(n)
|
||||
return int32(u>>1) ^ -(n & 1)
|
||||
}
|
||||
|
||||
// Convert from zigzag long to long.
|
||||
func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
|
||||
u := uint64(n)
|
||||
return int64(u>>1) ^ -(n & 1)
|
||||
}
|
||||
|
||||
// Note that it's important that the mask bytes are long literals,
|
||||
// otherwise they'll default to ints, and when you shift an int left 56 bits,
|
||||
// you just get a messed up int.
|
||||
func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint64(b))
|
||||
}
|
||||
|
||||
// Note that it's important that the mask bytes are long literals,
|
||||
// otherwise they'll default to ints, and when you shift an int left 56 bits,
|
||||
// you just get a messed up int.
|
||||
func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(b)
|
||||
}
|
||||
|
||||
//
|
||||
// type testing and converting
|
||||
//
|
||||
|
||||
func (p *TCompactProtocol) isBoolType(b byte) bool {
|
||||
return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
|
||||
}
|
||||
|
||||
// Given a tCompactType constant, convert it to its corresponding
|
||||
// TType value.
|
||||
func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
|
||||
switch byte(t) & 0x0f {
|
||||
case STOP:
|
||||
return STOP, nil
|
||||
case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
|
||||
return BOOL, nil
|
||||
case COMPACT_BYTE:
|
||||
return BYTE, nil
|
||||
case COMPACT_I16:
|
||||
return I16, nil
|
||||
case COMPACT_I32:
|
||||
return I32, nil
|
||||
case COMPACT_I64:
|
||||
return I64, nil
|
||||
case COMPACT_DOUBLE:
|
||||
return DOUBLE, nil
|
||||
case COMPACT_BINARY:
|
||||
return STRING, nil
|
||||
case COMPACT_LIST:
|
||||
return LIST, nil
|
||||
case COMPACT_SET:
|
||||
return SET, nil
|
||||
case COMPACT_MAP:
|
||||
return MAP, nil
|
||||
case COMPACT_STRUCT:
|
||||
return STRUCT, nil
|
||||
}
|
||||
return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f))
|
||||
}
|
||||
|
||||
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
|
||||
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
|
||||
return ttypeToCompactType[t]
|
||||
}
|
|
@ -1,24 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import "context"
|
||||
|
||||
var defaultCtx = context.Background()
|
|
@ -1,270 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
)
|
||||
|
||||
type TDebugProtocol struct {
|
||||
Delegate TProtocol
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
type TDebugProtocolFactory struct {
|
||||
Underlying TProtocolFactory
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
|
||||
return &TDebugProtocolFactory{
|
||||
Underlying: underlying,
|
||||
LogPrefix: logPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return &TDebugProtocol{
|
||||
Delegate: t.Underlying.GetProtocol(trans),
|
||||
LogPrefix: t.LogPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
|
||||
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMessageEnd() error {
|
||||
err := tdp.Delegate.WriteMessageEnd()
|
||||
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
|
||||
err := tdp.Delegate.WriteStructBegin(name)
|
||||
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructEnd() error {
|
||||
err := tdp.Delegate.WriteStructEnd()
|
||||
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
|
||||
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldEnd() error {
|
||||
err := tdp.Delegate.WriteFieldEnd()
|
||||
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldStop() error {
|
||||
err := tdp.Delegate.WriteFieldStop()
|
||||
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
|
||||
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapEnd() error {
|
||||
err := tdp.Delegate.WriteMapEnd()
|
||||
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteListBegin(elemType, size)
|
||||
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListEnd() error {
|
||||
err := tdp.Delegate.WriteListEnd()
|
||||
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteSetBegin(elemType, size)
|
||||
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetEnd() error {
|
||||
err := tdp.Delegate.WriteSetEnd()
|
||||
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBool(value bool) error {
|
||||
err := tdp.Delegate.WriteBool(value)
|
||||
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteByte(value int8) error {
|
||||
err := tdp.Delegate.WriteByte(value)
|
||||
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI16(value int16) error {
|
||||
err := tdp.Delegate.WriteI16(value)
|
||||
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI32(value int32) error {
|
||||
err := tdp.Delegate.WriteI32(value)
|
||||
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI64(value int64) error {
|
||||
err := tdp.Delegate.WriteI64(value)
|
||||
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
|
||||
err := tdp.Delegate.WriteDouble(value)
|
||||
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteString(value string) error {
|
||||
err := tdp.Delegate.WriteString(value)
|
||||
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
|
||||
err := tdp.Delegate.WriteBinary(value)
|
||||
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
|
||||
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
|
||||
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMessageEnd()
|
||||
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
|
||||
name, err = tdp.Delegate.ReadStructBegin()
|
||||
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
|
||||
err = tdp.Delegate.ReadStructEnd()
|
||||
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
|
||||
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
|
||||
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
|
||||
err = tdp.Delegate.ReadFieldEnd()
|
||||
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
|
||||
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
|
||||
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMapEnd()
|
||||
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadListBegin()
|
||||
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
|
||||
err = tdp.Delegate.ReadListEnd()
|
||||
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadSetBegin()
|
||||
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
|
||||
err = tdp.Delegate.ReadSetEnd()
|
||||
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
|
||||
value, err = tdp.Delegate.ReadBool()
|
||||
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
|
||||
value, err = tdp.Delegate.ReadByte()
|
||||
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
|
||||
value, err = tdp.Delegate.ReadI16()
|
||||
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
|
||||
value, err = tdp.Delegate.ReadI32()
|
||||
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
|
||||
value, err = tdp.Delegate.ReadI64()
|
||||
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
|
||||
value, err = tdp.Delegate.ReadDouble()
|
||||
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
|
||||
value, err = tdp.Delegate.ReadString()
|
||||
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
|
||||
value, err = tdp.Delegate.ReadBinary()
|
||||
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
|
||||
err = tdp.Delegate.Skip(fieldType)
|
||||
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
|
||||
err = tdp.Delegate.Flush(ctx)
|
||||
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) Transport() TTransport {
|
||||
return tdp.Delegate.Transport()
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
type TDeserializer struct {
|
||||
Transport TTransport
|
||||
Protocol TProtocol
|
||||
}
|
||||
|
||||
func NewTDeserializer() *TDeserializer {
|
||||
var transport TTransport
|
||||
transport = NewTMemoryBufferLen(1024)
|
||||
|
||||
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
|
||||
|
||||
return &TDeserializer{
|
||||
transport,
|
||||
protocol}
|
||||
}
|
||||
|
||||
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write([]byte(s)); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Generic Thrift exception
|
||||
type TException interface {
|
||||
error
|
||||
}
|
||||
|
||||
// Prepends additional information to an error without losing the Thrift exception interface
|
||||
func PrependError(prepend string, err error) error {
|
||||
if t, ok := err.(TTransportException); ok {
|
||||
return NewTTransportException(t.TypeId(), prepend+t.Error())
|
||||
}
|
||||
if t, ok := err.(TProtocolException); ok {
|
||||
return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
|
||||
}
|
||||
if t, ok := err.(TApplicationException); ok {
|
||||
return NewTApplicationException(t.TypeId(), prepend+t.Error())
|
||||
}
|
||||
|
||||
return errors.New(prepend + err.Error())
|
||||
}
|
|
@ -1,79 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Helper class that encapsulates field metadata.
|
||||
type field struct {
|
||||
name string
|
||||
typeId TType
|
||||
id int
|
||||
}
|
||||
|
||||
func newField(n string, t TType, i int) *field {
|
||||
return &field{name: n, typeId: t, id: i}
|
||||
}
|
||||
|
||||
func (p *field) Name() string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.name
|
||||
}
|
||||
|
||||
func (p *field) TypeId() TType {
|
||||
if p == nil {
|
||||
return TType(VOID)
|
||||
}
|
||||
return p.typeId
|
||||
}
|
||||
|
||||
func (p *field) Id() int {
|
||||
if p == nil {
|
||||
return -1
|
||||
}
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *field) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
|
||||
}
|
||||
|
||||
var ANONYMOUS_FIELD *field
|
||||
|
||||
type fieldSlice []field
|
||||
|
||||
func (p fieldSlice) Len() int {
|
||||
return len(p)
|
||||
}
|
||||
|
||||
func (p fieldSlice) Less(i, j int) bool {
|
||||
return p[i].Id() < p[j].Id()
|
||||
}
|
||||
|
||||
func (p fieldSlice) Swap(i, j int) {
|
||||
p[i], p[j] = p[j], p[i]
|
||||
}
|
||||
|
||||
func init() {
|
||||
ANONYMOUS_FIELD = newField("", STOP, 0)
|
||||
}
|
|
@ -1,173 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const DEFAULT_MAX_LENGTH = 16384000
|
||||
|
||||
type TFramedTransport struct {
|
||||
transport TTransport
|
||||
buf bytes.Buffer
|
||||
reader *bufio.Reader
|
||||
frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header
|
||||
buffer [4]byte
|
||||
maxLength uint32
|
||||
}
|
||||
|
||||
type tFramedTransportFactory struct {
|
||||
factory TTransportFactory
|
||||
maxLength uint32
|
||||
}
|
||||
|
||||
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
|
||||
return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
|
||||
}
|
||||
|
||||
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
|
||||
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
|
||||
}
|
||||
|
||||
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
|
||||
tt, err := p.factory.GetTransport(base)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTFramedTransportMaxLength(tt, p.maxLength), nil
|
||||
}
|
||||
|
||||
func NewTFramedTransport(transport TTransport) *TFramedTransport {
|
||||
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
|
||||
}
|
||||
|
||||
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
|
||||
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Open() error {
|
||||
return p.transport.Open()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) IsOpen() bool {
|
||||
return p.transport.IsOpen()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Close() error {
|
||||
return p.transport.Close()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
|
||||
if p.frameSize == 0 {
|
||||
p.frameSize, err = p.readFrameHeader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.frameSize < uint32(len(buf)) {
|
||||
frameSize := p.frameSize
|
||||
tmp := make([]byte, p.frameSize)
|
||||
l, err = p.Read(tmp)
|
||||
copy(buf, tmp)
|
||||
if err == nil {
|
||||
err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
|
||||
return
|
||||
}
|
||||
}
|
||||
got, err := p.reader.Read(buf)
|
||||
p.frameSize = p.frameSize - uint32(got)
|
||||
//sanity check
|
||||
if p.frameSize < 0 {
|
||||
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
|
||||
}
|
||||
return got, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) ReadByte() (c byte, err error) {
|
||||
if p.frameSize == 0 {
|
||||
p.frameSize, err = p.readFrameHeader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.frameSize < 1 {
|
||||
return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
|
||||
}
|
||||
c, err = p.reader.ReadByte()
|
||||
if err == nil {
|
||||
p.frameSize--
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Write(buf []byte) (int, error) {
|
||||
n, err := p.buf.Write(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) WriteByte(c byte) error {
|
||||
return p.buf.WriteByte(c)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
|
||||
return p.buf.WriteString(s)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Flush(ctx context.Context) error {
|
||||
size := p.buf.Len()
|
||||
buf := p.buffer[:4]
|
||||
binary.BigEndian.PutUint32(buf, uint32(size))
|
||||
_, err := p.transport.Write(buf)
|
||||
if err != nil {
|
||||
p.buf.Truncate(0)
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
if size > 0 {
|
||||
if n, err := p.buf.WriteTo(p.transport); err != nil {
|
||||
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
|
||||
p.buf.Truncate(0)
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
}
|
||||
err = p.transport.Flush(ctx)
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) readFrameHeader() (uint32, error) {
|
||||
buf := p.buffer[:4]
|
||||
if _, err := io.ReadFull(p.reader, buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
size := binary.BigEndian.Uint32(buf)
|
||||
if size < 0 || size > p.maxLength {
|
||||
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
|
||||
}
|
||||
return size, nil
|
||||
}
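For reference (not part of the vendored file), the frame layout that Flush writes and readFrameHeader consumes is a 4-byte big-endian payload length followed by the payload itself:
//   []byte{0x00, 0x00, 0x00, 0x05, /* 5 payload bytes */}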
|
||||
|
||||
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
|
||||
return uint64(p.frameSize)
|
||||
}
|
|
@ -1,242 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Default to using the shared http client. Library users are
|
||||
// free to change this global client or specify one through
|
||||
// THttpClientOptions.
|
||||
var DefaultHttpClient *http.Client = http.DefaultClient
|
||||
|
||||
type THttpClient struct {
|
||||
client *http.Client
|
||||
response *http.Response
|
||||
url *url.URL
|
||||
requestBuffer *bytes.Buffer
|
||||
header http.Header
|
||||
nsecConnectTimeout int64
|
||||
nsecReadTimeout int64
|
||||
}
|
||||
|
||||
type THttpClientTransportFactory struct {
|
||||
options THttpClientOptions
|
||||
url string
|
||||
}
|
||||
|
||||
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*THttpClient)
|
||||
if ok && t.url != nil {
|
||||
return NewTHttpClientWithOptions(t.url.String(), p.options)
|
||||
}
|
||||
}
|
||||
return NewTHttpClientWithOptions(p.url, p.options)
|
||||
}
|
||||
|
||||
type THttpClientOptions struct {
|
||||
// If nil, DefaultHttpClient is used
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return &THttpClientTransportFactory{url: url, options: options}
|
||||
}
|
||||
|
||||
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
parsedURL, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf := make([]byte, 0, 1024)
|
||||
client := options.Client
|
||||
if client == nil {
|
||||
client = DefaultHttpClient
|
||||
}
|
||||
httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
|
||||
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
|
||||
}
|
||||
|
||||
func NewTHttpClient(urlstr string) (TTransport, error) {
|
||||
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||
}
|
||||
|
||||
// Set the HTTP Header for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
|
||||
func (p *THttpClient) SetHeader(key string, value string) {
|
||||
p.header.Add(key, value)
|
||||
}
|
||||
|
||||
// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// hdrValue := httpTrans.GetHeader("User-Agent")
|
||||
func (p *THttpClient) GetHeader(key string) string {
|
||||
return p.header.Get(key)
|
||||
}
|
||||
|
||||
// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// httpTrans.DelHeader("User-Agent")
|
||||
func (p *THttpClient) DelHeader(key string) {
|
||||
p.header.Del(key)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Open() error {
|
||||
// do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) IsOpen() bool {
|
||||
return p.response != nil || p.requestBuffer != nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) closeResponse() error {
|
||||
var err error
|
||||
if p.response != nil && p.response.Body != nil {
|
||||
// The docs specify that if keepalive is enabled and the response body is not
|
||||
// read to completion the connection will never be returned to the pool and
|
||||
// reused. Errors are being ignored here because if the connection is invalid
|
||||
// and this fails for some reason, the Close() method will do any remaining
|
||||
// cleanup.
|
||||
io.Copy(ioutil.Discard, p.response.Body)
|
||||
|
||||
err = p.response.Body.Close()
|
||||
}
|
||||
|
||||
p.response = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *THttpClient) Close() error {
|
||||
if p.requestBuffer != nil {
|
||||
p.requestBuffer.Reset()
|
||||
p.requestBuffer = nil
|
||||
}
|
||||
return p.closeResponse()
|
||||
}
|
||||
|
||||
func (p *THttpClient) Read(buf []byte) (int, error) {
|
||||
if p.response == nil {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
|
||||
}
|
||||
n, err := p.response.Body.Read(buf)
|
||||
if n > 0 && (err == nil || err == io.EOF) {
|
||||
return n, nil
|
||||
}
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *THttpClient) ReadByte() (c byte, err error) {
|
||||
return readByte(p.response.Body)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Write(buf []byte) (int, error) {
|
||||
n, err := p.requestBuffer.Write(buf)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *THttpClient) WriteByte(c byte) error {
|
||||
return p.requestBuffer.WriteByte(c)
|
||||
}
|
||||
|
||||
func (p *THttpClient) WriteString(s string) (n int, err error) {
|
||||
return p.requestBuffer.WriteString(s)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Flush(ctx context.Context) error {
|
||||
// Close any previous response body to avoid leaking connections.
|
||||
p.closeResponse()
|
||||
|
||||
req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
req.Header = p.header
|
||||
if ctx != nil {
|
||||
req = req.WithContext(ctx)
|
||||
}
|
||||
response, err := p.client.Do(req)
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
if response.StatusCode != http.StatusOK {
|
||||
// Close the response to avoid leaking file descriptors. closeResponse does
|
||||
// more than just call Close(), so temporarily assign it and reuse the logic.
|
||||
p.response = response
|
||||
p.closeResponse()
|
||||
|
||||
// TODO(pomack) log bad response
|
||||
return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
p.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
|
||||
len := p.response.ContentLength
|
||||
if len >= 0 {
|
||||
return uint64(len)
|
||||
}
|
||||
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the truth is, we just don't know unless framed is used
|
||||
}
|
||||
|
||||
// Deprecated: Use NewTHttpClientTransportFactory instead.
|
||||
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
|
||||
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return NewTHttpClientTransportFactoryWithOptions(url, options)
|
||||
}
|
||||
|
||||
// Deprecated: Use NewTHttpClientWithOptions instead.
|
||||
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
return NewTHttpClientWithOptions(urlstr, options)
|
||||
}
|
||||
|
||||
// Deprecated: Use NewTHttpClient instead.
|
||||
func NewTHttpPostClient(urlstr string) (TTransport, error) {
|
||||
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function
|
||||
func NewThriftHandlerFunc(processor TProcessor,
|
||||
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
return gz(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Add("Content-Type", "application/x-thrift")
|
||||
|
||||
transport := NewStreamTransport(r.Body, w)
|
||||
processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
|
||||
})
|
||||
}
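A usage sketch, for reference only (not part of the vendored file); the processor below is a placeholder for whatever service processor the caller provides:
//   var processor TProcessor = newMyServiceProcessor() // hypothetical constructor
//   factory := NewTJSONProtocolFactory()
//   http.HandleFunc("/thrift", NewThriftHandlerFunc(processor, factory, factory))
//   http.ListenAndServe(":9090", nil)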
|
||||
|
||||
// gz transparently compresses the HTTP response if the client supports it.
|
||||
func gz(handler http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
handler(w, r)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
handler(gzw, r)
|
||||
}
|
||||
}
|
||||
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
}
|
||||
|
||||
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
return w.Writer.Write(b)
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
)
|
||||
|
||||
// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
|
||||
type StreamTransport struct {
|
||||
io.Reader
|
||||
io.Writer
|
||||
isReadWriter bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
type StreamTransportFactory struct {
|
||||
Reader io.Reader
|
||||
Writer io.Writer
|
||||
isReadWriter bool
|
||||
}
|
||||
|
||||
func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*StreamTransport)
|
||||
if ok {
|
||||
if t.isReadWriter {
|
||||
return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
|
||||
}
|
||||
if t.Reader != nil && t.Writer != nil {
|
||||
return NewStreamTransport(t.Reader, t.Writer), nil
|
||||
}
|
||||
if t.Reader != nil && t.Writer == nil {
|
||||
return NewStreamTransportR(t.Reader), nil
|
||||
}
|
||||
if t.Reader == nil && t.Writer != nil {
|
||||
return NewStreamTransportW(t.Writer), nil
|
||||
}
|
||||
return &StreamTransport{}, nil
|
||||
}
|
||||
}
|
||||
if p.isReadWriter {
|
||||
return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
|
||||
}
|
||||
if p.Reader != nil && p.Writer != nil {
|
||||
return NewStreamTransport(p.Reader, p.Writer), nil
|
||||
}
|
||||
if p.Reader != nil && p.Writer == nil {
|
||||
return NewStreamTransportR(p.Reader), nil
|
||||
}
|
||||
if p.Reader == nil && p.Writer != nil {
|
||||
return NewStreamTransportW(p.Writer), nil
|
||||
}
|
||||
return &StreamTransport{}, nil
|
||||
}
|
||||
|
||||
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
|
||||
return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
|
||||
}
|
||||
|
||||
func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportR(r io.Reader) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r)}
|
||||
}
|
||||
|
||||
func NewStreamTransportW(w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
|
||||
bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
|
||||
return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
|
||||
}
|
||||
|
||||
func (p *StreamTransport) IsOpen() bool {
|
||||
return !p.closed
|
||||
}
|
||||
|
||||
// implicitly opened on creation, can't be reopened once closed
|
||||
func (p *StreamTransport) Open() error {
|
||||
if !p.closed {
|
||||
return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
|
||||
} else {
|
||||
return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
|
||||
}
|
||||
}
|
||||
|
||||
// Closes both the input and output streams.
|
||||
func (p *StreamTransport) Close() error {
|
||||
if p.closed {
|
||||
return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
|
||||
}
|
||||
p.closed = true
|
||||
closedReader := false
|
||||
if p.Reader != nil {
|
||||
c, ok := p.Reader.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
closedReader = true
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Reader = nil
|
||||
}
|
||||
if p.Writer != nil && (!closedReader || !p.isReadWriter) {
|
||||
c, ok := p.Writer.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Writer = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flushes the underlying output stream if not null.
|
||||
func (p *StreamTransport) Flush(ctx context.Context) error {
|
||||
if p.Writer == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
|
||||
}
|
||||
f, ok := p.Writer.(Flusher)
|
||||
if ok {
|
||||
err := f.Flush()
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Read(c []byte) (n int, err error) {
|
||||
n, err = p.Reader.Read(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) ReadByte() (c byte, err error) {
|
||||
f, ok := p.Reader.(io.ByteReader)
|
||||
if ok {
|
||||
c, err = f.ReadByte()
|
||||
} else {
|
||||
c, err = readByte(p.Reader)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Write(c []byte) (n int, err error) {
|
||||
n, err = p.Writer.Write(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteByte(c byte) (err error) {
|
||||
f, ok := p.Writer.(io.ByteWriter)
|
||||
if ok {
|
||||
err = f.WriteByte(c)
|
||||
} else {
|
||||
err = writeByte(p.Writer, c)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteString(s string) (n int, err error) {
|
||||
f, ok := p.Writer.(stringWriter)
|
||||
if ok {
|
||||
n, err = f.WriteString(s)
|
||||
} else {
|
||||
n, err = p.Writer.Write([]byte(s))
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the truth is, we just don't know unless framed is used
|
||||
}
|
|
@ -1,584 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
THRIFT_JSON_PROTOCOL_VERSION = 1
|
||||
)
|
||||
|
||||
// for references to _ParseContext see tsimplejson_protocol.go
|
||||
|
||||
// JSON protocol implementation for thrift.
|
||||
//
|
||||
// This is the full-featured JSON protocol: values are written together with
// their type tags and sizes (see TypeIdToString below). It should not be
// confused with TSimpleJSONProtocol, whose simpler output is aimed at
// scripting-language consumers.
|
||||
//
|
||||
type TJSONProtocol struct {
|
||||
*TSimpleJSONProtocol
|
||||
}
|
||||
|
||||
// Constructor
|
||||
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
|
||||
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
|
||||
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
|
||||
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
|
||||
return v
|
||||
}
|
||||
|
||||
// Factory
|
||||
type TJSONProtocolFactory struct{}
|
||||
|
||||
func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return NewTJSONProtocol(trans)
|
||||
}
|
||||
|
||||
func NewTJSONProtocolFactory() *TJSONProtocolFactory {
|
||||
return &TJSONProtocolFactory{}
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
|
||||
p.resetContextStack() // THRIFT-3735
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteString(name); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteByte(int8(typeId)); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(seqId); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
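// For orientation (an illustration inferred from the writes above, not a
// captured trace; "add" and 42 are made-up values): a CALL envelope begins a
// JSON list such as
//
//	[1,"add",1,42
//
// i.e. [protocol version, method name, message type, sequence id], with the
// serialized arguments following and WriteMessageEnd closing the list.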
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructBegin(name string) error {
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
if e := p.WriteI16(id); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(typeId)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
|
||||
|
||||
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(keyType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 = p.TypeIdToString(valueType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputObjectBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMapEnd() error {
|
||||
if e := p.OutputObjectEnd(); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBool(b bool) error {
|
||||
if b {
|
||||
return p.WriteI32(1)
|
||||
}
|
||||
return p.WriteI32(0)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteByte(b int8) error {
|
||||
return p.WriteI32(int32(b))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI16(v int16) error {
|
||||
return p.WriteI32(int32(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI32(v int32) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI64(v int64) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteDouble(v float64) error {
|
||||
return p.OutputF64(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteString(v string) error {
|
||||
return p.OutputString(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBinary(v []byte) error {
|
||||
// JSON can carry only strings, not arbitrary byte arrays. To transmit the
// bytes efficiently we must convert them into a valid JSON string, so we
// use base64 encoding to avoid excessive escaping/quoting.
|
||||
if e := p.OutputPreValue(); e != nil {
|
||||
return e
|
||||
}
|
||||
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
writer := base64.NewEncoder(base64.StdEncoding, p.writer)
|
||||
if _, e := writer.Write(v); e != nil {
|
||||
p.writer.Reset(p.trans) // THRIFT-3735
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if e := writer.Close(); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
return p.OutputPostValue()
|
||||
}
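// Illustrative standalone sketch (not part of this vendored package): the
// idea behind WriteBinary above -- raw bytes become a base64 string so they
// can travel inside a quoted JSON value without escaping. The byte values
// are made-up example data.
//
//	package main
//
//	import (
//		"encoding/base64"
//		"fmt"
//	)
//
//	func main() {
//		raw := []byte{0x00, 0xff, 0x10}
//		quoted := `"` + base64.StdEncoding.EncodeToString(raw) + `"`
//		fmt.Println(quoted) // prints "AP8Q"
//	}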
|
||||
|
||||
// Reading methods.
|
||||
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
p.resetContextStack() // THRIFT-3735
|
||||
if isNull, err := p.ParseListBegin(); isNull || err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
version, err := p.ReadI32()
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if version != THRIFT_JSON_PROTOCOL_VERSION {
|
||||
e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
|
||||
}
|
||||
if name, err = p.ReadString(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
bTypeId, err := p.ReadByte()
|
||||
typeId = TMessageType(bTypeId)
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if seqId, err = p.ReadI32(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMessageEnd() error {
|
||||
err := p.ParseListEnd()
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
|
||||
_, err = p.ParseObjectStart()
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
|
||||
b, _ := p.reader.Peek(1)
|
||||
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
|
||||
return "", STOP, -1, nil
|
||||
}
|
||||
fieldId, err := p.ReadI16()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
if _, err = p.ParseObjectStart(); err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
sType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
fType, err := p.StringToTypeId(sType)
|
||||
return "", fType, fieldId, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, VOID, 0, e
|
||||
}
|
||||
|
||||
// read keyType
|
||||
sKeyType, e := p.ReadString()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
keyType, e = p.StringToTypeId(sKeyType)
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
// read valueType
|
||||
sValueType, e := p.ReadString()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
valueType, e = p.StringToTypeId(sValueType)
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
// read size
|
||||
iSize, e := p.ReadI64()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
size = int(iSize)
|
||||
|
||||
_, e = p.ParseObjectStart()
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMapEnd() error {
|
||||
e := p.ParseObjectEnd()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
|
||||
return p.ParseElemListBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadListEnd() error {
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
|
||||
return p.ParseElemListBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadSetEnd() error {
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadBool() (bool, error) {
|
||||
value, err := p.ReadI32()
|
||||
return (value != 0), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI16() (int16, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int16(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI32() (int32, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int32(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI64() (int64, error) {
|
||||
v, _, err := p.ParseI64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadDouble() (float64, error) {
|
||||
v, _, err := p.ParseF64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadString() (string, error) {
|
||||
var v string
|
||||
if err := p.ParsePreValue(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
f, _ := p.reader.Peek(1)
|
||||
if len(f) > 0 && f[0] == JSON_QUOTE {
|
||||
p.reader.ReadByte()
|
||||
value, err := p.ParseStringBody()
|
||||
v = value
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
|
||||
b := make([]byte, len(JSON_NULL))
|
||||
_, err := p.reader.Read(b)
|
||||
if err != nil {
|
||||
return v, NewTProtocolException(err)
|
||||
}
|
||||
if string(b) != string(JSON_NULL) {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
} else {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
return v, p.ParsePostValue()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
|
||||
var v []byte
|
||||
if err := p.ParsePreValue(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, _ := p.reader.Peek(1)
|
||||
if len(f) > 0 && f[0] == JSON_QUOTE {
|
||||
p.reader.ReadByte()
|
||||
value, err := p.ParseBase64EncodedBody()
|
||||
v = value
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
|
||||
b := make([]byte, len(JSON_NULL))
|
||||
_, err := p.reader.Read(b)
|
||||
if err != nil {
|
||||
return v, NewTProtocolException(err)
|
||||
}
|
||||
if string(b) != string(JSON_NULL) {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
} else {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
|
||||
return v, p.ParsePostValue()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
|
||||
err = p.writer.Flush()
|
||||
if err == nil {
|
||||
err = p.trans.Flush(ctx)
|
||||
}
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Transport() TTransport {
|
||||
return p.trans
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(elemType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, 0, e
|
||||
}
|
||||
sElemType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return VOID, size, err
|
||||
}
|
||||
elemType, err = p.StringToTypeId(sElemType)
|
||||
if err != nil {
|
||||
return elemType, size, err
|
||||
}
|
||||
nSize, err2 := p.ReadI64()
|
||||
size = int(nSize)
|
||||
return elemType, size, err2
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, 0, e
|
||||
}
|
||||
sElemType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return VOID, size, err
|
||||
}
|
||||
elemType, err = p.StringToTypeId(sElemType)
|
||||
if err != nil {
|
||||
return elemType, size, err
|
||||
}
|
||||
nSize, err2 := p.ReadI64()
|
||||
size = int(nSize)
|
||||
return elemType, size, err2
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(elemType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.OutputString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.OutputI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
|
||||
switch byte(fieldType) {
|
||||
case BOOL:
|
||||
return "tf", nil
|
||||
case BYTE:
|
||||
return "i8", nil
|
||||
case I16:
|
||||
return "i16", nil
|
||||
case I32:
|
||||
return "i32", nil
|
||||
case I64:
|
||||
return "i64", nil
|
||||
case DOUBLE:
|
||||
return "dbl", nil
|
||||
case STRING:
|
||||
return "str", nil
|
||||
case STRUCT:
|
||||
return "rec", nil
|
||||
case MAP:
|
||||
return "map", nil
|
||||
case SET:
|
||||
return "set", nil
|
||||
case LIST:
|
||||
return "lst", nil
|
||||
}
|
||||
|
||||
e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
|
||||
return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
|
||||
switch fieldType {
|
||||
case "tf":
|
||||
return TType(BOOL), nil
|
||||
case "i8":
|
||||
return TType(BYTE), nil
|
||||
case "i16":
|
||||
return TType(I16), nil
|
||||
case "i32":
|
||||
return TType(I32), nil
|
||||
case "i64":
|
||||
return TType(I64), nil
|
||||
case "dbl":
|
||||
return TType(DOUBLE), nil
|
||||
case "str":
|
||||
return TType(STRING), nil
|
||||
case "rec":
|
||||
return TType(STRUCT), nil
|
||||
case "map":
|
||||
return TType(MAP), nil
|
||||
case "set":
|
||||
return TType(SET), nil
|
||||
case "lst":
|
||||
return TType(LIST), nil
|
||||
}
|
||||
|
||||
e := fmt.Errorf("Unknown type identifier: %s", fieldType)
|
||||
return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
|
@ -1,80 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
)
|
||||
|
||||
// Memory buffer-based implementation of the TTransport interface.
|
||||
type TMemoryBuffer struct {
|
||||
*bytes.Buffer
|
||||
size int
|
||||
}
|
||||
|
||||
type TMemoryBufferTransportFactory struct {
|
||||
size int
|
||||
}
|
||||
|
||||
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*TMemoryBuffer)
|
||||
if ok && t.size > 0 {
|
||||
return NewTMemoryBufferLen(t.size), nil
|
||||
}
|
||||
}
|
||||
return NewTMemoryBufferLen(p.size), nil
|
||||
}
|
||||
|
||||
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
|
||||
return &TMemoryBufferTransportFactory{size: size}
|
||||
}
|
||||
|
||||
func NewTMemoryBuffer() *TMemoryBuffer {
|
||||
return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
|
||||
}
|
||||
|
||||
func NewTMemoryBufferLen(size int) *TMemoryBuffer {
|
||||
buf := make([]byte, 0, size)
|
||||
return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
|
||||
}
|
||||
|
||||
func (p *TMemoryBuffer) IsOpen() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *TMemoryBuffer) Open() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TMemoryBuffer) Close() error {
|
||||
p.Buffer.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flushing a memory buffer is a no-op
|
||||
func (p *TMemoryBuffer) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
|
||||
return uint64(p.Buffer.Len())
|
||||
}
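// Illustrative standalone sketch (not part of this vendored package):
// TMemoryBuffer is just a bytes.Buffer behind the TTransport interface, which
// is why Flush is a no-op and RemainingBytes is simply Len().
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//	)
//
//	func main() {
//		var b bytes.Buffer   // TMemoryBuffer embeds exactly this
//		b.WriteString("abc") // the Write side of the transport
//		fmt.Println(b.Len()) // 3, what RemainingBytes() reports
//		p := make([]byte, 2)
//		b.Read(p)            // the Read side consumes buffered bytes
//		fmt.Println(b.Len()) // 1
//	}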
|
|
@ -1,31 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Message type constants in the Thrift protocol.
|
||||
type TMessageType int32
|
||||
|
||||
const (
|
||||
INVALID_TMESSAGE_TYPE TMessageType = 0
|
||||
CALL TMessageType = 1
|
||||
REPLY TMessageType = 2
|
||||
EXCEPTION TMessageType = 3
|
||||
ONEWAY TMessageType = 4
|
||||
)
|
|
@ -1,170 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
TMultiplexedProtocol is a protocol-independent concrete decorator
|
||||
that allows a Thrift client to communicate with a multiplexing Thrift server,
|
||||
by prepending the service name to the function name during function calls.
|
||||
|
||||
NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request
|
||||
from a multiplexing client.
|
||||
|
||||
This example uses a single socket transport to invoke two services:
|
||||
|
||||
socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
|
||||
transport := thrift.NewTFramedTransport(socket)
|
||||
protocol := thrift.NewTBinaryProtocolTransport(transport)
|
||||
|
||||
mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
|
||||
service := Calculator.NewCalculatorClient(mp)
|
||||
|
||||
mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
|
||||
service2 := WeatherReport.NewWeatherReportClient(mp2)
|
||||
|
||||
err := transport.Open()
|
||||
if err != nil {
|
||||
t.Fatal("Unable to open client socket", err)
|
||||
}
|
||||
|
||||
fmt.Println(service.Add(2,2))
|
||||
fmt.Println(service2.GetTemperature())
|
||||
*/
|
||||
|
||||
type TMultiplexedProtocol struct {
|
||||
TProtocol
|
||||
serviceName string
|
||||
}
|
||||
|
||||
const MULTIPLEXED_SEPARATOR = ":"
|
||||
|
||||
func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
|
||||
return &TMultiplexedProtocol{
|
||||
TProtocol: protocol,
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
if typeId == CALL || typeId == ONEWAY {
|
||||
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
|
||||
} else {
|
||||
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TMultiplexedProcessor is a TProcessor allowing
|
||||
a single TServer to provide multiple services.
|
||||
|
||||
To do so, you instantiate the processor and then register additional
|
||||
processors with it, as shown in the following example:
|
||||
|
||||
var processor = thrift.NewTMultiplexedProcessor()
|
||||
|
||||
processor.RegisterProcessor("FirstService", firstProcessor)
|
||||
|
||||
processor.RegisterProcessor(
|
||||
"Calculator",
|
||||
Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
|
||||
)
|
||||
|
||||
processor.RegisterProcessor(
|
||||
"WeatherReport",
|
||||
WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
|
||||
)
|
||||
|
||||
serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create server socket", err)
|
||||
}
|
||||
server := thrift.NewTSimpleServer2(processor, serverTransport)
|
||||
server.Serve()
|
||||
*/
|
||||
|
||||
type TMultiplexedProcessor struct {
|
||||
serviceProcessorMap map[string]TProcessor
|
||||
DefaultProcessor TProcessor
|
||||
}
|
||||
|
||||
func NewTMultiplexedProcessor() *TMultiplexedProcessor {
|
||||
return &TMultiplexedProcessor{
|
||||
serviceProcessorMap: make(map[string]TProcessor),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
|
||||
t.DefaultProcessor = processor
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
|
||||
if t.serviceProcessorMap == nil {
|
||||
t.serviceProcessorMap = make(map[string]TProcessor)
|
||||
}
|
||||
t.serviceProcessorMap[name] = processor
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
|
||||
name, typeId, seqid, err := in.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if typeId != CALL && typeId != ONEWAY {
|
||||
return false, fmt.Errorf("Unexpected message type %v", typeId)
|
||||
}
|
||||
//extract the service name
|
||||
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
|
||||
if len(v) != 2 {
|
||||
if t.DefaultProcessor != nil {
|
||||
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
|
||||
return t.DefaultProcessor.Process(ctx, smb, out)
|
||||
}
|
||||
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
|
||||
}
|
||||
actualProcessor, ok := t.serviceProcessorMap[v[0]]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
|
||||
}
|
||||
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
|
||||
return actualProcessor.Process(ctx, smb, out)
|
||||
}
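// Illustrative standalone sketch (not part of this vendored package): how a
// multiplexed message name round-trips. TMultiplexedProtocol on the client
// prepends "<service>:" to the method name; Process above splits it back.
// "Calculator" and "add" are made-up example values.
//
//	package main
//
//	import (
//		"fmt"
//		"strings"
//	)
//
//	func main() {
//		const sep = ":"                       // mirrors MULTIPLEXED_SEPARATOR
//		name := "Calculator" + sep + "add"    // written by the client for CALL/ONEWAY
//		parts := strings.SplitN(name, sep, 2) // recovered by the server
//		fmt.Println(parts[0], parts[1])       // Calculator add
//	}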
|
||||
|
||||
// Protocol that uses the stored message for ReadMessageBegin
|
||||
type storedMessageProtocol struct {
|
||||
TProtocol
|
||||
name string
|
||||
typeId TMessageType
|
||||
seqid int32
|
||||
}
|
||||
|
||||
func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
|
||||
return &storedMessageProtocol{protocol, name, typeId, seqid}
|
||||
}
|
||||
|
||||
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
|
||||
return s.name, s.typeId, s.seqid, nil
|
||||
}
|
|
@ -1,164 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Numeric interface {
|
||||
Int64() int64
|
||||
Int32() int32
|
||||
Int16() int16
|
||||
Byte() byte
|
||||
Int() int
|
||||
Float64() float64
|
||||
Float32() float32
|
||||
String() string
|
||||
isNull() bool
|
||||
}
|
||||
|
||||
type numeric struct {
|
||||
iValue int64
|
||||
dValue float64
|
||||
sValue string
|
||||
isNil bool
|
||||
}
|
||||
|
||||
var (
|
||||
INFINITY Numeric
|
||||
NEGATIVE_INFINITY Numeric
|
||||
NAN Numeric
|
||||
ZERO Numeric
|
||||
NUMERIC_NULL Numeric
|
||||
)
|
||||
|
||||
func NewNumericFromDouble(dValue float64) Numeric {
|
||||
if math.IsInf(dValue, 1) {
|
||||
return INFINITY
|
||||
}
|
||||
if math.IsInf(dValue, -1) {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if math.IsNaN(dValue) {
|
||||
return NAN
|
||||
}
|
||||
iValue := int64(dValue)
|
||||
sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
|
||||
isNil := false
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromI64(iValue int64) Numeric {
|
||||
dValue := float64(iValue)
|
||||
sValue := strconv.FormatInt(iValue, 10) // decimal representation; string(iValue) would produce a single rune
|
||||
isNil := false
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromI32(iValue int32) Numeric {
|
||||
dValue := float64(iValue)
|
||||
sValue := strconv.FormatInt(int64(iValue), 10) // decimal representation; string(iValue) would produce a single rune
|
||||
isNil := false
|
||||
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromString(sValue string) Numeric {
|
||||
if sValue == INFINITY.String() {
|
||||
return INFINITY
|
||||
}
|
||||
if sValue == NEGATIVE_INFINITY.String() {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if sValue == NAN.String() {
|
||||
return NAN
|
||||
}
|
||||
iValue, _ := strconv.ParseInt(sValue, 10, 64)
|
||||
dValue, _ := strconv.ParseFloat(sValue, 64)
|
||||
isNil := len(sValue) == 0
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
|
||||
if isNull {
|
||||
return NewNullNumeric()
|
||||
}
|
||||
if sValue == JSON_INFINITY {
|
||||
return INFINITY
|
||||
}
|
||||
if sValue == JSON_NEGATIVE_INFINITY {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if sValue == JSON_NAN {
|
||||
return NAN
|
||||
}
|
||||
iValue, _ := strconv.ParseInt(sValue, 10, 64)
|
||||
dValue, _ := strconv.ParseFloat(sValue, 64)
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
|
||||
}
|
||||
|
||||
func NewNullNumeric() Numeric {
|
||||
return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
|
||||
}
|
||||
|
||||
func (p *numeric) Int64() int64 {
|
||||
return p.iValue
|
||||
}
|
||||
|
||||
func (p *numeric) Int32() int32 {
|
||||
return int32(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Int16() int16 {
|
||||
return int16(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Byte() byte {
|
||||
return byte(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Int() int {
|
||||
return int(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Float64() float64 {
|
||||
return p.dValue
|
||||
}
|
||||
|
||||
func (p *numeric) Float32() float32 {
|
||||
return float32(p.dValue)
|
||||
}
|
||||
|
||||
func (p *numeric) String() string {
|
||||
return p.sValue
|
||||
}
|
||||
|
||||
func (p *numeric) isNull() bool {
|
||||
return p.isNil
|
||||
}
|
||||
|
||||
func init() {
|
||||
INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
|
||||
NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
|
||||
NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
|
||||
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
|
||||
NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// This file is home to helpers that convert from various base types to
|
||||
// respective pointer types. This is necessary because Go does not permit
|
||||
// references to constants, nor can a pointer type to base type be allocated
|
||||
// and initialized in a single expression.
|
||||
//
|
||||
// E.g., this is not allowed:
|
||||
//
|
||||
// var ip *int = &5
|
||||
//
|
||||
// But this *is* allowed:
|
||||
//
|
||||
// func IntPtr(i int) *int { return &i }
|
||||
// var ip *int = IntPtr(5)
|
||||
//
|
||||
// Since pointers to base types are commonplace as [optional] fields in
|
||||
// exported thrift structs, we factor such helpers here.
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
func Float32Ptr(v float32) *float32 { return &v }
|
||||
func Float64Ptr(v float64) *float64 { return &v }
|
||||
func IntPtr(v int) *int { return &v }
|
||||
func Int32Ptr(v int32) *int32 { return &v }
|
||||
func Int64Ptr(v int64) *int64 { return &v }
|
||||
func StringPtr(v string) *string { return &v }
|
||||
func Uint32Ptr(v uint32) *uint32 { return &v }
|
||||
func Uint64Ptr(v uint64) *uint64 { return &v }
|
||||
func BoolPtr(v bool) *bool { return &v }
|
||||
func ByteSlicePtr(v []byte) *[]byte { return &v }
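// Illustrative standalone sketch (not part of this vendored package): why
// these helpers exist. Optional fields on generated structs are pointers, and
// Go has no &5 literal, so a helper call is the idiomatic way to fill them.
//
//	package main
//
//	import "fmt"
//
//	func IntPtr(i int) *int { return &i } // local copy of the helper above
//
//	func main() {
//		ip := IntPtr(5) // the effect &5 would have, were it legal Go
//		fmt.Println(*ip)
//	}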
|
|
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import "context"
|
||||
|
||||
// A processor is a generic object which operates upon an input stream and
|
||||
// writes to some output stream.
|
||||
type TProcessor interface {
|
||||
Process(ctx context.Context, in, out TProtocol) (bool, TException)
|
||||
}
|
||||
|
||||
type TProcessorFunction interface {
|
||||
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
|
||||
}
|
||||
|
||||
// The default processor factory just returns a singleton
|
||||
// instance.
|
||||
type TProcessorFactory interface {
|
||||
GetProcessor(trans TTransport) TProcessor
|
||||
}
|
||||
|
||||
type tProcessorFactory struct {
|
||||
processor TProcessor
|
||||
}
|
||||
|
||||
func NewTProcessorFactory(p TProcessor) TProcessorFactory {
|
||||
return &tProcessorFactory{processor: p}
|
||||
}
|
||||
|
||||
func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
|
||||
return p.processor
|
||||
}
|
||||
|
||||
// The default processor function factory just returns a singleton
// instance.
|
||||
type TProcessorFunctionFactory interface {
|
||||
GetProcessorFunction(trans TTransport) TProcessorFunction
|
||||
}
|
||||
|
||||
type tProcessorFunctionFactory struct {
|
||||
processor TProcessorFunction
|
||||
}
|
||||
|
||||
func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
|
||||
return &tProcessorFunctionFactory{processor: p}
|
||||
}
|
||||
|
||||
func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
|
||||
return p.processor
|
||||
}
|
|
@ -1,179 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
VERSION_MASK = 0xffff0000
|
||||
VERSION_1 = 0x80010000
|
||||
)
|
||||
|
||||
type TProtocol interface {
|
||||
WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
|
||||
WriteMessageEnd() error
|
||||
WriteStructBegin(name string) error
|
||||
WriteStructEnd() error
|
||||
WriteFieldBegin(name string, typeId TType, id int16) error
|
||||
WriteFieldEnd() error
|
||||
WriteFieldStop() error
|
||||
WriteMapBegin(keyType TType, valueType TType, size int) error
|
||||
WriteMapEnd() error
|
||||
WriteListBegin(elemType TType, size int) error
|
||||
WriteListEnd() error
|
||||
WriteSetBegin(elemType TType, size int) error
|
||||
WriteSetEnd() error
|
||||
WriteBool(value bool) error
|
||||
WriteByte(value int8) error
|
||||
WriteI16(value int16) error
|
||||
WriteI32(value int32) error
|
||||
WriteI64(value int64) error
|
||||
WriteDouble(value float64) error
|
||||
WriteString(value string) error
|
||||
WriteBinary(value []byte) error
|
||||
|
||||
ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
|
||||
ReadMessageEnd() error
|
||||
ReadStructBegin() (name string, err error)
|
||||
ReadStructEnd() error
|
||||
ReadFieldBegin() (name string, typeId TType, id int16, err error)
|
||||
ReadFieldEnd() error
|
||||
ReadMapBegin() (keyType TType, valueType TType, size int, err error)
|
||||
ReadMapEnd() error
|
||||
ReadListBegin() (elemType TType, size int, err error)
|
||||
ReadListEnd() error
|
||||
ReadSetBegin() (elemType TType, size int, err error)
|
||||
ReadSetEnd() error
|
||||
ReadBool() (value bool, err error)
|
||||
ReadByte() (value int8, err error)
|
||||
ReadI16() (value int16, err error)
|
||||
ReadI32() (value int32, err error)
|
||||
ReadI64() (value int64, err error)
|
||||
ReadDouble() (value float64, err error)
|
||||
ReadString() (value string, err error)
|
||||
ReadBinary() (value []byte, err error)
|
||||
|
||||
Skip(fieldType TType) (err error)
|
||||
Flush(ctx context.Context) (err error)
|
||||
|
||||
Transport() TTransport
|
||||
}
|
||||
|
||||
// The maximum recursive depth the skip() function will traverse
|
||||
const DEFAULT_RECURSION_DEPTH = 64
|
||||
|
||||
// Skips over the next data element from the provided input TProtocol object.
|
||||
func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
|
||||
return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
|
||||
}
|
||||
|
||||
// Skips over the next data element from the provided input TProtocol object.
|
||||
func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
|
||||
|
||||
if maxDepth <= 0 {
|
||||
return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
|
||||
}
|
||||
|
||||
switch fieldType {
|
||||
case STOP:
|
||||
return
|
||||
case BOOL:
|
||||
_, err = self.ReadBool()
|
||||
return
|
||||
case BYTE:
|
||||
_, err = self.ReadByte()
|
||||
return
|
||||
case I16:
|
||||
_, err = self.ReadI16()
|
||||
return
|
||||
case I32:
|
||||
_, err = self.ReadI32()
|
||||
return
|
||||
case I64:
|
||||
_, err = self.ReadI64()
|
||||
return
|
||||
case DOUBLE:
|
||||
_, err = self.ReadDouble()
|
||||
return
|
||||
case STRING:
|
||||
_, err = self.ReadString()
|
||||
return
|
||||
case STRUCT:
|
||||
if _, err = self.ReadStructBegin(); err != nil {
|
||||
return err
|
||||
}
|
||||
for {
|
||||
_, typeId, _, _ := self.ReadFieldBegin()
|
||||
if typeId == STOP {
|
||||
break
|
||||
}
|
||||
err := Skip(self, typeId, maxDepth-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.ReadFieldEnd()
|
||||
}
|
||||
return self.ReadStructEnd()
|
||||
case MAP:
|
||||
keyType, valueType, size, err := self.ReadMapBegin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < size; i++ {
|
||||
err := Skip(self, keyType, maxDepth-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
self.Skip(valueType)
|
||||
}
|
||||
return self.ReadMapEnd()
|
||||
case SET:
|
||||
elemType, size, err := self.ReadSetBegin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < size; i++ {
|
||||
err := Skip(self, elemType, maxDepth-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return self.ReadSetEnd()
|
||||
case LIST:
|
||||
elemType, size, err := self.ReadListBegin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := 0; i < size; i++ {
|
||||
err := Skip(self, elemType, maxDepth-1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return self.ReadListEnd()
|
||||
default:
|
||||
return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,77 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
)
|
||||
|
||||
// Thrift Protocol exception
|
||||
type TProtocolException interface {
|
||||
TException
|
||||
TypeId() int
|
||||
}
|
||||
|
||||
const (
|
||||
UNKNOWN_PROTOCOL_EXCEPTION = 0
|
||||
INVALID_DATA = 1
|
||||
NEGATIVE_SIZE = 2
|
||||
SIZE_LIMIT = 3
|
||||
BAD_VERSION = 4
|
||||
NOT_IMPLEMENTED = 5
|
||||
DEPTH_LIMIT = 6
|
||||
)
|
||||
|
||||
type tProtocolException struct {
|
||||
typeId int
|
||||
message string
|
||||
}
|
||||
|
||||
func (p *tProtocolException) TypeId() int {
|
||||
return p.typeId
|
||||
}
|
||||
|
||||
func (p *tProtocolException) String() string {
|
||||
return p.message
|
||||
}
|
||||
|
||||
func (p *tProtocolException) Error() string {
|
||||
return p.message
|
||||
}
|
||||
|
||||
func NewTProtocolException(err error) TProtocolException {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if e, ok := err.(TProtocolException); ok {
|
||||
return e
|
||||
}
|
||||
if _, ok := err.(base64.CorruptInputError); ok {
|
||||
return &tProtocolException{INVALID_DATA, err.Error()}
|
||||
}
|
||||
return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
|
||||
}
|
||||
|
||||
func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return &tProtocolException{errType, err.Error()}
|
||||
}
|
|
@ -1,25 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Factory interface for constructing protocol instances.
|
||||
type TProtocolFactory interface {
|
||||
GetProtocol(trans TTransport) TProtocol
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import "io"
|
||||
|
||||
type RichTransport struct {
|
||||
TTransport
|
||||
}
|
||||
|
||||
// Wraps Transport to provide TRichTransport interface
|
||||
func NewTRichTransport(trans TTransport) *RichTransport {
|
||||
return &RichTransport{trans}
|
||||
}
|
||||
|
||||
func (r *RichTransport) ReadByte() (c byte, err error) {
|
||||
return readByte(r.TTransport)
|
||||
}
|
||||
|
||||
func (r *RichTransport) WriteByte(c byte) error {
|
||||
return writeByte(r.TTransport, c)
|
||||
}
|
||||
|
||||
func (r *RichTransport) WriteString(s string) (n int, err error) {
|
||||
return r.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
|
||||
return r.TTransport.RemainingBytes()
|
||||
}
|
||||
|
||||
func readByte(r io.Reader) (c byte, err error) {
|
||||
v := [1]byte{0}
|
||||
n, err := r.Read(v[0:1])
|
||||
if n > 0 && (err == nil || err == io.EOF) {
|
||||
return v[0], nil
|
||||
}
|
||||
if n > 0 && err != nil {
|
||||
return v[0], err
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return v[0], nil
|
||||
}
|
||||
|
||||
func writeByte(w io.Writer, c byte) error {
|
||||
v := [1]byte{c}
|
||||
_, err := w.Write(v[0:1])
|
||||
return err
|
||||
}
|
|
@ -1,79 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type TSerializer struct {
|
||||
Transport *TMemoryBuffer
|
||||
Protocol TProtocol
|
||||
}
|
||||
|
||||
type TStruct interface {
|
||||
Write(p TProtocol) error
|
||||
Read(p TProtocol) error
|
||||
}
|
||||
|
||||
func NewTSerializer() *TSerializer {
|
||||
transport := NewTMemoryBufferLen(1024)
|
||||
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
|
||||
|
||||
return &TSerializer{
|
||||
transport,
|
||||
protocol}
|
||||
}
|
||||
|
||||
func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
|
||||
t.Transport.Reset()
|
||||
|
||||
if err = msg.Write(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Protocol.Flush(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
if err = t.Transport.Flush(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return t.Transport.String(), nil
|
||||
}
|
||||
|
||||
func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
|
||||
t.Transport.Reset()
|
||||
|
||||
if err = msg.Write(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Protocol.Flush(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Transport.Flush(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b = append(b, t.Transport.Bytes()...)
|
||||
return
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
type TServer interface {
|
||||
ProcessorFactory() TProcessorFactory
|
||||
ServerTransport() TServerTransport
|
||||
InputTransportFactory() TTransportFactory
|
||||
OutputTransportFactory() TTransportFactory
|
||||
InputProtocolFactory() TProtocolFactory
|
||||
OutputProtocolFactory() TProtocolFactory
|
||||
|
||||
// Starts the server
|
||||
Serve() error
|
||||
// Stops the server. This is optional on a per-implementation basis. Not
|
||||
// all servers are required to be cleanly stoppable.
|
||||
Stop() error
|
||||
}
|
|
@ -1,134 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TServerSocket struct {
|
||||
listener net.Listener
|
||||
addr net.Addr
|
||||
clientTimeout time.Duration
|
||||
|
||||
// Protects the interrupted value to make it thread safe.
|
||||
mu sync.RWMutex
|
||||
interrupted bool
|
||||
}
|
||||
|
||||
func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
|
||||
return NewTServerSocketTimeout(listenAddr, 0)
|
||||
}
|
||||
|
||||
func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
|
||||
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
|
||||
}
|
||||
|
||||
// Creates a TServerSocket from a net.Addr
|
||||
func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
|
||||
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Listen() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.IsListening() {
|
||||
return nil
|
||||
}
|
||||
l, err := net.Listen(p.addr.Network(), p.addr.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.listener = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Accept() (TTransport, error) {
|
||||
p.mu.RLock()
|
||||
interrupted := p.interrupted
|
||||
p.mu.RUnlock()
|
||||
|
||||
if interrupted {
|
||||
return nil, errTransportInterrupted
|
||||
}
|
||||
|
||||
listener := p.listener
|
||||
if listener == nil {
|
||||
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
|
||||
}
|
||||
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return nil, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
|
||||
}
|
||||
|
||||
// Checks whether the socket is listening.
|
||||
func (p *TServerSocket) IsListening() bool {
|
||||
return p.listener != nil
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TServerSocket) Open() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.IsListening() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
|
||||
}
|
||||
if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
|
||||
return err
|
||||
} else {
|
||||
p.listener = l
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Addr() net.Addr {
|
||||
if p.listener != nil {
|
||||
return p.listener.Addr()
|
||||
}
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Close() error {
|
||||
defer func() {
|
||||
p.listener = nil
|
||||
}()
|
||||
if p.IsListening() {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Interrupt() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
p.interrupted = true
|
||||
p.Close()
|
||||
|
||||
return nil
|
||||
}
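For context, a minimal usage sketch (not taken from this diff) of the TServerSocket API deleted above, based only on the signatures shown; the import path is assumed to be the upstream Apache Thrift Go package rather than the removed vendor prefix.

package main

import (
	"log"

	// Assumed upstream import path; the vendored copy removed here lived under a different prefix.
	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// NewTServerSocket only resolves the TCP address; Listen binds the net.Listener.
	serverSocket, err := thrift.NewTServerSocket("127.0.0.1:9090")
	if err != nil {
		log.Fatal(err)
	}
	if err := serverSocket.Listen(); err != nil {
		log.Fatal(err)
	}
	defer serverSocket.Close()

	// Accept blocks until a client connects and returns a TTransport
	// wrapping the accepted connection.
	client, err := serverSocket.Accept()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	log.Println("accepted one thrift client connection")
}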
@@ -1,34 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

// Server transport. Object which provides client transports.
type TServerTransport interface {
	Listen() error
	Accept() (TTransport, error)
	Close() error

	// Optional method implementation. This signals to the server transport
	// that it should break out of any accept() or listen() that it is currently
	// blocked on. This method, if implemented, MUST be thread safe, as it may
	// be called from a different thread context than the other TServerTransport
	// methods.
	Interrupt() error
}
File diff suppressed because it is too large
@@ -1,227 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
/*
|
||||
* This is not a typical TSimpleServer as it is not blocked after accept a socket.
|
||||
* It is more like a TThreadedServer that can handle different connections in different goroutines.
|
||||
* This will work if golang user implements a conn-pool like thing in client side.
|
||||
*/
|
||||
type TSimpleServer struct {
|
||||
closed int32
|
||||
wg sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
|
||||
processorFactory TProcessorFactory
|
||||
serverTransport TServerTransport
|
||||
inputTransportFactory TTransportFactory
|
||||
outputTransportFactory TTransportFactory
|
||||
inputProtocolFactory TProtocolFactory
|
||||
outputProtocolFactory TProtocolFactory
|
||||
}
|
||||
|
||||
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
|
||||
return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
|
||||
}
|
||||
|
||||
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
|
||||
serverTransport,
|
||||
transportFactory,
|
||||
protocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
|
||||
serverTransport,
|
||||
inputTransportFactory,
|
||||
outputTransportFactory,
|
||||
inputProtocolFactory,
|
||||
outputProtocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(processorFactory,
|
||||
serverTransport,
|
||||
NewTTransportFactory(),
|
||||
NewTTransportFactory(),
|
||||
NewTBinaryProtocolFactoryDefault(),
|
||||
NewTBinaryProtocolFactoryDefault(),
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(processorFactory,
|
||||
serverTransport,
|
||||
transportFactory,
|
||||
transportFactory,
|
||||
protocolFactory,
|
||||
protocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return &TSimpleServer{
|
||||
processorFactory: processorFactory,
|
||||
serverTransport: serverTransport,
|
||||
inputTransportFactory: inputTransportFactory,
|
||||
outputTransportFactory: outputTransportFactory,
|
||||
inputProtocolFactory: inputProtocolFactory,
|
||||
outputProtocolFactory: outputProtocolFactory,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
|
||||
return p.processorFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) ServerTransport() TServerTransport {
|
||||
return p.serverTransport
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
|
||||
return p.inputTransportFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
|
||||
return p.outputTransportFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
|
||||
return p.inputProtocolFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
|
||||
return p.outputProtocolFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Listen() error {
|
||||
return p.serverTransport.Listen()
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) innerAccept() (int32, error) {
|
||||
client, err := p.serverTransport.Accept()
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
closed := atomic.LoadInt32(&p.closed)
|
||||
if closed != 0 {
|
||||
return closed, nil
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if client != nil {
|
||||
p.wg.Add(1)
|
||||
go func() {
|
||||
defer p.wg.Done()
|
||||
if err := p.processRequests(client); err != nil {
|
||||
log.Println("error processing request:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) AcceptLoop() error {
|
||||
for {
|
||||
closed, err := p.innerAccept()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if closed != 0 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Serve() error {
|
||||
err := p.Listen()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.AcceptLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Stop() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if atomic.LoadInt32(&p.closed) != 0 {
|
||||
return nil
|
||||
}
|
||||
atomic.StoreInt32(&p.closed, 1)
|
||||
p.serverTransport.Interrupt()
|
||||
p.wg.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) processRequests(client TTransport) error {
|
||||
processor := p.processorFactory.GetProcessor(client)
|
||||
inputTransport, err := p.inputTransportFactory.GetTransport(client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outputTransport, err := p.outputTransportFactory.GetTransport(client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
|
||||
outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
log.Printf("panic in processor: %s: %s", e, debug.Stack())
|
||||
}
|
||||
}()
|
||||
|
||||
if inputTransport != nil {
|
||||
defer inputTransport.Close()
|
||||
}
|
||||
if outputTransport != nil {
|
||||
defer outputTransport.Close()
|
||||
}
|
||||
for {
|
||||
if atomic.LoadInt32(&p.closed) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol)
|
||||
if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {
|
||||
continue
|
||||
}
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
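A sketch (illustrative only) of wiring the deleted TSimpleServer together from the constructors shown above; the processor is assumed to come from thrift-generated code and is merely stubbed here, and the import path is again assumed to be the upstream package.

package main

import (
	"log"

	// Assumed upstream import path for the Apache Thrift Go library.
	"github.com/apache/thrift/lib/go/thrift"
)

// runServer wires together the pieces shown in the deleted file.
// processor is whatever thrift-generated TProcessor the application provides.
func runServer(processor thrift.TProcessor, addr string) error {
	serverTransport, err := thrift.NewTServerSocket(addr)
	if err != nil {
		return err
	}
	transportFactory := thrift.NewTTransportFactory()
	protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()

	server := thrift.NewTSimpleServer4(processor, serverTransport, transportFactory, protocolFactory)

	// Serve calls Listen and then AcceptLoop, handling each accepted
	// connection in its own goroutine (see processRequests above).
	return server.Serve()
}

func main() {
	// Assumption: a real program passes a processor from thrift-generated code;
	// this stub only shows the wiring and will not serve requests.
	var processor thrift.TProcessor
	if err := runServer(processor, "127.0.0.1:9090"); err != nil {
		log.Fatal(err)
	}
}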
@@ -1,166 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSocket struct {
|
||||
conn net.Conn
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// NewTSocket creates a net.Conn-backed TTransport, given a host and port
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSocket("localhost:9090")
|
||||
func NewTSocket(hostPort string) (*TSocket, error) {
|
||||
return NewTSocketTimeout(hostPort, 0)
|
||||
}
|
||||
|
||||
// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a timeout as a time.Duration
|
||||
func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
|
||||
//conn, err := net.DialTimeout(network, address, timeout)
|
||||
addr, err := net.ResolveTCPAddr("tcp", hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTSocketFromAddrTimeout(addr, timeout), nil
|
||||
}
|
||||
|
||||
// Creates a TSocket from a net.Addr
|
||||
func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
|
||||
return &TSocket{addr: addr, timeout: timeout}
|
||||
}
|
||||
|
||||
// Creates a TSocket from an existing net.Conn
|
||||
func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
|
||||
return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSocket) Open() error {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
var err error
|
||||
if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//Returns the remote address of the socket.
|
||||
func (p *TSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSocket) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
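A minimal client-side sketch of the deleted TSocket, using only the methods visible above; the import path is assumed as before.

package main

import (
	"log"
	"time"

	// Assumed upstream import path for the Apache Thrift Go library.
	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// NewTSocketTimeout only resolves the address; Open dials the connection
	// using the configured timeout (see Open above).
	sock, err := thrift.NewTSocketTimeout("localhost:9090", 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Open(); err != nil {
		log.Fatal(err)
	}
	defer sock.Close()

	// Raw writes go through pushDeadline, which applies the timeout as a
	// write deadline on the underlying net.Conn.
	if _, err := sock.Write([]byte("ping")); err != nil {
		log.Fatal(err)
	}
}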
@@ -1,112 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSSLServerSocket struct {
|
||||
listener net.Listener
|
||||
addr net.Addr
|
||||
clientTimeout time.Duration
|
||||
interrupted bool
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
|
||||
return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
|
||||
}
|
||||
|
||||
func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
|
||||
if cfg.MinVersion == 0 {
|
||||
cfg.MinVersion = tls.VersionTLS10
|
||||
}
|
||||
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Listen() error {
|
||||
if p.IsListening() {
|
||||
return nil
|
||||
}
|
||||
l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.listener = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Accept() (TTransport, error) {
|
||||
if p.interrupted {
|
||||
return nil, errTransportInterrupted
|
||||
}
|
||||
if p.listener == nil {
|
||||
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
|
||||
}
|
||||
conn, err := p.listener.Accept()
|
||||
if err != nil {
|
||||
return nil, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
|
||||
}
|
||||
|
||||
// Checks whether the socket is listening.
|
||||
func (p *TSSLServerSocket) IsListening() bool {
|
||||
return p.listener != nil
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLServerSocket) Open() error {
|
||||
if p.IsListening() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
|
||||
}
|
||||
if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return err
|
||||
} else {
|
||||
p.listener = l
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Close() error {
|
||||
defer func() {
|
||||
p.listener = nil
|
||||
}()
|
||||
if p.IsListening() {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Interrupt() error {
|
||||
p.interrupted = true
|
||||
return nil
|
||||
}
@@ -1,176 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSSLSocket struct {
|
||||
conn net.Conn
|
||||
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
|
||||
// only valid if addr is nil.
|
||||
hostPort string
|
||||
// addr is nil when hostPort is not "", and is only used when the
|
||||
// TSSLSocket is constructed from a net.Addr.
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
|
||||
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
|
||||
return NewTSSLSocketTimeout(hostPort, cfg, 0)
|
||||
}
|
||||
|
||||
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a tls Configuration and a timeout as a time.Duration
|
||||
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
|
||||
if cfg.MinVersion == 0 {
|
||||
cfg.MinVersion = tls.VersionTLS10
|
||||
}
|
||||
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from a net.Addr
|
||||
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from an existing net.Conn
|
||||
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLSocket) Open() error {
|
||||
var err error
|
||||
// If we have a hostname, we need to pass the hostname to tls.Dial for
|
||||
// certificate hostname checks.
|
||||
if p.hostPort != "" {
|
||||
if p.conn, err = tls.DialWithDialer(&net.Dialer{
|
||||
Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
} else {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
if p.conn, err = tls.DialWithDialer(&net.Dialer{
|
||||
Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSSLSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSSLSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSSLSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
@@ -1,70 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var errTransportInterrupted = errors.New("Transport Interrupted")
|
||||
|
||||
type Flusher interface {
|
||||
Flush() (err error)
|
||||
}
|
||||
|
||||
type ContextFlusher interface {
|
||||
Flush(ctx context.Context) (err error)
|
||||
}
|
||||
|
||||
type ReadSizeProvider interface {
|
||||
RemainingBytes() (num_bytes uint64)
|
||||
}
|
||||
|
||||
// Encapsulates the I/O layer
|
||||
type TTransport interface {
|
||||
io.ReadWriteCloser
|
||||
ContextFlusher
|
||||
ReadSizeProvider
|
||||
|
||||
// Opens the transport for communication
|
||||
Open() error
|
||||
|
||||
// Returns true if the transport is open
|
||||
IsOpen() bool
|
||||
}
|
||||
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// This is "enchanced" transport with extra capabilities. You need to use one of these
|
||||
// to construct protocol.
|
||||
// Notably, TSocket does not implement this interface, and it is always a mistake to use
|
||||
// TSocket directly in protocol.
|
||||
type TRichTransport interface {
|
||||
io.ReadWriter
|
||||
io.ByteReader
|
||||
io.ByteWriter
|
||||
stringWriter
|
||||
ContextFlusher
|
||||
ReadSizeProvider
|
||||
}
@@ -1,90 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
type timeoutable interface {
|
||||
Timeout() bool
|
||||
}
|
||||
|
||||
// Thrift Transport exception
|
||||
type TTransportException interface {
|
||||
TException
|
||||
TypeId() int
|
||||
Err() error
|
||||
}
|
||||
|
||||
const (
|
||||
UNKNOWN_TRANSPORT_EXCEPTION = 0
|
||||
NOT_OPEN = 1
|
||||
ALREADY_OPEN = 2
|
||||
TIMED_OUT = 3
|
||||
END_OF_FILE = 4
|
||||
)
|
||||
|
||||
type tTransportException struct {
|
||||
typeId int
|
||||
err error
|
||||
}
|
||||
|
||||
func (p *tTransportException) TypeId() int {
|
||||
return p.typeId
|
||||
}
|
||||
|
||||
func (p *tTransportException) Error() string {
|
||||
return p.err.Error()
|
||||
}
|
||||
|
||||
func (p *tTransportException) Err() error {
|
||||
return p.err
|
||||
}
|
||||
|
||||
func NewTTransportException(t int, e string) TTransportException {
|
||||
return &tTransportException{typeId: t, err: errors.New(e)}
|
||||
}
|
||||
|
||||
func NewTTransportExceptionFromError(e error) TTransportException {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t, ok := e.(TTransportException); ok {
|
||||
return t
|
||||
}
|
||||
|
||||
switch v := e.(type) {
|
||||
case TTransportException:
|
||||
return v
|
||||
case timeoutable:
|
||||
if v.Timeout() {
|
||||
return &tTransportException{typeId: TIMED_OUT, err: e}
|
||||
}
|
||||
}
|
||||
|
||||
if e == io.EOF {
|
||||
return &tTransportException{typeId: END_OF_FILE, err: e}
|
||||
}
|
||||
|
||||
return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
|
||||
}
@@ -1,39 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Factory class used to create wrapped instance of Transports.
|
||||
// This is used primarily in servers, which get Transports from
|
||||
// a ServerTransport and then may want to mutate them (i.e. create
|
||||
// a BufferedTransport from the underlying base transport)
|
||||
type TTransportFactory interface {
|
||||
GetTransport(trans TTransport) (TTransport, error)
|
||||
}
|
||||
|
||||
type tTransportFactory struct{}
|
||||
|
||||
// Return a wrapped instance of the base Transport.
|
||||
func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||
return trans, nil
|
||||
}
|
||||
|
||||
func NewTTransportFactory() TTransportFactory {
|
||||
return &tTransportFactory{}
|
||||
}
@@ -1,69 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Type constants in the Thrift protocol
|
||||
type TType byte
|
||||
|
||||
const (
|
||||
STOP = 0
|
||||
VOID = 1
|
||||
BOOL = 2
|
||||
BYTE = 3
|
||||
I08 = 3
|
||||
DOUBLE = 4
|
||||
I16 = 6
|
||||
I32 = 8
|
||||
I64 = 10
|
||||
STRING = 11
|
||||
UTF7 = 11
|
||||
STRUCT = 12
|
||||
MAP = 13
|
||||
SET = 14
|
||||
LIST = 15
|
||||
UTF8 = 16
|
||||
UTF16 = 17
|
||||
//BINARY = 18 wrong and unusued
|
||||
)
|
||||
|
||||
var typeNames = map[int]string{
|
||||
STOP: "STOP",
|
||||
VOID: "VOID",
|
||||
BOOL: "BOOL",
|
||||
BYTE: "BYTE",
|
||||
DOUBLE: "DOUBLE",
|
||||
I16: "I16",
|
||||
I32: "I32",
|
||||
I64: "I64",
|
||||
STRING: "STRING",
|
||||
STRUCT: "STRUCT",
|
||||
MAP: "MAP",
|
||||
SET: "SET",
|
||||
LIST: "LIST",
|
||||
UTF8: "UTF8",
|
||||
UTF16: "UTF16",
|
||||
}
|
||||
|
||||
func (p TType) String() string {
|
||||
if s, ok := typeNames[int(p)]; ok {
|
||||
return s
|
||||
}
|
||||
return "Unknown"
|
||||
}
@@ -1,132 +0,0 @@
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"context"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// TZlibTransportFactory is a factory for TZlibTransport instances
|
||||
type TZlibTransportFactory struct {
|
||||
level int
|
||||
factory TTransportFactory
|
||||
}
|
||||
|
||||
// TZlibTransport is a TTransport implementation that makes use of zlib compression.
|
||||
type TZlibTransport struct {
|
||||
reader io.ReadCloser
|
||||
transport TTransport
|
||||
writer *zlib.Writer
|
||||
}
|
||||
|
||||
// GetTransport constructs a new instance of NewTZlibTransport
|
||||
func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||
if p.factory != nil {
|
||||
// wrap other factory
|
||||
var err error
|
||||
trans, err = p.factory.GetTransport(trans)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return NewTZlibTransport(trans, p.level)
|
||||
}
|
||||
|
||||
// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
|
||||
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
|
||||
return &TZlibTransportFactory{level: level, factory: nil}
|
||||
}
|
||||
|
||||
// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
|
||||
// as a wrapper over existing transport factory
|
||||
func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
|
||||
return &TZlibTransportFactory{level: level, factory: factory}
|
||||
}
|
||||
|
||||
// NewTZlibTransport constructs a new instance of TZlibTransport
|
||||
func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
|
||||
w, err := zlib.NewWriterLevel(trans, level)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TZlibTransport{
|
||||
writer: w,
|
||||
transport: trans,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the reader and writer (flushing any unwritten data) and closes
|
||||
// the underlying transport.
|
||||
func (z *TZlibTransport) Close() error {
|
||||
if z.reader != nil {
|
||||
if err := z.reader.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := z.writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Close()
|
||||
}
|
||||
|
||||
// Flush flushes the writer and its underlying transport.
|
||||
func (z *TZlibTransport) Flush(ctx context.Context) error {
|
||||
if err := z.writer.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Flush(ctx)
|
||||
}
|
||||
|
||||
// IsOpen returns true if the transport is open
|
||||
func (z *TZlibTransport) IsOpen() bool {
|
||||
return z.transport.IsOpen()
|
||||
}
|
||||
|
||||
// Open opens the transport for communication
|
||||
func (z *TZlibTransport) Open() error {
|
||||
return z.transport.Open()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Read(p []byte) (int, error) {
|
||||
if z.reader == nil {
|
||||
r, err := zlib.NewReader(z.transport)
|
||||
if err != nil {
|
||||
return 0, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
z.reader = r
|
||||
}
|
||||
|
||||
return z.reader.Read(p)
|
||||
}
|
||||
|
||||
// RemainingBytes returns the size in bytes of the data that is still to be
|
||||
// read.
|
||||
func (z *TZlibTransport) RemainingBytes() uint64 {
|
||||
return z.transport.RemainingBytes()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Write(p []byte) (int, error) {
|
||||
return z.writer.Write(p)
|
||||
}
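An illustrative sketch of layering the deleted TZlibTransport over a plain TSocket; the compression level comes from compress/zlib, and the import path is again an assumption.

package main

import (
	"compress/zlib"
	"context"
	"log"

	// Assumed upstream import path for the Apache Thrift Go library.
	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	sock, err := thrift.NewTSocket("localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	if err := sock.Open(); err != nil {
		log.Fatal(err)
	}

	// Wrap the socket: writes are zlib-compressed, and reads are lazily
	// wrapped in a zlib reader on first Read (see TZlibTransport.Read above).
	ztrans, err := thrift.NewTZlibTransport(sock, zlib.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	defer ztrans.Close()

	if _, err := ztrans.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	// Flush pushes buffered compressed bytes down to the underlying transport.
	if err := ztrans.Flush(context.Background()); err != nil {
		log.Fatal(err)
	}
}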
@@ -1,202 +0,0 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 Microsoft Corporation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
@@ -1,5 +0,0 @@
Microsoft Azure-SDK-for-Go
Copyright 2014-2017 Microsoft

This product includes software developed at
the Microsoft Corporation (https://www.microsoft.com).
@@ -1,77 +0,0 @@
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PutAppendBlob initializes an empty append blob with specified name. An
|
||||
// append blob must be created using this method before appending blocks.
|
||||
//
|
||||
// See CreateBlockBlobFromReader for more info on creating blobs.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
|
||||
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
|
||||
params := url.Values{}
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||
headers = mergeHeaders(headers, headersFromStruct(b.Properties))
|
||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||
|
||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return b.respondCreation(resp, BlobTypeAppend)
|
||||
}
|
||||
|
||||
// AppendBlockOptions includes the options for an append block operation
|
||||
type AppendBlockOptions struct {
|
||||
Timeout uint
|
||||
LeaseID string `header:"x-ms-lease-id"`
|
||||
MaxSize *uint `header:"x-ms-blob-condition-maxsize"`
|
||||
AppendPosition *uint `header:"x-ms-blob-condition-appendpos"`
|
||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||
IfMatch string `header:"If-Match"`
|
||||
IfNoneMatch string `header:"If-None-Match"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
ContentMD5 bool
|
||||
}
|
||||
|
||||
// AppendBlock appends a block to an append blob.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block
|
||||
func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
|
||||
params := url.Values{"comp": {"appendblock"}}
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
headers["x-ms-blob-type"] = string(BlobTypeAppend)
|
||||
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
if options.ContentMD5 {
|
||||
md5sum := md5.Sum(chunk)
|
||||
headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
|
||||
}
|
||||
}
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||
|
||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return b.respondCreation(resp, BlobTypeAppend)
|
||||
}
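A short sketch of the two append-blob calls shown above, in the required order; the storage package import path is assumed to be the upstream (pre-modules) Azure SDK for Go from which this vendored copy came.

package storageexample

import (
	// Assumed upstream import path of the legacy Azure SDK storage package.
	"github.com/Azure/azure-sdk-for-go/storage"
)

// appendData creates an empty append blob and then appends one chunk to it.
// PutAppendBlob must be called before any AppendBlock, as the docs above state.
func appendData(b *storage.Blob, chunk []byte) error {
	if err := b.PutAppendBlob(nil); err != nil {
		return err
	}
	// ContentMD5 asks the client to attach an MD5 of the chunk, as handled
	// inside AppendBlock above.
	return b.AppendBlock(chunk, &storage.AppendBlockOptions{ContentMD5: true})
}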
@@ -1,230 +0,0 @@
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
|
||||
|
||||
type authentication string
|
||||
|
||||
const (
|
||||
sharedKey authentication = "sharedKey"
|
||||
sharedKeyForTable authentication = "sharedKeyTable"
|
||||
sharedKeyLite authentication = "sharedKeyLite"
|
||||
sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
|
||||
|
||||
// headers
|
||||
headerAcceptCharset = "Accept-Charset"
|
||||
headerAuthorization = "Authorization"
|
||||
headerContentLength = "Content-Length"
|
||||
headerDate = "Date"
|
||||
headerXmsDate = "x-ms-date"
|
||||
headerXmsVersion = "x-ms-version"
|
||||
headerContentEncoding = "Content-Encoding"
|
||||
headerContentLanguage = "Content-Language"
|
||||
headerContentType = "Content-Type"
|
||||
headerContentMD5 = "Content-MD5"
|
||||
headerIfModifiedSince = "If-Modified-Since"
|
||||
headerIfMatch = "If-Match"
|
||||
headerIfNoneMatch = "If-None-Match"
|
||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
||||
headerRange = "Range"
|
||||
headerDataServiceVersion = "DataServiceVersion"
|
||||
headerMaxDataServiceVersion = "MaxDataServiceVersion"
|
||||
headerContentTransferEncoding = "Content-Transfer-Encoding"
|
||||
)
|
||||
|
||||
func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
|
||||
authHeader, err := c.getSharedKey(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headers[headerAuthorization] = authHeader
|
||||
return headers, nil
|
||||
}
|
||||
|
||||
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
|
||||
canRes, err := c.buildCanonicalizedResource(url, auth, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return c.createAuthorizationHeader(canString, auth), nil
|
||||
}
|
||||
|
||||
func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
|
||||
errMsg := "buildCanonicalizedResource error: %s"
|
||||
u, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
cr := bytes.NewBufferString("")
|
||||
if c.accountName != StorageEmulatorAccountName || !sas {
|
||||
cr.WriteString("/")
|
||||
cr.WriteString(c.getCanonicalizedAccountName())
|
||||
}
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
// Any portion of the CanonicalizedResource string that is derived from
|
||||
// the resource's URI should be encoded exactly as it is in the URI.
|
||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
||||
cr.WriteString(u.EscapedPath())
|
||||
}
|
||||
|
||||
params, err := url.ParseQuery(u.RawQuery)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(errMsg, err.Error())
|
||||
}
|
||||
|
||||
// See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
|
||||
if auth == sharedKey {
|
||||
if len(params) > 0 {
|
||||
cr.WriteString("\n")
|
||||
|
||||
keys := []string{}
|
||||
for key := range params {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
completeParams := []string{}
|
||||
for _, key := range keys {
|
||||
if len(params[key]) > 1 {
|
||||
sort.Strings(params[key])
|
||||
}
|
||||
|
||||
completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
|
||||
}
|
||||
cr.WriteString(strings.Join(completeParams, "\n"))
|
||||
}
|
||||
} else {
|
||||
// search for "comp" parameter, if exists then add it to canonicalizedresource
|
||||
if v, ok := params["comp"]; ok {
|
||||
cr.WriteString("?comp=" + v[0])
|
||||
}
|
||||
}
|
||||
|
||||
return string(cr.Bytes()), nil
|
||||
}
|
||||
|
||||
func (c *Client) getCanonicalizedAccountName() string {
|
||||
// since we may be trying to access a secondary storage account, we need to
|
||||
// remove the -secondary part of the storage name
|
||||
return strings.TrimSuffix(c.accountName, "-secondary")
|
||||
}
|
||||
|
||||
func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
|
||||
contentLength := headers[headerContentLength]
|
||||
if contentLength == "0" {
|
||||
contentLength = ""
|
||||
}
|
||||
date := headers[headerDate]
|
||||
if v, ok := headers[headerXmsDate]; ok {
|
||||
if auth == sharedKey || auth == sharedKeyLite {
|
||||
date = ""
|
||||
} else {
|
||||
date = v
|
||||
}
|
||||
}
|
||||
var canString string
|
||||
switch auth {
|
||||
case sharedKey:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentEncoding],
|
||||
headers[headerContentLanguage],
|
||||
contentLength,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
headers[headerIfModifiedSince],
|
||||
headers[headerIfMatch],
|
||||
headers[headerIfNoneMatch],
|
||||
headers[headerIfUnmodifiedSince],
|
||||
headers[headerRange],
|
||||
buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyForTable:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyLite:
|
||||
canString = strings.Join([]string{
|
||||
verb,
|
||||
headers[headerContentMD5],
|
||||
headers[headerContentType],
|
||||
date,
|
||||
buildCanonicalizedHeader(headers),
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
case sharedKeyLiteForTable:
|
||||
canString = strings.Join([]string{
|
||||
date,
|
||||
canonicalizedResource,
|
||||
}, "\n")
|
||||
default:
|
||||
return "", fmt.Errorf("%s authentication is not supported yet", auth)
|
||||
}
|
||||
return canString, nil
|
||||
}
|
||||
|
||||
func buildCanonicalizedHeader(headers map[string]string) string {
|
||||
cm := make(map[string]string)
|
||||
|
||||
for k, v := range headers {
|
||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
||||
if strings.HasPrefix(headerName, "x-ms-") {
|
||||
cm[headerName] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(cm) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
keys := []string{}
|
||||
for key := range cm {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
ch := bytes.NewBufferString("")
|
||||
|
||||
for _, key := range keys {
|
||||
ch.WriteString(key)
|
||||
ch.WriteRune(':')
|
||||
ch.WriteString(cm[key])
|
||||
ch.WriteRune('\n')
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(string(ch.Bytes()), "\n")
|
||||
}
|
||||
|
||||
func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
|
||||
signature := c.computeHmac256(canonicalizedString)
|
||||
var key string
|
||||
switch auth {
|
||||
case sharedKey, sharedKeyForTable:
|
||||
key = "SharedKey"
|
||||
case sharedKeyLite, sharedKeyLiteForTable:
|
||||
key = "SharedKeyLite"
|
||||
}
|
||||
return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
|
||||
}
|
|
@ -1,639 +0,0 @@
package storage

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// A Blob is an entry in BlobListResponse.
type Blob struct {
	Container *Container
	Name string `xml:"Name"`
	Snapshot time.Time `xml:"Snapshot"`
	Properties BlobProperties `xml:"Properties"`
	Metadata BlobMetadata `xml:"Metadata"`
}

// PutBlobOptions includes the options for any put blob operation
// (page, block, append)
type PutBlobOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	Origin string `header:"Origin"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// BlobMetadata is a set of custom name/value pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
type BlobMetadata map[string]string

type blobMetadataEntries struct {
	Entries []blobMetadataEntry `xml:",any"`
}
type blobMetadataEntry struct {
	XMLName xml.Name
	Value string `xml:",chardata"`
}

// UnmarshalXML converts the xml:Metadata into Metadata map
func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var entries blobMetadataEntries
	if err := d.DecodeElement(&entries, &start); err != nil {
		return err
	}
	for _, entry := range entries.Entries {
		if *bm == nil {
			*bm = make(BlobMetadata)
		}
		(*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
	}
	return nil
}

// MarshalXML implements the xml.Marshaler interface. It encodes
// metadata name/value pairs as they would appear in an Azure
// ListBlobs response.
func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	entries := make([]blobMetadataEntry, 0, len(bm))
	for k, v := range bm {
		entries = append(entries, blobMetadataEntry{
			XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
			Value: v,
		})
	}
	return enc.EncodeElement(blobMetadataEntries{
		Entries: entries,
	}, start)
}

// BlobProperties contains various properties of a blob
// returned in various endpoints like ListBlobs or GetBlobProperties.
type BlobProperties struct {
	LastModified TimeRFC1123 `xml:"Last-Modified"`
	Etag string `xml:"Etag"`
	ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"`
	ContentLength int64 `xml:"Content-Length"`
	ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"`
	ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"`
	CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
	ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"`
	ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
	BlobType BlobType `xml:"x-ms-blob-blob-type"`
	SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
	CopyID string `xml:"CopyId"`
	CopyStatus string `xml:"CopyStatus"`
	CopySource string `xml:"CopySource"`
	CopyProgress string `xml:"CopyProgress"`
	CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"`
	CopyStatusDescription string `xml:"CopyStatusDescription"`
	LeaseStatus string `xml:"LeaseStatus"`
	LeaseState string `xml:"LeaseState"`
	LeaseDuration string `xml:"LeaseDuration"`
	ServerEncrypted bool `xml:"ServerEncrypted"`
	IncrementalCopy bool `xml:"IncrementalCopy"`
}

// BlobType defines the type of the Azure Blob.
type BlobType string

// Types of page blobs
const (
	BlobTypeBlock BlobType = "BlockBlob"
	BlobTypePage BlobType = "PageBlob"
	BlobTypeAppend BlobType = "AppendBlob"
)

func (b *Blob) buildPath() string {
	return b.Container.buildPath() + "/" + b.Name
}

// Exists returns true if a blob with given name exists on the specified
// container of the storage account.
func (b *Blob) Exists() (bool, error) {
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil)
	headers := b.Container.bsc.client.getStandardHeaders()
	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, nil
		}
	}
	return false, err
}

// GetURL gets the canonical URL to the blob with the specified name in the
// specified container. If name is not specified, the canonical URL for the entire
// container is obtained.
// This method does not create a publicly accessible URL if the blob or container
// is private and this method does not check if the blob exists.
func (b *Blob) GetURL() string {
	container := b.Container.Name
	if container == "" {
		container = "$root"
	}
	return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil)
}

// GetBlobRangeOptions includes the options for a get blob range operation
type GetBlobRangeOptions struct {
	Range *BlobRange
	GetRangeContentMD5 bool
	*GetBlobOptions
}

// GetBlobOptions includes the options for a get blob operation
type GetBlobOptions struct {
	Timeout uint
	Snapshot *time.Time
	LeaseID string `header:"x-ms-lease-id"`
	Origin string `header:"Origin"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// BlobRange represents the byte range to be retrieved
type BlobRange struct {
	Start uint64
	End uint64
}

func (br BlobRange) String() string {
	if br.End == 0 {
		return fmt.Sprintf("bytes=%d-", br.Start)
	}
	return fmt.Sprintf("bytes=%d-%d", br.Start, br.End)
}

// Get returns a stream to read the blob. Caller must call both Read and Close()
// to correctly close the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
	rangeOptions := GetBlobRangeOptions{
		GetBlobOptions: options,
	}
	resp, err := b.getRange(&rangeOptions)
	if err != nil {
		return nil, err
	}

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}
	if err := b.writeProperties(resp.headers, true); err != nil {
		return resp.body, err
	}
	return resp.body, nil
}

// GetRange reads the specified range of a blob to a stream. The bytesRange
// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
// Caller must call both Read and Close() to correctly close the underlying
// connection.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) {
	resp, err := b.getRange(options)
	if err != nil {
		return nil, err
	}

	if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
		return nil, err
	}
	// Content-Length header should not be updated, as the service returns the range length
	// (which is not always the full blob length)
	if err := b.writeProperties(resp.headers, false); err != nil {
		return resp.body, err
	}
	return resp.body, nil
}

func (b *Blob) getRange(options *GetBlobRangeOptions) (*storageResponse, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		if options.Range != nil {
			headers["Range"] = options.Range.String()
			if options.GetRangeContentMD5 {
				headers["x-ms-range-get-content-md5"] = "true"
			}
		}
		if options.GetBlobOptions != nil {
			headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions))
			params = addTimeout(params, options.Timeout)
			params = addSnapshot(params, options.Snapshot)
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return nil, err
	}
	return resp, err
}

// SnapshotOptions includes the options for a snapshot blob operation
type SnapshotOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// CreateSnapshot creates a snapshot for a blob
// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) {
	params := url.Values{"comp": {"snapshot"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil || resp == nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)

	if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
		return nil, err
	}

	snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
	if snapshotResponse != "" {
		snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
		if err != nil {
			return nil, err
		}
		return &snapshotTimestamp, nil
	}

	return nil, errors.New("Snapshot not created")
}

// GetBlobPropertiesOptions includes the options for a get blob properties operation
type GetBlobPropertiesOptions struct {
	Timeout uint
	Snapshot *time.Time
	LeaseID string `header:"x-ms-lease-id"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// GetProperties provides various information about the specified blob.
// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return err
	}
	return b.writeProperties(resp.headers, true)
}

func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error {
	var err error

	contentLength := b.Properties.ContentLength
	if includeContentLen {
		contentLengthStr := h.Get("Content-Length")
		if contentLengthStr != "" {
			contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
			if err != nil {
				return err
			}
		}
	}

	var sequenceNum int64
	sequenceNumStr := h.Get("x-ms-blob-sequence-number")
	if sequenceNumStr != "" {
		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
		if err != nil {
			return err
		}
	}

	lastModified, err := getTimeFromHeaders(h, "Last-Modified")
	if err != nil {
		return err
	}

	copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time")
	if err != nil {
		return err
	}

	b.Properties = BlobProperties{
		LastModified: TimeRFC1123(*lastModified),
		Etag: h.Get("Etag"),
		ContentMD5: h.Get("Content-MD5"),
		ContentLength: contentLength,
		ContentEncoding: h.Get("Content-Encoding"),
		ContentType: h.Get("Content-Type"),
		ContentDisposition: h.Get("Content-Disposition"),
		CacheControl: h.Get("Cache-Control"),
		ContentLanguage: h.Get("Content-Language"),
		SequenceNumber: sequenceNum,
		CopyCompletionTime: TimeRFC1123(*copyCompletionTime),
		CopyStatusDescription: h.Get("x-ms-copy-status-description"),
		CopyID: h.Get("x-ms-copy-id"),
		CopyProgress: h.Get("x-ms-copy-progress"),
		CopySource: h.Get("x-ms-copy-source"),
		CopyStatus: h.Get("x-ms-copy-status"),
		BlobType: BlobType(h.Get("x-ms-blob-type")),
		LeaseStatus: h.Get("x-ms-lease-status"),
		LeaseState: h.Get("x-ms-lease-state"),
	}
	b.writeMetadata(h)
	return nil
}

// SetBlobPropertiesOptions contains various properties of a blob and is an entry
// in SetProperties
type SetBlobPropertiesOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	Origin string `header:"Origin"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	SequenceNumberAction *SequenceNumberAction
	RequestID string `header:"x-ms-client-request-id"`
}

// SequenceNumberAction defines how the blob's sequence number should be modified
type SequenceNumberAction string

// Options for sequence number action
const (
	SequenceNumberActionMax SequenceNumberAction = "max"
	SequenceNumberActionUpdate SequenceNumberAction = "update"
	SequenceNumberActionIncrement SequenceNumberAction = "increment"
)

// SetProperties replaces the BlobHeaders for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobProperties. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties
func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
	params := url.Values{"comp": {"properties"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	if b.Properties.BlobType == BlobTypePage {
		headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
		if options != nil && options.SequenceNumberAction != nil {
			headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
			if *options.SequenceNumberAction != SequenceNumberActionIncrement {
				headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))
			}
		}
	}

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusOK})
}

// SetBlobMetadataOptions includes the options for a set blob metadata operation
type SetBlobMetadataOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// SetMetadata replaces the metadata for the specified blob.
//
// Some keys may be converted to Camel-Case before sending. All keys
// are returned in lower case by GetBlobMetadata. HTTP header names
// are case-insensitive so case munging should not matter to other
// applications either.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusOK})
}

// GetBlobMetadataOptions includes the options for a get blob metadata operation
type GetBlobMetadataOptions struct {
	Timeout uint
	Snapshot *time.Time
	LeaseID string `header:"x-ms-lease-id"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// GetMetadata returns all user-defined metadata for the specified blob.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
	params := url.Values{"comp": {"metadata"}}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return err
	}

	b.writeMetadata(resp.headers)
	return nil
}

func (b *Blob) writeMetadata(h http.Header) {
	metadata := make(map[string]string)
	for k, v := range h {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["lol"] = content of the last X-Ms-Meta-Lol header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}

	b.Metadata = BlobMetadata(metadata)
}

// DeleteBlobOptions includes the options for a delete blob operation
type DeleteBlobOptions struct {
	Timeout uint
	Snapshot *time.Time
	LeaseID string `header:"x-ms-lease-id"`
	DeleteSnapshots *bool
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// Delete deletes the given blob from the specified container.
// If the blob does not exist at the time of the Delete Blob operation, it
// returns an error.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) Delete(options *DeleteBlobOptions) error {
	resp, err := b.delete(options)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}

// DeleteIfExists deletes the given blob from the specified container. If the
// blob is deleted with this call, it returns true. Otherwise it returns false.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob
func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) {
	resp, err := b.delete(options)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}

func (b *Blob) delete(options *DeleteBlobOptions) (*storageResponse, error) {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
		if options.DeleteSnapshots != nil {
			if *options.DeleteSnapshots {
				headers["x-ms-delete-snapshots"] = "include"
			} else {
				headers["x-ms-delete-snapshots"] = "only"
			}
		}
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
	return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth)
}

// helper method to construct the path to either a blob or container
func pathForResource(container, name string) string {
	if name != "" {
		return fmt.Sprintf("/%s/%s", container, name)
	}
	return fmt.Sprintf("/%s", container)
}

func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error {
	readAndCloseBody(resp.body)
	err := checkRespCode(resp.statusCode, []int{http.StatusCreated})
	if err != nil {
		return err
	}
	b.Properties.BlobType = bt
	return nil
}
@ -1,108 +0,0 @@
package storage

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
	"time"
)

// GetSASURIWithSignedIPAndProtocol creates a URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
// If an old API version is used but no signedIP is passed (i.e. an empty string) then this should still work.
// We only populate the signedIP when it is non-empty.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
	var (
		signedPermissions = permissions
		blobURL = b.GetURL()
	)
	canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth, true)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx

	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	signedExpiry := expiry.UTC().Format(time.RFC3339)

	// If the blob name is missing, the resource is a container
	signedResource := "c"
	if len(b.Name) > 0 {
		signedResource = "b"
	}

	protocols := ""
	if HTTPSOnly {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
	if err != nil {
		return "", err
	}

	sig := b.Container.bsc.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv": {b.Container.bsc.client.apiVersion},
		"se": {signedExpiry},
		"sr": {signedResource},
		"sp": {signedPermissions},
		"sig": {sig},
	}

	if b.Container.bsc.client.apiVersion >= "2015-04-05" {
		if protocols != "" {
			sasParams.Add("spr", protocols)
		}
		if signedIPRange != "" {
			sasParams.Add("sip", signedIPRange)
		}
	}

	sasURL, err := url.Parse(blobURL)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}

// GetSASURI creates a URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) {
	return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false)
}

func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
	var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string

	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}

	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
	if signedVersion >= "2015-04-05" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}

	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
	if signedVersion >= "2013-08-15" {
		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
@ -1,95 +0,0 @@
package storage

import (
	"net/http"
	"net/url"
	"strconv"
)

// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
	client Client
	auth authentication
}

// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	return b.client.getServiceProperties(blobServiceName, b.auth)
}

// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}

// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix string
	Marker string
	Include string
	MaxResults uint
	Timeout uint
}

// GetContainerReference returns a Container object for the specified container name.
func (b *BlobStorageClient) GetContainerReference(name string) *Container {
	return &Container{
		bsc: b,
		Name: name,
	}
}

// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()

	var out ContainerListResponse
	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &out)
	if err != nil {
		return nil, err
	}

	// assign our client to the newly created Container objects
	for i := range out.Containers {
		out.Containers[i].bsc = &b
	}
	return &out, err
}

func (p ListContainersParameters) getParameters() url.Values {
	out := url.Values{}

	if p.Prefix != "" {
		out.Set("prefix", p.Prefix)
	}
	if p.Marker != "" {
		out.Set("marker", p.Marker)
	}
	if p.Include != "" {
		out.Set("include", p.Include)
	}
	if p.MaxResults != 0 {
		out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
	}
	if p.Timeout != 0 {
		out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
	}

	return out
}
@ -1,256 +0,0 @@
package storage

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string

// Filters for listing blocks in block blobs
const (
	BlockListTypeAll BlockListType = "all"
	BlockListTypeCommitted BlockListType = "committed"
	BlockListTypeUncommitted BlockListType = "uncommitted"
)

// Maximum sizes (per REST API) for various concepts
const (
	MaxBlobBlockSize = 100 * 1024 * 1024
	MaxBlobPageSize = 4 * 1024 * 1024
)

// BlockStatus defines states a block for a block blob can
// be in.
type BlockStatus string

// List of statuses that can be used to refer to a block in a block list
const (
	BlockStatusUncommitted BlockStatus = "Uncommitted"
	BlockStatusCommitted BlockStatus = "Committed"
	BlockStatusLatest BlockStatus = "Latest"
)

// Block is used to create Block entities for a Put Block List
// call.
type Block struct {
	ID string
	Status BlockStatus
}

// BlockListResponse contains the response fields from a Get Block List call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct {
	XMLName xml.Name `xml:"BlockList"`
	CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
}

// BlockResponse contains the block information returned
// in the GetBlockList call.
type BlockResponse struct {
	Name string `xml:"Name"`
	Size int64 `xml:"Size"`
}

// CreateBlockBlob initializes an empty block blob with no blocks.
//
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
	return b.CreateBlockBlobFromReader(nil, options)
}

// CreateBlockBlobFromReader initializes a block blob using data from
// reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil.
//
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList.
//
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
	params := url.Values{}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)

	headers["Content-Length"] = "0"
	var n int64
	var err error
	if blob != nil {
		type lener interface {
			Len() int
		}
		// TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc.
		if l, ok := blob.(lener); ok {
			n = int64(l.Len())
		} else {
			var buf bytes.Buffer
			n, err = io.Copy(&buf, blob)
			if err != nil {
				return err
			}
			blob = &buf
		}

		headers["Content-Length"] = strconv.FormatInt(n, 10)
	}
	b.Properties.ContentLength = n

	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}

// PutBlockOptions includes the options for a put block operation
type PutBlockOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	ContentMD5 string `header:"Content-MD5"`
	RequestID string `header:"x-ms-client-request-id"`
}

// PutBlock saves the given data chunk to the specified block blob with
// the given ID.
//
// The API rejects chunks larger than 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error {
	return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options)
}

// PutBlockWithLength saves the given data stream of exactly the specified size to
// the block blob with the given ID. It is an alternative to PutBlock where the data
// comes as a stream but the length is known in advance.
//
// The API rejects requests with size > 100 MiB (but this limit is not
// checked by the SDK).
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block
func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error {
	query := url.Values{
		"comp": {"block"},
		"blockid": {blockID},
	}
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["Content-Length"] = fmt.Sprintf("%v", size)

	if options != nil {
		query = addTimeout(query, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth)
	if err != nil {
		return err
	}
	return b.respondCreation(resp, BlobTypeBlock)
}

// PutBlockListOptions includes the options for a put block list operation
type PutBlockListOptions struct {
	Timeout uint
	LeaseID string `header:"x-ms-lease-id"`
	IfModifiedSince *time.Time `header:"If-Modified-Since"`
	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
	IfMatch string `header:"If-Match"`
	IfNoneMatch string `header:"If-None-Match"`
	RequestID string `header:"x-ms-client-request-id"`
}

// PutBlockList saves the list of blocks to the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List
func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error {
	params := url.Values{"comp": {"blocklist"}}
	blockListXML := prepareBlockListRequest(blocks)
	headers := b.Container.bsc.client.getStandardHeaders()
	headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)

	if options != nil {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth)
	if err != nil {
		return err
	}
	readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// GetBlockListOptions includes the options for a get block list operation
type GetBlockListOptions struct {
	Timeout uint
	Snapshot *time.Time
	LeaseID string `header:"x-ms-lease-id"`
	RequestID string `header:"x-ms-client-request-id"`
}

// GetBlockList retrieves the list of blocks in the specified block blob.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List
func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) {
	params := url.Values{
		"comp": {"blocklist"},
		"blocklisttype": {string(blockType)},
	}
	headers := b.Container.bsc.client.getStandardHeaders()

	if options != nil {
		params = addTimeout(params, options.Timeout)
		params = addSnapshot(params, options.Snapshot)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

	var out BlockListResponse
	resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth)
	if err != nil {
		return out, err
	}
	defer resp.body.Close()

	err = xmlUnmarshal(resp.body, &out)
	return out, err
}
@ -1,663 +0,0 @@
|
|||
// Package storage provides clients for Microsoft Azure Storage Services.
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURL is the domain name used for storage requests in the
|
||||
// public cloud when a default client is created.
|
||||
DefaultBaseURL = "core.windows.net"
|
||||
|
||||
// DefaultAPIVersion is the Azure Storage API version string used when a
|
||||
// basic client is created.
|
||||
DefaultAPIVersion = "2016-05-31"
|
||||
|
||||
defaultUseHTTPS = true
|
||||
|
||||
// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
|
||||
StorageEmulatorAccountName = "devstoreaccount1"
|
||||
|
||||
// StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator
|
||||
StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||
|
||||
blobServiceName = "blob"
|
||||
tableServiceName = "table"
|
||||
queueServiceName = "queue"
|
||||
fileServiceName = "file"
|
||||
|
||||
storageEmulatorBlob = "127.0.0.1:10000"
|
||||
storageEmulatorTable = "127.0.0.1:10002"
|
||||
storageEmulatorQueue = "127.0.0.1:10001"
|
||||
|
||||
userAgentHeader = "User-Agent"
|
||||
|
||||
userDefinedMetadataHeaderPrefix = "x-ms-meta-"
|
||||
)
|
||||
|
||||
var (
|
||||
validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
|
||||
)
|
||||
|
||||
// Sender sends a request
|
||||
type Sender interface {
|
||||
Send(*Client, *http.Request) (*http.Response, error)
|
||||
}
|
||||
|
||||
// DefaultSender is the default sender for the client. It implements
|
||||
// an automatic retry strategy.
|
||||
type DefaultSender struct {
|
||||
RetryAttempts int
|
||||
RetryDuration time.Duration
|
||||
ValidStatusCodes []int
|
||||
attempts int // used for testing
|
||||
}
|
||||
|
||||
// Send is the default retry strategy in the client
|
||||
func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) {
|
||||
rr := autorest.NewRetriableRequest(req)
|
||||
for attempts := 0; attempts < ds.RetryAttempts; attempts++ {
|
||||
err = rr.Prepare()
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
resp, err = c.HTTPClient.Do(rr.Request())
|
||||
if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) {
|
||||
return resp, err
|
||||
}
|
||||
autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel)
|
||||
ds.attempts = attempts
|
||||
}
|
||||
ds.attempts++
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Client is the object that needs to be constructed to perform
|
||||
// operations on the storage account.
|
||||
type Client struct {
|
||||
// HTTPClient is the http.Client used to initiate API
|
||||
// requests. http.DefaultClient is used when creating a
|
||||
// client.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// Sender is an interface that sends the request. Clients are
|
||||
// created with a DefaultSender. The DefaultSender has an
|
||||
// automatic retry strategy built in. The Sender can be customized.
|
||||
Sender Sender
|
||||
|
||||
accountName string
|
||||
accountKey []byte
|
||||
useHTTPS bool
|
||||
UseSharedKeyLite bool
|
||||
baseURL string
|
||||
apiVersion string
|
||||
userAgent string
|
||||
}
|
||||
|
||||
type storageResponse struct {
|
||||
statusCode int
|
||||
headers http.Header
|
||||
body io.ReadCloser
|
||||
}
|
||||
|
||||
type odataResponse struct {
|
||||
storageResponse
|
||||
odata odataErrorWrapper
|
||||
}
|
||||
|
||||
// AzureStorageServiceError contains fields of the error response from
|
||||
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
|
||||
// Some fields might be specific to certain calls.
|
||||
type AzureStorageServiceError struct {
|
||||
Code string `xml:"Code"`
|
||||
Message string `xml:"Message"`
|
||||
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
|
||||
QueryParameterName string `xml:"QueryParameterName"`
|
||||
QueryParameterValue string `xml:"QueryParameterValue"`
|
||||
Reason string `xml:"Reason"`
|
||||
Lang string
|
||||
StatusCode int
|
||||
RequestID string
|
||||
Date string
|
||||
APIVersion string
|
||||
}
|
||||
|
||||
type odataErrorMessage struct {
|
||||
Lang string `json:"lang"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type odataError struct {
|
||||
Code string `json:"code"`
|
||||
Message odataErrorMessage `json:"message"`
|
||||
}
|
||||
|
||||
type odataErrorWrapper struct {
|
||||
Err odataError `json:"odata.error"`
|
||||
}
|
||||
|
||||
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
|
||||
// nor with an HTTP status code indicating success.
|
||||
type UnexpectedStatusCodeError struct {
|
||||
allowed []int
|
||||
got int
|
||||
}
|
||||
|
||||
func (e UnexpectedStatusCodeError) Error() string {
|
||||
s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
|
||||
|
||||
got := s(e.got)
|
||||
expected := []string{}
|
||||
for _, v := range e.allowed {
|
||||
expected = append(expected, s(v))
|
||||
}
|
||||
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
|
||||
}
|
||||
|
||||
// Got is the actual status code returned by Azure.
|
||||
func (e UnexpectedStatusCodeError) Got() int {
|
||||
return e.got
|
||||
}
|
||||
|
||||
// NewBasicClient constructs a Client with given storage service name and
|
||||
// key.
|
||||
func NewBasicClient(accountName, accountKey string) (Client, error) {
|
||||
if accountName == StorageEmulatorAccountName {
|
||||
return NewEmulatorClient()
|
||||
}
|
||||
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
|
||||
}
|
||||
|
||||
// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
|
||||
// key in the referenced cloud.
|
||||
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
|
||||
if accountName == StorageEmulatorAccountName {
|
||||
return NewEmulatorClient()
|
||||
}
|
||||
return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
|
||||
}
|
||||
|
||||
//NewEmulatorClient contructs a Client intended to only work with Azure
|
||||
//Storage Emulator
|
||||
func NewEmulatorClient() (Client, error) {
|
||||
return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
|
||||
}
|
||||
|
||||
// NewClient constructs a Client. This should be used if the caller wants
|
||||
// to specify whether to use HTTPS, a specific REST API version or a custom
|
||||
// storage endpoint than Azure Public Cloud.
|
||||
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
|
||||
var c Client
|
||||
if !IsValidStorageAccount(accountName) {
|
||||
return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
|
||||
} else if accountKey == "" {
|
||||
return c, fmt.Errorf("azure: account key required")
|
||||
} else if blobServiceBaseURL == "" {
|
||||
return c, fmt.Errorf("azure: base storage service url required")
|
||||
}
|
||||
|
||||
key, err := base64.StdEncoding.DecodeString(accountKey)
|
||||
if err != nil {
|
||||
return c, fmt.Errorf("azure: malformed storage account key: %v", err)
|
||||
}
|
||||
|
||||
c = Client{
|
||||
HTTPClient: http.DefaultClient,
|
||||
accountName: accountName,
|
||||
accountKey: key,
|
||||
useHTTPS: useHTTPS,
|
||||
baseURL: blobServiceBaseURL,
|
||||
apiVersion: apiVersion,
|
||||
UseSharedKeyLite: false,
|
||||
Sender: &DefaultSender{
|
||||
RetryAttempts: 5,
|
||||
ValidStatusCodes: []int{
|
||||
http.StatusRequestTimeout, // 408
|
||||
http.StatusInternalServerError, // 500
|
||||
http.StatusBadGateway, // 502
|
||||
http.StatusServiceUnavailable, // 503
|
||||
http.StatusGatewayTimeout, // 504
|
||||
},
|
||||
RetryDuration: time.Second * 5,
|
||||
},
|
||||
}
|
||||
c.userAgent = c.getDefaultUserAgent()
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// IsValidStorageAccount checks if the storage account name is valid.
|
||||
// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
|
||||
func IsValidStorageAccount(account string) bool {
|
||||
return validStorageAccount.MatchString(account)
|
||||
}
|
||||
|
||||
func (c Client) getDefaultUserAgent() string {
|
||||
return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
|
||||
runtime.Version(),
|
||||
runtime.GOARCH,
|
||||
runtime.GOOS,
|
||||
sdkVersion,
|
||||
c.apiVersion,
|
||||
)
|
||||
}
|
||||
|
||||
// AddToUserAgent adds an extension to the current user agent
|
||||
func (c *Client) AddToUserAgent(extension string) error {
|
||||
if extension != "" {
|
||||
c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
|
||||
}
|
||||
|
||||
// protectUserAgent is used in funcs that include extraheaders as a parameter.
|
||||
// It prevents the User-Agent header from being overwritten; instead, if one happens
|
||||
// to be present, it gets appended to the current User-Agent. Use it before getStandardHeaders.
|
||||
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
|
||||
if v, ok := extraheaders[userAgentHeader]; ok {
|
||||
c.AddToUserAgent(v)
|
||||
delete(extraheaders, userAgentHeader)
|
||||
}
|
||||
return extraheaders
|
||||
}
|
||||
|
||||
func (c Client) getBaseURL(service string) *url.URL {
|
||||
scheme := "http"
|
||||
if c.useHTTPS {
|
||||
scheme = "https"
|
||||
}
|
||||
host := ""
|
||||
if c.accountName == StorageEmulatorAccountName {
|
||||
switch service {
|
||||
case blobServiceName:
|
||||
host = storageEmulatorBlob
|
||||
case tableServiceName:
|
||||
host = storageEmulatorTable
|
||||
case queueServiceName:
|
||||
host = storageEmulatorQueue
|
||||
}
|
||||
} else {
|
||||
host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
|
||||
}
|
||||
|
||||
return &url.URL{
|
||||
Scheme: scheme,
|
||||
Host: host,
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) getEndpoint(service, path string, params url.Values) string {
|
||||
u := c.getBaseURL(service)
|
||||
|
||||
// API doesn't accept path segments not starting with '/'
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
path = fmt.Sprintf("/%v", path)
|
||||
}
|
||||
|
||||
if c.accountName == StorageEmulatorAccountName {
|
||||
path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
|
||||
}
|
||||
|
||||
u.Path = path
|
||||
u.RawQuery = params.Encode()
|
||||
return u.String()
|
||||
}
|
||||
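// For illustration (values are assumed, not taken from this file): with account
// "myaccount", HTTPS enabled and the default base URL, a call such as
//
//	u := c.getEndpoint(blobServiceName, "cont/blob.txt", url.Values{})
//
// produces "https://myaccount.blob.core.windows.net/cont/blob.txt", while the
// emulator account is addressed by host:port with the account name prefixed to
// the path, e.g. "http://127.0.0.1:10000/devstoreaccount1/cont/blob.txt"
// (assuming the usual emulator blob endpoint).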
|
||||
// GetBlobService returns a BlobStorageClient which can operate on the blob
|
||||
// service of the storage account.
|
||||
func (c Client) GetBlobService() BlobStorageClient {
|
||||
b := BlobStorageClient{
|
||||
client: c,
|
||||
}
|
||||
b.client.AddToUserAgent(blobServiceName)
|
||||
b.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
b.auth = sharedKeyLite
|
||||
}
|
||||
return b
|
||||
}
|
||||
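// Consumer-side wiring sketch (GetContainerReference is defined on
// BlobStorageClient in another file of this package; names are placeholders):
//
//	cli, _ := storage.NewBasicClient("myaccount", accountKey)
//	blobCli := cli.GetBlobService()
//	cnt := blobCli.GetContainerReference("mycontainer")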
|
||||
// GetQueueService returns a QueueServiceClient which can operate on the queue
|
||||
// service of the storage account.
|
||||
func (c Client) GetQueueService() QueueServiceClient {
|
||||
q := QueueServiceClient{
|
||||
client: c,
|
||||
}
|
||||
q.client.AddToUserAgent(queueServiceName)
|
||||
q.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
q.auth = sharedKeyLite
|
||||
}
|
||||
return q
|
||||
}
|
||||
|
||||
// GetTableService returns a TableServiceClient which can operate on the table
|
||||
// service of the storage account.
|
||||
func (c Client) GetTableService() TableServiceClient {
|
||||
t := TableServiceClient{
|
||||
client: c,
|
||||
}
|
||||
t.client.AddToUserAgent(tableServiceName)
|
||||
t.auth = sharedKeyForTable
|
||||
if c.UseSharedKeyLite {
|
||||
t.auth = sharedKeyLiteForTable
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// GetFileService returns a FileServiceClient which can operate on the file
|
||||
// service of the storage account.
|
||||
func (c Client) GetFileService() FileServiceClient {
|
||||
f := FileServiceClient{
|
||||
client: c,
|
||||
}
|
||||
f.client.AddToUserAgent(fileServiceName)
|
||||
f.auth = sharedKey
|
||||
if c.UseSharedKeyLite {
|
||||
f.auth = sharedKeyLite
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (c Client) getStandardHeaders() map[string]string {
|
||||
return map[string]string{
|
||||
userAgentHeader: c.userAgent,
|
||||
"x-ms-version": c.apiVersion,
|
||||
"x-ms-date": currentTimeRfc1123Formatted(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
|
||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(verb, url, body)
|
||||
if err != nil {
|
||||
return nil, errors.New("azure/storage: error creating request: " + err.Error())
|
||||
}
|
||||
|
||||
// if a body was provided ensure that the content length was set.
|
||||
// http.NewRequest() will automatically do this for a handful of types
|
||||
// and for those that it doesn't we will handle here.
|
||||
if body != nil && req.ContentLength < 1 {
|
||||
if lr, ok := body.(*io.LimitedReader); ok {
|
||||
setContentLengthFromLimitedReader(req, lr)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range headers {
|
||||
req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
|
||||
}
|
||||
|
||||
resp, err := c.Sender.Send(&c, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 400 && resp.StatusCode <= 505 {
|
||||
var respBody []byte
|
||||
respBody, err = readAndCloseBody(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
requestID, date, version := getDebugHeaders(resp.Header)
|
||||
if len(respBody) == 0 {
|
||||
// no error in response body, might happen in HEAD requests
|
||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
||||
} else {
|
||||
storageErr := AzureStorageServiceError{
|
||||
StatusCode: resp.StatusCode,
|
||||
RequestID: requestID,
|
||||
Date: date,
|
||||
APIVersion: version,
|
||||
}
|
||||
// response contains storage service error object, unmarshal
|
||||
if resp.Header.Get("Content-Type") == "application/xml" {
|
||||
errIn := serviceErrFromXML(respBody, &storageErr)
|
||||
if errIn != nil { // error unmarshaling the error response
|
||||
err = errIn
|
||||
}
|
||||
} else {
|
||||
errIn := serviceErrFromJSON(respBody, &storageErr)
|
||||
if errIn != nil { // error unmarshaling the error response
|
||||
err = errIn
|
||||
}
|
||||
}
|
||||
err = storageErr
|
||||
}
|
||||
return &storageResponse{
|
||||
statusCode: resp.StatusCode,
|
||||
headers: resp.Header,
|
||||
body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
|
||||
}, err
|
||||
}
|
||||
|
||||
return &storageResponse{
|
||||
statusCode: resp.StatusCode,
|
||||
headers: resp.Header,
|
||||
body: resp.Body}, nil
|
||||
}
|
||||
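// Errors produced by exec for 4xx/5xx responses surface to callers of the
// exported operations as AzureStorageServiceError values, which can be
// inspected with a type assertion. A sketch (err is assumed to come from one of
// the operations in this package):
//
//	if azErr, ok := err.(storage.AzureStorageServiceError); ok {
//		fmt.Printf("status=%d code=%s request-id=%s\n",
//			azErr.StatusCode, azErr.Code, azErr.RequestID)
//	}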
|
||||
func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) {
|
||||
headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(verb, url, body)
|
||||
if err != nil {
return nil, nil, nil, err
}
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
|
||||
resp, err := c.Sender.Send(&c, req)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
respToRet := &odataResponse{}
|
||||
respToRet.body = resp.Body
|
||||
respToRet.statusCode = resp.StatusCode
|
||||
respToRet.headers = resp.Header
|
||||
|
||||
statusCode := resp.StatusCode
|
||||
if statusCode >= 400 && statusCode <= 505 {
|
||||
var respBody []byte
|
||||
respBody, err = readAndCloseBody(resp.Body)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
requestID, date, version := getDebugHeaders(resp.Header)
|
||||
if len(respBody) == 0 {
|
||||
// no error in response body, might happen in HEAD requests
|
||||
err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version)
|
||||
return respToRet, req, resp, err
|
||||
}
|
||||
// try unmarshal as odata.error json
|
||||
err = json.Unmarshal(respBody, &respToRet.odata)
|
||||
}
|
||||
|
||||
return respToRet, req, resp, err
|
||||
}
|
||||
|
||||
func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
||||
respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
|
||||
return respToRet, err
|
||||
}
|
||||
|
||||
func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
|
||||
// execute common query, get back generated request, response etc... for more processing.
|
||||
respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// return the OData in the case of executing batch commands.
|
||||
// In this case we need to read the outer batch boundary and contents.
|
||||
// Then we read the changeset information within the batch
|
||||
var respBody []byte
|
||||
respBody, err = readAndCloseBody(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// outer multipart body
|
||||
_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// batch details.
|
||||
batchBoundary := batchHeader["boundary"]
|
||||
batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// changeset details.
|
||||
err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return respToRet, nil
|
||||
}
|
||||
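// The batch response unpacked above is a nested multipart payload; the layout
// looks roughly like the following (boundaries and status are illustrative):
//
//	--batchresponse_<uuid>
//	Content-Type: multipart/mixed; boundary=changesetresponse_<uuid>
//
//	--changesetresponse_<uuid>
//	HTTP/1.1 204 No Content
//	...
//	--changesetresponse_<uuid>--
//	--batchresponse_<uuid>--
//
// genBatchReader peels off the outer batch part and genChangesetReader parses
// the inner changeset part as an HTTP response.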
|
||||
func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
|
||||
changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
|
||||
changesetPart, err := changesetMultiReader.NextPart()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
changesetPartBufioReader := bufio.NewReader(changesetPart)
|
||||
changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if changesetResp.StatusCode != http.StatusNoContent {
|
||||
changesetBody, err := readAndCloseBody(changesetResp.Body)
|
||||
if err != nil {
return err
}
err = json.Unmarshal(changesetBody, &respToRet.odata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
respToRet.statusCode = changesetResp.StatusCode
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
|
||||
respBodyString := string(respBody)
|
||||
respBodyReader := strings.NewReader(respBodyString)
|
||||
|
||||
// reading batchresponse
|
||||
batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
|
||||
batchPart, err := batchMultiReader.NextPart()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
batchPartBufioReader := bufio.NewReader(batchPart)
|
||||
|
||||
_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
changesetBoundary := changesetHeader["boundary"]
|
||||
return batchPartBufioReader, changesetBoundary, nil
|
||||
}
|
||||
|
||||
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
|
||||
defer body.Close()
|
||||
out, err := ioutil.ReadAll(body)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
|
||||
if err := xml.Unmarshal(body, storageErr); err != nil {
|
||||
storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
|
||||
odataError := odataErrorWrapper{}
|
||||
if err := json.Unmarshal(body, &odataError); err != nil {
|
||||
storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
|
||||
return err
|
||||
}
|
||||
storageErr.Code = odataError.Err.Code
|
||||
storageErr.Message = odataError.Err.Message.Value
|
||||
storageErr.Lang = odataError.Err.Message.Lang
|
||||
return nil
|
||||
}
|
||||
|
||||
func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError {
|
||||
return AzureStorageServiceError{
|
||||
StatusCode: code,
|
||||
Code: status,
|
||||
RequestID: requestID,
|
||||
Date: date,
|
||||
APIVersion: version,
|
||||
Message: "no response body was available for error status code",
|
||||
}
|
||||
}
|
||||
|
||||
func (e AzureStorageServiceError) Error() string {
|
||||
return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s",
|
||||
e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue)
|
||||
}
|
||||
|
||||
// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
|
||||
// one of the allowed status codes; otherwise nil.
|
||||
func checkRespCode(respCode int, allowed []int) error {
|
||||
for _, v := range allowed {
|
||||
if respCode == v {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return UnexpectedStatusCodeError{allowed, respCode}
|
||||
}
|
||||
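// Typical call pattern used throughout this package (sketch):
//
//	if err := checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusCreated}); err != nil {
//		return err // err is an UnexpectedStatusCodeError carrying the actual code
//	}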
|
||||
func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string {
|
||||
metadata = c.protectUserAgent(metadata)
|
||||
for k, v := range metadata {
|
||||
h[userDefinedMetadataHeaderPrefix+k] = v
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func getDebugHeaders(h http.Header) (requestID, date, version string) {
|
||||
requestID = h.Get("x-ms-request-id")
|
||||
version = h.Get("x-ms-version")
|
||||
date = h.Get("Date")
|
||||
return
|
||||
}
|
|
@ -1,453 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Container represents an Azure container.
|
||||
type Container struct {
|
||||
bsc *BlobStorageClient
|
||||
Name string `xml:"Name"`
|
||||
Properties ContainerProperties `xml:"Properties"`
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
func (c *Container) buildPath() string {
|
||||
return fmt.Sprintf("/%s", c.Name)
|
||||
}
|
||||
|
||||
// ContainerProperties contains various properties of a container returned from
|
||||
// various endpoints like ListContainers.
|
||||
type ContainerProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
LeaseStatus string `xml:"LeaseStatus"`
|
||||
LeaseState string `xml:"LeaseState"`
|
||||
LeaseDuration string `xml:"LeaseDuration"`
|
||||
}
|
||||
|
||||
// ContainerListResponse contains the response fields from
|
||||
// ListContainers call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
|
||||
type ContainerListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Containers []Container `xml:"Containers>Container"`
|
||||
}
|
||||
|
||||
// BlobListResponse contains the response fields from ListBlobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type BlobListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Prefix string `xml:"Prefix"`
|
||||
Marker string `xml:"Marker"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Blobs []Blob `xml:"Blobs>Blob"`
|
||||
|
||||
// BlobPrefix is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
// The list here can be thought of as "folders" that may contain
|
||||
// other folders or blobs.
|
||||
BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
|
||||
|
||||
// Delimiter is used to traverse blobs as if it were a file system.
|
||||
// It is returned if ListBlobsParameters.Delimiter is specified.
|
||||
Delimiter string `xml:"Delimiter"`
|
||||
}
|
||||
|
||||
// IncludeBlobDataset has options to include in a list blobs operation
|
||||
type IncludeBlobDataset struct {
|
||||
Snapshots bool
|
||||
Metadata bool
|
||||
UncommittedBlobs bool
|
||||
Copy bool
|
||||
}
|
||||
|
||||
// ListBlobsParameters defines the set of customizable
|
||||
// parameters to make a List Blobs call.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
|
||||
type ListBlobsParameters struct {
|
||||
Prefix string
|
||||
Delimiter string
|
||||
Marker string
|
||||
Include *IncludeBlobDataset
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
RequestID string
|
||||
}
|
||||
|
||||
func (p ListBlobsParameters) getParameters() url.Values {
|
||||
out := url.Values{}
|
||||
|
||||
if p.Prefix != "" {
|
||||
out.Set("prefix", p.Prefix)
|
||||
}
|
||||
if p.Delimiter != "" {
|
||||
out.Set("delimiter", p.Delimiter)
|
||||
}
|
||||
if p.Marker != "" {
|
||||
out.Set("marker", p.Marker)
|
||||
}
|
||||
if p.Include != nil {
|
||||
include := []string{}
|
||||
include = addString(include, p.Include.Snapshots, "snapshots")
|
||||
include = addString(include, p.Include.Metadata, "metadata")
|
||||
include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs")
|
||||
include = addString(include, p.Include.Copy, "copy")
|
||||
fullInclude := strings.Join(include, ",")
|
||||
out.Set("include", fullInclude)
|
||||
}
|
||||
if p.MaxResults != 0 {
|
||||
out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10))
|
||||
}
|
||||
if p.Timeout != 0 {
|
||||
out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10))
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func addString(datasets []string, include bool, text string) []string {
|
||||
if include {
|
||||
datasets = append(datasets, text)
|
||||
}
|
||||
return datasets
|
||||
}
|
||||
|
||||
// ContainerAccessType defines the access level to the container from a public
|
||||
// request.
|
||||
//
|
||||
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
|
||||
// blob-public-access" header.
|
||||
type ContainerAccessType string
|
||||
|
||||
// Access options for containers
|
||||
const (
|
||||
ContainerAccessTypePrivate ContainerAccessType = ""
|
||||
ContainerAccessTypeBlob ContainerAccessType = "blob"
|
||||
ContainerAccessTypeContainer ContainerAccessType = "container"
|
||||
)
|
||||
|
||||
// ContainerAccessPolicy represents each access policy in the container ACL.
|
||||
type ContainerAccessPolicy struct {
|
||||
ID string
|
||||
StartTime time.Time
|
||||
ExpiryTime time.Time
|
||||
CanRead bool
|
||||
CanWrite bool
|
||||
CanDelete bool
|
||||
}
|
||||
|
||||
// ContainerPermissions represents the container ACLs.
|
||||
type ContainerPermissions struct {
|
||||
AccessType ContainerAccessType
|
||||
AccessPolicies []ContainerAccessPolicy
|
||||
}
|
||||
|
||||
// ContainerAccessHeader references header used when setting/getting container ACL
|
||||
const (
|
||||
ContainerAccessHeader string = "x-ms-blob-public-access"
|
||||
)
|
||||
|
||||
// GetBlobReference returns a Blob object for the specified blob name.
|
||||
func (c *Container) GetBlobReference(name string) *Blob {
|
||||
return &Blob{
|
||||
Container: c,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateContainerOptions includes the options for a create container operation
|
||||
type CreateContainerOptions struct {
|
||||
Timeout uint
|
||||
Access ContainerAccessType `header:"x-ms-blob-public-access"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
}
|
||||
|
||||
// Create creates a blob container within the storage account
|
||||
// with the given name and access level. Returns an error if the container already exists.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container
|
||||
func (c *Container) Create(options *CreateContainerOptions) error {
|
||||
resp, err := c.create(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusCreated})
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates a blob container if it does not exist. Returns
|
||||
// true if container is newly created or false if container already exists.
|
||||
func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) {
|
||||
resp, err := c.create(options)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
return resp.statusCode == http.StatusCreated, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
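// Consumer-side sketch for creating a container idempotently (container and
// client names are placeholders; GetContainerReference is defined elsewhere):
//
//	cnt := blobCli.GetContainerReference("mycontainer")
//	created, err := cnt.CreateIfNotExists(&storage.CreateContainerOptions{
//		Access: storage.ContainerAccessTypeBlob,
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println("newly created:", created)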
|
||||
func (c *Container) create(options *CreateContainerOptions) (*storageResponse, error) {
|
||||
query := url.Values{"restype": {"container"}}
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata)
|
||||
|
||||
if options != nil {
|
||||
query = addTimeout(query, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||
|
||||
return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// Exists returns true if a container with given name exists
|
||||
// on the storage account, otherwise returns false.
|
||||
func (c *Container) Exists() (bool, error) {
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusOK, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// SetContainerPermissionOptions includes options for a set container permissions operation
|
||||
type SetContainerPermissionOptions struct {
|
||||
Timeout uint
|
||||
LeaseID string `header:"x-ms-lease-id"`
|
||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
}
|
||||
|
||||
// SetPermissions sets up container permissions
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL
|
||||
func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error {
|
||||
body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType))
|
||||
headers["Content-Length"] = strconv.Itoa(length)
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
|
||||
return fmt.Errorf("storage: unable to set permissions: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
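// Consumer-side sketch for setting a container ACL with one stored access
// policy (the ID and times are made up):
//
//	perms := storage.ContainerPermissions{
//		AccessType: storage.ContainerAccessTypeContainer,
//		AccessPolicies: []storage.ContainerAccessPolicy{{
//			ID:         "read-only-policy",
//			StartTime:  time.Now().UTC(),
//			ExpiryTime: time.Now().UTC().Add(48 * time.Hour),
//			CanRead:    true,
//		}},
//	}
//	if err := cnt.SetPermissions(perms, nil); err != nil {
//		return err
//	}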
|
||||
// GetContainerPermissionOptions includes options for a get container permissions operation
|
||||
type GetContainerPermissionOptions struct {
|
||||
Timeout uint
|
||||
LeaseID string `header:"x-ms-lease-id"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
}
|
||||
|
||||
// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
|
||||
// If timeout is 0 then it will not be passed to Azure
|
||||
// leaseID will only be passed to Azure if populated
|
||||
func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) {
|
||||
params := url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"acl"},
|
||||
}
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
|
||||
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
var ap AccessPolicy
|
||||
err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buildAccessPolicy(ap, &resp.headers), nil
|
||||
}
|
||||
|
||||
func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
|
||||
// containerAccess. Blob, Container, empty
|
||||
containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
|
||||
permissions := ContainerPermissions{
|
||||
AccessType: ContainerAccessType(containerAccess),
|
||||
AccessPolicies: []ContainerAccessPolicy{},
|
||||
}
|
||||
|
||||
for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
|
||||
capd := ContainerAccessPolicy{
|
||||
ID: policy.ID,
|
||||
StartTime: policy.AccessPolicy.StartTime,
|
||||
ExpiryTime: policy.AccessPolicy.ExpiryTime,
|
||||
}
|
||||
capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
|
||||
capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
|
||||
capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
|
||||
|
||||
permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
|
||||
}
|
||||
return &permissions
|
||||
}
|
||||
|
||||
// DeleteContainerOptions includes options for a delete container operation
|
||||
type DeleteContainerOptions struct {
|
||||
Timeout uint
|
||||
LeaseID string `header:"x-ms-lease-id"`
|
||||
IfModifiedSince *time.Time `header:"If-Modified-Since"`
|
||||
IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
}
|
||||
|
||||
// Delete deletes the container with given name on the storage
|
||||
// account. If the container does not exist, an error is returned.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||
func (c *Container) Delete(options *DeleteContainerOptions) error {
|
||||
resp, err := c.delete(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
|
||||
}
|
||||
|
||||
// DeleteIfExists deletes the container with given name on the storage
|
||||
// account if it exists. Returns true if container is deleted with this call, or
|
||||
// false if the container did not exist at the time of the Delete Container
|
||||
// operation.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container
|
||||
func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) {
|
||||
resp, err := c.delete(options)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (c *Container) delete(options *DeleteContainerOptions) (*storageResponse, error) {
|
||||
query := url.Values{"restype": {"container"}}
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
|
||||
if options != nil {
|
||||
query = addTimeout(query, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query)
|
||||
|
||||
return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
|
||||
}
|
||||
|
||||
// ListBlobs returns an object that contains the list of blobs in the container,
|
||||
// a pagination token, and other information from the List Blobs call.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs
|
||||
func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), url.Values{
|
||||
"restype": {"container"},
|
||||
"comp": {"list"}},
|
||||
)
|
||||
uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
|
||||
|
||||
headers := c.bsc.client.getStandardHeaders()
|
||||
headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID)
|
||||
|
||||
var out BlobListResponse
|
||||
resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer resp.body.Close()
|
||||
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
for i := range out.Blobs {
|
||||
out.Blobs[i].Container = c
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
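// Consumer-side pagination sketch driven by NextMarker (Blob.Name is defined in
// another file of this package; other names are placeholders):
//
//	params := storage.ListBlobsParameters{Prefix: "logs/", MaxResults: 100}
//	for {
//		page, err := cnt.ListBlobs(params)
//		if err != nil {
//			return err
//		}
//		for _, b := range page.Blobs {
//			fmt.Println(b.Name)
//		}
//		if page.NextMarker == "" {
//			break
//		}
//		params.Marker = page.NextMarker
//	}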
|
||||
func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
|
||||
sil := SignedIdentifiers{
|
||||
SignedIdentifiers: []SignedIdentifier{},
|
||||
}
|
||||
for _, capd := range policies {
|
||||
permission := capd.generateContainerPermissions()
|
||||
signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
|
||||
sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
|
||||
}
|
||||
return xmlMarshal(sil)
|
||||
}
|
||||
|
||||
func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
|
||||
// generate the permissions string (rwd).
|
||||
// still want the end user API to have bool flags.
|
||||
permissions = ""
|
||||
|
||||
if capd.CanRead {
|
||||
permissions += "r"
|
||||
}
|
||||
|
||||
if capd.CanWrite {
|
||||
permissions += "w"
|
||||
}
|
||||
|
||||
if capd.CanDelete {
|
||||
permissions += "d"
|
||||
}
|
||||
|
||||
return permissions
|
||||
}
|
|
@ -1,223 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
blobCopyStatusPending = "pending"
|
||||
blobCopyStatusSuccess = "success"
|
||||
blobCopyStatusAborted = "aborted"
|
||||
blobCopyStatusFailed = "failed"
|
||||
)
|
||||
|
||||
// CopyOptions includes the options for a copy blob operation
|
||||
type CopyOptions struct {
|
||||
Timeout uint
|
||||
Source CopyOptionsConditions
|
||||
Destiny CopyOptionsConditions
|
||||
RequestID string
|
||||
}
|
||||
|
||||
// IncrementalCopyOptions includes the options for an incremental copy blob operation
|
||||
type IncrementalCopyOptions struct {
|
||||
Timeout uint
|
||||
Destination IncrementalCopyOptionsConditions
|
||||
RequestID string
|
||||
}
|
||||
|
||||
// CopyOptionsConditions includes some conditional options in a copy blob operation
|
||||
type CopyOptionsConditions struct {
|
||||
LeaseID string
|
||||
IfModifiedSince *time.Time
|
||||
IfUnmodifiedSince *time.Time
|
||||
IfMatch string
|
||||
IfNoneMatch string
|
||||
}
|
||||
|
||||
// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation
|
||||
type IncrementalCopyOptionsConditions struct {
|
||||
IfModifiedSince *time.Time
|
||||
IfUnmodifiedSince *time.Time
|
||||
IfMatch string
|
||||
IfNoneMatch string
|
||||
}
|
||||
|
||||
// Copy starts a blob copy operation and waits for the operation to
|
||||
// complete. sourceBlob parameter must be a canonical URL to the blob (can be
|
||||
// obtained using the GetURL method.) There is no SLA on blob copy and therefore
|
||||
// this helper method works faster on smaller files.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
||||
func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
|
||||
copyID, err := b.StartCopy(sourceBlob, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return b.WaitForCopy(copyID)
|
||||
}
|
||||
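// Consumer-side sketch of a server-side copy between two blobs in the same
// container (GetURL is defined on Blob in another file; names are placeholders):
//
//	src := cnt.GetBlobReference("source.txt")
//	dst := cnt.GetBlobReference("copy-of-source.txt")
//	if err := dst.Copy(src.GetURL(), nil); err != nil {
//		return err
//	}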
|
||||
// StartCopy starts a blob copy operation.
|
||||
// sourceBlob parameter must be a canonical URL to the blob (can be
|
||||
// obtained using the GetURL method.)
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
|
||||
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {
|
||||
params := url.Values{}
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
headers["x-ms-copy-source"] = sourceBlob
|
||||
headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||
// source
|
||||
headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID)
|
||||
headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince)
|
||||
headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince)
|
||||
headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch)
|
||||
headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch)
|
||||
// destination
|
||||
headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID)
|
||||
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince)
|
||||
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince)
|
||||
headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch)
|
||||
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch)
|
||||
}
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||
|
||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
copyID := resp.headers.Get("x-ms-copy-id")
|
||||
if copyID == "" {
|
||||
return "", errors.New("Got empty copy id header")
|
||||
}
|
||||
return copyID, nil
|
||||
}
|
||||
|
||||
// AbortCopyOptions includes the options for an abort blob operation
|
||||
type AbortCopyOptions struct {
|
||||
Timeout uint
|
||||
LeaseID string `header:"x-ms-lease-id"`
|
||||
RequestID string `header:"x-ms-client-request-id"`
|
||||
}
|
||||
|
||||
// AbortCopy aborts a blob copy which has already been triggered by the StartCopy function.
|
||||
// copyID is generated by the StartCopy function.
|
||||
// currentLeaseID is required IF the destination blob has an active lease on it.
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
|
||||
func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
|
||||
params := url.Values{
|
||||
"comp": {"copy"},
|
||||
"copyid": {copyID},
|
||||
}
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
headers["x-ms-copy-action"] = "abort"
|
||||
|
||||
if options != nil {
|
||||
params = addTimeout(params, options.Timeout)
|
||||
headers = mergeHeaders(headers, headersFromStruct(*options))
|
||||
}
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||
|
||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
readAndCloseBody(resp.body)
|
||||
return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
|
||||
}
|
||||
|
||||
// WaitForCopy loops until a blob copy operation is completed (or fails with an error).
|
||||
func (b *Blob) WaitForCopy(copyID string) error {
|
||||
for {
|
||||
err := b.GetProperties(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.Properties.CopyID != copyID {
|
||||
return errBlobCopyIDMismatch
|
||||
}
|
||||
|
||||
switch b.Properties.CopyStatus {
|
||||
case blobCopyStatusSuccess:
|
||||
return nil
|
||||
case blobCopyStatusPending:
|
||||
continue
|
||||
case blobCopyStatusAborted:
|
||||
return errBlobCopyAborted
|
||||
case blobCopyStatusFailed:
|
||||
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
|
||||
default:
|
||||
return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IncrementalCopyBlob copies a snapshot of a source blob into the referring blob.
|
||||
// sourceBlob parameter must be a valid snapshot URL of the original blob.
|
||||
// The original blob must be public, or must use a Shared Access Signature.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
|
||||
func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) {
|
||||
params := url.Values{"comp": {"incrementalcopy"}}
|
||||
|
||||
// need formatting to 7 decimal places so it's friendly to Windows and *nix
|
||||
snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z")
|
||||
u, err := url.Parse(sourceBlobURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
query := u.Query()
|
||||
query.Add("snapshot", snapshotTimeFormatted)
|
||||
encodedQuery := query.Encode()
|
||||
encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1)
|
||||
u.RawQuery = encodedQuery
|
||||
snapshotURL := u.String()
|
||||
|
||||
headers := b.Container.bsc.client.getStandardHeaders()
|
||||
headers["x-ms-copy-source"] = snapshotURL
|
||||
|
||||
if options != nil {
|
||||
addTimeout(params, options.Timeout)
|
||||
headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID)
|
||||
headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince)
|
||||
headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince)
|
||||
headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch)
|
||||
headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch)
|
||||
}
|
||||
|
||||
// get URI of destination blob
|
||||
uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
|
||||
|
||||
resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer readAndCloseBody(resp.body)
|
||||
|
||||
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
copyID := resp.headers.Get("x-ms-copy-id")
|
||||
if copyID == "" {
|
||||
return "", errors.New("Got empty copy id header")
|
||||
}
|
||||
return copyID, nil
|
||||
}
|
|
@ -1,224 +0,0 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Directory represents a directory on a share.
|
||||
type Directory struct {
|
||||
fsc *FileServiceClient
|
||||
Metadata map[string]string
|
||||
Name string `xml:"Name"`
|
||||
parent *Directory
|
||||
Properties DirectoryProperties
|
||||
share *Share
|
||||
}
|
||||
|
||||
// DirectoryProperties contains various properties of a directory.
|
||||
type DirectoryProperties struct {
|
||||
LastModified string `xml:"Last-Modified"`
|
||||
Etag string `xml:"Etag"`
|
||||
}
|
||||
|
||||
// ListDirsAndFilesParameters defines the set of customizable parameters to
|
||||
// make a List Files and Directories call.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||
type ListDirsAndFilesParameters struct {
|
||||
Prefix string
|
||||
Marker string
|
||||
MaxResults uint
|
||||
Timeout uint
|
||||
}
|
||||
|
||||
// DirsAndFilesListResponse contains the response fields from
|
||||
// a List Files and Directories call.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||
type DirsAndFilesListResponse struct {
|
||||
XMLName xml.Name `xml:"EnumerationResults"`
|
||||
Xmlns string `xml:"xmlns,attr"`
|
||||
Marker string `xml:"Marker"`
|
||||
MaxResults int64 `xml:"MaxResults"`
|
||||
Directories []Directory `xml:"Entries>Directory"`
|
||||
Files []File `xml:"Entries>File"`
|
||||
NextMarker string `xml:"NextMarker"`
|
||||
}
|
||||
|
||||
// builds the complete directory path for this directory object.
|
||||
func (d *Directory) buildPath() string {
|
||||
path := ""
|
||||
current := d
|
||||
for current.Name != "" {
|
||||
path = "/" + current.Name + path
|
||||
current = current.parent
|
||||
}
|
||||
return d.share.buildPath() + path
|
||||
}
|
||||
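// For example (illustrative, assuming Share.buildPath, defined elsewhere,
// returns "/myshare"): a directory "b" whose parent is "a" under the share's
// root yields buildPath() == "/myshare/a/b", while the root directory itself
// (empty Name) yields just "/myshare".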
|
||||
// Create this directory in the associated share.
|
||||
// If a directory with the same name already exists, the operation fails.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||
func (d *Directory) Create(options *FileRequestOptions) error {
|
||||
// if this is the root directory exit early
|
||||
if d.parent == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
params := prepareOptions(options)
|
||||
headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates this directory under the associated share if the
|
||||
// directory does not exist. Returns true if the directory is newly created or
|
||||
// false if the directory already exists.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
|
||||
func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
|
||||
// if this is the root directory exit early
|
||||
if d.parent == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
params := prepareOptions(options)
|
||||
resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
|
||||
if resp.statusCode == http.StatusCreated {
|
||||
d.updateEtagAndLastModified(resp.headers)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, d.FetchAttributes(nil)
|
||||
}
|
||||
}
|
||||
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Delete removes this directory. It must be empty in order to be deleted.
|
||||
// If the directory does not exist the operation fails.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||
func (d *Directory) Delete(options *FileRequestOptions) error {
|
||||
return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
|
||||
}
|
||||
|
||||
// DeleteIfExists removes this directory if it exists.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
|
||||
func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) {
|
||||
resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options)
|
||||
if resp != nil {
|
||||
defer readAndCloseBody(resp.body)
|
||||
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
|
||||
return resp.statusCode == http.StatusAccepted, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Exists returns true if this directory exists.
|
||||
func (d *Directory) Exists() (bool, error) {
|
||||
exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
|
||||
if exists {
|
||||
d.updateEtagAndLastModified(headers)
|
||||
}
|
||||
return exists, err
|
||||
}
|
||||
|
||||
// FetchAttributes retrieves metadata for this directory.
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties
|
||||
func (d *Directory) FetchAttributes(options *FileRequestOptions) error {
|
||||
params := prepareOptions(options)
|
||||
headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
d.Metadata = getMetadataFromHeaders(headers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDirectoryReference returns a child Directory object for this directory.
|
||||
func (d *Directory) GetDirectoryReference(name string) *Directory {
|
||||
return &Directory{
|
||||
fsc: d.fsc,
|
||||
Name: name,
|
||||
parent: d,
|
||||
share: d.share,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileReference returns a child File object for this directory.
|
||||
func (d *Directory) GetFileReference(name string) *File {
|
||||
return &File{
|
||||
fsc: d.fsc,
|
||||
Name: name,
|
||||
parent: d,
|
||||
share: d.share,
|
||||
mutex: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// ListDirsAndFiles returns a list of files and directories under this directory.
|
||||
// It also contains a pagination token and other response details.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
|
||||
func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
|
||||
q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
|
||||
|
||||
resp, err := d.fsc.listContent(d.buildPath(), q, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.body.Close()
|
||||
var out DirsAndFilesListResponse
|
||||
err = xmlUnmarshal(resp.body, &out)
|
||||
return &out, err
|
||||
}
|
||||
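// Consumer-side listing sketch (GetShareReference, GetRootDirectoryReference,
// and File.Name live in other files of this package; names are placeholders):
//
//	fileCli := cli.GetFileService()
//	root := fileCli.GetShareReference("myshare").GetRootDirectoryReference()
//	res, err := root.ListDirsAndFiles(storage.ListDirsAndFilesParameters{MaxResults: 50})
//	if err != nil {
//		return err
//	}
//	for _, d := range res.Directories {
//		fmt.Println("dir:", d.Name)
//	}
//	for _, f := range res.Files {
//		fmt.Println("file:", f.Name)
//	}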
|
||||
// SetMetadata replaces the metadata for this directory.
|
||||
//
|
||||
// Some keys may be converted to Camel-Case before sending. All keys
|
||||
// are returned in lower case by GetDirectoryMetadata. HTTP header names
|
||||
// are case-insensitive so case munging should not matter to other
|
||||
// applications either.
|
||||
//
|
||||
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata
|
||||
func (d *Directory) SetMetadata(options *FileRequestOptions) error {
|
||||
headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.updateEtagAndLastModified(headers)
|
||||
return nil
|
||||
}
|
||||
|
||||
// updates Etag and last modified date
|
||||
func (d *Directory) updateEtagAndLastModified(headers http.Header) {
|
||||
d.Properties.Etag = headers.Get("Etag")
|
||||
d.Properties.LastModified = headers.Get("Last-Modified")
|
||||
}
|
||||
|
||||
// URL gets the canonical URL to this directory.
|
||||
// This method does not create a publicly accessible URL if the directory
|
||||
// is private and this method does not check if the directory exists.
|
||||
func (d *Directory) URL() string {
|
||||
return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
|
||||
}
|