Update godeps

This commit is contained in:
Manuel Alejandro de Brito Fontes 2016-03-19 20:00:11 -03:00 committed by Manuel de Brito Fontes
parent ffe6baa14c
commit 9b142b56f8
1137 changed files with 22773 additions and 189176 deletions

27
Godeps/_workspace/src/google.golang.org/api/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2011 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,367 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"encoding/json"
"reflect"
"testing"
"google.golang.org/api/googleapi"
)
// schema is a test fixture whose fields cover every kind the
// marshaling tests below exercise: basic values, pointers to basic
// values, slices, maps, an interface, and a nested struct. All fields
// carry omitempty, so zero values are dropped unless forced.
type schema struct {
	// Basic types
	B    bool    `json:"b,omitempty"`
	F    float64 `json:"f,omitempty"`
	I    int64   `json:"i,omitempty"`
	Istr int64   `json:"istr,omitempty,string"` // encoded as a quoted string
	Str  string  `json:"str,omitempty"`

	// Pointers to basic types
	PB    *bool    `json:"pb,omitempty"`
	PF    *float64 `json:"pf,omitempty"`
	PI    *int64   `json:"pi,omitempty"`
	PIStr *int64   `json:"pistr,omitempty,string"`
	PStr  *string  `json:"pstr,omitempty"`

	// Other types
	Int64s googleapi.Int64s  `json:"i64s,omitempty"`
	S      []int             `json:"s,omitempty"`
	M      map[string]string `json:"m,omitempty"`
	Any    interface{}       `json:"any,omitempty"`
	Child  *child            `json:"child,omitempty"`

	// ForceSendFields names fields to emit even when they hold their
	// zero value; it is itself excluded from the JSON output.
	ForceSendFields []string `json:"-"`
}
// child is the nested-struct fixture referenced by schema.Child.
type child struct {
	B bool `json:"childbool,omitempty"`
}
// testCase pairs an input schema with the JSON its marshaling is
// expected to produce (compared structurally, not byte-for-byte).
type testCase struct {
	s    schema
	want string
}
// TestBasics checks marshaling of the basic and pointer-to-basic
// fields: zero values are omitted unless named in ForceSendFields,
// while pointers to zero values are always emitted.
func TestBasics(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// Forcing zero-valued basic fields emits them explicitly.
			s: schema{
				ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"},
			},
			want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":""}`,
		},
		{
			// Non-zero values appear without any forcing.
			s: schema{
				B:     true,
				F:     1.2,
				I:     1,
				Istr:  2,
				Str:   "a",
				PB:    googleapi.Bool(true),
				PF:    googleapi.Float64(1.2),
				PI:    googleapi.Int64(int64(1)),
				PIStr: googleapi.Int64(int64(2)),
				PStr:  googleapi.String("a"),
			},
			want: `{"b":true,"f":1.2,"i":1,"istr":"2","str":"a","pb":true,"pf":1.2,"pi":1,"pistr":"2","pstr":"a"}`,
		},
		{
			// Pointers to zero values survive omitempty; plain zero
			// values do not.
			s: schema{
				B:     false,
				F:     0.0,
				I:     0,
				Istr:  0,
				Str:   "",
				PB:    googleapi.Bool(false),
				PF:    googleapi.Float64(0.0),
				PI:    googleapi.Int64(int64(0)),
				PIStr: googleapi.Int64(int64(0)),
				PStr:  googleapi.String(""),
			},
			want: `{"pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`,
		},
		{
			// Forcing plus pointers: everything is emitted.
			s: schema{
				B:               false,
				F:               0.0,
				I:               0,
				Istr:            0,
				Str:             "",
				PB:              googleapi.Bool(false),
				PF:              googleapi.Float64(0.0),
				PI:              googleapi.Int64(int64(0)),
				PIStr:           googleapi.Int64(int64(0)),
				PStr:            googleapi.String(""),
				ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"},
			},
			want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":"","pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
// TestSliceFields checks slice handling: nil and empty slices are
// omitted by default, but ForceSendFields makes them appear as [].
func TestSliceFields(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// Empty (non-nil) slices are still dropped by omitempty.
			s:    schema{S: []int{}, Int64s: googleapi.Int64s{}},
			want: `{}`,
		},
		{
			s:    schema{S: []int{1}, Int64s: googleapi.Int64s{1}},
			want: `{"s":[1],"i64s":["1"]}`,
		},
		{
			// Forced nil slices encode as empty arrays.
			s: schema{
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[],"i64s":[]}`,
		},
		{
			s: schema{
				S:               []int{},
				Int64s:          googleapi.Int64s{},
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[],"i64s":[]}`,
		},
		{
			s: schema{
				S:               []int{1},
				Int64s:          googleapi.Int64s{1},
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[1],"i64s":["1"]}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
// TestMapField checks map handling: nil and empty maps are omitted by
// default, but ForceSendFields makes them appear as {}.
func TestMapField(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// Empty (non-nil) maps are still dropped by omitempty.
			s:    schema{M: make(map[string]string)},
			want: `{}`,
		},
		{
			s:    schema{M: map[string]string{"a": "b"}},
			want: `{"m":{"a":"b"}}`,
		},
		{
			// Forced nil maps encode as empty objects.
			s: schema{
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{}}`,
		},
		{
			s: schema{
				M:               make(map[string]string),
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{}}`,
		},
		{
			s: schema{
				M:               map[string]string{"a": "b"},
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{"a":"b"}}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
// anyType is a fixture whose custom JSON encoding always produces the
// same fixed string, independent of Field's value.
type anyType struct {
	Field int
}

// MarshalJSON implements json.Marshaler with a constant output; the
// receiver carries no state, so it is unnamed.
func (anyType) MarshalJSON() ([]byte, error) {
	return []byte(`"anyType value"`), nil
}
// TestAnyField checks interface-typed fields. ForceSendFields has no
// effect on nil interfaces and interfaces that contain nil pointers.
func TestAnyField(t *testing.T) {
	// ForceSendFields has no effect on nil interfaces and interfaces that contain nil pointers.
	var nilAny *anyType
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// A typed nil pointer inside the interface makes the
			// interface itself non-nil, so the field is emitted as null.
			s:    schema{Any: nilAny},
			want: `{"any": null}`,
		},
		{
			s:    schema{Any: &anyType{}},
			want: `{"any":"anyType value"}`,
		},
		{
			s:    schema{Any: anyType{}},
			want: `{"any":"anyType value"}`,
		},
		{
			// Forcing a nil interface changes nothing.
			s: schema{
				ForceSendFields: []string{"Any"},
			},
			want: `{}`,
		},
		{
			s: schema{
				Any:             nilAny,
				ForceSendFields: []string{"Any"},
			},
			want: `{"any": null}`,
		},
		{
			s: schema{
				Any:             &anyType{},
				ForceSendFields: []string{"Any"},
			},
			want: `{"any":"anyType value"}`,
		},
		{
			s: schema{
				Any:             anyType{},
				ForceSendFields: []string{"Any"},
			},
			want: `{"any":"anyType value"}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
// TestSubschema checks nested-struct fields. Subschemas are always
// stored as pointers, so ForceSendFields has no effect on them.
func TestSubschema(t *testing.T) {
	// Subschemas are always stored as pointers, so ForceSendFields has no effect on them.
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// Forcing a nil child pointer changes nothing.
			s: schema{
				ForceSendFields: []string{"Child"},
			},
			want: `{}`,
		},
		{
			s:    schema{Child: &child{}},
			want: `{"child":{}}`,
		},
		{
			s: schema{
				Child:           &child{},
				ForceSendFields: []string{"Child"},
			},
			want: `{"child":{}}`,
		},
		{
			s:    schema{Child: &child{B: true}},
			want: `{"child":{"childbool":true}}`,
		},
		{
			s: schema{
				Child:           &child{B: true},
				ForceSendFields: []string{"Child"},
			},
			want: `{"child":{"childbool":true}}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
// checkMarshalJSON verifies that marshaling tc.s yields a result which
// is structurally equivalent to tc.want.
func checkMarshalJSON(t *testing.T, tc testCase) {
	doCheckMarshalJSON(t, tc.s, tc.s.ForceSendFields, tc.want)
	if len(tc.s.ForceSendFields) > 0 {
		return
	}
	// Also drive the slower code path taken when ForceSendFields is
	// non-empty; a name matching no field must produce identical output
	// to the fast path used when it is empty.
	doCheckMarshalJSON(t, tc.s, []string{"dummy"}, tc.want)
}
// doCheckMarshalJSON encodes s via MarshalJSON with the supplied
// forceSendFields and asserts the output is JSON-equivalent to
// wantJSON. Field order may legitimately differ, so both sides are
// unmarshaled into generic values before comparison.
func doCheckMarshalJSON(t *testing.T, s schema, forceSendFields []string, wantJSON string) {
	encoded, err := MarshalJSON(s, forceSendFields)
	if err != nil {
		t.Fatalf("encoding json:\n got err: %v", err)
	}
	var gotVal, wantVal interface{}
	if err := json.Unmarshal(encoded, &gotVal); err != nil {
		t.Fatalf("decoding json:\n got err: %v", err)
	}
	if err := json.Unmarshal([]byte(wantJSON), &wantVal); err != nil {
		t.Fatalf("decoding json:\n got err: %v", err)
	}
	if !reflect.DeepEqual(gotVal, wantVal) {
		t.Errorf("schemaToMap:\ngot :%s\nwant:%s", gotVal, wantVal)
	}
}
// TestParseJSONTag checks that well-formed struct tags are parsed into
// the expected jsonTag values: "-" marks the field ignored, and a
// trailing ",string" sets stringFormat.
func TestParseJSONTag(t *testing.T) {
	for _, tc := range []struct {
		tag  string
		want jsonTag
	}{
		{
			tag:  "-",
			want: jsonTag{ignore: true},
		}, {
			tag:  "name,omitempty",
			want: jsonTag{apiName: "name"},
		}, {
			tag:  "name,omitempty,string",
			want: jsonTag{apiName: "name", stringFormat: true},
		},
	} {
		got, err := parseJSONTag(tc.tag)
		if err != nil {
			t.Fatalf("parsing json:\n got err: %v\ntag: %q", err, tc.tag)
		}
		if !reflect.DeepEqual(got, tc.want) {
			// Fixed typo in the failure message: was "parseJSONTage".
			t.Errorf("parseJSONTag:\ngot :%s\nwant:%s", got, tc.want)
		}
	}
}
// TestParseMalformedJSONTag checks that parseJSONTag rejects every tag
// that does not follow the expected `name,omitempty[,string]` shape.
func TestParseMalformedJSONTag(t *testing.T) {
	malformed := []string{
		"",
		"name",
		"name,",
		"name,blah",
		"name,blah,string",
		",omitempty",
		",omitempty,string",
		"name,omitempty,string,blah",
	}
	for _, tag := range malformed {
		if _, err := parseJSONTag(tag); err == nil {
			t.Fatalf("parsing json: expected err, got nil for tag: %v", tag)
		}
	}
}

View file

@ -1,113 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"bytes"
"errors"
"io"
"io/ioutil"
"reflect"
"testing"
)
// errReader reads out of a buffer until it is empty, then returns the specified error.
type errReader struct {
buf []byte
err error
}
var errBang error = errors.New("bang")
func (er *errReader) Read(p []byte) (int, error) {
if len(er.buf) == 0 {
if er.err == nil {
return 0, io.EOF
}
return 0, er.err
}
n := copy(p, er.buf)
er.buf = er.buf[n:]
return n, nil
}
// TestAll drives the content sniffer with readers that may fail after
// delivering their data, and checks both that the full data is still
// readable through the sniffer and that ContentType reports the
// expected type/success pair.
func TestAll(t *testing.T) {
	type testCase struct {
		data                  []byte // the data to read from the Reader
		finalErr              error  // error to return after data has been read
		wantContentType       string
		wantContentTypeResult bool
	}
	for _, tc := range []testCase{
		{
			data:                  []byte{0, 0, 0, 0},
			finalErr:              nil,
			wantContentType:       "application/octet-stream",
			wantContentTypeResult: true,
		},
		{
			data:                  []byte(""),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			// An error before any data means sniffing cannot succeed.
			data:                  []byte(""),
			finalErr:              errBang,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: false,
		},
		{
			data:                  []byte("abc"),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			data:                  []byte("abc"),
			finalErr:              errBang,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: false,
		},
		// The following examples contain more bytes than are buffered for sniffing.
		{
			data:                  bytes.Repeat([]byte("a"), 513),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			data:                  bytes.Repeat([]byte("a"), 513),
			finalErr:              errBang,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true, // true because error is after first 512 bytes.
		},
	} {
		er := &errReader{buf: tc.data, err: tc.finalErr}
		sct := NewContentSniffer(er)
		// Even if was an error during the first 512 bytes, we should still be able to read those bytes.
		buf, err := ioutil.ReadAll(sct)
		if !reflect.DeepEqual(buf, tc.data) {
			t.Fatalf("Failed reading buffer: got: %q; want:%q", buf, tc.data)
		}
		if err != tc.finalErr {
			t.Fatalf("Reading buffer error: got: %v; want: %v", err, tc.finalErr)
		}
		ct, ok := sct.ContentType()
		if ok != tc.wantContentTypeResult {
			t.Fatalf("Content type result got: %v; want: %v", ok, tc.wantContentTypeResult)
		}
		if ok && ct != tc.wantContentType {
			t.Fatalf("Content type got: %q; want: %q", ct, tc.wantContentType)
		}
	}
}

View file

@ -1,599 +0,0 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package googleapi
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"time"
"golang.org/x/net/context"
)
// SetOpaqueTest pairs an input URL with the request URI a written HTTP
// request is expected to carry after SetOpaque processes the URL.
type SetOpaqueTest struct {
	in             *url.URL
	wantRequestURI string
}
// setOpaqueTests covers URLs with and without paths, hex escapes in
// the path, and escaped and unescaped query strings.
var setOpaqueTests = []SetOpaqueTest{
	// no path
	{
		&url.URL{
			Scheme: "http",
			Host:   "www.golang.org",
		},
		"http://www.golang.org",
	},
	// path
	{
		&url.URL{
			Scheme: "http",
			Host:   "www.golang.org",
			Path:   "/",
		},
		"http://www.golang.org/",
	},
	// file with hex escaping
	{
		&url.URL{
			Scheme: "https",
			Host:   "www.golang.org",
			Path:   "/file%20one&two",
		},
		"https://www.golang.org/file%20one&two",
	},
	// query
	{
		&url.URL{
			Scheme:   "http",
			Host:     "www.golang.org",
			Path:     "/",
			RawQuery: "q=go+language",
		},
		"http://www.golang.org/?q=go+language",
	},
	// file with hex escaping in path plus query
	{
		&url.URL{
			Scheme:   "https",
			Host:     "www.golang.org",
			Path:     "/file%20one&two",
			RawQuery: "q=go+language",
		},
		"https://www.golang.org/file%20one&two?q=go+language",
	},
	// query with hex escaping
	{
		&url.URL{
			Scheme:   "http",
			Host:     "www.golang.org",
			Path:     "/",
			RawQuery: "q=go%20language",
		},
		"http://www.golang.org/?q=go%20language",
	},
}
// prefixTmpl is a template for the expected prefix of the output of writing
// an HTTP request. Its two verbs are the request URI and the Host header.
const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n"
// TestSetOpaque runs SetOpaque over each fixture URL, serializes an
// HTTP request using it, and checks the wire prefix (request line and
// Host header) matches the expected request URI.
func TestSetOpaque(t *testing.T) {
	for _, test := range setOpaqueTests {
		u := *test.in
		SetOpaque(&u)

		var out bytes.Buffer
		req := &http.Request{URL: &u}
		if err := req.Write(&out); err != nil {
			t.Errorf("write request: %v", err)
			continue
		}

		prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host)
		if got := out.String(); !strings.HasPrefix(got, prefix) {
			t.Errorf("got %q expected prefix %q", got, prefix)
		}
	}
}
// ExpandTest pairs a URI template and a set of expansion values with
// the path expected after Expand substitutes them.
type ExpandTest struct {
	in         string
	expansions map[string]string
	want       string
}
// expandTests covers template expansion: escaping, missing variables,
// multiple variables, UTF-8, punctuation, mismatched braces, and the
// RFC 6570 "+" (reserved, unescaped) operator.
var expandTests = []ExpandTest{
	// no expansions
	{
		"http://www.golang.org/",
		map[string]string{},
		"http://www.golang.org/",
	},
	// one expansion, no escaping
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red",
		},
		"http://www.golang.org/red/delete",
	},
	// one expansion, with hex escapes
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red/blue",
		},
		"http://www.golang.org/red%2Fblue/delete",
	},
	// one expansion, with space
	{
		"http://www.golang.org/{bucket}/delete",
		map[string]string{
			"bucket": "red or blue",
		},
		"http://www.golang.org/red%20or%20blue/delete",
	},
	// expansion not found
	{
		"http://www.golang.org/{object}/delete",
		map[string]string{
			"bucket": "red or blue",
		},
		"http://www.golang.org//delete",
	},
	// multiple expansions
	{
		"http://www.golang.org/{one}/{two}/{three}/get",
		map[string]string{
			"one":   "ONE",
			"two":   "TWO",
			"three": "THREE",
		},
		"http://www.golang.org/ONE/TWO/THREE/get",
	},
	// utf-8 characters
	{
		"http://www.golang.org/{bucket}/get",
		map[string]string{
			"bucket": "£100",
		},
		"http://www.golang.org/%C2%A3100/get",
	},
	// punctuations
	{
		"http://www.golang.org/{bucket}/get",
		map[string]string{
			"bucket": `/\@:,.`,
		},
		"http://www.golang.org/%2F%5C%40%3A%2C./get",
	},
	// mis-matched brackets
	{
		"http://www.golang.org/{bucket/get",
		map[string]string{
			"bucket": "red",
		},
		"http://www.golang.org/{bucket/get",
	},
	// "+" prefix for suppressing escape
	// See also: http://tools.ietf.org/html/rfc6570#section-3.2.3
	{
		"http://www.golang.org/{+topic}",
		map[string]string{
			"topic": "/topics/myproject/mytopic",
		},
		// The double slashes here look weird, but it's intentional
		"http://www.golang.org//topics/myproject/mytopic",
	},
}
// TestExpand applies Expand to each fixture template and compares the
// resulting URL path to the expected expansion.
func TestExpand(t *testing.T) {
	for i, test := range expandTests {
		u := url.URL{Path: test.in}
		Expand(&u, test.expansions)
		if got := u.Path; got != test.want {
			t.Errorf("got %q expected %q in test %d", got, test.want, i+1)
		}
	}
}
// CheckResponseTest describes one CheckResponse case: the HTTP
// response and its body text going in, and the expected error value
// and error message coming out.
type CheckResponseTest struct {
	in       *http.Response
	bodyText string
	want     error
	errText  string
}
// checkResponseTests covers a 200 (no error), error bodies with and
// without messages, a non-object "error" value, and a structured
// error list with reason/message items.
var checkResponseTests = []CheckResponseTest{
	{
		&http.Response{
			StatusCode: http.StatusOK,
		},
		"",
		nil,
		"",
	},
	{
		&http.Response{
			StatusCode: http.StatusInternalServerError,
		},
		`{"error":{}}`,
		&Error{
			Code: http.StatusInternalServerError,
			Body: `{"error":{}}`,
		},
		`googleapi: got HTTP response code 500 with body: {"error":{}}`,
	},
	{
		&http.Response{
			StatusCode: http.StatusNotFound,
		},
		`{"error":{"message":"Error message for StatusNotFound."}}`,
		&Error{
			Code:    http.StatusNotFound,
			Message: "Error message for StatusNotFound.",
			Body:    `{"error":{"message":"Error message for StatusNotFound."}}`,
		},
		"googleapi: Error 404: Error message for StatusNotFound.",
	},
	{
		// "error" is a string, not an object, so no message is parsed.
		&http.Response{
			StatusCode: http.StatusBadRequest,
		},
		`{"error":"invalid_token","error_description":"Invalid Value"}`,
		&Error{
			Code: http.StatusBadRequest,
			Body: `{"error":"invalid_token","error_description":"Invalid Value"}`,
		},
		`googleapi: got HTTP response code 400 with body: {"error":"invalid_token","error_description":"Invalid Value"}`,
	},
	{
		&http.Response{
			StatusCode: http.StatusBadRequest,
		},
		`{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`,
		&Error{
			Code: http.StatusBadRequest,
			Errors: []ErrorItem{
				{
					Reason:  "keyInvalid",
					Message: "Bad Request",
				},
			},
			Body:    `{"error":{"errors":[{"domain":"usageLimits","reason":"keyInvalid","message":"Bad Request"}],"code":400,"message":"Bad Request"}}`,
			Message: "Bad Request",
		},
		"googleapi: Error 400: Bad Request, keyInvalid",
	},
}
// TestCheckResponse feeds each fixture through CheckResponse and
// compares the returned error value and its message to expectations.
// Local variables gotJson/wantJson were renamed to gotJSON/wantJSON to
// follow Go initialism conventions.
func TestCheckResponse(t *testing.T) {
	for _, test := range checkResponseTests {
		res := test.in
		if test.bodyText != "" {
			res.Body = ioutil.NopCloser(strings.NewReader(test.bodyText))
		}
		g := CheckResponse(res)
		if !reflect.DeepEqual(g, test.want) {
			t.Errorf("CheckResponse: got %v, want %v", g, test.want)
			// Marshal both errors so structural differences are visible
			// in the failure output.
			gotJSON, err := json.Marshal(g)
			if err != nil {
				t.Error(err)
			}
			wantJSON, err := json.Marshal(test.want)
			if err != nil {
				t.Error(err)
			}
			t.Errorf("json(got): %q\njson(want): %q", string(gotJSON), string(wantJSON))
		}
		if g != nil && g.Error() != test.errText {
			t.Errorf("CheckResponse: unexpected error message.\nGot: %q\nwant: %q", g, test.errText)
		}
	}
}
// VariantPoint is the concrete target type for the variant-conversion
// tests below.
type VariantPoint struct {
	Type        string
	Coordinates []float64
}
// VariantTest pairs a generic map input with the expected conversion
// success flag and decoded VariantPoint.
type VariantTest struct {
	in     map[string]interface{}
	result bool
	want   VariantPoint
}
// coords is a generic coordinate list shared by the fixtures below.
var coords = []interface{}{1.0, 2.0}

// variantTests covers a map whose keys all match VariantPoint fields,
// and one with an unknown key ("bogus") that is expected to be ignored.
var variantTests = []VariantTest{
	{
		in: map[string]interface{}{
			"type":        "Point",
			"coordinates": coords,
		},
		result: true,
		want: VariantPoint{
			Type:        "Point",
			Coordinates: []float64{1.0, 2.0},
		},
	},
	{
		in: map[string]interface{}{
			"type":  "Point",
			"bogus": coords,
		},
		result: true,
		want: VariantPoint{
			Type: "Point",
		},
	},
}
// TestVariantType verifies that VariantType reports the expected type
// string for each fixture map.
func TestVariantType(t *testing.T) {
	for _, test := range variantTests {
		got := VariantType(test.in)
		if got != test.want.Type {
			t.Errorf("VariantType(%v): got %v, want %v", test.in, got, test.want.Type)
		}
	}
}
// TestConvertVariant verifies both the success flag and the decoded
// struct produced by ConvertVariant for each fixture.
func TestConvertVariant(t *testing.T) {
	for _, test := range variantTests {
		var got VariantPoint
		ok := ConvertVariant(test.in, &got)
		if ok != test.result {
			t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, ok, test.result)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("ConvertVariant(%v): got %v, want %v", test.in, got, test.want)
		}
	}
}
// unexpectedReader fails every Read; it backs response bodies that the
// code under test is never supposed to actually read.
type unexpectedReader struct{}

// Read always returns an error. The trailing period was removed from
// the error string per Go error-string convention.
func (unexpectedReader) Read([]byte) (int, error) {
	return 0, fmt.Errorf("unexpected read in test")
}
// contentRangeRE matches a Content-Range header of the form
// "bytes <start>-<end>/<total>", capturing the three numbers.
var contentRangeRE = regexp.MustCompile(`^bytes (\d+)\-(\d+)/(\d+)$`)
// RoundTrip records the request and, when a non-probe Content-Range
// header is present, reads the uploaded chunk into t.buf. It responds
// with t.statusCode (switched to 200 once the final byte of the total
// size has been received) and, when t.rangeVal is set, a Range header.
// The response body must never be read (unexpectedReader).
func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	t.req = req
	if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") { // Read the data
		m := contentRangeRE.FindStringSubmatch(rng)
		if len(m) != 4 {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		start, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		end, err := strconv.ParseInt(m[2], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		totalSize, err := strconv.ParseInt(m[3], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		// The body must contain exactly the advertised byte range.
		partialSize := end - start + 1
		t.buf, err = ioutil.ReadAll(req.Body)
		if err != nil || int64(len(t.buf)) != partialSize {
			return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(t.buf), err)
		}
		if totalSize == end+1 {
			t.statusCode = 200 // signify completion of transfer
		}
	}
	f := ioutil.NopCloser(unexpectedReader{})
	res := &http.Response{
		Body:       f,
		StatusCode: t.statusCode,
		Header:     http.Header{},
	}
	if t.rangeVal != "" {
		res.Header.Set("Range", t.rangeVal)
	}
	return res, nil
}
// testTransport is a fake http.RoundTripper used to script resumable-
// upload status responses and capture what the client sends.
type testTransport struct {
	req        *http.Request // last request seen by RoundTrip
	statusCode int           // status code to answer with
	rangeVal   string        // Range header to include in responses, if non-empty
	want       int64         // expected transferStatus result for this fixture
	buf        []byte        // chunk data read from the last upload request
}
// statusTests covers transferStatus responses: no Range header means
// nothing stored yet; "bytes=0-N" means N+1 bytes are already stored.
// The redundant &testTransport{...} element syntax was simplified to
// {...}, as gofmt -s does for typed composite literals.
var statusTests = []*testTransport{
	{statusCode: 308, want: 0},
	{statusCode: 308, rangeVal: "bytes=0-0", want: 1},
	{statusCode: 308, rangeVal: "bytes=0-42", want: 43},
}
// TestTransferStatus checks that transferStatus derives the number of
// already-uploaded bytes from the fake transport's Range header.
func TestTransferStatus(t *testing.T) {
	ctx := context.Background()
	for _, tr := range statusTests {
		rx := &ResumableUpload{
			Client: &http.Client{Transport: tr},
		}
		g, _, err := rx.transferStatus(ctx)
		if err != nil {
			t.Error(err)
		}
		if g != tr.want {
			t.Errorf("transferStatus got %v, want %v", g, tr.want)
		}
	}
}
// RoundTrip behaves like testTransport.RoundTrip but injects a 503
// "service unavailable" response on every 7th chunk upload, setting
// t.rangeVal so the following status probe reports the bytes stored so
// far. Successfully delivered chunks are accumulated in t.buf. The
// only code change from the original is `+= 1` → `++` (Go idiom).
func (t *interruptedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	t.req = req
	if rng := req.Header.Get("Content-Range"); rng != "" && !strings.HasPrefix(rng, "bytes */") {
		t.interruptCount++
		if t.interruptCount%7 == 0 { // Respond with a "service unavailable" error
			res := &http.Response{
				StatusCode: http.StatusServiceUnavailable,
				Header:     http.Header{},
			}
			t.rangeVal = fmt.Sprintf("bytes=0-%v", len(t.buf)-1) // Set the response for next time
			return res, nil
		}
		m := contentRangeRE.FindStringSubmatch(rng)
		if len(m) != 4 {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		start, err := strconv.ParseInt(m[1], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		end, err := strconv.ParseInt(m[2], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		totalSize, err := strconv.ParseInt(m[3], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("unable to parse content range: %v", rng)
		}
		// The body must contain exactly the advertised byte range.
		partialSize := end - start + 1
		buf, err := ioutil.ReadAll(req.Body)
		if err != nil || int64(len(buf)) != partialSize {
			return nil, fmt.Errorf("unable to read %v bytes from request data, n=%v: %v", partialSize, len(buf), err)
		}
		t.buf = append(t.buf, buf...)
		if totalSize == end+1 {
			t.statusCode = 200 // signify completion of transfer
		}
	}
	f := ioutil.NopCloser(unexpectedReader{})
	res := &http.Response{
		Body:       f,
		StatusCode: t.statusCode,
		Header:     http.Header{},
	}
	if t.rangeVal != "" {
		res.Header.Set("Range", t.rangeVal)
	}
	return res, nil
}
// interruptedTransport is a fake http.RoundTripper that periodically
// fails chunk uploads (see RoundTrip) while accumulating successfully
// delivered data and recording progress callbacks.
type interruptedTransport struct {
	req             *http.Request // last request seen by RoundTrip
	statusCode      int           // status code to answer with
	rangeVal        string        // Range header for the next status probe
	interruptCount  int           // number of chunk uploads seen so far
	buf             []byte        // data accumulated from successful chunks
	progressUpdates []int64       // values received via ProgressUpdate
}
// ProgressUpdate records each progress callback value so tests can
// assert on the full sequence of updates. The receiver was renamed
// from tr to t for consistency with this type's RoundTrip method.
func (t *interruptedTransport) ProgressUpdate(current int64) {
	t.progressUpdates = append(t.progressUpdates, current)
}
// TestInterruptedTransferChunks uploads googleapi.go through the
// interrupting fake transport in small chunks and verifies the data
// arrives intact despite the injected 503s, and that progress
// callbacks report each chunk boundary plus the final size.
func TestInterruptedTransferChunks(t *testing.T) {
	f, err := os.Open("googleapi.go")
	if err != nil {
		t.Fatalf("unable to open googleapi.go: %v", err)
	}
	defer f.Close()
	slurp, err := ioutil.ReadAll(f)
	if err != nil {
		t.Fatalf("unable to slurp file: %v", err)
	}
	st, err := f.Stat()
	if err != nil {
		t.Fatalf("unable to stat googleapi.go: %v", err)
	}
	tr := &interruptedTransport{
		statusCode: 308,
		buf:        make([]byte, 0, st.Size()),
	}
	// Shrink the chunk size (restored on exit) and neutralize the
	// retry sleep so the test stays fast.
	oldChunkSize := chunkSize
	defer func() { chunkSize = oldChunkSize }()
	chunkSize = 100 // override to process small chunks for test.

	sleep = func(time.Duration) {} // override time.Sleep
	rx := &ResumableUpload{
		Client:        &http.Client{Transport: tr},
		Media:         f,
		MediaType:     "text/plain",
		ContentLength: st.Size(),
		Callback:      tr.ProgressUpdate,
	}
	res, err := rx.Upload(context.Background())
	if err != nil || res == nil || res.StatusCode != http.StatusOK {
		if res == nil {
			t.Errorf("transferChunks not successful, res=nil: %v", err)
		} else {
			t.Errorf("transferChunks not successful, statusCode=%v: %v", res.StatusCode, err)
		}
	}
	if len(tr.buf) != len(slurp) || bytes.Compare(tr.buf, slurp) != 0 {
		t.Errorf("transferred file corrupted:\ngot %s\nwant %s", tr.buf, slurp)
	}
	// One progress update per full chunk, plus a final partial chunk.
	want := []int64{}
	for i := chunkSize; i <= st.Size(); i += chunkSize {
		want = append(want, i)
	}
	if st.Size()%chunkSize != 0 {
		want = append(want, st.Size())
	}
	if !reflect.DeepEqual(tr.progressUpdates, want) {
		t.Errorf("progress update error, got %v, want %v", tr.progressUpdates, want)
	}
}
// TestCancelUpload cancels the context before Upload starts and
// verifies the upload aborts with a StatusRequestTimeout response.
func TestCancelUpload(t *testing.T) {
	f, err := os.Open("googleapi.go")
	if err != nil {
		t.Fatalf("unable to open googleapi.go: %v", err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		t.Fatalf("unable to stat googleapi.go: %v", err)
	}
	tr := &interruptedTransport{
		statusCode: 308,
		buf:        make([]byte, 0, st.Size()),
	}
	// Shrink the chunk size (restored on exit) and neutralize the
	// retry sleep so the test stays fast.
	oldChunkSize := chunkSize
	defer func() { chunkSize = oldChunkSize }()
	chunkSize = 100 // override to process small chunks for test.

	sleep = func(time.Duration) {} // override time.Sleep
	rx := &ResumableUpload{
		Client:        &http.Client{Transport: tr},
		Media:         f,
		MediaType:     "text/plain",
		ContentLength: st.Size(),
		Callback:      tr.ProgressUpdate,
	}
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc() // stop the upload that hasn't started yet
	res, err := rx.Upload(ctx)
	if err == nil || res == nil || res.StatusCode != http.StatusRequestTimeout {
		if res == nil {
			t.Errorf("transferChunks not successful, got res=nil, err=%v, want StatusRequestTimeout", err)
		} else {
			t.Errorf("transferChunks not successful, got statusCode=%v, err=%v, want StatusRequestTimeout", res.StatusCode, err)
		}
	}
}

View file

@ -1,38 +0,0 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transport contains HTTP transports used to make
// authenticated API requests.
package transport
import (
"errors"
"net/http"
)
// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
type APIKey struct {
// Key is the API Key to set on requests.
Key string
// Transport is the underlying HTTP transport.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
}
// RoundTrip implements http.RoundTripper: it forwards req to the
// underlying transport with a "key" query parameter added.
//
// Bug fix: the original shallow-copied the request but kept the same
// *url.URL, so assigning RawQuery mutated the caller's request — a
// violation of the RoundTripper contract ("RoundTrip should not modify
// the request"). The URL is now copied as well before being changed.
func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
	rt := t.Transport
	if rt == nil {
		rt = http.DefaultTransport
		if rt == nil {
			return nil, errors.New("googleapi/transport: no Transport specified or available")
		}
	}
	newReq := *req
	newURL := *req.URL
	newReq.URL = &newURL
	args := newReq.URL.Query()
	args.Set("key", t.Key)
	newReq.URL.RawQuery = args.Encode()
	return rt.RoundTrip(&newReq)
}

View file

@ -1,44 +0,0 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package googleapi
import (
"encoding/json"
"reflect"
"testing"
)
// TestTypes round-trips the numeric slice wrapper types through JSON
// and checks that every element is encoded as a quoted string, and
// that decoding restores the original values exactly.
func TestTypes(t *testing.T) {
	type T struct {
		I32 Int32s
		I64 Int64s
		U32 Uint32s
		U64 Uint64s
		F64 Float64s
	}
	v := &T{
		I32: Int32s{-1, 2, 3},
		I64: Int64s{-1, 2, 1 << 33}, // exceeds 32 bits
		U32: Uint32s{1, 2},
		U64: Uint64s{1, 2, 1 << 33},
		F64: Float64s{1.5, 3.33},
	}
	got, err := json.Marshal(v)
	if err != nil {
		t.Fatal(err)
	}
	want := `{"I32":["-1","2","3"],"I64":["-1","2","8589934592"],"U32":["1","2"],"U64":["1","2","8589934592"],"F64":["1.5","3.33"]}`
	if string(got) != want {
		t.Fatalf("Marshal mismatch.\n got: %s\nwant: %s\n", got, want)
	}
	v2 := new(T)
	if err := json.Unmarshal(got, v2); err != nil {
		t.Fatalf("Unmarshal: %v", err)
	}
	if !reflect.DeepEqual(v, v2) {
		t.Fatalf("Unmarshal didn't produce same results.\n got: %#v\nwant: %#v\n", v, v2)
	}
}

202
Godeps/_workspace/src/google.golang.org/cloud/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load diff

View file

@ -1,594 +0,0 @@
// Copyright 2013 Google Inc. All Rights Reserved.
//
// The datastore v1 service proto definitions
syntax = "proto2";
package pb;
option java_package = "com.google.api.services.datastore";
// An identifier for a particular subset of entities.
//
// Entities are partitioned into various subsets, each used by different
// datasets and different namespaces within a dataset and so forth.
//
// All input partition IDs are normalized before use.
// A partition ID is normalized as follows:
// If the partition ID is unset or is set to an empty partition ID, replace it
// with the context partition ID.
// Otherwise, if the partition ID has no dataset ID, assign it the context
// partition ID's dataset ID.
// Unless otherwise documented, the context partition ID has the dataset ID set
// to the context dataset ID and no other partition dimension set.
//
// A partition ID is empty if all of its fields are unset.
//
// Partition dimension:
// A dimension may be unset.
// A dimension's value must never be "".
// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
// If the value of any dimension matches regex "__.*__",
// the partition is reserved/read-only.
// A reserved/read-only partition ID is forbidden in certain documented contexts.
//
// Dataset ID:
// A dataset id's value must never be "".
// A dataset id's value must match
// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99}
message PartitionId {
// The dataset ID.
optional string dataset_id = 3;
// The namespace.
optional string namespace = 4;
}
// A unique identifier for an entity.
// If a key's partition id or any of its path kinds or names are
// reserved/read-only, the key is reserved/read-only.
// A reserved/read-only key is forbidden in certain documented contexts.
message Key {
// Entities are partitioned into subsets, currently identified by a dataset
// (usually implicitly specified by the project) and namespace ID.
// Queries are scoped to a single partition.
optional PartitionId partition_id = 1;
// A (kind, ID/name) pair used to construct a key path.
//
// At most one of name or ID may be set.
// If either is set, the element is complete.
// If neither is set, the element is incomplete.
message PathElement {
// The kind of the entity.
// A kind matching regex "__.*__" is reserved/read-only.
// A kind must not contain more than 500 characters.
// Cannot be "".
required string kind = 1;
// The ID of the entity.
// Never equal to zero. Values less than zero are discouraged and will not
// be supported in the future.
optional int64 id = 2;
// The name of the entity.
// A name matching regex "__.*__" is reserved/read-only.
// A name must not be more than 500 characters.
// Cannot be "".
optional string name = 3;
}
// The entity path.
// An entity path consists of one or more elements composed of a kind and a
// string or numerical identifier, which identify entities. The first
// element identifies a <em>root entity</em>, the second element identifies
// a <em>child</em> of the root entity, the third element a child of the
// second entity, and so forth. The entities identified by all prefixes of
// the path are called the element's <em>ancestors</em>.
// An entity path is always fully complete: ALL of the entity's ancestors
// are required to be in the path along with the entity identifier itself.
// The only exception is that in some documented cases, the identifier in the
// last path element (for the entity) itself may be omitted. A path can never
// be empty.
repeated PathElement path_element = 2;
}
// A message that can hold any of the supported value types and associated
// metadata.
//
// At most one of the <type>Value fields may be set.
// If none are set the value is "null".
//
message Value {
// A boolean value.
optional bool boolean_value = 1;
// An integer value.
optional int64 integer_value = 2;
// A double value.
optional double double_value = 3;
// A timestamp value.
optional int64 timestamp_microseconds_value = 4;
// A key value.
optional Key key_value = 5;
// A blob key value.
optional string blob_key_value = 16;
// A UTF-8 encoded string value.
optional string string_value = 17;
// A blob value.
optional bytes blob_value = 18;
// An entity value.
// May have no key.
// May have a key with an incomplete key path.
// May have a reserved/read-only key.
optional Entity entity_value = 6;
// A list value.
// Cannot contain another list value.
// Cannot also have a meaning and indexing set.
repeated Value list_value = 7;
// The <code>meaning</code> field is reserved and should not be used.
optional int32 meaning = 14;
// If the value should be indexed.
//
// The <code>indexed</code> property may be set for a
// <code>null</code> value.
// When <code>indexed</code> is <code>true</code>, <code>stringValue</code>
// is limited to 500 characters and the blob value is limited to 500 bytes.
// Exception: If meaning is set to 2, string_value is limited to 2038
// characters regardless of indexed.
// When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
// will be ignored on input (and will never be set on output).
// Input values by default have <code>indexed</code> set to
// <code>true</code>; however, you can explicitly set <code>indexed</code> to
// <code>true</code> if you want. (An output value never has
// <code>indexed</code> explicitly set to <code>true</code>.) If a value is
// itself an entity, it cannot have <code>indexed</code> set to
// <code>true</code>.
// Exception: An entity value with meaning 9, 20 or 21 may be indexed.
optional bool indexed = 15 [default = true];
}
// An entity property.
message Property {
// The name of the property.
// A property name matching regex "__.*__" is reserved.
// A reserved property name is forbidden in certain documented contexts.
// The name must not contain more than 500 characters.
// Cannot be "".
required string name = 1;
// The value(s) of the property.
// Each value can have only one value property populated. For example,
// you cannot have a values list of <code>{ value: { integerValue: 22,
// stringValue: "a" } }</code>, but you can have <code>{ value: { listValue:
// [ { integerValue: 22 }, { stringValue: "a" } ] }</code>.
required Value value = 4;
}
// An entity.
//
// An entity is limited to 1 megabyte when stored. That <em>roughly</em>
// corresponds to a limit of 1 megabyte for the serialized form of this
// message.
message Entity {
// The entity's key.
//
// An entity must have a key, unless otherwise documented (for example,
// an entity in <code>Value.entityValue</code> may have no key).
// An entity's kind is its key's path's last element's kind,
// or null if it has no key.
optional Key key = 1;
// The entity's properties.
// Each property's name must be unique for its entity.
repeated Property property = 2;
}
// The result of fetching an entity from the datastore.
message EntityResult {
// Specifies what data the 'entity' field contains.
// A ResultType is either implied (for example, in LookupResponse.found it
// is always FULL) or specified by context (for example, in message
// QueryResultBatch, field 'entity_result_type' specifies a ResultType
// for all the values in field 'entity_result').
enum ResultType {
FULL = 1; // The entire entity.
PROJECTION = 2; // A projected subset of properties.
// The entity may have no key.
// A property value may have meaning 18.
KEY_ONLY = 3; // Only the key.
}
// The resulting entity.
required Entity entity = 1;
}
// A query.
message Query {
// The projection to return. If not set the entire entity is returned.
repeated PropertyExpression projection = 2;
// The kinds to query (if empty, returns entities from all kinds).
repeated KindExpression kind = 3;
// The filter to apply (optional).
optional Filter filter = 4;
// The order to apply to the query results (if empty, order is unspecified).
repeated PropertyOrder order = 5;
// The properties to group by (if empty, no grouping is applied to the
// result set).
repeated PropertyReference group_by = 6;
// A starting point for the query results. Optional. Query cursors are
// returned in query result batches.
optional bytes /* serialized QueryCursor */ start_cursor = 7;
// An ending point for the query results. Optional. Query cursors are
// returned in query result batches.
optional bytes /* serialized QueryCursor */ end_cursor = 8;
// The number of results to skip. Applies before limit, but after all other
// constraints (optional, defaults to 0).
optional int32 offset = 10 [default=0];
// The maximum number of results to return. Applies after all other
// constraints. Optional.
optional int32 limit = 11;
}
// A representation of a kind.
message KindExpression {
// The name of the kind.
required string name = 1;
}
// A reference to a property relative to the kind expressions.
// exactly.
message PropertyReference {
// The name of the property.
required string name = 2;
}
// A representation of a property in a projection.
message PropertyExpression {
enum AggregationFunction {
FIRST = 1;
}
// The property to project.
required PropertyReference property = 1;
// The aggregation function to apply to the property. Optional.
// Can only be used when grouping by at least one property. Must
// then be set on all properties in the projection that are not
// being grouped by.
optional AggregationFunction aggregation_function = 2;
}
// The desired order for a specific property.
message PropertyOrder {
enum Direction {
ASCENDING = 1;
DESCENDING = 2;
}
// The property to order by.
required PropertyReference property = 1;
// The direction to order by.
optional Direction direction = 2 [default=ASCENDING];
}
// A holder for any type of filter. Exactly one field should be specified.
message Filter {
// A composite filter.
optional CompositeFilter composite_filter = 1;
// A filter on a property.
optional PropertyFilter property_filter = 2;
}
// A filter that merges the multiple other filters using the given operation.
message CompositeFilter {
enum Operator {
AND = 1;
}
// The operator for combining multiple filters.
required Operator operator = 1;
// The list of filters to combine.
// Must contain at least one filter.
repeated Filter filter = 2;
}
// A filter on a specific property.
message PropertyFilter {
enum Operator {
LESS_THAN = 1;
LESS_THAN_OR_EQUAL = 2;
GREATER_THAN = 3;
GREATER_THAN_OR_EQUAL = 4;
EQUAL = 5;
HAS_ANCESTOR = 11;
}
// The property to filter by.
required PropertyReference property = 1;
// The operator to filter by.
required Operator operator = 2;
// The value to compare the property to.
required Value value = 3;
}
// A GQL query.
message GqlQuery {
required string query_string = 1;
// When false, the query string must not contain a literal.
optional bool allow_literal = 2 [default = false];
// A named argument must set field GqlQueryArg.name.
// No two named arguments may have the same name.
// For each non-reserved named binding site in the query string,
// there must be a named argument with that name,
// but not necessarily the inverse.
repeated GqlQueryArg name_arg = 3;
// Numbered binding site @1 references the first numbered argument,
// effectively using 1-based indexing, rather than the usual 0.
// A numbered argument must NOT set field GqlQueryArg.name.
// For each binding site numbered i in query_string,
// there must be an ith numbered argument.
// The inverse must also be true.
repeated GqlQueryArg number_arg = 4;
}
// A binding argument for a GQL query.
// Exactly one of fields value and cursor must be set.
message GqlQueryArg {
// Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
// Must not match regex "__.*__".
// Must not be "".
optional string name = 1;
optional Value value = 2;
optional bytes cursor = 3;
}
// A batch of results produced by a query.
message QueryResultBatch {
// The possible values for the 'more_results' field.
enum MoreResultsType {
NOT_FINISHED = 1; // There are additional batches to fetch from this query.
MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more
// results after the limit.
NO_MORE_RESULTS = 3; // The query has been exhausted.
}
// The result type for every entity in entityResults.
required EntityResult.ResultType entity_result_type = 1;
// The results for this batch.
repeated EntityResult entity_result = 2;
// A cursor that points to the position after the last result in the batch.
// May be absent.
optional bytes /* serialized QueryCursor */ end_cursor = 4;
// The state of the query after the current batch.
required MoreResultsType more_results = 5;
// The number of results skipped because of <code>Query.offset</code>.
optional int32 skipped_results = 6;
}
// A set of changes to apply.
//
// No entity in this message may have a reserved property name,
// not even a property in an entity in a value.
// No value in this message may have meaning 18,
// not even a value in an entity in another value.
//
// If entities with duplicate keys are present, an arbitrary choice will
// be made as to which is written.
message Mutation {
// Entities to upsert.
// Each upserted entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity upsert = 1;
// Entities to update.
// Each updated entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity update = 2;
// Entities to insert.
// Each inserted entity's key must have a complete path and
// must not be reserved/read-only.
repeated Entity insert = 3;
// Insert entities with a newly allocated ID.
// Each inserted entity's key must omit the final identifier in its path and
// must not be reserved/read-only.
repeated Entity insert_auto_id = 4;
// Keys of entities to delete.
// Each key must have a complete key path and must not be reserved/read-only.
repeated Key delete = 5;
// Ignore a user specified read-only period. Optional.
optional bool force = 6;
}
// The result of applying a mutation.
message MutationResult {
// Number of index writes.
required int32 index_updates = 1;
// Keys for <code>insertAutoId</code> entities. One per entity from the
// request, in the same order.
repeated Key insert_auto_id_key = 2;
}
// Options shared by read requests.
message ReadOptions {
enum ReadConsistency {
DEFAULT = 0;
STRONG = 1;
EVENTUAL = 2;
}
// The read consistency to use.
// Cannot be set when transaction is set.
// Lookup and ancestor queries default to STRONG, global queries default to
// EVENTUAL and cannot be set to STRONG.
optional ReadConsistency read_consistency = 1 [default=DEFAULT];
// The transaction to use. Optional.
optional bytes /* serialized Transaction */ transaction = 2;
}
// The request for Lookup.
message LookupRequest {
// Options for this lookup request. Optional.
optional ReadOptions read_options = 1;
// Keys of entities to look up from the datastore.
repeated Key key = 3;
}
// The response for Lookup.
message LookupResponse {
// The order of results in these fields is undefined and has no relation to
// the order of the keys in the input.
// Entities found as ResultType.FULL entities.
repeated EntityResult found = 1;
// Entities not found as ResultType.KEY_ONLY entities.
repeated EntityResult missing = 2;
// A list of keys that were not looked up due to resource constraints.
repeated Key deferred = 3;
}
// The request for RunQuery.
message RunQueryRequest {
// The options for this query.
optional ReadOptions read_options = 1;
// Entities are partitioned into subsets, identified by a dataset (usually
// implicitly specified by the project) and namespace ID. Queries are scoped
// to a single partition.
// This partition ID is normalized with the standard default context
// partition ID, but all other partition IDs in RunQueryRequest are
// normalized with this partition ID as the context partition ID.
optional PartitionId partition_id = 2;
// The query to run.
// Either this field or field gql_query must be set, but not both.
optional Query query = 3;
// The GQL query to run.
// Either this field or field query must be set, but not both.
optional GqlQuery gql_query = 7;
}
// The response for RunQuery.
message RunQueryResponse {
// A batch of query results (always present).
optional QueryResultBatch batch = 1;
}
// The request for BeginTransaction.
message BeginTransactionRequest {
enum IsolationLevel {
SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions
// conflict if their mutations conflict. For example:
// Read(A),Write(B) may not conflict with Read(B),Write(A),
// but Read(B),Write(B) does conflict with Read(B),Write(B).
SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent
// transactions conflict if they cannot be serialized.
// For example Read(A),Write(B) does conflict with
// Read(B),Write(A) but Read(A) may not conflict with
// Write(A).
}
// The transaction isolation level.
optional IsolationLevel isolation_level = 1 [default=SNAPSHOT];
}
// The response for BeginTransaction.
message BeginTransactionResponse {
// The transaction identifier (always present).
optional bytes /* serialized Transaction */ transaction = 1;
}
// The request for Rollback.
message RollbackRequest {
// The transaction identifier, returned by a call to
// <code>beginTransaction</code>.
required bytes /* serialized Transaction */ transaction = 1;
}
// The response for Rollback.
message RollbackResponse {
// Empty
}
// The request for Commit.
message CommitRequest {
enum Mode {
TRANSACTIONAL = 1;
NON_TRANSACTIONAL = 2;
}
// The transaction identifier, returned by a call to
// <code>beginTransaction</code>. Must be set when mode is TRANSACTIONAL.
optional bytes /* serialized Transaction */ transaction = 1;
// The mutation to perform. Optional.
optional Mutation mutation = 2;
// The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
optional Mode mode = 5 [default=TRANSACTIONAL];
}
// The response for Commit.
message CommitResponse {
// The result of performing the mutation (if any).
optional MutationResult mutation_result = 1;
}
// The request for AllocateIds.
message AllocateIdsRequest {
// A list of keys with incomplete key paths to allocate IDs for.
// No key may be reserved/read-only.
repeated Key key = 1;
}
// The response for AllocateIds.
message AllocateIdsResponse {
// The keys specified in the request (in the same order), each with
// its key path completed with a newly allocated ID.
repeated Key key = 1;
}
// Each rpc normalizes the partition IDs of the keys in its input entities,
// and always returns entities with keys with normalized partition IDs.
// (Note that applies to all entities, including entities in values.)
service DatastoreService {
// Look up some entities by key.
rpc Lookup(LookupRequest) returns (LookupResponse) {
};
// Query for entities.
rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
};
// Begin a new transaction.
rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
};
// Commit a transaction, optionally creating, deleting or modifying some
// entities.
rpc Commit(CommitRequest) returns (CommitResponse) {
};
// Roll back a transaction.
rpc Rollback(RollbackRequest) returns (RollbackResponse) {
};
// Allocate IDs for incomplete keys (useful for referencing an entity before
// it is inserted).
rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
};
}

View file

@ -1,57 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package testutil contains helper functions for writing tests.
package testutil
import (
"io/ioutil"
"log"
"net/http"
"os"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/cloud"
)
const (
envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
)
// Context returns a context authenticated with the service-account JSON
// key named by GCLOUD_TESTS_GOLANG_KEY, scoped to the given OAuth scopes
// and bound to the GCLOUD_TESTS_GOLANG_PROJECT_ID project. It calls
// log.Fatal if either environment variable is unset or the key is unusable.
func Context(scopes ...string) context.Context {
	projID := os.Getenv(envProjID)
	keyPath := os.Getenv(envPrivateKey)
	if keyPath == "" || projID == "" {
		log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
	}
	data, err := ioutil.ReadFile(keyPath)
	if err != nil {
		log.Fatalf("Cannot read the JSON key file, err: %v", err)
	}
	cfg, err := google.JWTConfigFromJSON(data, scopes...)
	if err != nil {
		log.Fatal(err)
	}
	return cloud.NewContext(projID, cfg.Client(oauth2.NoContext))
}
// NoAuthContext returns a context backed by an unauthenticated HTTP client
// for the GCLOUD_TESTS_GOLANG_PROJECT_ID project. It calls log.Fatal when
// the environment variable is unset.
func NoAuthContext() context.Context {
	projID := os.Getenv(envProjID)
	if projID == "" {
		log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
	}
	client := &http.Client{Transport: http.DefaultTransport}
	return cloud.NewContext(projID, client)
}