Add e2e tests

parent 99a355f25d
commit 601fb7dacf

1163 changed files with 289217 additions and 14195 deletions

1 vendor/google.golang.org/grpc/.travis.yml (generated, vendored)

@@ -1,7 +1,6 @@
 language: go
 
 go:
-- 1.6.x
 - 1.7.x
 - 1.8.x
 - 1.9.x

4 vendor/google.golang.org/grpc/Makefile (generated, vendored)

@@ -23,10 +23,10 @@ proto:
 	go generate google.golang.org/grpc/...
 
 test: testdeps
-	go test -v -cpu 1,4 google.golang.org/grpc/...
+	go test -cpu 1,4 google.golang.org/grpc/...
 
 testrace: testdeps
-	go test -v -race -cpu 1,4 google.golang.org/grpc/...
+	go test -race -cpu 1,4 google.golang.org/grpc/...
 
 clean:
 	go clean -i google.golang.org/grpc/...

2 vendor/google.golang.org/grpc/README.md (generated, vendored)

@@ -16,7 +16,7 @@ $ go get -u google.golang.org/grpc
 Prerequisites
 -------------
 
-This requires Go 1.6 or later.
+This requires Go 1.7 or later.
 
 Constraints
 -----------

2 vendor/google.golang.org/grpc/balancer.go (generated, vendored)

@@ -403,6 +403,6 @@ type pickFirst struct {
 	*roundRobin
 }
 
-func pickFirstBalancer(r naming.Resolver) Balancer {
+func pickFirstBalancerV1(r naming.Resolver) Balancer {
 	return &pickFirst{&roundRobin{r: r}}
 }

206 vendor/google.golang.org/grpc/balancer/balancer.go (generated, vendored, new file)

@@ -0,0 +1,206 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+	"errors"
+	"net"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/resolver"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+	// defaultBuilder is the default balancer to use.
+	defaultBuilder Builder // TODO(bar) install pickfirst as default.
+)
+
+// Register registers the balancer builder to the balancer map.
+// b.Name will be used as the name registered with this builder.
+func Register(b Builder) {
+	m[b.Name()] = b
+}
+
+// Get returns the resolver builder registered with the given name.
+// If no builder is registered with the name, the default pickfirst will
+// be used.
+func Get(name string) Builder {
+	if b, ok := m[name]; ok {
+		return b
+	}
+	return defaultBuilder
+}
+
+// SubConn represents a gRPC sub connection.
+// Each sub connection contains a list of addresses. gRPC will
+// try to connect to them (in sequence), and stop trying the
+// remainder once one connection is successful.
+//
+// The reconnect backoff will be applied on the list, not a single address.
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// the connecting, Balancers must call Connect.
+// When the connection encounters an error, it will reconnect immediately.
+// When the connection becomes IDLE, it will not reconnect unless Connect is
+// called.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+}
+
+// NewSubConnOptions contains options to create new SubConn.
+type NewSubConnOptions struct{}
+
+// ClientConn represents a gRPC ClientConn.
+type ClientConn interface {
+	// NewSubConn is called by balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+	// RemoveSubConn removes the SubConn from ClientConn.
+	// The SubConn will be shutdown.
+	RemoveSubConn(SubConn)
+
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
+	// state in balancer has changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call pick
+	// on the new picker to pick new SubConn.
+	UpdateBalancerState(s connectivity.State, p Picker)
+
+	// Target returns the dial target for this ClientConn.
+	Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+	// DialCreds is the transport credential the Balancer implementation can
+	// use to dial to a remote load balancer server. The Balancer implementations
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. The Balancer implementations
+	// can ignore this if it doesn't need to talk to remote balancer.
+	Dialer func(context.Context, string) (net.Conn, error)
+}
+
+// Builder creates a balancer.
+type Builder interface {
+	// Build creates a new balancer with the ClientConn.
+	Build(cc ClientConn, opts BuildOptions) Balancer
+	// Name returns the name of balancers built by this builder.
+	// It will be used to pick balancers (for example in service config).
+	Name() string
+}
+
+// PickOptions contains additional information for the Pick operation.
+type PickOptions struct{}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+	// Err is the rpc error the RPC finished with. It could be nil.
+	Err error
+}
+
+var (
+	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
+	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	ErrNoSubConnAvailable = errors.New("no SubConn is available")
+	// ErrTransientFailure indicates all SubConns are in TransientFailure.
+	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type Picker interface {
+	// Pick returns the SubConn to be used to send the RPC.
+	// The returned SubConn must be one returned by NewSubConn().
+	//
+	// This function is expected to return:
+	// - a SubConn that is known to be READY;
+	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
+	//   made (for example, some SubConn is in CONNECTING mode);
+	// - other errors if no active connecting is happening (for example, all SubConn
+	//   are in TRANSIENT_FAILURE mode).
+	//
+	// If a SubConn is returned:
+	// - If it is READY, gRPC will send the RPC on it;
+	// - If it is not ready, or becomes not ready after it's returned, gRPC will block
+	//   this call until a new picker is updated and will call pick on the new picker.
+	//
+	// If the returned error is not nil:
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
+	// - If the error is ErrTransientFailure:
+	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
+	//     is called to pick again;
+	//   - Otherwise, RPC will fail with unavailable error.
+	// - Else (error is other non-nil error):
+	//   - The RPC will fail with unavailable error.
+	//
+	// The returned done() function will be called once the rpc has finished, with the
+	// final status of that RPC.
+	// done may be nil if balancer doesn't care about the RPC status.
+	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+//
+// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
+//
+// HandleSubConnStateChange, HandleResolvedAddrs and Close are guaranteed
+// to be called synchronously from the same goroutine.
+// There's no guarantee on picker.Pick, it may be called anytime.
+type Balancer interface {
+	// HandleSubConnStateChange is called by gRPC when the connectivity state
+	// of sc has changed.
+	// Balancer is expected to aggregate all the state of SubConn and report
+	// that back to gRPC.
+	// Balancer should also generate and update Pickers when its internal state has
+	// been changed by the new state.
+	HandleSubConnStateChange(sc SubConn, state connectivity.State)
+	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
+	// balancers.
+	// Balancer can create new SubConn or remove SubConn with the addresses.
+	// An empty address slice and a non-nil error will be passed if the resolver returns
+	// non-nil error to gRPC.
+	HandleResolvedAddrs([]resolver.Address, error)
+	// Close closes the balancer. The balancer is not required to call
+	// ClientConn.RemoveSubConn for its existing SubConns.
+	Close()
+}

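The interfaces above are enough to write a complete, if naive, balancer. Below is a minimal sketch, not part of this commit: a hypothetical "pick_one" balancer that creates a single SubConn covering all resolved addresses and picks it once READY. Every name here is illustrative, and the locking a real Picker needs (Pick may run on any goroutine) is deliberately omitted; a production implementation would publish an immutable picker snapshot instead.

package pickone

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

func init() {
	// Make the balancer selectable by name, e.g. via service config.
	balancer.Register(pickOneBuilder{})
}

type pickOneBuilder struct{}

func (pickOneBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return &pickOneBalancer{cc: cc}
}

func (pickOneBuilder) Name() string { return "pick_one" }

type pickOneBalancer struct {
	cc balancer.ClientConn
	sc balancer.SubConn
	st connectivity.State
}

func (b *pickOneBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
	if err != nil || len(addrs) == 0 {
		return
	}
	if b.sc == nil {
		sc, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
		if err != nil {
			return
		}
		b.sc = sc
		b.sc.Connect() // SubConns start IDLE; Connect must be called explicitly.
		return
	}
	b.sc.UpdateAddresses(addrs)
}

func (b *pickOneBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	b.st = s
	// Report the aggregate state together with a picker for it.
	b.cc.UpdateBalancerState(s, b)
}

func (b *pickOneBalancer) Close() {}

// Pick implements balancer.Picker on the balancer itself.
func (b *pickOneBalancer) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if b.st != connectivity.Ready {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return b.sc, nil, nil
}
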
248 vendor/google.golang.org/grpc/balancer_conn_wrappers.go (generated, vendored, new file)

@@ -0,0 +1,248 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+// scStateUpdate contains the subConn and the new state it changed to.
+type scStateUpdate struct {
+	sc    balancer.SubConn
+	state connectivity.State
+}
+
+// scStateUpdateBuffer is an unbounded channel for scStateUpdate.
+// TODO make a general purpose buffer that uses interface{}.
+type scStateUpdateBuffer struct {
+	c       chan *scStateUpdate
+	mu      sync.Mutex
+	backlog []*scStateUpdate
+}
+
+func newSCStateUpdateBuffer() *scStateUpdateBuffer {
+	return &scStateUpdateBuffer{
+		c: make(chan *scStateUpdate, 1),
+	}
+}
+
+func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+}
+
+func (b *scStateUpdateBuffer) load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+}
+
+// get returns the channel that receives an scStateUpdate from the buffer.
+//
+// Upon receiving, the caller should call load to send another
+// scStateUpdate onto the channel if there is any.
+func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
+	return b.c
+}
+
+// resolverUpdate contains the new resolved addresses or error if there's
+// any.
+type resolverUpdate struct {
+	addrs []resolver.Address
+	err   error
+}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+	cc               *ClientConn
+	balancer         balancer.Balancer
+	stateChangeQueue *scStateUpdateBuffer
+	resolverUpdateCh chan *resolverUpdate
+	done             chan struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+	ccb := &ccBalancerWrapper{
+		cc:               cc,
+		stateChangeQueue: newSCStateUpdateBuffer(),
+		resolverUpdateCh: make(chan *resolverUpdate, 1),
+		done:             make(chan struct{}),
+	}
+	go ccb.watcher()
+	ccb.balancer = b.Build(ccb, bopts)
+	return ccb
+}
+
+// watcher runs balancer functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() {
+	for {
+		select {
+		case t := <-ccb.stateChangeQueue.get():
+			ccb.stateChangeQueue.load()
+			ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+		case t := <-ccb.resolverUpdateCh:
+			ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
+		case <-ccb.done:
+		}
+
+		select {
+		case <-ccb.done:
+			ccb.balancer.Close()
+			return
+		default:
+		}
+	}
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	close(ccb.done)
+}
+
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	// When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be tearDown() and a new ac will be
+	// created. tearDown() generates a state change with Shutdown state, we
+	// don't want the balancer to receive this state change. So before
+	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
+	// this function will be called with (nil, Shutdown). We don't need to call
+	// balancer method in this case.
+	if sc == nil {
+		return
+	}
+	ccb.stateChangeQueue.put(&scStateUpdate{
+		sc:    sc,
+		state: s,
+	})
+}
+
+func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
+	select {
+	case <-ccb.resolverUpdateCh:
+	default:
+	}
+	ccb.resolverUpdateCh <- &resolverUpdate{
+		addrs: addrs,
+		err:   err,
+	}
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	grpclog.Infof("ccBalancerWrapper: new subconn: %v", addrs)
+	ac, err := ccb.cc.newAddrConn(addrs)
+	if err != nil {
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{ac: ac}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	grpclog.Infof("ccBalancerWrapper: removing subconn")
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
+	grpclog.Infof("ccBalancerWrapper: updating state and picker called by balancer: %v, %p", s, p)
+	ccb.cc.csMgr.updateState(s)
+	ccb.cc.blockingpicker.updatePicker(p)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+	mu sync.Mutex
+	ac *addrConn
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	grpclog.Infof("acBalancerWrapper: UpdateAddresses called with %v", addrs)
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	if !acbw.ac.tryUpdateAddrs(addrs) {
+		cc := acbw.ac.cc
+		acbw.ac.mu.Lock()
+		// Set old ac.acbw to nil so the Shutdown state update will be ignored
+		// by balancer.
+		//
+		// TODO(bar) the state transition could be wrong when tearDown() old ac
+		// and creating new ac, fix the transition.
+		acbw.ac.acbw = nil
+		acbw.ac.mu.Unlock()
+		acState := acbw.ac.getState()
+		acbw.ac.tearDown(errConnDrain)
+
+		if acState == connectivity.Shutdown {
+			return
+		}
+
+		ac, err := cc.newAddrConn(addrs)
+		if err != nil {
+			grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+			return
+		}
+		acbw.ac = ac
+		ac.acbw = acbw
+		if acState != connectivity.Idle {
+			ac.connect(false)
+		}
+	}
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	acbw.ac.connect(false)
+}
+
+func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+	return acbw.ac
+}

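scStateUpdateBuffer is the interesting piece of this file: a put that never blocks, backed by a one-slot channel plus a backlog slice, with the consumer calling load after every receive. The same pattern in isolation, with assumed names and string payloads instead of *scStateUpdate:

package unbounded

import "sync"

type buffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func newBuffer() *buffer {
	return &buffer{c: make(chan string, 1)}
}

// put never blocks: when the one-slot channel is full, the item
// queues in backlog instead.
func (b *buffer) put(s string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- s: // fast path: the channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, s)
}

// load moves the next backlog item onto the channel, if any. The
// consumer loop mirrors ccBalancerWrapper.watcher:
//	for s := range b.c { b.load(); handle(s) }
func (b *buffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}
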
116 vendor/google.golang.org/grpc/balancer_test.go (generated, vendored)

@@ -28,7 +28,9 @@ import (
 
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
+	_ "google.golang.org/grpc/grpclog/glogger"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/test/leakcheck"
 )
 
 type testWatcher struct {
@@ -56,6 +58,7 @@ func (w *testWatcher) Next() (updates []*naming.Update, err error) {
 }
 
 func (w *testWatcher) Close() {
+	close(w.side)
 }
 
 // Inject naming resolution updates to the testWatcher.
@@ -89,7 +92,7 @@ func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) {
 	return r.w, nil
 }
 
-func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver) {
+func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver, func()) {
 	var servers []*server
 	for i := 0; i < numServers; i++ {
 		s := newTestServer()
@@ -100,18 +103,25 @@ func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver) {
 	// Point to server[0]
 	addr := "localhost:" + servers[0].port
 	return servers, &testNameResolver{
-		addr: addr,
-	}
+			addr: addr,
+		}, func() {
+			for i := 0; i < numServers; i++ {
+				servers[i].stop()
+			}
+		}
 }
 
 func TestNameDiscovery(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 2 servers on 2 ports.
 	numServers := 2
-	servers, r := startServers(t, numServers, math.MaxUint32)
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	req := "port"
 	var reply string
 	if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port {
@@ -135,18 +145,17 @@ func TestNameDiscovery(t *testing.T) {
 		}
 		time.Sleep(10 * time.Millisecond)
 	}
-	cc.Close()
-	for i := 0; i < numServers; i++ {
-		servers[i].stop()
-	}
 }
 
 func TestEmptyAddrs(t *testing.T) {
-	servers, r := startServers(t, 1, math.MaxUint32)
+	defer leakcheck.Check(t)
+	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	var reply string
 	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
 		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, <nil>", err, reply, expectedResponse)
@@ -168,18 +177,19 @@ func TestEmptyAddrs(t *testing.T) {
 		}
 		cancel()
 	}
-	cc.Close()
-	servers[0].stop()
 }
 
 func TestRoundRobin(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 3 servers on 3 ports.
 	numServers := 3
-	servers, r := startServers(t, numServers, math.MaxUint32)
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	// Add servers[1] to the service discovery.
 	u := &naming.Update{
 		Op: naming.Add,
@@ -214,18 +224,17 @@ func TestRoundRobin(t *testing.T) {
 			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port)
 		}
 	}
-	cc.Close()
-	for i := 0; i < numServers; i++ {
-		servers[i].stop()
-	}
 }
 
 func TestCloseWithPendingRPC(t *testing.T) {
-	servers, r := startServers(t, 1, math.MaxUint32)
+	defer leakcheck.Check(t)
+	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	var reply string
 	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil {
 		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
@@ -267,15 +276,17 @@ func TestCloseWithPendingRPC(t *testing.T) {
 	time.Sleep(5 * time.Millisecond)
 	cc.Close()
 	wg.Wait()
-	servers[0].stop()
 }
 
 func TestGetOnWaitChannel(t *testing.T) {
-	servers, r := startServers(t, 1, math.MaxUint32)
+	defer leakcheck.Check(t)
+	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	// Remove all servers so that all upcoming RPCs will block on waitCh.
 	updates := []*naming.Update{{
 		Op: naming.Delete,
@@ -309,18 +320,19 @@ func TestGetOnWaitChannel(t *testing.T) {
 	r.w.inject(updates)
 	// Wait until the above RPC succeeds.
 	wg.Wait()
-	cc.Close()
-	servers[0].stop()
 }
 
 func TestOneServerDown(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 2 servers.
 	numServers := 2
-	servers, r := startServers(t, numServers, math.MaxUint32)
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	// Add servers[1] to the service discovery.
 	var updates []*naming.Update
 	updates = append(updates, &naming.Update{
@@ -361,20 +373,19 @@ func TestOneServerDown(t *testing.T) {
 		}()
 	}
 	wg.Wait()
-	cc.Close()
-	for i := 0; i < numServers; i++ {
-		servers[i].stop()
-	}
 }
 
 func TestOneAddressRemoval(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 2 servers.
	numServers := 2
-	servers, r := startServers(t, numServers, math.MaxUint32)
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	// Add servers[1] to the service discovery.
 	var updates []*naming.Update
 	updates = append(updates, &naming.Update{
@@ -423,10 +434,6 @@ func TestOneAddressRemoval(t *testing.T) {
 		}()
 	}
 	wg.Wait()
-	cc.Close()
-	for i := 0; i < numServers; i++ {
-		servers[i].stop()
-	}
 }
 
 func checkServerUp(t *testing.T, currentServer *server) {
@@ -436,6 +443,7 @@ func checkServerUp(t *testing.T, currentServer *server) {
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	var reply string
 	for {
 		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == port {
@@ -443,13 +451,13 @@ func checkServerUp(t *testing.T, currentServer *server) {
 		}
 		time.Sleep(10 * time.Millisecond)
 	}
-	cc.Close()
 }
 
 func TestPickFirstEmptyAddrs(t *testing.T) {
-	servers, r := startServers(t, 1, math.MaxUint32)
-	defer servers[0].stop()
-	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancer(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
+	defer leakcheck.Check(t)
+	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
+	defer cleanup()
+	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
@@ -478,12 +486,14 @@ func TestPickFirstEmptyAddrs(t *testing.T) {
 }
 
 func TestPickFirstCloseWithPendingRPC(t *testing.T) {
-	servers, r := startServers(t, 1, math.MaxUint32)
-	defer servers[0].stop()
-	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancer(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
+	defer leakcheck.Check(t)
+	servers, r, cleanup := startServers(t, 1, math.MaxUint32)
+	defer cleanup()
+	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
+	defer cc.Close()
 	var reply string
 	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc, FailFast(false)); err != nil {
 		t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port)
@@ -528,13 +538,12 @@ func TestPickFirstCloseWithPendingRPC(t *testing.T) {
 }
 
 func TestPickFirstOrderAllServerUp(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 3 servers on 3 ports.
 	numServers := 3
-	servers, r := startServers(t, numServers, math.MaxUint32)
-	for i := 0; i < numServers; i++ {
-		defer servers[i].stop()
-	}
-	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancer(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
+	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
@@ -635,20 +644,19 @@ func TestPickFirstOrderAllServerUp(t *testing.T) {
 	}
 	for i := 0; i < 20; i++ {
 		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port {
-			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port)
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
 		}
 		time.Sleep(10 * time.Millisecond)
 	}
 }
 
 func TestPickFirstOrderOneServerDown(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 3 servers on 3 ports.
 	numServers := 3
-	servers, r := startServers(t, numServers, math.MaxUint32)
-	for i := 0; i < numServers; i++ {
-		defer servers[i].stop()
-	}
-	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancer(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
+	cc, err := Dial("foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}
@@ -702,6 +710,7 @@ func TestPickFirstOrderOneServerDown(t *testing.T) {
 	p, _ := strconv.Atoi(servers[0].port)
 	servers[0] = newTestServer()
 	go servers[0].start(t, p, math.MaxUint32)
+	defer servers[0].stop()
 	servers[0].wait(t, 2*time.Second)
 	checkServerUp(t, servers[0])
 
@@ -733,13 +742,12 @@ func TestPickFirstOrderOneServerDown(t *testing.T) {
 }
 
 func TestPickFirstOneAddressRemoval(t *testing.T) {
+	defer leakcheck.Check(t)
 	// Start 2 servers.
 	numServers := 2
-	servers, r := startServers(t, numServers, math.MaxUint32)
-	for i := 0; i < numServers; i++ {
-		defer servers[i].stop()
-	}
-	cc, err := Dial("localhost:"+servers[0].port, WithBalancer(pickFirstBalancer(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
+	servers, r, cleanup := startServers(t, numServers, math.MaxUint32)
+	defer cleanup()
+	cc, err := Dial("localhost:"+servers[0].port, WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{}))
 	if err != nil {
 		t.Fatalf("Failed to create ClientConn: %v", err)
 	}

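The recurring change in this file is the new third return value of startServers: a cleanup closure, so each test can write a single defer cleanup() instead of repeating the server-stopping loop on every exit path. The shape of that pattern, reduced to a sketch with hypothetical names:

package sketch

// stopper is any resource with a stop method (illustrative).
type stopper interface {
	stop()
}

// startAll starts n resources and returns them together with one
// closure that stops them all, so callers defer a single call.
func startAll(n int, start func() stopper) ([]stopper, func()) {
	rs := make([]stopper, n)
	for i := range rs {
		rs[i] = start()
	}
	return rs, func() {
		for _, r := range rs {
			r.stop()
		}
	}
}
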
367 vendor/google.golang.org/grpc/balancer_v1_wrapper.go (generated, vendored, new file)

@@ -0,0 +1,367 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+type balancerWrapperBuilder struct {
+	b Balancer // The v1 balancer.
+}
+
+func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+	bwb.b.Start(cc.Target(), BalancerConfig{
+		DialCreds: opts.DialCreds,
+		Dialer:    opts.Dialer,
+	})
+	_, pickfirst := bwb.b.(*pickFirst)
+	bw := &balancerWrapper{
+		balancer:  bwb.b,
+		pickfirst: pickfirst,
+		cc:        cc,
+		startCh:   make(chan struct{}),
+		conns:     make(map[resolver.Address]balancer.SubConn),
+		connSt:    make(map[balancer.SubConn]*scState),
+		csEvltr:   &connectivityStateEvaluator{},
+		state:     connectivity.Idle,
+	}
+	cc.UpdateBalancerState(connectivity.Idle, bw)
+	go bw.lbWatcher()
+	return bw
+}
+
+func (bwb *balancerWrapperBuilder) Name() string {
+	return "wrapper"
+}
+
+type scState struct {
+	addr Address // The v1 address type.
+	s    connectivity.State
+	down func(error)
+}
+
+type balancerWrapper struct {
+	balancer  Balancer // The v1 balancer.
+	pickfirst bool
+
+	cc balancer.ClientConn
+
+	// To aggregate the connectivity state.
+	csEvltr *connectivityStateEvaluator
+	state   connectivity.State
+
+	mu     sync.Mutex
+	conns  map[resolver.Address]balancer.SubConn
+	connSt map[balancer.SubConn]*scState
+	// This channel is closed when handling the first resolver result.
+	// lbWatcher blocks until this is closed, to avoid race between
+	// - NewSubConn is created, cc wants to notify balancer of state changes;
+	// - Build hasn't return, cc doesn't have access to balancer.
+	startCh chan struct{}
+}
+
+// lbWatcher watches the Notify channel of the balancer and manages
+// connections accordingly.
+func (bw *balancerWrapper) lbWatcher() {
+	<-bw.startCh
+	grpclog.Infof("balancerWrapper: is pickfirst: %v\n", bw.pickfirst)
+	notifyCh := bw.balancer.Notify()
+	if notifyCh == nil {
+		// There's no resolver in the balancer. Connect directly.
+		a := resolver.Address{
+			Addr: bw.cc.Target(),
+			Type: resolver.Backend,
+		}
+		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+		if err != nil {
+			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+		} else {
+			bw.mu.Lock()
+			bw.conns[a] = sc
+			bw.connSt[sc] = &scState{
+				addr: Address{Addr: bw.cc.Target()},
+				s:    connectivity.Idle,
+			}
+			bw.mu.Unlock()
+			sc.Connect()
+		}
+		return
+	}
+
+	for addrs := range notifyCh {
+		grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
+		if bw.pickfirst {
+			var (
+				oldA  resolver.Address
+				oldSC balancer.SubConn
+			)
+			bw.mu.Lock()
+			for oldA, oldSC = range bw.conns {
+				break
+			}
+			bw.mu.Unlock()
+			if len(addrs) <= 0 {
+				if oldSC != nil {
+					// Teardown old sc.
+					bw.mu.Lock()
+					delete(bw.conns, oldA)
+					delete(bw.connSt, oldSC)
+					bw.mu.Unlock()
+					bw.cc.RemoveSubConn(oldSC)
+				}
+				continue
+			}
+
+			var newAddrs []resolver.Address
+			for _, a := range addrs {
+				newAddr := resolver.Address{
+					Addr:       a.Addr,
+					Type:       resolver.Backend, // All addresses from balancer are backends.
+					ServerName: "",
+					Metadata:   a.Metadata,
+				}
+				newAddrs = append(newAddrs, newAddr)
+			}
+			if oldSC == nil {
+				// Create new sc.
+				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
+				if err != nil {
+					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
+				} else {
+					bw.mu.Lock()
+					// For pickfirst, there should be only one SubConn, so the
+					// address doesn't matter. All states updating (up and down)
+					// and picking should all happen on that only SubConn.
+					bw.conns[resolver.Address{}] = sc
+					bw.connSt[sc] = &scState{
+						addr: addrs[0], // Use the first address.
+						s:    connectivity.Idle,
+					}
+					bw.mu.Unlock()
+					sc.Connect()
+				}
+			} else {
+				oldSC.UpdateAddresses(newAddrs)
+				bw.mu.Lock()
+				bw.connSt[oldSC].addr = addrs[0]
+				bw.mu.Unlock()
+			}
+		} else {
+			var (
+				add []resolver.Address // Addresses need to setup connections.
+				del []balancer.SubConn // Connections need to tear down.
+			)
+			resAddrs := make(map[resolver.Address]Address)
+			for _, a := range addrs {
+				resAddrs[resolver.Address{
+					Addr:       a.Addr,
+					Type:       resolver.Backend, // All addresses from balancer are backends.
+					ServerName: "",
+					Metadata:   a.Metadata,
+				}] = a
+			}
+			bw.mu.Lock()
+			for a := range resAddrs {
+				if _, ok := bw.conns[a]; !ok {
+					add = append(add, a)
+				}
+			}
+			for a, c := range bw.conns {
+				if _, ok := resAddrs[a]; !ok {
+					del = append(del, c)
+					delete(bw.conns, a)
+					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
+				}
+			}
+			bw.mu.Unlock()
+			for _, a := range add {
+				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+				if err != nil {
+					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+				} else {
+					bw.mu.Lock()
+					bw.conns[a] = sc
+					bw.connSt[sc] = &scState{
+						addr: resAddrs[a],
+						s:    connectivity.Idle,
+					}
+					bw.mu.Unlock()
+					sc.Connect()
+				}
+			}
+			for _, c := range del {
+				bw.cc.RemoveSubConn(c)
+			}
+		}
+	}
+}
+
+func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("balancerWrapper: handle subconn state change: %p, %v", sc, s)
+	bw.mu.Lock()
+	defer bw.mu.Unlock()
+	scSt, ok := bw.connSt[sc]
+	if !ok {
+		return
+	}
+	if s == connectivity.Idle {
+		sc.Connect()
+	}
+	oldS := scSt.s
+	scSt.s = s
+	if oldS != connectivity.Ready && s == connectivity.Ready {
+		scSt.down = bw.balancer.Up(scSt.addr)
+	} else if oldS == connectivity.Ready && s != connectivity.Ready {
+		if scSt.down != nil {
+			scSt.down(errConnClosing)
+		}
+	}
+	sa := bw.csEvltr.recordTransition(oldS, s)
+	if bw.state != sa {
+		bw.state = sa
+	}
+	bw.cc.UpdateBalancerState(bw.state, bw)
+	if s == connectivity.Shutdown {
+		// Remove state for this sc.
+		delete(bw.connSt, sc)
+	}
+	return
+}
+
+func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
+	bw.mu.Lock()
+	defer bw.mu.Unlock()
+	select {
+	case <-bw.startCh:
+	default:
+		close(bw.startCh)
+	}
+	// There should be a resolver inside the balancer.
+	// All updates here, if any, are ignored.
+	return
+}
+
+func (bw *balancerWrapper) Close() {
+	bw.mu.Lock()
+	defer bw.mu.Unlock()
+	select {
+	case <-bw.startCh:
+	default:
+		close(bw.startCh)
+	}
+	bw.balancer.Close()
+	return
+}
+
+// The picker is the balancerWrapper itself.
+// Pick should never return ErrNoSubConnAvailable.
+// It either blocks or returns error, consistent with v1 balancer Get().
+func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	failfast := true // Default failfast is true.
+	if ss, ok := rpcInfoFromContext(ctx); ok {
+		failfast = ss.failfast
+	}
+	a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+	if err != nil {
+		return nil, nil, err
+	}
+	var done func(balancer.DoneInfo)
+	if p != nil {
+		done = func(i balancer.DoneInfo) { p() }
+	}
+	var sc balancer.SubConn
+	bw.mu.Lock()
+	defer bw.mu.Unlock()
+	if bw.pickfirst {
+		// Get the first sc in conns.
+		for _, sc = range bw.conns {
+			break
+		}
+	} else {
+		var ok bool
+		sc, ok = bw.conns[resolver.Address{
+			Addr:       a.Addr,
+			Type:       resolver.Backend,
+			ServerName: "",
+			Metadata:   a.Metadata,
+		}]
+		if !ok && failfast {
+			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+		}
+		if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
+			// If the returned sc is not ready and RPC is failfast,
+			// return error, and this RPC will fail.
+			return nil, nil, Errorf(codes.Unavailable, "there is no connection available")
+		}
+	}
+
+	return sc, done, nil
+}
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+	mu                  sync.Mutex
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records state change happening in every subConn and based on
+// that it evaluates what aggregated state should be.
+// It can only transition between Ready, Connecting and TransientFailure. Other states,
+// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
+// before any subConn is created ClientConn is in idle state. In the end when ClientConn
+// closes it is in Shutdown state.
+// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+	cse.mu.Lock()
+	defer cse.mu.Unlock()
+
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		}
+	}
+
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	return connectivity.TransientFailure
+}

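connectivityStateEvaluator's aggregation rule is easy to lose in the counters: the aggregate is READY if any SubConn is ready, else CONNECTING if any is connecting, otherwise TRANSIENT_FAILURE. A self-contained replica of recordTransition's logic, walking through assumed transitions (illustrative only; the real type keeps uint64 counters behind a mutex):

package main

import "fmt"

type evaluator struct {
	ready, connecting int
}

// record applies one transition and returns the aggregate state:
// READY wins, then CONNECTING, otherwise TRANSIENT_FAILURE.
func (e *evaluator) record(oldS, newS string) string {
	for i, s := range []string{oldS, newS} {
		d := 2*i - 1 // -1 for the old state, +1 for the new one
		switch s {
		case "READY":
			e.ready += d
		case "CONNECTING":
			e.connecting += d
		}
	}
	switch {
	case e.ready > 0:
		return "READY"
	case e.connecting > 0:
		return "CONNECTING"
	default:
		return "TRANSIENT_FAILURE"
	}
}

func main() {
	e := &evaluator{}
	fmt.Println(e.record("IDLE", "CONNECTING"))         // CONNECTING
	fmt.Println(e.record("IDLE", "CONNECTING"))         // CONNECTING
	fmt.Println(e.record("CONNECTING", "READY"))        // READY
	fmt.Println(e.record("READY", "TRANSIENT_FAILURE")) // CONNECTING: one SubConn is still connecting
}
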
38 vendor/google.golang.org/grpc/call.go (generated, vendored)

@@ -25,6 +25,7 @@ import (
 
 	"golang.org/x/net/context"
 	"golang.org/x/net/trace"
+	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -135,7 +136,7 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
 }
 
 func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
-	c := defaultCallInfo
+	c := defaultCallInfo()
 	mc := cc.GetMethodConfig(method)
 	if mc.WaitForReady != nil {
 		c.failFast = !*mc.WaitForReady
@@ -149,13 +150,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 
 	opts = append(cc.dopts.callOptions, opts...)
 	for _, o := range opts {
-		if err := o.before(&c); err != nil {
+		if err := o.before(c); err != nil {
 			return toRPCErr(err)
 		}
 	}
 	defer func() {
 		for _, o := range opts {
-			o.after(&c)
+			o.after(c)
 		}
 	}()
 
@@ -178,7 +179,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 		}
 	}()
 	}
-	ctx = newContextWithRPCInfo(ctx)
+	ctx = newContextWithRPCInfo(ctx, c.failFast)
 	sh := cc.dopts.copts.StatsHandler
 	if sh != nil {
 		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
@@ -206,9 +207,9 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 		err    error
 		t      transport.ClientTransport
 		stream *transport.Stream
-		// Record the put handler from Balancer.Get(...). It is called once the
+		// Record the done handler from Balancer.Get(...). It is called once the
 		// RPC has completed or failed.
-		put func()
+		done func(balancer.DoneInfo)
 	)
 	// TODO(zhaoq): Need a formal spec of fail-fast.
 	callHdr := &transport.CallHdr{
@@ -222,10 +223,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 			callHdr.Creds = c.creds
 		}
 
-		gopts := BalancerGetOptions{
-			BlockingWait: !c.failFast,
-		}
-		t, put, err = cc.getTransport(ctx, gopts)
+		t, done, err = cc.getTransport(ctx, c.failFast)
 		if err != nil {
 			// TODO(zhaoq): Probably revisit the error handling.
 			if _, ok := status.FromError(err); ok {
@@ -245,14 +243,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 		}
 		stream, err = t.NewStream(ctx, callHdr)
 		if err != nil {
-			if put != nil {
+			if done != nil {
 				if _, ok := err.(transport.ConnectionError); ok {
 					// If error is connection error, transport was sending data on wire,
 					// and we are not sure if anything has been sent on wire.
 					// If error is not connection error, we are sure nothing has been sent.
 					updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
 				}
-				put()
+				done(balancer.DoneInfo{Err: err})
 			}
 			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
 				continue
@@ -262,14 +260,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 		if peer, ok := peer.FromContext(stream.Context()); ok {
 			c.peer = peer
 		}
-		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, &c, callHdr, stream, t, args, topts)
+		err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
 		if err != nil {
-			if put != nil {
+			if done != nil {
 				updateRPCInfoInContext(ctx, rpcInfo{
 					bytesSent:     stream.BytesSent(),
 					bytesReceived: stream.BytesReceived(),
 				})
-				put()
+				done(balancer.DoneInfo{Err: err})
 			}
 			// Retry a non-failfast RPC when
 			// i) there is a connection error; or
@@ -279,14 +277,14 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 			}
 			return toRPCErr(err)
 		}
-		err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
+		err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
 		if err != nil {
-			if put != nil {
+			if done != nil {
 				updateRPCInfoInContext(ctx, rpcInfo{
 					bytesSent:     stream.BytesSent(),
 					bytesReceived: stream.BytesReceived(),
 				})
-				put()
+				done(balancer.DoneInfo{Err: err})
 			}
 			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
 				continue
@@ -297,12 +295,12 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
 			c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
 		}
 		t.CloseStream(stream, nil)
-		if put != nil {
+		if done != nil {
 			updateRPCInfoInContext(ctx, rpcInfo{
 				bytesSent:     stream.BytesSent(),
 				bytesReceived: stream.BytesReceived(),
 			})
-			put()
+			done(balancer.DoneInfo{Err: err})
 		}
 		return stream.Status().Err()
 	}

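The thread running through this file is the replacement of put func() with done func(balancer.DoneInfo): the completion callback now carries the RPC's final error at every call site. A hypothetical consumer of that extra information, with assumed names not from this commit: a balancer could return a done callback from Pick that counts failed RPCs per SubConn, which the argument-less put() could not express.

package sketch

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// failureCounter tracks how many RPCs ended in error (illustrative).
type failureCounter struct {
	failures uint64
}

// doneFor returns the callback a Picker hands back from Pick; gRPC
// invokes it once with the RPC's final status.
func (f *failureCounter) doneFor() func(balancer.DoneInfo) {
	return func(di balancer.DoneInfo) {
		if di.Err != nil {
			atomic.AddUint64(&f.failures, 1)
		}
	}
}
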
10 vendor/google.golang.org/grpc/call_test.go (generated, vendored)

@@ -32,6 +32,7 @@ import (
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/test/leakcheck"
 	"google.golang.org/grpc/transport"
 )
 
@@ -116,6 +117,7 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) {
 type server struct {
 	lis        net.Listener
 	port       string
+	addr       string
 	startedErr chan error // sent nil or an error after server starts
 	mu         sync.Mutex
 	conns      map[transport.ServerTransport]bool
@@ -137,7 +139,8 @@ func (s *server) start(t *testing.T, port int, maxStreams uint32) {
 		s.startedErr <- fmt.Errorf("failed to listen: %v", err)
 		return
 	}
-	_, p, err := net.SplitHostPort(s.lis.Addr().String())
+	s.addr = s.lis.Addr().String()
+	_, p, err := net.SplitHostPort(s.addr)
 	if err != nil {
 		s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err)
 		return
@@ -211,6 +214,7 @@ func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) {
 }
 
 func TestInvoke(t *testing.T) {
+	defer leakcheck.Check(t)
 	server, cc := setUp(t, 0, math.MaxUint32)
 	var reply string
 	if err := Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, cc); err != nil || reply != expectedResponse {
@@ -221,6 +225,7 @@ func TestInvoke(t *testing.T) {
 }
 
 func TestInvokeLargeErr(t *testing.T) {
+	defer leakcheck.Check(t)
 	server, cc := setUp(t, 0, math.MaxUint32)
 	var reply string
 	req := "hello"
@@ -237,6 +242,7 @@ func TestInvokeLargeErr(t *testing.T) {
 
 // TestInvokeErrorSpecialChars checks that error messages don't get mangled.
 func TestInvokeErrorSpecialChars(t *testing.T) {
+	defer leakcheck.Check(t)
 	server, cc := setUp(t, 0, math.MaxUint32)
 	var reply string
 	req := "weird error"
@@ -253,6 +259,7 @@ func TestInvokeErrorSpecialChars(t *testing.T) {
 
 // TestInvokeCancel checks that an Invoke with a canceled context is not sent.
 func TestInvokeCancel(t *testing.T) {
+	defer leakcheck.Check(t)
 	server, cc := setUp(t, 0, math.MaxUint32)
 	var reply string
 	req := "canceled"
@@ -271,6 +278,7 @@ func TestInvokeCancel(t *testing.T) {
 // TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC
 // on a closed client will terminate.
 func TestInvokeCancelClosedNonFailFast(t *testing.T) {
+	defer leakcheck.Check(t)
 	server, cc := setUp(t, 0, math.MaxUint32)
 	var reply string
 	cc.Close()

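For reference on the start refactor above: the server now stores the full listener address in s.addr and derives the port from it with net.SplitHostPort. A standalone illustration with an assumed address:

package main

import (
	"fmt"
	"net"
)

func main() {
	addr := "127.0.0.1:50051" // e.g. what lis.Addr().String() returns
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println(host, port) // 127.0.0.1 50051
}
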
752 vendor/google.golang.org/grpc/clientconn.go (generated, vendored)

(Diff suppressed because it is too large: 752 lines changed.)

42 vendor/google.golang.org/grpc/clientconn_test.go (generated, vendored)

@@ -30,6 +30,7 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/naming"
+	"google.golang.org/grpc/test/leakcheck"
 	"google.golang.org/grpc/testdata"
 )
 
@@ -43,13 +44,9 @@ func assertState(wantState connectivity.State, cc *ClientConn) (connectivity.State, bool) {
 }
 
 func TestConnectivityStates(t *testing.T) {
-	servers, resolver := startServers(t, 2, math.MaxUint32)
-	defer func() {
-		for i := 0; i < 2; i++ {
-			servers[i].stop()
-		}
-	}()
-
+	defer leakcheck.Check(t)
+	servers, resolver, cleanup := startServers(t, 2, math.MaxUint32)
+	defer cleanup()
 	cc, err := Dial("foo.bar.com", WithBalancer(RoundRobin(resolver)), WithInsecure())
 	if err != nil {
 		t.Fatalf("Dial(\"foo.bar.com\", WithBalancer(_)) = _, %v, want _ <nil>", err)
@@ -84,6 +81,7 @@ func TestConnectivityStates(t *testing.T) {
 }
 
 func TestDialTimeout(t *testing.T) {
+	defer leakcheck.Check(t)
 	conn, err := Dial("Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure())
 	if err == nil {
 		conn.Close()
@@ -94,6 +92,7 @@ func TestDialTimeout(t *testing.T) {
 }
 
 func TestTLSDialTimeout(t *testing.T) {
+	defer leakcheck.Check(t)
 	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
 	if err != nil {
 		t.Fatalf("Failed to create credentials %v", err)
@@ -108,6 +107,7 @@ func TestTLSDialTimeout(t *testing.T) {
 }
 
 func TestDefaultAuthority(t *testing.T) {
+	defer leakcheck.Check(t)
 	target := "Non-Existent.Server:8080"
 	conn, err := Dial(target, WithInsecure())
 	if err != nil {
@@ -120,6 +120,7 @@ func TestDefaultAuthority(t *testing.T) {
 }
 
 func TestTLSServerNameOverwrite(t *testing.T) {
+	defer leakcheck.Check(t)
 	overwriteServerName := "over.write.server.name"
 	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName)
 	if err != nil {
@@ -136,6 +137,7 @@ func TestTLSServerNameOverwrite(t *testing.T) {
 }
 
 func TestWithAuthority(t *testing.T) {
+	defer leakcheck.Check(t)
 	overwriteServerName := "over.write.server.name"
 	conn, err := Dial("Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName))
 	if err != nil {
@@ -148,6 +150,7 @@ func TestWithAuthority(t *testing.T) {
 }
 
 func TestWithAuthorityAndTLS(t *testing.T) {
+	defer leakcheck.Check(t)
 	overwriteServerName := "over.write.server.name"
 	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName)
 	if err != nil {
@@ -164,6 +167,7 @@ func TestWithAuthorityAndTLS(t *testing.T) {
 }
 
 func TestDialContextCancel(t *testing.T) {
+	defer leakcheck.Check(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 	if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled {
@@ -198,6 +202,7 @@ func (b *blockingBalancer) Close() error {
 }
 
 func TestDialWithBlockingBalancer(t *testing.T) {
+	defer leakcheck.Check(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	dialDone := make(chan struct{})
 	go func() {
@@ -220,6 +225,7 @@ func (c securePerRPCCredentials) RequireTransportSecurity() bool {
 }
 
 func TestCredentialsMisuse(t *testing.T) {
+	defer leakcheck.Check(t)
 	tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
 	if err != nil {
 		t.Fatalf("Failed to create authenticator %v", err)
@@ -235,10 +241,12 @@ func TestCredentialsMisuse(t *testing.T) {
 }
 
 func TestWithBackoffConfigDefault(t *testing.T) {
+	defer leakcheck.Check(t)
 	testBackoffConfigSet(t, &DefaultBackoffConfig)
 }
 
 func TestWithBackoffConfig(t *testing.T) {
+	defer leakcheck.Check(t)
 	b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2}
 	expected := b
 	setDefaults(&expected) // defaults should be set
@@ -246,6 +254,7 @@ func TestWithBackoffConfig(t *testing.T) {
 }
 
 func TestWithBackoffMaxDelay(t *testing.T) {
+	defer leakcheck.Check(t)
 	md := DefaultBackoffConfig.MaxDelay / 2
 	expected := BackoffConfig{MaxDelay: md}
 	setDefaults(&expected)
@@ -293,6 +302,7 @@ func nonTemporaryErrorDialer(addr string, timeout time.Duration) (net.Conn, error) {
 }
 
 func TestDialWithBlockErrorOnNonTemporaryErrorDialer(t *testing.T) {
+	defer leakcheck.Check(t)
 	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer cancel()
 	if _, err := DialContext(ctx, "", WithInsecure(), WithDialer(nonTemporaryErrorDialer), WithBlock(), FailOnNonTempDialError(true)); err != nonTemporaryError {
@@ -332,6 +342,7 @@ func (b *emptyBalancer) Close() error {
 }
 
 func TestNonblockingDialWithEmptyBalancer(t *testing.T) {
+	defer leakcheck.Check(t)
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	dialDone := make(chan error)
@@ -350,6 +361,7 @@ func TestNonblockingDialWithEmptyBalancer(t *testing.T) {
 }
 
 func TestClientUpdatesParamsAfterGoAway(t *testing.T) {
+	defer leakcheck.Check(t)
 	lis, err := net.Listen("tcp", "localhost:0")
 	if err != nil {
 		t.Fatalf("Failed to listen. Err: %v", err)
@@ -376,19 +388,3 @@ func TestClientUpdatesParamsAfterGoAway(t *testing.T) {
 		t.Fatalf("cc.dopts.copts.Keepalive.Time = %v , want 100ms", v)
 	}
 }
-
-func TestClientLBWatcherWithClosedBalancer(t *testing.T) {
-	b := newBlockingBalancer()
-	cc := &ClientConn{dopts: dialOptions{balancer: b}}
-
-	doneChan := make(chan struct{})
-	go cc.lbWatcher(doneChan)
-	// Balancer closes before any successful connections.
-	b.Close()
-
-	select {
-	case <-doneChan:
-	case <-time.After(100 * time.Millisecond):
-		t.Fatal("lbWatcher with closed balancer didn't close doneChan after 100ms")
-	}
-}

|||
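The change repeated throughout clientconn_test.go is a `defer leakcheck.Check(t)` at the top of each test, plus switching to the cleanup func now returned by startServers. A minimal sketch of how that deferred check composes with a test's other cleanups (the test body is a stand-in; only Dial, WithInsecure, and leakcheck.Check are taken from the diff):

package grpc

import (
	"testing"

	"google.golang.org/grpc/test/leakcheck"
)

func TestDialDoesNotLeak(t *testing.T) {
	// Deferred first, so it runs last: by the time Check executes, the
	// later defers (cc.Close below) have already torn everything down,
	// and any goroutine still running can be reported as a leak.
	defer leakcheck.Check(t)

	cc, err := Dial("localhost:0", WithInsecure())
	if err != nil {
		t.Fatalf("Dial: %v", err)
	}
	defer cc.Close() // runs before leakcheck.Check
}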
2 vendor/google.golang.org/grpc/credentials/credentials_test.go generated vendored

@@ -128,7 +128,7 @@ func launchServer(t *testing.T, hs serverHandshake, done chan AuthInfo) net.List
 	return lis
 }
 
-// Is run in a seperate goroutine.
+// Is run in a separate goroutine.
 func serverHandle(t *testing.T, hs serverHandshake, done chan AuthInfo, lis net.Listener) {
 	serverRawConn, err := lis.Accept()
 	if err != nil {
98 vendor/google.golang.org/grpc/go16.go generated vendored

@@ -1,98 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"os"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req.Cancel = ctx.Done()
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
98 vendor/google.golang.org/grpc/go17.go generated vendored

@@ -1,98 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"io"
-	"net"
-	"net/http"
-	"os"
-
-	netctx "golang.org/x/net/context"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/transport"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
-	return (&net.Dialer{}).DialContext(ctx, network, address)
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req = req.WithContext(ctx)
-	if err := req.Write(conn); err != nil {
-		return err
-	}
-	return nil
-}
-
-// toRPCErr converts an error into an error from the status package.
-func toRPCErr(err error) error {
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
-	switch e := err.(type) {
-	case transport.StreamError:
-		return status.Error(e.Code, e.Desc)
-	case transport.ConnectionError:
-		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded, netctx.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled, netctx.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		case ErrClientConnClosing:
-			return status.Error(codes.FailedPrecondition, err.Error())
-		}
-	}
-	return status.Error(codes.Unknown, err.Error())
-}
-
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
-	switch err {
-	case nil:
-		return codes.OK
-	case io.EOF:
-		return codes.OutOfRange
-	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
-		return codes.FailedPrecondition
-	case os.ErrInvalid:
-		return codes.InvalidArgument
-	case context.Canceled, netctx.Canceled:
-		return codes.Canceled
-	case context.DeadlineExceeded, netctx.DeadlineExceeded:
-		return codes.DeadlineExceeded
-	}
-	switch {
-	case os.IsExist(err):
-		return codes.AlreadyExists
-	case os.IsNotExist(err):
-		return codes.NotFound
-	case os.IsPermission(err):
-		return codes.PermissionDenied
-	}
-	return codes.Unknown
-}
42 vendor/google.golang.org/grpc/grpclb.go generated vendored

@@ -73,7 +73,7 @@ func (x *balanceLoadClientStream) Recv() (*lbmpb.LoadBalanceResponse, error) {
 
 // NewGRPCLBBalancer creates a grpclb load balancer.
 func NewGRPCLBBalancer(r naming.Resolver) Balancer {
-	return &balancer{
+	return &grpclbBalancer{
 		r: r,
 	}
 }
@@ -96,7 +96,7 @@ type grpclbAddrInfo struct {
 	dropForLoadBalancing bool
 }
 
-type balancer struct {
+type grpclbBalancer struct {
 	r      naming.Resolver
 	target string
 	mu     sync.Mutex
@@ -113,7 +113,7 @@ type balancer struct {
 	clientStats lbmpb.ClientStats
 }
 
-func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
+func (b *grpclbBalancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
 	updates, err := w.Next()
 	if err != nil {
 		grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
@@ -187,7 +187,7 @@ func convertDuration(d *lbmpb.Duration) time.Duration {
 	return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
 }
 
-func (b *balancer) processServerList(l *lbmpb.ServerList, seq int) {
+func (b *grpclbBalancer) processServerList(l *lbmpb.ServerList, seq int) {
 	if l == nil {
 		return
 	}
@@ -230,7 +230,7 @@ func (b *balancer) processServerList(l *lbmpb.ServerList, seq int) {
 	return
 }
 
-func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
+func (b *grpclbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
 	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
 	for {
@@ -259,7 +259,7 @@ func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Dura
 	}
 }
 
-func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
+func (b *grpclbBalancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	stream, err := lbc.BalanceLoad(ctx)
@@ -332,7 +332,7 @@ func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry b
 	return true
 }
 
-func (b *balancer) Start(target string, config BalancerConfig) error {
+func (b *grpclbBalancer) Start(target string, config BalancerConfig) error {
 	b.rand = rand.New(rand.NewSource(time.Now().Unix()))
 	// TODO: Fall back to the basic direct connection if there is no name resolver.
 	if b.r == nil {
@@ -461,8 +461,11 @@ func (b *balancer) Start(target string, config BalancerConfig) error {
 			// WithDialer takes a different type of function, so we instead use a special DialOption here.
 			dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
 		}
+		dopts = append(dopts, WithBlock())
 		ccError = make(chan struct{})
-		cc, err = Dial(rb.addr, dopts...)
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+		cc, err = DialContext(ctx, rb.addr, dopts...)
+		cancel()
 		if err != nil {
 			grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
 			close(ccError)
@@ -488,7 +491,7 @@ func (b *balancer) Start(target string, config BalancerConfig) error {
 	return nil
 }
 
-func (b *balancer) down(addr Address, err error) {
+func (b *grpclbBalancer) down(addr Address, err error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	for _, a := range b.addrs {
@@ -499,7 +502,7 @@ func (b *balancer) down(addr Address, err error) {
 	}
 }
 
-func (b *balancer) Up(addr Address) func(error) {
+func (b *grpclbBalancer) Up(addr Address) func(error) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	if b.done {
@@ -527,7 +530,7 @@ func (b *balancer) Up(addr Address) func(error) {
 	}
 }
 
-func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
+func (b *grpclbBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
 	var ch chan struct{}
 	b.mu.Lock()
 	if b.done {
@@ -597,17 +600,10 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre
 		}
 	}
 	if !opts.BlockingWait {
-		if len(b.addrs) == 0 {
-			b.clientStats.NumCallsFinished++
-			b.clientStats.NumCallsFinishedWithClientFailedToSend++
-			b.mu.Unlock()
-			err = Errorf(codes.Unavailable, "there is no address available")
-			return
-		}
-		// Returns the next addr on b.addrs for a failfast RPC.
-		addr = b.addrs[b.next].addr
-		b.next++
+		b.clientStats.NumCallsFinished++
+		b.clientStats.NumCallsFinishedWithClientFailedToSend++
 		b.mu.Unlock()
+		err = Errorf(codes.Unavailable, "there is no address available")
 		return
 	}
 	// Wait on b.waitCh for non-failfast RPCs.
@@ -684,11 +680,11 @@ func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Addre
 	}
 }
 
-func (b *balancer) Notify() <-chan []Address {
+func (b *grpclbBalancer) Notify() <-chan []Address {
 	return b.addrCh
 }
 
-func (b *balancer) Close() error {
+func (b *grpclbBalancer) Close() error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 	if b.done {
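The @@ -461,8 +461,11 @@ hunk above swaps an unbounded Dial for a blocking dial capped at one second, so a dead remote-balancer address is skipped quickly. The same pattern in isolation, as a hedged sketch (package and function names are illustrative):

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// dialBounded blocks until the connection is up (WithBlock) but never
// longer than one second (the context deadline), mirroring the grpclb
// change above. cancel only releases the timer; the returned ClientConn
// remains usable afterwards.
func dialBounded(addr string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
	dopts = append(dopts, grpc.WithBlock())
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	return grpc.DialContext(ctx, addr, dopts...)
}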
34 vendor/google.golang.org/grpc/grpclb/grpclb_test.go generated vendored

@@ -39,9 +39,11 @@ import (
 	"google.golang.org/grpc/credentials"
 	lbmpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
 	lbspb "google.golang.org/grpc/grpclb/grpc_lb_v1/service"
+	_ "google.golang.org/grpc/grpclog/glogger"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/naming"
 	testpb "google.golang.org/grpc/test/grpc_testing"
+	"google.golang.org/grpc/test/leakcheck"
 )
 
 var (
@@ -86,6 +88,7 @@ func (w *testWatcher) Next() (updates []*naming.Update, err error) {
 }
 
 func (w *testWatcher) Close() {
+	close(w.side)
 }
 
 // Inject naming resolution updates to the testWatcher.
@@ -374,6 +377,7 @@ func newLoadBalancer(numberOfBackends int) (tss *testServers, cleanup func(), er
 }
 
 func TestGRPCLB(t *testing.T) {
+	defer leakcheck.Check(t)
 	tss, cleanup, err := newLoadBalancer(1)
 	if err != nil {
 		t.Fatalf("failed to create new load balancer: %v", err)
@@ -403,14 +407,15 @@ func TestGRPCLB(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to dial to the backend %v", err)
 	}
+	defer cc.Close()
 	testC := testpb.NewTestServiceClient(cc)
-	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
+	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil {
 		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
 	}
-	cc.Close()
 }
 
 func TestDropRequest(t *testing.T) {
+	defer leakcheck.Check(t)
 	tss, cleanup, err := newLoadBalancer(2)
 	if err != nil {
 		t.Fatalf("failed to create new load balancer: %v", err)
@@ -441,7 +446,17 @@ func TestDropRequest(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to dial to the backend %v", err)
 	}
+	defer cc.Close()
 	testC := testpb.NewTestServiceClient(cc)
+	// Wait until the first connection is up.
+	// The first one has Drop set to true, error should contain "drop requests".
+	for {
+		if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
+			if strings.Contains(err.Error(), "drops requests") {
+				break
+			}
+		}
+	}
 	// The 1st, non-fail-fast RPC should succeed. This ensures both server
 	// connections are made, because the first one has DropForLoadBalancing set to true.
 	if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.FailFast(false)); err != nil {
@@ -459,10 +474,10 @@ func TestDropRequest(t *testing.T) {
 			t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, <nil>", testC, err)
 		}
 	}
-	cc.Close()
 }
 
 func TestDropRequestFailedNonFailFast(t *testing.T) {
+	defer leakcheck.Check(t)
 	tss, cleanup, err := newLoadBalancer(1)
 	if err != nil {
 		t.Fatalf("failed to create new load balancer: %v", err)
@@ -492,17 +507,18 @@ func TestDropRequestFailedNonFailFast(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to dial to the backend %v", err)
 	}
+	defer cc.Close()
 	testC := testpb.NewTestServiceClient(cc)
 	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond)
 	defer cancel()
 	if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.FailFast(false)); grpc.Code(err) != codes.DeadlineExceeded {
 		t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.DeadlineExceeded)
 	}
-	cc.Close()
 }
 
 // When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list.
 func TestBalancerDisconnects(t *testing.T) {
+	defer leakcheck.Check(t)
 	var (
 		lbAddrs []string
 		lbs     []*grpc.Server
@@ -545,6 +561,7 @@ func TestBalancerDisconnects(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Failed to dial to the backend %v", err)
 	}
+	defer cc.Close()
 	testC := testpb.NewTestServiceClient(cc)
 	var previousTrailer string
 	trailer := metadata.MD{}
@@ -589,7 +606,6 @@ func TestBalancerDisconnects(t *testing.T) {
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
-	cc.Close()
 }
 
 type failPreRPCCred struct{}
@@ -653,6 +669,7 @@ func runAndGetStats(t *testing.T, dropForLoadBalancing, dropForRateLimiting bool
 const countRPC = 40
 
 func TestGRPCLBStatsUnarySuccess(t *testing.T) {
+	defer leakcheck.Check(t)
 	stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
 		// The first non-failfast RPC succeeds, all connections are up.
@@ -674,6 +691,7 @@ func TestGRPCLBStatsUnarySuccess(t *testing.T) {
 }
 
 func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) {
+	defer leakcheck.Check(t)
 	c := 0
 	stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
@@ -701,6 +719,7 @@ func TestGRPCLBStatsUnaryDropLoadBalancing(t *testing.T) {
 }
 
 func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) {
+	defer leakcheck.Check(t)
 	c := 0
 	stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
@@ -728,6 +747,7 @@ func TestGRPCLBStatsUnaryDropRateLimiting(t *testing.T) {
 }
 
 func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
+	defer leakcheck.Check(t)
 	stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
 		// The first non-failfast RPC succeeds, all connections are up.
@@ -750,6 +770,7 @@ func TestGRPCLBStatsUnaryFailedToSend(t *testing.T) {
 }
 
 func TestGRPCLBStatsStreamingSuccess(t *testing.T) {
+	defer leakcheck.Check(t)
 	stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
 		// The first non-failfast RPC succeeds, all connections are up.
@@ -785,6 +806,7 @@ func TestGRPCLBStatsStreamingSuccess(t *testing.T) {
 }
 
 func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) {
+	defer leakcheck.Check(t)
 	c := 0
 	stats := runAndGetStats(t, true, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
@@ -812,6 +834,7 @@ func TestGRPCLBStatsStreamingDropLoadBalancing(t *testing.T) {
 }
 
 func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) {
+	defer leakcheck.Check(t)
 	c := 0
 	stats := runAndGetStats(t, false, true, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
@@ -839,6 +862,7 @@ func TestGRPCLBStatsStreamingDropRateLimiting(t *testing.T) {
 }
 
 func TestGRPCLBStatsStreamingFailedToSend(t *testing.T) {
+	defer leakcheck.Check(t)
 	stats := runAndGetStats(t, false, false, func(cc *grpc.ClientConn) {
 		testC := testpb.NewTestServiceClient(cc)
 		// The first non-failfast RPC succeeds, all connections are up.
2 vendor/google.golang.org/grpc/naming/go17.go generated vendored

@@ -1,4 +1,4 @@
-// +build go1.6, !go1.8
+// +build go1.7, !go1.8
 
 /*
  *
2 vendor/google.golang.org/grpc/naming/go17_test.go generated vendored

@@ -1,4 +1,4 @@
-// +build go1.6, !go1.8
+// +build go1.7, !go1.8
 
 /*
  *
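Both naming files change only their first line: the build constraint moves from go1.6 to go1.7, matching the Go 1.6 support drop in this commit's .travis.yml and README changes. For reference, the resulting constraint (shown here as an illustrative comment, exactly as it appears upstream) keeps these files compiling only on Go 1.7.x toolchains:

// +build go1.7, !go1.8

// go1.7 is satisfied by Go 1.7 and every later release, while !go1.8
// excludes 1.8 and later, so together they pin the file to Go 1.7.x.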
141 vendor/google.golang.org/grpc/picker_wrapper.go generated vendored Normal file

@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"sync"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/transport"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblock when there's a picker update.
+type pickerWrapper struct {
+	mu         sync.Mutex
+	done       bool
+	blockingCh chan struct{}
+	picker     balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+	bp := &pickerWrapper{blockingCh: make(chan struct{})}
+	return bp
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
+func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
+	bp.mu.Lock()
+	if bp.done {
+		bp.mu.Unlock()
+		return
+	}
+	bp.picker = p
+	// bp.blockingCh should never be nil.
+	close(bp.blockingCh)
+	bp.blockingCh = make(chan struct{})
+	bp.mu.Unlock()
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+	var (
+		p  balancer.Picker
+		ch chan struct{}
+	)
+
+	for {
+		bp.mu.Lock()
+		if bp.done {
+			bp.mu.Unlock()
+			return nil, nil, ErrClientConnClosing
+		}
+
+		if bp.picker == nil {
+			ch = bp.blockingCh
+		}
+		if ch == bp.blockingCh {
+			// This could happen when either:
+			// - bp.picker is nil (the previous if condition), or
+			// - has called pick on the current picker.
+			bp.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				return nil, nil, ctx.Err()
+			case <-ch:
+			}
+			continue
+		}
+
+		ch = bp.blockingCh
+		p = bp.picker
+		bp.mu.Unlock()
+
+		subConn, put, err := p.Pick(ctx, opts)
+
+		if err != nil {
+			switch err {
+			case balancer.ErrNoSubConnAvailable:
+				continue
+			case balancer.ErrTransientFailure:
+				if !failfast {
+					continue
+				}
+				return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
+			default:
+				// err is some other error.
+				return nil, nil, toRPCErr(err)
+			}
+		}
+
+		acw, ok := subConn.(*acBalancerWrapper)
+		if !ok {
+			grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
+			continue
+		}
+		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+			return t, put, nil
+		}
+		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		// If ok == false, ac.state is not READY.
+		// A valid picker always returns READY subConn. This means the state of ac
+		// just changed, and picker will be updated shortly.
+		// continue back to the beginning of the for loop to repick.
+	}
+}
+
+func (bp *pickerWrapper) close() {
+	bp.mu.Lock()
+	defer bp.mu.Unlock()
+	if bp.done {
+		return
+	}
+	bp.done = true
+	close(bp.blockingCh)
+}
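The blocking behavior of picker_wrapper.go rests on one channel idiom: updatePicker closes blockingCh to wake every goroutine parked in pick, then immediately installs a fresh channel for the next round. A self-contained sketch of that close-and-replace broadcast (the notifier type is illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
	"time"
)

// notifier lets many goroutines wait for "the next update".
type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

// wait returns the channel to park on; it becomes readable on broadcast.
func (n *notifier) wait() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.ch
}

// broadcast wakes all current waiters and re-arms for future ones.
func (n *notifier) broadcast() {
	n.mu.Lock()
	close(n.ch)                // a closed channel is readable by everyone
	n.ch = make(chan struct{}) // fresh channel for the next update
	n.mu.Unlock()
}

func main() {
	n := &notifier{ch: make(chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-n.wait()
			fmt.Println("waiter", id, "woken")
		}(i)
	}
	time.Sleep(10 * time.Millisecond) // let the waiters park
	n.broadcast()
	wg.Wait()
}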
160 vendor/google.golang.org/grpc/picker_wrapper_test.go generated vendored Normal file

@@ -0,0 +1,160 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"fmt"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	_ "google.golang.org/grpc/grpclog/glogger"
+	"google.golang.org/grpc/test/leakcheck"
+	"google.golang.org/grpc/transport"
+)
+
+const goroutineCount = 5
+
+var (
+	testT  = &testTransport{}
+	testSC = &acBalancerWrapper{ac: &addrConn{
+		state:     connectivity.Ready,
+		transport: testT,
+	}}
+	testSCNotReady = &acBalancerWrapper{ac: &addrConn{
+		state: connectivity.TransientFailure,
+	}}
+)
+
+type testTransport struct {
+	transport.ClientTransport
+}
+
+type testingPicker struct {
+	err       error
+	sc        balancer.SubConn
+	maxCalled int64
+}
+
+func (p *testingPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	if atomic.AddInt64(&p.maxCalled, -1) < 0 {
+		return nil, nil, fmt.Errorf("Pick called to many times (> goroutineCount)")
+	}
+	if p.err != nil {
+		return nil, nil, p.err
+	}
+	return p.sc, nil, nil
+}
+
+func TestBlockingPickTimeout(t *testing.T) {
+	defer leakcheck.Check(t)
+	bp := newPickerWrapper()
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	if _, _, err := bp.pick(ctx, true, balancer.PickOptions{}); err != context.DeadlineExceeded {
+		t.Errorf("bp.pick returned error %v, want DeadlineExceeded", err)
+	}
+}
+
+func TestBlockingPick(t *testing.T) {
+	defer leakcheck.Check(t)
+	bp := newPickerWrapper()
+	// All goroutines should block because picker is nil in bp.
+	var finishedCount uint64
+	for i := goroutineCount; i > 0; i-- {
+		go func() {
+			if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT {
+				t.Errorf("bp.pick returned non-nil error: %v", err)
+			}
+			atomic.AddUint64(&finishedCount, 1)
+		}()
+	}
+	time.Sleep(50 * time.Millisecond)
+	if c := atomic.LoadUint64(&finishedCount); c != 0 {
+		t.Errorf("finished goroutines count: %v, want 0", c)
+	}
+	bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount})
+}
+
+func TestBlockingPickNoSubAvailable(t *testing.T) {
+	defer leakcheck.Check(t)
+	bp := newPickerWrapper()
+	var finishedCount uint64
+	bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount})
+	// All goroutines should block because picker returns no sc avilable.
+	for i := goroutineCount; i > 0; i-- {
+		go func() {
+			if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT {
+				t.Errorf("bp.pick returned non-nil error: %v", err)
+			}
+			atomic.AddUint64(&finishedCount, 1)
+		}()
+	}
+	time.Sleep(50 * time.Millisecond)
+	if c := atomic.LoadUint64(&finishedCount); c != 0 {
+		t.Errorf("finished goroutines count: %v, want 0", c)
+	}
+	bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount})
+}
+
+func TestBlockingPickTransientWaitforready(t *testing.T) {
+	defer leakcheck.Check(t)
+	bp := newPickerWrapper()
+	bp.updatePicker(&testingPicker{err: balancer.ErrTransientFailure, maxCalled: goroutineCount})
+	var finishedCount uint64
+	// All goroutines should block because picker returns transientFailure and
+	// picks are not failfast.
+	for i := goroutineCount; i > 0; i-- {
+		go func() {
+			if tr, _, err := bp.pick(context.Background(), false, balancer.PickOptions{}); err != nil || tr != testT {
+				t.Errorf("bp.pick returned non-nil error: %v", err)
+			}
+			atomic.AddUint64(&finishedCount, 1)
+		}()
+	}
+	time.Sleep(time.Millisecond)
+	if c := atomic.LoadUint64(&finishedCount); c != 0 {
+		t.Errorf("finished goroutines count: %v, want 0", c)
+	}
+	bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount})
+}
+
+func TestBlockingPickSCNotReady(t *testing.T) {
+	defer leakcheck.Check(t)
+	bp := newPickerWrapper()
+	bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount})
+	var finishedCount uint64
+	// All goroutines should block because sc is not ready.
+	for i := goroutineCount; i > 0; i-- {
+		go func() {
+			if tr, _, err := bp.pick(context.Background(), true, balancer.PickOptions{}); err != nil || tr != testT {
+				t.Errorf("bp.pick returned non-nil error: %v", err)
+			}
+			atomic.AddUint64(&finishedCount, 1)
+		}()
+	}
+	time.Sleep(time.Millisecond)
+	if c := atomic.LoadUint64(&finishedCount); c != 0 {
+		t.Errorf("finished goroutines count: %v, want 0", c)
+	}
+	bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount})
}
95 vendor/google.golang.org/grpc/pickfirst.go generated vendored Normal file

@@ -0,0 +1,95 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+func newPickfirstBuilder() balancer.Builder {
+	return &pickfirstBuilder{}
+}
+
+type pickfirstBuilder struct{}
+
+func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	return &pickfirstBalancer{cc: cc}
+}
+
+func (*pickfirstBuilder) Name() string {
+	return "pickfirst"
+}
+
+type pickfirstBalancer struct {
+	cc balancer.ClientConn
+	sc balancer.SubConn
+}
+
+func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	if err != nil {
+		grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
+		return
+	}
+	if b.sc == nil {
+		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
+		if err != nil {
+			grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+			return
+		}
+		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
+	} else {
+		b.sc.UpdateAddresses(addrs)
+	}
+}
+
+func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+	grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+	if b.sc != sc || s == connectivity.Shutdown {
+		b.sc = nil
+		return
+	}
+
+	switch s {
+	case connectivity.Ready, connectivity.Idle:
+		b.cc.UpdateBalancerState(s, &picker{sc: sc})
+	case connectivity.Connecting:
+		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
+	case connectivity.TransientFailure:
+		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
+	}
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+type picker struct {
+	err error
+	sc  balancer.SubConn
+}
+
+func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	if p.err != nil {
+		return nil, nil, p.err
+	}
+	return p.sc, nil, nil
+}
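Wiring this builder into a ClientConn is exercised at length in pickfirst_test.go below; condensed, the pattern looks like this (a sketch living inside package grpc, with a placeholder backend address):

package grpc

import (
	"testing"

	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func testPickfirstWiring(t *testing.T) {
	// A manual resolver lets the test push addresses explicitly.
	r, rcleanup := manual.GenerateAndRegisterManualResolver()
	defer rcleanup()

	cc, err := Dial(r.Scheme()+":///test.server",
		WithInsecure(),
		WithBalancerBuilder(newPickfirstBuilder()), // the builder defined above
		WithCodec(testCodec{}))
	if err != nil {
		t.Fatalf("failed to dial: %v", err)
	}
	defer cc.Close()

	// RPCs block (or fail fast on context expiry) until the resolver
	// reports at least one address; then the single SubConn connects.
	r.NewAddress([]resolver.Address{{Addr: "127.0.0.1:12345"}})
}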
352 vendor/google.golang.org/grpc/pickfirst_test.go generated vendored Normal file

@@ -0,0 +1,352 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"math"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/resolver/manual"
+	"google.golang.org/grpc/test/leakcheck"
+)
+
+func TestOneBackendPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 1
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}})
+	// The second RPC should succeed.
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			return
+		}
+		time.Sleep(time.Millisecond)
+	}
+	t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port)
+}
+
+func TestBackendsPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 2
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}})
+	// The second RPC should succeed with the first server.
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			return
+		}
+		time.Sleep(time.Millisecond)
+	}
+	t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port)
+}
+
+func TestNewAddressWhileBlockingPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 1
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	var wg sync.WaitGroup
+	for i := 0; i < 3; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			// This RPC blocks until NewAddress is called.
+			Invoke(context.Background(), "/foo/bar", &req, &reply, cc, FailFast(false))
+		}()
+	}
+	time.Sleep(50 * time.Millisecond)
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}})
+	wg.Wait()
+}
+
+func TestCloseWithPendingRPCPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 1
+	_, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	var wg sync.WaitGroup
+	for i := 0; i < 3; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			// This RPC blocks until NewAddress is called.
+			Invoke(context.Background(), "/foo/bar", &req, &reply, cc, FailFast(false))
+		}()
+	}
+	time.Sleep(50 * time.Millisecond)
+	cc.Close()
+	wg.Wait()
+}
+
+func TestOneServerDownPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 2
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}})
+	// The second RPC should succeed with the first server.
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+
+	servers[0].stop()
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port {
+			return
+		}
+		time.Sleep(time.Millisecond)
+	}
+	t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port)
+}
+
+func TestAllServersDownPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 2
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}})
+	// The second RPC should succeed with the first server.
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+
+	for i := 0; i < numServers; i++ {
+		servers[i].stop()
+	}
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); Code(err) == codes.Unavailable {
+			return
+		}
+		time.Sleep(time.Millisecond)
+	}
+	t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err)
+}
+
+func TestAddressesRemovedPickfirst(t *testing.T) {
+	defer leakcheck.Check(t)
+	r, rcleanup := manual.GenerateAndRegisterManualResolver()
+	defer rcleanup()
+
+	numServers := 3
+	servers, _, scleanup := startServers(t, numServers, math.MaxInt32)
+	defer scleanup()
+
+	cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithBalancerBuilder(newPickfirstBuilder()), WithCodec(testCodec{}))
+	if err != nil {
+		t.Fatalf("failed to dial: %v", err)
+	}
+	defer cc.Close()
+	// The first RPC should fail because there's no address.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
+	defer cancel()
+	req := "port"
+	var reply string
+	if err := Invoke(ctx, "/foo/bar", &req, &reply, cc); err == nil || Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err)
+	}
+
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}})
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+	for i := 0; i < 20; i++ {
+		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port {
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// Remove server[0].
+	r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}})
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[1].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+	for i := 0; i < 20; i++ {
+		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port {
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// Append server[0], nothing should change.
+	r.NewAddress([]resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}})
+	for i := 0; i < 20; i++ {
+		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[1].port {
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port)
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// Remove server[1].
+	r.NewAddress([]resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}})
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[2].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+	for i := 0; i < 20; i++ {
+		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[2].port {
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port)
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	// Remove server[2].
+	r.NewAddress([]resolver.Address{{Addr: servers[0].addr}})
+	for i := 0; i < 1000; i++ {
+		if err = Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err != nil && ErrorDesc(err) == servers[0].port {
+			break
+		}
+		time.Sleep(time.Millisecond)
+	}
+	for i := 0; i < 20; i++ {
+		if err := Invoke(context.Background(), "/foo/bar", &req, &reply, cc); err == nil || ErrorDesc(err) != servers[0].port {
+			t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port)
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
3 vendor/google.golang.org/grpc/proxy.go generated vendored

@@ -82,7 +82,8 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_
 		Header: map[string][]string{"User-Agent": {grpcUA}},
 	})
 
-	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
 		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
 	}
 
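With the go16.go/go17.go shims deleted earlier in this commit, proxy.go inlines the Go 1.7+ form directly: attach the caller's context to the CONNECT request, then write it over the raw connection. A standalone, hedged sketch (the method, URL, and User-Agent values are stand-ins for what doHTTPConnectHandshake assembles):

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"net/url"
)

// writeConnect mirrors the hunk above: build the CONNECT request, bind it
// to ctx with WithContext, and write it over the already-open proxy conn.
func writeConnect(ctx context.Context, conn net.Conn, addr string) error {
	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Host: addr},
		Header: map[string][]string{"User-Agent": {"grpc-go"}},
	}
	req = req.WithContext(ctx)
	if err := req.Write(conn); err != nil {
		return fmt.Errorf("failed to write the HTTP request: %v", err)
	}
	return nil
}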
49 vendor/google.golang.org/grpc/proxy_test.go generated vendored

@@ -28,6 +28,7 @@ import (
 	"time"
 
 	"golang.org/x/net/context"
+	"google.golang.org/grpc/test/leakcheck"
 )
 
 const (
@@ -45,29 +46,6 @@ func overwrite(hpfe func(req *http.Request) (*url.URL, error)) func() {
 	}
 }
 
-func TestMapAddressEnv(t *testing.T) {
-	// Overwrite the function in the test and restore them in defer.
-	hpfe := func(req *http.Request) (*url.URL, error) {
-		if req.URL.Host == envTestAddr {
-			return &url.URL{
-				Scheme: "https",
-				Host:   envProxyAddr,
-			}, nil
-		}
-		return nil, nil
-	}
-	defer overwrite(hpfe)()
-
-	// envTestAddr should be handled by ProxyFromEnvironment.
-	got, err := mapAddress(context.Background(), envTestAddr)
-	if err != nil {
-		t.Error(err)
-	}
-	if got != envProxyAddr {
-		t.Errorf("want %v, got %v", envProxyAddr, got)
-	}
-}
-
 type proxyServer struct {
 	t   *testing.T
 	lis net.Listener
@@ -118,6 +96,7 @@ func (p *proxyServer) stop() {
 }
 
 func TestHTTPConnect(t *testing.T) {
+	defer leakcheck.Check(t)
 	plis, err := net.Listen("tcp", "localhost:0")
 	if err != nil {
 		t.Fatalf("failed to listen: %v", err)
@@ -175,3 +154,27 @@ func TestHTTPConnect(t *testing.T) {
 		t.Fatalf("received msg: %v, want %v", recvBuf, msg)
 	}
 }
+
+func TestMapAddressEnv(t *testing.T) {
+	defer leakcheck.Check(t)
+	// Overwrite the function in the test and restore them in defer.
+	hpfe := func(req *http.Request) (*url.URL, error) {
+		if req.URL.Host == envTestAddr {
+			return &url.URL{
+				Scheme: "https",
+				Host:   envProxyAddr,
+			}, nil
+		}
+		return nil, nil
+	}
+	defer overwrite(hpfe)()
+
+	// envTestAddr should be handled by ProxyFromEnvironment.
+	got, err := mapAddress(context.Background(), envTestAddr)
+	if err != nil {
+		t.Error(err)
+	}
+	if got != envProxyAddr {
+		t.Errorf("want %v, got %v", envProxyAddr, got)
+	}
+}

143
vendor/google.golang.org/grpc/resolver/resolver.go
generated
vendored
Normal file

@@ -0,0 +1,143 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package resolver defines APIs for name resolution in gRPC.
// All APIs in this package are experimental.
package resolver

var (
	// m is a map from scheme to resolver builder.
	m = make(map[string]Builder)
	// defaultScheme is the default scheme to use.
	defaultScheme string
)

// TODO(bar) install dns resolver in init(){}.

// Register registers the resolver builder to the resolver map.
// b.Scheme will be used as the scheme registered with this builder.
func Register(b Builder) {
	m[b.Scheme()] = b
}

// Get returns the resolver builder registered with the given scheme.
// If no builder is registered with the scheme, the default scheme will
// be used.
// If the default scheme is not modified, "dns" will be the default
// scheme, and the preinstalled dns resolver will be used.
// If the default scheme is modified, and a resolver is registered with
// the scheme, that resolver will be returned.
// If the default scheme is modified, and no resolver is registered with
// the scheme, nil will be returned.
func Get(scheme string) Builder {
	if b, ok := m[scheme]; ok {
		return b
	}
	if b, ok := m[defaultScheme]; ok {
		return b
	}
	return nil
}

// SetDefaultScheme sets the default scheme that will be used.
// The default default scheme is "dns".
func SetDefaultScheme(scheme string) {
	defaultScheme = scheme
}

// AddressType indicates the address type returned by name resolution.
type AddressType uint8

const (
	// Backend indicates the address is for a backend server.
	Backend AddressType = iota
	// GRPCLB indicates the address is for a grpclb load balancer.
	GRPCLB
)

// Address represents a server the client connects to.
// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
	// Addr is the server address on which a connection will be established.
	Addr string
	// Type is the type of this address.
	Type AddressType
	// ServerName is the name of this address.
	// It's the name of the grpc load balancer, which will be used for authentication.
	ServerName string
	// Metadata is the information associated with Addr, which may be used
	// to make load balancing decision.
	Metadata interface{}
}

// BuildOption includes additional information for the builder to create
// the resolver.
type BuildOption struct {
}

// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
type ClientConn interface {
	// NewAddress is called by resolver to notify ClientConn a new list
	// of resolved addresses.
	// The address list should be the complete list of resolved addresses.
	NewAddress(addresses []Address)
	// NewServiceConfig is called by resolver to notify ClientConn a new
	// service config. The service config should be provided as a json string.
	NewServiceConfig(serviceConfig string)
}

// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
type Target struct {
	Scheme    string
	Authority string
	Endpoint  string
}

// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
	// Build creates a new resolver for the given target.
	//
	// gRPC dial calls Build synchronously, and fails if the returned error is
	// not nil.
	Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
	// Scheme returns the scheme supported by this resolver.
	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
	Scheme() string
}

// ResolveNowOption includes additional information for ResolveNow.
type ResolveNowOption struct{}

// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
	// ResolveNow will be called by gRPC to try to resolve the target name again.
	// It's just a hint, resolver can ignore this if it's not necessary.
	ResolveNow(ResolveNowOption)
	// Close closes the resolver.
	Close()
}

// UnregisterForTesting removes the resolver builder with the given scheme from the
// resolver map.
// This function is for testing only.
func UnregisterForTesting(scheme string) {
	delete(m, scheme)
}
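
The registry above is the entire plug-in surface of the new resolver package. A minimal sketch of how a custom builder would hook into it — the "static" scheme, the staticBuilder/staticResolver names, and the hard-coded backend address are hypothetical illustrations, not part of this commit:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// staticBuilder is a hypothetical Builder that reports a fixed address list
// for the "static" scheme.
type staticBuilder struct{}

func (*staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	// Push the initial address list to the ClientConn immediately.
	cc.NewAddress([]resolver.Address{{Addr: "127.0.0.1:50051", Type: resolver.Backend}})
	return r, nil
}

func (*staticBuilder) Scheme() string { return "static" }

type staticResolver struct{ cc resolver.ClientConn }

func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {} // fixed list: nothing to refresh
func (*staticResolver) Close()                               {}

func main() {
	resolver.Register(&staticBuilder{})
	b := resolver.Get("static") // returns the builder registered above
	fmt.Println(b.Scheme())     // static
}

Dial would then route a target such as "static:///ignored" to this builder via parseTarget and resolver.Get (see resolver_conn_wrapper.go below).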

139
vendor/google.golang.org/grpc/resolver_conn_wrapper.go
generated
vendored
Normal file

@@ -0,0 +1,139 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"strings"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/resolver"
)

// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements the resolver.ClientConn interface.
type ccResolverWrapper struct {
	cc       *ClientConn
	resolver resolver.Resolver
	addrCh   chan []resolver.Address
	scCh     chan string
	done     chan struct{}
}

// split2 returns the values from strings.SplitN(s, sep, 2).
// If sep is not found, it returns "", s instead.
func split2(s, sep string) (string, string) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", s
	}
	return spl[0], spl[1]
}

// parseTarget splits target into a struct containing scheme, authority and
// endpoint.
func parseTarget(target string) (ret resolver.Target) {
	ret.Scheme, ret.Endpoint = split2(target, "://")
	ret.Authority, ret.Endpoint = split2(ret.Endpoint, "/")
	return ret
}

// newCCResolverWrapper parses cc.target for scheme and gets the resolver
// builder for this scheme. It then builds the resolver and starts the
// monitoring goroutine for it.
//
// This function could return nil, nil, in tests for old behaviors.
// TODO(bar) never return nil, nil when DNS becomes the default resolver.
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
	target := parseTarget(cc.target)
	grpclog.Infof("dialing to target with scheme: %q", target.Scheme)

	rb := resolver.Get(target.Scheme)
	if rb == nil {
		// TODO(bar) return error when DNS becomes the default (implemented and
		// registered by DNS package).
		grpclog.Infof("could not get resolver for scheme: %q", target.Scheme)
		return nil, nil
	}

	ccr := &ccResolverWrapper{
		cc:     cc,
		addrCh: make(chan []resolver.Address, 1),
		scCh:   make(chan string, 1),
		done:   make(chan struct{}),
	}

	var err error
	ccr.resolver, err = rb.Build(target, ccr, resolver.BuildOption{})
	if err != nil {
		return nil, err
	}
	go ccr.watcher()
	return ccr, nil
}

// watcher processes address updates and service config updates sequentially.
// Otherwise, we need to resolve possible races between address and service
// config (e.g. they specify different balancer types).
func (ccr *ccResolverWrapper) watcher() {
	for {
		select {
		case <-ccr.done:
			return
		default:
		}

		select {
		case addrs := <-ccr.addrCh:
			grpclog.Infof("ccResolverWrapper: sending new addresses to balancer wrapper: %v", addrs)
			// TODO(bar switching) this should never be nil. Pickfirst should be default.
			if ccr.cc.balancerWrapper != nil {
				// TODO(bar switching) create balancer if it's nil?
				ccr.cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
			}
		case sc := <-ccr.scCh:
			grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
		case <-ccr.done:
			return
		}
	}
}

func (ccr *ccResolverWrapper) close() {
	ccr.resolver.Close()
	close(ccr.done)
}

// NewAddress is called by the resolver implementation to send addresses to gRPC.
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
	select {
	case <-ccr.addrCh:
	default:
	}
	ccr.addrCh <- addrs
}

// NewServiceConfig is called by the resolver implementation to send service
// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
	select {
	case <-ccr.scCh:
	default:
	}
	ccr.scCh <- sc
}
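
NewAddress and NewServiceConfig both rely on the same channel idiom: a one-slot buffered channel whose sender first drains any unconsumed value, so the watcher goroutine only ever observes the most recent update and the resolver never blocks on a slow consumer. A standalone sketch of the idiom (safe here because a single goroutine sends, as in the wrapper):

package main

import "fmt"

func main() {
	ch := make(chan string, 1)

	send := func(v string) {
		select {
		case <-ch: // discard a stale, unconsumed value
		default:
		}
		ch <- v // cannot block: the buffer slot is now free
	}

	send("update-1")
	send("update-2") // replaces update-1 before anyone reads it

	fmt.Println(<-ch) // update-2
}

With multiple concurrent senders this would need extra synchronization, but the wrapper funnels all sends through one resolver.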

47
vendor/google.golang.org/grpc/resolver_conn_wrapper_test.go
generated
vendored
Normal file

@@ -0,0 +1,47 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"testing"

	"google.golang.org/grpc/resolver"
)

func TestParseTarget(t *testing.T) {
	for _, test := range []resolver.Target{
		{"", "", ""},
		{"a", "", ""},
		{"", "a", ""},
		{"", "", "a"},
		{"a", "b", ""},
		{"a", "", "b"},
		{"", "a", "b"},
		{"a", "b", "c"},
		{"dns", "a.server.com", "google.com"},
		{"dns", "a.server.com", "google.com"},
		{"dns", "a.server.com", "google.com/?a=b"},
	} {
		str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint
		got := parseTarget(str)
		if got != test {
			t.Errorf("parseTarget(%q) = %v, want %v", str, got, test)
		}
	}
}

71
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored

@@ -21,10 +21,12 @@ package grpc
import (
	"bytes"
	"compress/gzip"
	stdctx "context"
	"encoding/binary"
	"io"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"time"

@@ -132,7 +134,9 @@ type callInfo struct {
	creds credentials.PerRPCCredentials
}

var defaultCallInfo = callInfo{failFast: true}
func defaultCallInfo() *callInfo {
	return &callInfo{failFast: true}
}

// CallOption configures a Call before it starts or extracts information from
// a Call after it completes.
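
The change from a package-level defaultCallInfo value to a constructor is presumably about ownership: CallOptions mutate the callInfo through their before() hooks, so each RPC needs its own instance it can write to through a pointer. A toy illustration with a simplified struct and a hypothetical failFast helper:

package main

import "fmt"

type callInfo struct{ failFast bool }

// defaultCallInfo hands every RPC a fresh mutable instance, so per-call
// options can write through the pointer without affecting other calls.
func defaultCallInfo() *callInfo {
	return &callInfo{failFast: true}
}

// failFast mimics a CallOption's before() hook mutating the per-call struct.
func failFast(c *callInfo, v bool) { c.failFast = v }

func main() {
	c1, c2 := defaultCallInfo(), defaultCallInfo()
	failFast(c1, false) // only this call opts into wait-for-ready

	fmt.Println(c1.failFast, c2.failFast) // false true
}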
@@ -384,14 +388,15 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
}

type rpcInfo struct {
	failfast      bool
	bytesSent     bool
	bytesReceived bool
}

type rpcInfoContextKey struct{}

func newContextWithRPCInfo(ctx context.Context) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{})
func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
}

func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
@@ -401,11 +406,63 @@ func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {

func updateRPCInfoInContext(ctx context.Context, s rpcInfo) {
	if ss, ok := rpcInfoFromContext(ctx); ok {
		*ss = s
		ss.bytesReceived = s.bytesReceived
		ss.bytesSent = s.bytesSent
	}
	return
}

// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
	if _, ok := status.FromError(err); ok {
		return err
	}
	switch e := err.(type) {
	case transport.StreamError:
		return status.Error(e.Code, e.Desc)
	case transport.ConnectionError:
		return status.Error(codes.Unavailable, e.Desc)
	default:
		switch err {
		case context.DeadlineExceeded, stdctx.DeadlineExceeded:
			return status.Error(codes.DeadlineExceeded, err.Error())
		case context.Canceled, stdctx.Canceled:
			return status.Error(codes.Canceled, err.Error())
		case ErrClientConnClosing:
			return status.Error(codes.FailedPrecondition, err.Error())
		}
	}
	return status.Error(codes.Unknown, err.Error())
}

// convertCode converts a standard Go error into its canonical code. Note that
// this is only used to translate the error returned by the server applications.
func convertCode(err error) codes.Code {
	switch err {
	case nil:
		return codes.OK
	case io.EOF:
		return codes.OutOfRange
	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
		return codes.FailedPrecondition
	case os.ErrInvalid:
		return codes.InvalidArgument
	case context.Canceled, stdctx.Canceled:
		return codes.Canceled
	case context.DeadlineExceeded, stdctx.DeadlineExceeded:
		return codes.DeadlineExceeded
	}
	switch {
	case os.IsExist(err):
		return codes.AlreadyExists
	case os.IsNotExist(err):
		return codes.NotFound
	case os.IsPermission(err):
		return codes.PermissionDenied
	}
	return codes.Unknown
}

// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
//
@@ -452,7 +509,7 @@ type MethodConfig struct {
	// MaxReqSize is the maximum allowed payload size for an individual request in a
	// stream (client->server) in bytes. The size which is measured is the serialized
	// payload after per-message compression (but before stream compression) in bytes.
	// The actual value used is the minumum of the value specified here and the value set
	// The actual value used is the minimum of the value specified here and the value set
	// by the application via the gRPC client API. If either one is not set, then the other
	// will be used. If neither is set, then the built-in default is used.
	MaxReqSize *int
@@ -497,7 +554,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {

// SupportPackageIsVersion3 is referenced from generated protocol buffer files.
// The latest support package version is 4.
// SupportPackageIsVersion3 is kept for compability. It will be removed in the
// SupportPackageIsVersion3 is kept for compatibility. It will be removed in the
// next support package version update.
const SupportPackageIsVersion3 = true

@@ -510,6 +567,6 @@ const SupportPackageIsVersion3 = true
const SupportPackageIsVersion4 = true

// Version is the current grpc version.
const Version = "1.6.0"
const Version = "1.7.0"

const grpcUA = "grpc-go/" + Version

25
vendor/google.golang.org/grpc/server.go
generated
vendored

@@ -116,6 +116,8 @@ type options struct {
	keepalivePolicy       keepalive.EnforcementPolicy
	initialWindowSize     int32
	initialConnWindowSize int32
	writeBufferSize       int
	readBufferSize        int
}

var defaultServerOptions = options{
@@ -126,6 +128,22 @@ var defaultServerOptions = options{
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)

// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
// before doing a write on the wire.
func WriteBufferSize(s int) ServerOption {
	return func(o *options) {
		o.writeBufferSize = s
	}
}

// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
// for one read syscall.
func ReadBufferSize(s int) ServerOption {
	return func(o *options) {
		o.readBufferSize = s
	}
}
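
A possible usage sketch for the two new options; the 64 KiB sizes and the address are illustrative, not recommendations from this commit:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	// Larger write buffers batch more frames per syscall at the cost of
	// per-connection memory; read buffers bound the bytes pulled per read.
	s := grpc.NewServer(
		grpc.WriteBufferSize(64*1024),
		grpc.ReadBufferSize(64*1024),
	)
	// Register services here before serving.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve error: %v", err)
	}
}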

// InitialWindowSize returns a ServerOption that sets window size for stream.
// The lower bound for window size is 64K and any value smaller than that will be ignored.
func InitialWindowSize(s int32) ServerOption {
@@ -260,7 +278,7 @@ func StatsHandler(h stats.Handler) ServerOption {
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function has full access to the Context of the request and the
// stream, and the invocation passes through interceptors.
// stream, and the invocation bypasses interceptors.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
	return func(o *options) {
		o.unknownStreamDesc = &StreamDesc{
@@ -524,6 +542,8 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
		KeepalivePolicy:       s.opts.keepalivePolicy,
		InitialWindowSize:     s.opts.initialWindowSize,
		InitialConnWindowSize: s.opts.initialConnWindowSize,
		WriteBufferSize:       s.opts.writeBufferSize,
		ReadBufferSize:        s.opts.readBufferSize,
	}
	st, err := transport.NewServerTransport("http2", c, config)
	if err != nil {
@@ -891,9 +911,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
		trInfo:       trInfo,
		statsHandler: sh,
	}
	if ss.cp != nil {
		ss.cbuf = new(bytes.Buffer)
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		defer func() {

4
vendor/google.golang.org/grpc/server_test.go
generated
vendored

@@ -23,6 +23,8 @@ import (
	"reflect"
	"strings"
	"testing"

	"google.golang.org/grpc/test/leakcheck"
)

type emptyServiceServer interface{}
@@ -30,6 +32,7 @@ type emptyServiceServer interface{}
type testServer struct{}

func TestStopBeforeServe(t *testing.T) {
	defer leakcheck.Check(t)
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("failed to create listener: %v", err)
@@ -51,6 +54,7 @@ func TestStopBeforeServe(t *testing.T) {
}

func TestGetServiceInfo(t *testing.T) {
	defer leakcheck.Check(t)
	testSd := ServiceDesc{
		ServiceName: "grpc.testing.EmptyService",
		HandlerType: (*emptyServiceServer)(nil),

10
vendor/google.golang.org/grpc/stats/stats.go
generated
vendored

@@ -135,8 +135,6 @@ func (s *OutPayload) isRPCStats() {}
type OutHeader struct {
	// Client is true if this OutHeader is from client side.
	Client bool
	// WireLength is the wire length of header.
	WireLength int

	// The following fields are valid only if Client is true.
	// FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -220,7 +218,7 @@ type outgoingTagsKey struct{}
// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
// SetTags will overwrite the values from earlier calls.
//
// NOTE: this is provided only for backward compatibilty with existing clients
// NOTE: this is provided only for backward compatibility with existing clients
// and will likely be removed in an upcoming release. New uses should transmit
// this type of data using metadata with a different, non-reserved (i.e. does
// not begin with "grpc-") header name.
@@ -230,7 +228,7 @@ func SetTags(ctx context.Context, b []byte) context.Context {

// Tags returns the tags from the context for the inbound RPC.
//
// NOTE: this is provided only for backward compatibilty with existing clients
// NOTE: this is provided only for backward compatibility with existing clients
// and will likely be removed in an upcoming release. New uses should transmit
// this type of data using metadata with a different, non-reserved (i.e. does
// not begin with "grpc-") header name.
@@ -262,7 +260,7 @@ type outgoingTraceKey struct{}
// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
// SetTrace will overwrite the values from earlier calls.
//
// NOTE: this is provided only for backward compatibilty with existing clients
// NOTE: this is provided only for backward compatibility with existing clients
// and will likely be removed in an upcoming release. New uses should transmit
// this type of data using metadata with a different, non-reserved (i.e. does
// not begin with "grpc-") header name.
@@ -272,7 +270,7 @@ func SetTrace(ctx context.Context, b []byte) context.Context {

// Trace returns the trace from the context for the inbound RPC.
//
// NOTE: this is provided only for backward compatibilty with existing clients
// NOTE: this is provided only for backward compatibility with existing clients
// and will likely be removed in an upcoming release. New uses should transmit
// this type of data using metadata with a different, non-reserved (i.e. does
// not begin with "grpc-") header name.

23
vendor/google.golang.org/grpc/stats/stats_test.go
generated
vendored

@@ -444,10 +444,6 @@ func checkInHeader(t *testing.T, d *gotData, e *expectedData) {
	if d.ctx == nil {
		t.Fatalf("d.ctx = nil, want <non-nil>")
	}
	// TODO check real length, not just > 0.
	if st.WireLength <= 0 {
		t.Fatalf("st.Lenght = 0, want > 0")
	}
	if !d.client {
		if st.FullMethod != e.method {
			t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method)
@@ -530,18 +526,13 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) {
func checkInTrailer(t *testing.T, d *gotData, e *expectedData) {
	var (
		ok bool
		st *stats.InTrailer
	)
	if st, ok = d.s.(*stats.InTrailer); !ok {
	if _, ok = d.s.(*stats.InTrailer); !ok {
		t.Fatalf("got %T, want InTrailer", d.s)
	}
	if d.ctx == nil {
		t.Fatalf("d.ctx = nil, want <non-nil>")
	}
	// TODO check real length, not just > 0.
	if st.WireLength <= 0 {
		t.Fatalf("st.Lenght = 0, want > 0")
	}
}

func checkOutHeader(t *testing.T, d *gotData, e *expectedData) {
@@ -555,10 +546,6 @@ func checkOutHeader(t *testing.T, d *gotData, e *expectedData) {
	if d.ctx == nil {
		t.Fatalf("d.ctx = nil, want <non-nil>")
	}
	// TODO check real length, not just > 0.
	if st.WireLength <= 0 {
		t.Fatalf("st.Lenght = 0, want > 0")
	}
	if d.client {
		if st.FullMethod != e.method {
			t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method)
@@ -642,10 +629,6 @@ func checkOutTrailer(t *testing.T, d *gotData, e *expectedData) {
	if st.Client {
		t.Fatalf("st IsClient = true, want false")
	}
	// TODO check real length, not just > 0.
	if st.WireLength <= 0 {
		t.Fatalf("st.Lenght = 0, want > 0")
	}
}

func checkEnd(t *testing.T, d *gotData, e *expectedData) {
@@ -830,7 +813,9 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f
		err: err,
	}

	h.mu.Lock()
	checkConnStats(t, h.gotConn)
	h.mu.Unlock()
	checkServerStats(t, h.gotRPC, expect, checkFuncs)
}

@@ -1123,7 +1108,9 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map
		err: err,
	}

	h.mu.Lock()
	checkConnStats(t, h.gotConn)
	h.mu.Unlock()
	checkClientStats(t, h.gotRPC, expect, checkFuncs)
}


61
vendor/google.golang.org/grpc/stream.go
generated
vendored

@@ -27,6 +27,7 @@ import (

	"golang.org/x/net/context"
	"golang.org/x/net/trace"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
@@ -106,10 +107,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
	var (
		t      transport.ClientTransport
		s      *transport.Stream
		put    func()
		done   func(balancer.DoneInfo)
		cancel context.CancelFunc
	)
	c := defaultCallInfo
	c := defaultCallInfo()
	mc := cc.GetMethodConfig(method)
	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
@@ -126,7 +127,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth

	opts = append(cc.dopts.callOptions, opts...)
	for _, o := range opts {
		if err := o.before(&c); err != nil {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
@@ -167,7 +168,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
		}
	}()
	}
	ctx = newContextWithRPCInfo(ctx)
	ctx = newContextWithRPCInfo(ctx, c.failFast)
	sh := cc.dopts.copts.StatsHandler
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
@@ -188,11 +189,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
		}
	}()
	}
	gopts := BalancerGetOptions{
		BlockingWait: !c.failFast,
	}
	for {
		t, put, err = cc.getTransport(ctx, gopts)
		t, done, err = cc.getTransport(ctx, c.failFast)
		if err != nil {
			// TODO(zhaoq): Probably revisit the error handling.
			if _, ok := status.FromError(err); ok {
@@ -210,15 +208,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth

		s, err = t.NewStream(ctx, callHdr)
		if err != nil {
			if _, ok := err.(transport.ConnectionError); ok && put != nil {
			if _, ok := err.(transport.ConnectionError); ok && done != nil {
				// If error is connection error, transport was sending data on wire,
				// and we are not sure if anything has been sent on wire.
				// If error is not connection error, we are sure nothing has been sent.
				updateRPCInfoInContext(ctx, rpcInfo{bytesSent: true, bytesReceived: false})
			}
			if put != nil {
				put()
				put = nil
			if done != nil {
				done(balancer.DoneInfo{Err: err})
				done = nil
			}
			if _, ok := err.(transport.ConnectionError); (ok || err == transport.ErrStreamDrain) && !c.failFast {
				continue
@@ -240,10 +238,10 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
		dc:     cc.dopts.dc,
		cancel: cancel,

		put: put,
		t:   t,
		s:   s,
		p:   &parser{r: s},
		done: done,
		t:    t,
		s:    s,
		p:    &parser{r: s},

		tracing: EnableTracing,
		trInfo:  trInfo,
@@ -251,9 +249,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
		statsCtx:     ctx,
		statsHandler: cc.dopts.copts.StatsHandler,
	}
	if cc.dopts.cp != nil {
		cs.cbuf = new(bytes.Buffer)
	}
	// Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
	// when there is no pending I/O operations on this stream.
	go func() {
@@ -283,21 +278,20 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
	opts  []CallOption
	c     callInfo
	c     *callInfo
	t     transport.ClientTransport
	s     *transport.Stream
	p     *parser
	desc  *StreamDesc
	codec Codec
	cp    Compressor
	cbuf  *bytes.Buffer
	dc    Decompressor
	cancel context.CancelFunc

	tracing bool // set to EnableTracing when the clientStream is created.

	mu       sync.Mutex
	put      func()
	done     func(balancer.DoneInfo)
	closed   bool
	finished bool
	// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
@@ -367,12 +361,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
			Client: true,
		}
	}
	hdr, data, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
	defer func() {
		if cs.cbuf != nil {
			cs.cbuf.Reset()
		}
	}()
	hdr, data, err := encode(cs.codec, m, cs.cp, bytes.NewBuffer([]byte{}), outPayload)
	if err != nil {
		return err
	}
@@ -494,15 +483,15 @@ func (cs *clientStream) finish(err error) {
		}
	}()
	for _, o := range cs.opts {
		o.after(&cs.c)
		o.after(cs.c)
	}
	if cs.put != nil {
	if cs.done != nil {
		updateRPCInfoInContext(cs.s.Context(), rpcInfo{
			bytesSent:     cs.s.BytesSent(),
			bytesReceived: cs.s.BytesReceived(),
		})
		cs.put()
		cs.put = nil
		cs.done(balancer.DoneInfo{Err: err})
		cs.done = nil
	}
	if cs.statsHandler != nil {
		end := &stats.End{
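
The put() callback is replaced throughout by done(balancer.DoneInfo), so the stream now reports how the RPC ended instead of merely returning its connection slot. A sketch of what a consumer of that signal might look like — the failure tally is a hypothetical policy; only the DoneInfo type comes from this commit:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/balancer"
)

// doneCounter builds a done callback like the one clientStream.finish
// invokes above; a balancer could use the tally to steer traffic away
// from a failing backend.
func doneCounter(failures *int) func(balancer.DoneInfo) {
	return func(di balancer.DoneInfo) {
		if di.Err != nil {
			*failures++
		}
	}
}

func main() {
	var failures int
	done := doneCounter(&failures)

	done(balancer.DoneInfo{})                        // RPC finished cleanly
	done(balancer.DoneInfo{Err: errors.New("boom")}) // RPC failed
	fmt.Println(failures)                            // 1
}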
@@ -557,7 +546,6 @@ type serverStream struct {
	codec Codec
	cp    Compressor
	dc    Decompressor
	cbuf  *bytes.Buffer
	maxReceiveMessageSize int
	maxSendMessageSize    int
	trInfo *traceInfo
@@ -613,12 +601,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
	if ss.statsHandler != nil {
		outPayload = &stats.OutPayload{}
	}
	hdr, data, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
	defer func() {
		if ss.cbuf != nil {
			ss.cbuf.Reset()
		}
	}()
	hdr, data, err := encode(ss.codec, m, ss.cp, bytes.NewBuffer([]byte{}), outPayload)
	if err != nil {
		return err
	}

15
vendor/google.golang.org/grpc/trace.go
generated
vendored

@@ -31,7 +31,7 @@ import (

// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
// This should only be set before any RPCs are sent or received by this program.
var EnableTracing = true
var EnableTracing bool

// methodFamily returns the trace family for the given method.
// It turns "/pkg.Service/GetFoo" into "pkg.Service".
@@ -76,6 +76,15 @@ func (f *firstLine) String() string {
	return line.String()
}

const truncateSize = 100

func truncate(x string, l int) string {
	if l > len(x) {
		return x
	}
	return x[:l]
}

// payload represents an RPC request or response payload.
type payload struct {
	sent bool // whether this is an outgoing payload
@@ -85,9 +94,9 @@ type payload struct {

func (p payload) String() string {
	if p.sent {
		return fmt.Sprintf("sent: %v", p.msg)
		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
	}
	return fmt.Sprintf("recv: %v", p.msg)
	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
}
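
A quick demonstration of the truncation semantics now applied to traced payloads, using the same helper and the truncateSize = 100 cap:

package main

import "fmt"

// truncate mirrors the helper above: cap a formatted payload at l bytes so
// large messages do not bloat /debug/requests traces.
func truncate(x string, l int) string {
	if l > len(x) {
		return x
	}
	return x[:l]
}

func main() {
	long := "sent: " + fmt.Sprint(make([]int, 200)) // a long formatted payload
	fmt.Println(len(truncate(long, 100)))           // 100
	fmt.Println(truncate("sent: ok", 100))          // sent: ok (unchanged)
}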

type fmtStringer struct {

6
vendor/google.golang.org/grpc/transport/bdp_estimator.go
generated
vendored

@@ -59,7 +59,7 @@ type bdpEstimator struct {
	sample uint32
	// bwMax is the maximum bandwidth noted so far (bytes/sec).
	bwMax float64
	// bool to keep track of the begining of a new measurement cycle.
	// bool to keep track of the beginning of a new measurement cycle.
	isSent bool
	// Callback to update the window sizes.
	updateFlowControl func(n uint32)
@@ -70,7 +70,7 @@ type bdpEstimator struct {
}

// timesnap registers the time bdp ping was sent out so that
// network rtt can be calculated when its ack is recieved.
// network rtt can be calculated when its ack is received.
// It is called (by controller) when the bdpPing is
// being written on the wire.
func (b *bdpEstimator) timesnap(d [8]byte) {
@@ -119,7 +119,7 @@ func (b *bdpEstimator) calculate(d [8]byte) {
		b.rtt += (rttSample - b.rtt) * float64(alpha)
	}
	b.isSent = false
	// The number of bytes accumalated so far in the sample is smaller
	// The number of bytes accumulated so far in the sample is smaller
	// than or equal to 1.5 times the real BDP on a saturated connection.
	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
	if bwCurrent > b.bwMax {

71
vendor/google.golang.org/grpc/transport/control.go
generated
vendored

@@ -22,9 +22,11 @@ import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

const (
@@ -44,15 +46,44 @@ const (
	defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
	// max window limit set by HTTP2 Specs.
	maxWindowSize = math.MaxInt32
	// defaultLocalSendQuota is the default value for the number of data
	// bytes that each stream can schedule before some of it being
	// flushed out.
	defaultLocalSendQuota = 64 * 1024
)

// The following defines various control items which could flow through
// the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc.

type headerFrame struct {
	streamID  uint32
	hf        []hpack.HeaderField
	endStream bool
}

func (*headerFrame) item() {}

type continuationFrame struct {
	streamID            uint32
	endHeaders          bool
	headerBlockFragment []byte
}

type dataFrame struct {
	streamID  uint32
	endStream bool
	d         []byte
	f         func()
}

func (*dataFrame) item() {}

func (*continuationFrame) item() {}

type windowUpdate struct {
	streamID  uint32
	increment uint32
	flush     bool
}

func (*windowUpdate) item() {}
@@ -97,8 +128,9 @@ func (*ping) item() {}
type quotaPool struct {
	c chan int

	mu    sync.Mutex
	quota int
	mu      sync.Mutex
	version uint32
	quota   int
}

// newQuotaPool creates a quotaPool which has quota q available to consume.
@@ -119,6 +151,10 @@ func newQuotaPool(q int) *quotaPool {
func (qb *quotaPool) add(v int) {
	qb.mu.Lock()
	defer qb.mu.Unlock()
	qb.lockedAdd(v)
}

func (qb *quotaPool) lockedAdd(v int) {
	select {
	case n := <-qb.c:
		qb.quota += n
@@ -139,6 +175,35 @@ func (qb *quotaPool) add(v int) {
	}
}

func (qb *quotaPool) addAndUpdate(v int) {
	qb.mu.Lock()
	defer qb.mu.Unlock()
	qb.lockedAdd(v)
	// Update the version only after having added to the quota
	// so that if acquireWithVersion sees the new version it is
	// guaranteed to have seen the updated quota.
	// Also, still keep this inside of the lock, so that when
	// compareAndExecute is processing, this function doesn't
	// get executed partially (quota gets updated but the version
	// doesn't).
	atomic.AddUint32(&(qb.version), 1)
}

func (qb *quotaPool) acquireWithVersion() (<-chan int, uint32) {
	return qb.c, atomic.LoadUint32(&(qb.version))
}

func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
	qb.mu.Lock()
	defer qb.mu.Unlock()
	if version == atomic.LoadUint32(&(qb.version)) {
		success()
		return true
	}
	failure()
	return false
}
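
The version counter turns the quota channel into an optimistic-concurrency primitive: a writer snapshots (channel, version), consumes quota, and commits only if no addAndUpdate intervened. A self-contained miniature of the same pattern — simplified, not the gRPC type itself:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type miniPool struct {
	c       chan int
	mu      sync.Mutex
	version uint32
}

func (p *miniPool) addAndUpdate(v int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	select {
	case n := <-p.c: // merge with any unconsumed quota
		v += n
	default:
	}
	p.c <- v
	// Bump the version under the lock, as in the original, so a matching
	// version always implies the matching quota was visible.
	atomic.AddUint32(&p.version, 1)
}

func (p *miniPool) acquireWithVersion() (<-chan int, uint32) {
	return p.c, atomic.LoadUint32(&p.version)
}

func (p *miniPool) compareAndExecute(version uint32, success, failure func()) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if version == atomic.LoadUint32(&p.version) {
		success()
		return true
	}
	failure()
	return false
}

func main() {
	p := &miniPool{c: make(chan int, 1)}
	p.c <- 10

	ch, ver := p.acquireWithVersion()
	q := <-ch         // take the quota snapshot
	p.addAndUpdate(5) // a concurrent update bumps the version

	ok := p.compareAndExecute(ver, func() {}, func() { /* caller returns q and retries */ })
	fmt.Println(ok, q) // false 10: the snapshot went stale
}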

// acquire returns the channel on which available quota amounts are sent.
func (qb *quotaPool) acquire() <-chan int {
	return qb.c

45
vendor/google.golang.org/grpc/transport/go16.go
generated
vendored

@@ -1,45 +0,0 @@
// +build go1.6,!go1.7

/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"net"

	"google.golang.org/grpc/codes"

	"golang.org/x/net/context"
)

// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}

// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
	switch err {
	case context.DeadlineExceeded:
		return streamErrorf(codes.DeadlineExceeded, "%v", err)
	case context.Canceled:
		return streamErrorf(codes.Canceled, "%v", err)
	}
	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

46
vendor/google.golang.org/grpc/transport/go17.go
generated
vendored

@@ -1,46 +0,0 @@
// +build go1.7

/*
 *
 * Copyright 2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"context"
	"net"

	"google.golang.org/grpc/codes"

	netctx "golang.org/x/net/context"
)

// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
	return (&net.Dialer{}).DialContext(ctx, network, address)
}

// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
	switch err {
	case context.DeadlineExceeded, netctx.DeadlineExceeded:
		return streamErrorf(codes.DeadlineExceeded, "%v", err)
	case context.Canceled, netctx.Canceled:
		return streamErrorf(codes.Canceled, "%v", err)
	}
	return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

5
vendor/google.golang.org/grpc/transport/handler_server.go
generated
vendored

@@ -173,7 +173,6 @@ func (ht *serverHandlerTransport) do(fn func()) error {
		case <-ht.closedCh:
			return ErrConnClosing
		}

	}
}

@@ -183,6 +182,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
		ht.mu.Unlock()
		return nil
	}
	ht.streamDone = true
	ht.mu.Unlock()
	err := ht.do(func() {
		ht.writeCommonHeaders(s)
@@ -223,9 +223,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
		}
	})
	close(ht.writes)
	ht.mu.Lock()
	ht.streamDone = true
	ht.mu.Unlock()
	return err
}


25
vendor/google.golang.org/grpc/transport/handler_server_test.go
generated
vendored

@@ -26,6 +26,7 @@ import (
	"net/http/httptest"
	"net/url"
	"reflect"
	"sync"
	"testing"
	"time"

@@ -390,6 +391,30 @@ func TestHandlerTransport_HandleStreams_Timeout(t *testing.T) {
	}
}

func TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) {
	st := newHandleStreamTest(t)
	handleStream := func(s *Stream) {
		if want := "/service/foo.bar"; s.method != want {
			t.Errorf("stream method = %q; want %q", s.method, want)
		}
		st.bodyw.Close() // no body

		var wg sync.WaitGroup
		wg.Add(5)
		for i := 0; i < 5; i++ {
			go func() {
				defer wg.Done()
				st.ht.WriteStatus(s, status.New(codes.OK, ""))
			}()
		}
		wg.Wait()
	}
	st.ht.HandleStreams(
		func(s *Stream) { go handleStream(s) },
		func(ctx context.Context, method string) context.Context { return ctx },
	)
}

func TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) {
	errDetails := []proto.Message{
		&epb.RetryInfo{

606
vendor/google.golang.org/grpc/transport/http2_client.go
generated
vendored

@@ -43,6 +43,7 @@ import (
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	ctx       context.Context
	cancel    context.CancelFunc
	target    string // server name/addr
	userAgent string
	md        interface{}
@@ -52,17 +53,6 @@ type http2Client struct {
	authInfo credentials.AuthInfo // auth info about the connection
	nextID   uint32               // the next stream ID to be used

	// writableChan synchronizes write access to the transport.
	// A writer acquires the write lock by sending a value on writableChan
	// and releases it by receiving from writableChan.
	writableChan chan int
	// shutdownChan is closed when Close is called.
	// Blocking operations should select on shutdownChan to avoid
	// blocking forever after Close.
	// TODO(zhaoq): Maybe have a channel context?
	shutdownChan chan struct{}
	// errorChan is closed to notify the I/O error to the caller.
	errorChan chan struct{}
	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
	// that the server sent GoAway on this transport.
	goAway chan struct{}
@@ -119,7 +109,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
	if fn != nil {
		return fn(ctx, addr)
	}
	return dialContext(ctx, "tcp", addr)
	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
}

func isTemporary(err error) bool {
@@ -153,9 +143,18 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions, timeout time.Duration) (_ ClientTransport, err error) {
	scheme := "http"
	conn, err := dial(ctx, opts.Dialer, addr.Addr)
	ctx, cancel := context.WithCancel(ctx)
	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
	defer func() {
		connectCancel()
		if err != nil {
			cancel()
		}
	}()

	conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
	if err != nil {
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
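
The dial now runs under its own per-connect deadline while the transport keeps a longer-lived cancelable context, torn down only if construction fails. A hedged sketch of the same pattern in isolation — the address and timeout are illustrative:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// connect mirrors the newHTTP2Client shape above: connectCtx bounds only the
// dial, while ctx/cancel outlive it and are cleaned up only on failure.
func connect(ctx context.Context, addr string, timeout time.Duration) (_ net.Conn, _ context.CancelFunc, err error) {
	ctx, cancel := context.WithCancel(ctx) // lifetime of the whole transport
	connectCtx, connectCancel := context.WithTimeout(ctx, timeout)
	defer func() {
		connectCancel() // the connect deadline never outlives this call
		if err != nil {
			cancel() // tear down the transport context only on failure
		}
	}()
	conn, err := (&net.Dialer{}).DialContext(connectCtx, "tcp", addr)
	if err != nil {
		return nil, nil, err
	}
	return conn, cancel, nil // the caller cancels when closing the transport
}

func main() {
	conn, cancel, err := connect(context.Background(), "localhost:50051", 2*time.Second)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer cancel()
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}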
|
||||
|
|
@ -174,7 +173,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
|
|||
)
|
||||
if creds := opts.TransportCredentials; creds != nil {
|
||||
scheme = "https"
|
||||
conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
|
||||
conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Addr, conn)
|
||||
if err != nil {
|
||||
// Credentials handshake errors are typically considered permanent
|
||||
// to avoid retrying on e.g. bad certificates.
|
||||
|
|
@ -198,8 +197,17 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
|
|||
dynamicWindow = false
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
writeBufSize := defaultWriteBufSize
|
||||
if opts.WriteBufferSize > 0 {
|
||||
writeBufSize = opts.WriteBufferSize
|
||||
}
|
||||
readBufSize := defaultReadBufSize
|
||||
if opts.ReadBufferSize > 0 {
|
||||
readBufSize = opts.ReadBufferSize
|
||||
}
|
||||
t := &http2Client{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
target: addr.Addr,
|
||||
userAgent: opts.UserAgent,
|
||||
md: addr.Metadata,
|
||||
|
|
@ -209,14 +217,11 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
|
|||
authInfo: authInfo,
|
||||
// The client initiated stream id is odd starting from 1.
|
||||
nextID: 1,
|
||||
writableChan: make(chan int, 1),
|
||||
shutdownChan: make(chan struct{}),
|
||||
errorChan: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
awakenKeepalive: make(chan struct{}, 1),
|
||||
framer: newFramer(conn),
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
framer: newFramer(conn, writeBufSize, readBufSize),
|
||||
controlBuf: newControlBuffer(),
|
||||
fc: &inFlow{limit: uint32(icwz)},
|
||||
sendQuotaPool: newQuotaPool(defaultWindowSize),
|
||||
|
|
@ -270,12 +275,12 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
|
|||
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
}
|
||||
if t.initialWindowSize != defaultWindowSize {
|
||||
err = t.framer.writeSettings(true, http2.Setting{
|
||||
err = t.framer.fr.WriteSettings(http2.Setting{
|
||||
ID: http2.SettingInitialWindowSize,
|
||||
Val: uint32(t.initialWindowSize),
|
||||
})
|
||||
} else {
|
||||
err = t.framer.writeSettings(true)
|
||||
err = t.framer.fr.WriteSettings()
|
||||
}
|
||||
if err != nil {
|
||||
t.Close()
|
||||
|
|
@ -283,31 +288,35 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
|
|||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
||||
if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
|
||||
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
||||
t.Close()
|
||||
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||||
}
|
||||
}
|
||||
go t.controller()
|
||||
t.framer.writer.Flush()
|
||||
go func() {
|
||||
loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
|
||||
t.Close()
|
||||
}()
|
||||
if t.kp.Time != infinity {
|
||||
go t.keepalive()
|
||||
}
|
||||
t.writableChan <- 0
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
|
||||
s := &Stream{
|
||||
id: t.nextID,
|
||||
done: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
buf: newRecvBuffer(),
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
|
||||
headerChan: make(chan struct{}),
|
||||
id: t.nextID,
|
||||
done: make(chan struct{}),
|
||||
goAway: make(chan struct{}),
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
buf: newRecvBuffer(),
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
|
||||
localSendQuota: newQuotaPool(defaultLocalSendQuota),
|
||||
headerChan: make(chan struct{}),
|
||||
}
|
||||
t.nextID += 2
|
||||
s.requestRead = func(n int) {
|
||||
|
|
@ -368,13 +377,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
authData[k] = v
|
||||
}
|
||||
}
|
||||
callAuthData := make(map[string]string)
|
||||
callAuthData := map[string]string{}
|
||||
// Check if credentials.PerRPCCredentials were provided via call options.
|
||||
// Note: if these credentials are provided both via dial options and call
|
||||
// options, then both sets of credentials will be applied.
|
||||
if callCreds := callHdr.Creds; callCreds != nil {
|
||||
if !t.isSecure && callCreds.RequireTransportSecurity() {
|
||||
return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure conneciton")
|
||||
return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
|
||||
}
|
||||
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
|
|
@ -400,7 +409,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
return nil, ErrConnClosing
|
||||
}
|
||||
t.mu.Unlock()
|
||||
sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire())
|
||||
sq, err := wait(ctx, t.ctx, nil, nil, t.streamsQuota.acquire())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -408,19 +417,66 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
if sq > 1 {
|
||||
t.streamsQuota.add(sq - 1)
|
||||
}
|
||||
if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
|
||||
// Return the quota back now because there is no stream returned to the caller.
|
||||
if _, ok := err.(StreamError); ok {
|
||||
t.streamsQuota.add(1)
|
||||
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
|
||||
// first and create a slice of that exact size.
|
||||
// Make the slice of certain predictable size to reduce allocations made by append.
|
||||
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
|
||||
hfLen += len(authData) + len(callAuthData)
|
||||
headerFields := make([]hpack.HeaderField, 0, hfLen)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
|
||||
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
|
||||
timeout := dl.Sub(time.Now())
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
|
||||
}
|
||||
for k, v := range authData {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
for k, v := range callAuthData {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
if b := stats.OutgoingTags(ctx); b != nil {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
|
||||
}
|
||||
if b := stats.OutgoingTrace(ctx); b != nil {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
|
||||
}
|
||||
if md, ok := metadata.FromOutgoingContext(ctx); ok {
|
||||
for k, vv := range md {
|
||||
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
}
|
||||
}
|
||||
if md, ok := t.md.(*metadata.MD); ok {
|
||||
for k, vv := range *md {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state == draining {
|
||||
t.mu.Unlock()
|
||||
t.streamsQuota.add(1)
|
||||
// Need to make t writable again so that the rpc in flight can still proceed.
|
||||
t.writableChan <- 0
|
||||
return nil, ErrStreamDrain
|
||||
}
|
||||
if t.state != reachable {
|
||||
|
|
@@ -434,7 +490,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if len(t.activeStreams) == 1 {
select {
case t.awakenKeepalive <- struct{}{}:
t.framer.writePing(false, false, [8]byte{})
t.controlBuf.put(&ping{data: [8]byte{}})
// Fill the awakenKeepalive channel again as this channel must be
// kept non-writable except at the point that the keepalive()
// goroutine is waiting either to be awaken or shutdown.
@@ -442,102 +498,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
default:
}
}

t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
})
t.mu.Unlock()

// HPACK encodes various headers. Note that once WriteField(...) is
// called, the corresponding headers/continuation frame has to be sent
// because hpack.Encoder is stateful.
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})

if callHdr.SendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
}
if dl, ok := ctx.Deadline(); ok {
// Send out timeout regardless its value. The server can detect timeout context by itself.
timeout := dl.Sub(time.Now())
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
}

for k, v := range authData {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
for k, v := range callAuthData {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
var (
endHeaders bool
)
if b := stats.OutgoingTags(ctx); b != nil {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
}
if b := stats.OutgoingTrace(ctx); b != nil {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
if md, ok := metadata.FromOutgoingContext(ctx); ok {
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
for k, vv := range *md {
if isReservedHeader(k) {
continue
}
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
first := true
bufLen := t.hBuf.Len()
// Sends the headers in a single batch even when they span multiple frames.
for !endHeaders {
size := t.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
var flush bool
if callHdr.Flush && endHeaders {
flush = true
}
if first {
// Sends a HeadersFrame to server to start a new stream.
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: t.hBuf.Next(size),
EndStream: false,
EndHeaders: endHeaders,
}
// Do a force flush for the buffered frames iff it is the last headers frame
// and there is header metadata to be sent. Otherwise, there is flushing until
// the corresponding data frame is written.
err = t.framer.writeHeaders(flush, p)
first = false
} else {
// Sends Continuation frames for the leftover headers.
err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
}
if err != nil {
t.notifyError(err)
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
s.mu.Lock()
s.bytesSent = true
s.mu.Unlock()
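The removed block above is worth pausing on: because hpack.Encoder is stateful, a header block, once encoded, must go out as one HEADERS frame plus CONTINUATION frames before anything else touches the encoder. A small sketch of that splitting loop over a plain byte buffer; frame names are printed rather than written, so this is not the vendored framer:

package main

import (
	"bytes"
	"fmt"
)

const maxFrameLen = 16384 // default HTTP/2 max frame size

// splitHeaderBlock mimics the loop above: the first chunk would become a
// HEADERS frame, every following chunk a CONTINUATION frame, and only the
// final chunk carries END_HEADERS.
func splitHeaderBlock(buf *bytes.Buffer) {
	first := true
	for endHeaders := false; !endHeaders; {
		size := buf.Len()
		if size > maxFrameLen {
			size = maxFrameLen
		} else {
			endHeaders = true
		}
		chunk := buf.Next(size)
		if first {
			fmt.Printf("HEADERS       %5d bytes endHeaders=%v\n", len(chunk), endHeaders)
			first = false
		} else {
			fmt.Printf("CONTINUATION  %5d bytes endHeaders=%v\n", len(chunk), endHeaders)
		}
	}
}

func main() {
	splitHeaderBlock(bytes.NewBuffer(make([]byte, 40000)))
}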
@@ -545,7 +512,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
WireLength: bufLen,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
@@ -553,7 +519,6 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
t.statsHandler.HandleRPC(s.ctx, outHeader)
}
t.writableChan <- 0
return s, nil
}

@@ -623,12 +588,9 @@ func (t *http2Client) Close() (err error) {
t.mu.Unlock()
return
}
if t.state == reachable || t.state == draining {
close(t.errorChan)
}
t.state = closing
t.mu.Unlock()
close(t.shutdownChan)
t.cancel()
err = t.conn.Close()
t.mu.Lock()
streams := t.activeStreams
@@ -650,23 +612,18 @@ func (t *http2Client) Close() (err error) {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
return
return err
}

// GracefulClose sets the state to draining, which prevents new streams from
// being created and causes the transport to be closed when the last active
// stream is closed. If there are no active streams, the transport is closed
// immediately. This does nothing if the transport is already draining or
// closing.
func (t *http2Client) GracefulClose() error {
t.mu.Lock()
switch t.state {
case unreachable:
// The server may close the connection concurrently. t is not available for
// any streams. Close it now.
t.mu.Unlock()
t.Close()
return nil
case closing:
t.mu.Unlock()
return nil
}
if t.state == draining {
case closing, draining:
t.mu.Unlock()
return nil
}
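A recurring theme in this commit is replacing errorChan/shutdownChan with context cancellation, so a single cancel tears down every select that used to watch a channel. A minimal sketch of the resulting shape; the type and names are illustrative, not the transport's:

package main

import (
	"context"
	"fmt"
	"time"
)

type transport struct {
	ctx    context.Context
	cancel context.CancelFunc
}

// Close cancels the context; every goroutine selecting on t.ctx.Done()
// unblocks, which is what close(shutdownChan) used to achieve.
func (t *transport) Close() { t.cancel() }

func (t *transport) loop() {
	for {
		select {
		case <-t.ctx.Done():
			fmt.Println("loop: transport closed")
			return
		case <-time.After(10 * time.Millisecond):
			// pretend to do periodic work
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	t := &transport{ctx: ctx, cancel: cancel}
	go t.loop()
	time.Sleep(25 * time.Millisecond)
	t.Close()
	time.Sleep(10 * time.Millisecond)
}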
@@ -681,32 +638,38 @@ func (t *http2Client) GracefulClose() error {

// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
// if it improves the performance.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
secondStart := http2MaxFrameLen - len(hdr)%http2MaxFrameLen
if len(data) < secondStart {
secondStart = len(data)
select {
case <-s.ctx.Done():
return ContextErr(s.ctx.Err())
case <-t.ctx.Done():
return ErrConnClosing
default:
}
hdr = append(hdr, data[:secondStart]...)
data = data[secondStart:]
isLastSlice := (len(data) == 0)
r := bytes.NewBuffer(hdr)
var (
p []byte
oqv uint32
)
for {
oqv = atomic.LoadUint32(&t.outQuotaVersion)
if r.Len() > 0 || p != nil {

if hdr == nil && data == nil && opts.Last {
// stream.CloseSend uses this to send an empty frame with endStream=True
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}})
return nil
}
// Add data to header frame so that we can equally distribute data across frames.
emptyLen := http2MaxFrameLen - len(hdr)
if emptyLen > len(data) {
emptyLen = len(data)
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
for idx, r := range [][]byte{hdr, data} {
for len(r) > 0 {
size := http2MaxFrameLen
// Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
sq, err := wait(s.ctx, t.ctx, s.done, s.goAway, quotaChan)
if err != nil {
return err
}
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
tq, err := wait(s.ctx, t.ctx, s.done, s.goAway, t.sendQuotaPool.acquire())
if err != nil {
return err
}
@@ -716,93 +679,51 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
if tq < size {
size = tq
}
if p == nil {
p = r.Next(size)
if size > len(r) {
size = len(r)
}
p := r[:size]
ps := len(p)
if ps < sq {
// Overbooked stream quota. Return it back.
s.sendQuotaPool.add(sq - ps)
}
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
}
var (
endStream bool
forceFlush bool
)
// Indicate there is a writer who is about to write a data frame.
t.framer.adjustNumWriters(1)
// Got some quota. Try to acquire writing privilege on the transport.
if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil {
if _, ok := err.(StreamError); ok || err == io.EOF {
// Return the connection quota back.
t.sendQuotaPool.add(len(p))
}
if t.framer.adjustNumWriters(-1) == 0 {
// This writer is the last one in this batch and has the
// responsibility to flush the buffered frames. It queues
// a flush request to controlBuf instead of flushing directly
// in order to avoid the race with other writing or flushing.
t.controlBuf.put(&flushIO{})
}
return err
}
select {
case <-s.ctx.Done():
t.sendQuotaPool.add(len(p))
if t.framer.adjustNumWriters(-1) == 0 {
t.controlBuf.put(&flushIO{})
}
t.writableChan <- 0
return ContextErr(s.ctx.Err())
default:
}
if oqv != atomic.LoadUint32(&t.outQuotaVersion) {
// InitialWindowSize settings frame must have been received after we
// acquired send quota but before we got the writable channel.
// We must forsake this write.
t.sendQuotaPool.add(len(p))
s.sendQuotaPool.add(len(p))
if t.framer.adjustNumWriters(-1) == 0 {
t.controlBuf.put(&flushIO{})
}
t.writableChan <- 0
continue
}
if r.Len() == 0 {
if isLastSlice {
if opts.Last {
endStream = true
// Acquire local send quota to be able to write to the controlBuf.
ltq, err := wait(s.ctx, t.ctx, s.done, s.goAway, s.localSendQuota.acquire())
if err != nil {
if _, ok := err.(ConnectionError); !ok {
t.sendQuotaPool.add(ps)
}
if t.framer.adjustNumWriters(0) == 1 {
// Do a force flush iff this is last frame for the entire gRPC message
// and the caller is the only writer at this moment.
forceFlush = true
}
} else {
isLastSlice = true
if len(data) != 0 {
r = bytes.NewBuffer(data)
return err
}
s.localSendQuota.add(ltq - ps) // It's ok if we make it negative.
var endStream bool
// See if this is the last frame to be written.
if opts.Last {
if len(r)-size == 0 { // No more data in r after this iteration.
if idx == 0 { // We're writing data header.
if len(data) == 0 { // There's no data to follow.
endStream = true
}
} else { // We're writing data.
endStream = true
}
}
}
}
// If WriteData fails, all the pending streams will be handled
// by http2Client.Close(). No explicit CloseStream() needs to be
// invoked.
if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
t.notifyError(err)
return connectionErrorf(true, err, "transport: %v", err)
}
p = nil
if t.framer.adjustNumWriters(-1) == 0 {
t.framer.flushWrite()
}
t.writableChan <- 0
if r.Len() == 0 {
break
success := func() {
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { s.localSendQuota.add(ps) }})
if ps < sq {
s.sendQuotaPool.lockedAdd(sq - ps)
}
r = r[ps:]
}
failure := func() {
s.sendQuotaPool.lockedAdd(sq)
}
if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
t.sendQuotaPool.add(ps)
s.localSendQuota.add(ps)
}
}
}
if !opts.Last {
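The acquireWithVersion/compareAndExecute pairing above is the commit's answer to the old outQuotaVersion spin: quota is taken optimistically, and the commit step only succeeds if no SETTINGS update bumped the pool's version in between. A toy version-checked pool conveying the idea; the real quotaPool lives elsewhere in this package, and this sketch only mirrors its contract:

package main

import (
	"fmt"
	"sync"
)

type versionedPool struct {
	mu      sync.Mutex
	quota   int
	version uint32
}

// acquireWithVersion returns the available quota and the version it was
// observed at.
func (p *versionedPool) acquireWithVersion() (int, uint32) {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.quota, p.version
}

// compareAndExecute runs success only if the version is unchanged, i.e. no
// concurrent SETTINGS frame resized the window since the acquire.
func (p *versionedPool) compareAndExecute(ver uint32, success, failure func()) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.version == ver {
		success()
		return true
	}
	failure()
	return false
}

// addAndUpdate returns quota and bumps the version, invalidating any
// in-flight acquisitions.
func (p *versionedPool) addAndUpdate(n int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.quota += n
	p.version++
}

func main() {
	p := &versionedPool{quota: 100}
	q, v := p.acquireWithVersion()
	p.addAndUpdate(50) // a SETTINGS update races in
	ok := p.compareAndExecute(v, func() { fmt.Println("commit", q) },
		func() { fmt.Println("retry: version moved") })
	fmt.Println("committed:", ok)
}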
@@ -833,11 +754,11 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) {
return
}
if w := s.fc.maybeAdjust(n); w > 0 {
// Piggyback conneciton's window update along.
// Piggyback connection's window update along.
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
t.controlBuf.put(&windowUpdate{0, cw})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}

@@ -852,9 +773,9 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
}
if w := s.fc.onRead(n); w > 0 {
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
t.controlBuf.put(&windowUpdate{0, cw})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}

@@ -868,7 +789,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
}
t.initialWindowSize = int32(n)
t.mu.Unlock()
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n), false})
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
t.controlBuf.put(&settings{
ack: false,
ss: []http2.Setting{
@@ -898,15 +819,17 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// Furthermore, if a bdpPing is being sent out we can piggyback
// connection's window update for the bytes we just received.
if sendBDPPing {
t.controlBuf.put(&windowUpdate{0, uint32(size), false})
if size != 0 { // Could've been an empty data frame.
t.controlBuf.put(&windowUpdate{0, uint32(size)})
}
t.controlBuf.put(bdpPing)
} else {
if err := t.fc.onData(uint32(size)); err != nil {
t.notifyError(connectionErrorf(true, err, "%v", err))
t.Close()
return
}
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
t.controlBuf.put(&windowUpdate{0, w})
}
}
// Select the right stream to dispatch.
@@ -930,7 +853,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
s.mu.Unlock()
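Both handleData hunks follow one rule: when a stream-level window update is due, check whether a connection-level update is pending and send it on the same trip; and when a bdpPing goes out, fold the connection update into that write instead. A compact sketch of the pending-update bookkeeping; this inFlow-like accumulator is an assumed shape, not the package's type:

package main

import "fmt"

// pendingWindow accumulates consumed bytes and releases them as one
// WINDOW_UPDATE once a threshold is crossed, like fc.onRead/resetPendingUpdate.
type pendingWindow struct {
	pending   uint32
	threshold uint32
}

// onRead records n consumed bytes; it returns a non-zero increment when the
// pending total is worth flushing to the peer.
func (w *pendingWindow) onRead(n uint32) uint32 {
	w.pending += n
	if w.pending >= w.threshold {
		inc := w.pending
		w.pending = 0
		return inc
	}
	return 0
}

// resetPendingUpdate hands back whatever is pending so a caller can piggyback
// it on another frame (a stream update or a bdpPing).
func (w *pendingWindow) resetPendingUpdate() uint32 {
	inc := w.pending
	w.pending = 0
	return inc
}

func main() {
	conn := &pendingWindow{threshold: 8192}
	conn.onRead(4000) // below threshold: stays pending
	if cw := conn.resetPendingUpdate(); cw > 0 {
		fmt.Println("piggybacked connection WINDOW_UPDATE:", cw)
	}
}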
@@ -1019,10 +942,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 != 1 {
t.mu.Unlock()
t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID))
t.Close()
return
}
// A client can recieve multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387).
// A client can receive multiple GoAways from server (look at https://github.com/grpc/grpc-go/issues/1387).
// The idea is that the first GoAway will be sent with an ID of MaxInt32 and the second GoAway will be sent after an RTT delay
// with the ID of the last stream the server will process.
// Therefore, when we get the first GoAway we don't really close any streams. While in case of second GoAway we
@@ -1033,7 +956,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID))
t.Close()
return
}
default:
@@ -1177,22 +1100,22 @@ func handleMalformedHTTP2(s *Stream, err error) {
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
// Check the validity of server preface.
frame, err := t.framer.readFrame()
frame, err := t.framer.fr.ReadFrame()
if err != nil {
t.notifyError(err)
t.Close()
return
}
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
t.notifyError(err)
t.Close()
return
}
t.handleSettings(sf)

// loop to keep reading incoming messages on this transport.
for {
frame, err := t.framer.readFrame()
frame, err := t.framer.fr.ReadFrame()
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
if err != nil {
// Abort an active stream if the http2.Framer returns a
@@ -1204,12 +1127,12 @@ func (t *http2Client) reader() {
t.mu.Unlock()
if s != nil {
// use error detail to provide better err message
handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()))
}
continue
} else {
// Transport error.
t.notifyError(err)
t.Close()
return
}
}
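The reader above enforces the client-side preface check: the first frame from the server must be SETTINGS, otherwise the transport dies. A standalone sketch against golang.org/x/net/http2 showing the same type assertion, with a bytes.Buffer standing in for the connection:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/net/http2"
)

// checkServerPreface reads exactly one frame and insists it is SETTINGS,
// mirroring the reader() hunk above.
func checkServerPreface(r io.Reader) (*http2.SettingsFrame, error) {
	fr := http2.NewFramer(ioutil.Discard, r)
	frame, err := fr.ReadFrame()
	if err != nil {
		return nil, err
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		return nil, fmt.Errorf("transport: first frame was %T, want *http2.SettingsFrame", frame)
	}
	return sf, nil
}

func main() {
	var wire bytes.Buffer
	// Simulate a well-behaved server whose first frame is SETTINGS.
	http2.NewFramer(&wire, nil).WriteSettings(http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 100})
	sf, err := checkServerPreface(&wire)
	fmt.Println(sf != nil, err)
}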
@@ -1253,61 +1176,86 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
t.mu.Lock()
for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream.
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
atomic.AddUint32(&t.outQuotaVersion, 1)
}
}
}

// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Client) controller() {
for {
select {
case i := <-t.controlBuf.get():
t.controlBuf.load()
select {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
t.applySettings(i.ss)
} else {
t.framer.writeSettings(true, i.ss...)
}
case *resetStream:
// If the server needs to be to intimated about stream closing,
// then we need to make sure the RST_STREAM frame is written to
// the wire before the headers of the next stream waiting on
// streamQuota. We ensure this by adding to the streamsQuota pool
// only after having acquired the writableChan to send RST_STREAM.
t.streamsQuota.add(1)
t.framer.writeRSTStream(true, i.streamID, i.code)
case *flushIO:
t.framer.flushWrite()
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
}
t.framer.writePing(true, i.ack, i.data)
default:
errorf("transport: http2Client.controller got unexpected item type %v\n", i)
}
t.writableChan <- 0
continue
case <-t.shutdownChan:
return
}
case <-t.shutdownChan:
return
// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer)
// is duplicated between the client and the server.
// The transport layer needs to be refactored to take care of this.
func (t *http2Client) itemHandler(i item) error {
var err error
switch i := i.(type) {
case *dataFrame:
err = t.framer.fr.WriteData(i.streamID, i.endStream, i.d)
if err == nil {
i.f()
}
case *headerFrame:
t.hBuf.Reset()
for _, f := range i.hf {
t.hEnc.WriteField(f)
}
endHeaders := false
first := true
for !endHeaders {
size := t.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
if first {
first = false
err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
StreamID: i.streamID,
BlockFragment: t.hBuf.Next(size),
EndStream: i.endStream,
EndHeaders: endHeaders,
})
} else {
err = t.framer.fr.WriteContinuation(
i.streamID,
endHeaders,
t.hBuf.Next(size),
)
}
if err != nil {
return err
}
}
case *windowUpdate:
err = t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
case *settings:
if i.ack {
t.applySettings(i.ss)
err = t.framer.fr.WriteSettingsAck()
} else {
err = t.framer.fr.WriteSettings(i.ss...)
}
case *resetStream:
// If the server needs to be to intimated about stream closing,
// then we need to make sure the RST_STREAM frame is written to
// the wire before the headers of the next stream waiting on
// streamQuota. We ensure this by adding to the streamsQuota pool
// only after having acquired the writableChan to send RST_STREAM.
err = t.framer.fr.WriteRSTStream(i.streamID, i.code)
t.streamsQuota.add(1)
case *flushIO:
err = t.framer.writer.Flush()
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
}
err = t.framer.fr.WritePing(i.ack, i.data)
default:
errorf("transport: http2Client.controller got unexpected item type %v\n", i)
}
return err
}

// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
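The controller loop above gives way to loopyWriter feeding itemHandler: a single goroutine now owns the framer, draining the control buffer and writing, so the per-writer writableChan handshake disappears. A minimal sketch of that pattern with a plain channel standing in for controlBuf; loopyWriter's real signature is whatever this package defines, this is only the shape:

package main

import (
	"context"
	"fmt"
	"sync"
)

type item interface{}

// loopyWriter is the single writer: it drains the buffer and hands every
// item to one handler, so no other goroutine ever touches the framer.
func loopyWriter(ctx context.Context, buf <-chan item, handle func(item) error) {
	for {
		select {
		case i := <-buf:
			if err := handle(i); err != nil {
				return // a write error ends the transport
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	buf := make(chan item, 4)
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(2)
	done := make(chan struct{})
	go func() {
		loopyWriter(ctx, buf, func(i item) error {
			fmt.Println("write:", i)
			wg.Done()
			return nil
		})
		close(done)
	}()
	buf <- "headerFrame"
	buf <- "dataFrame"
	wg.Wait()
	cancel()
	<-done
}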
@@ -1331,7 +1279,7 @@ func (t *http2Client) keepalive() {
case <-t.awakenKeepalive:
// If the control gets here a ping has been sent
// need to reset the timer with keepalive.Timeout.
case <-t.shutdownChan:
case <-t.ctx.Done():
return
}
} else {
@@ -1350,13 +1298,13 @@ func (t *http2Client) keepalive() {
}
t.Close()
return
case <-t.shutdownChan:
case <-t.ctx.Done():
if !timer.Stop() {
<-timer.C
}
return
}
case <-t.shutdownChan:
case <-t.ctx.Done():
if !timer.Stop() {
<-timer.C
}
@@ -1366,25 +1314,9 @@ func (t *http2Client) keepalive() {
}

func (t *http2Client) Error() <-chan struct{} {
return t.errorChan
return t.ctx.Done()
}

func (t *http2Client) GoAway() <-chan struct{} {
return t.goAway
}

func (t *http2Client) notifyError(err error) {
t.mu.Lock()
// make sure t.errorChan is closed only once.
if t.state == draining {
t.mu.Unlock()
t.Close()
return
}
if t.state == reachable {
t.state = unreachable
close(t.errorChan)
infof("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
}
t.mu.Unlock()
}
598
vendor/google.golang.org/grpc/transport/http2_server.go
generated
vendored
@@ -21,6 +21,7 @@ package transport
import (
"bytes"
"errors"
"fmt"
"io"
"math"
"math/rand"
@@ -51,23 +52,16 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
ctx context.Context
cancel context.CancelFunc
conn net.Conn
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
// writableChan synchronizes write access to the transport.
// A writer acquires the write lock by receiving a value on writableChan
// and releases it by sending on writableChan.
writableChan chan int
// shutdownChan is closed when Close is called.
// Blocking operations should select on shutdownChan to avoid
// blocking forever after Close.
shutdownChan chan struct{}
framer *framer
hBuf *bytes.Buffer // the buffer for HPACK encoding
hEnc *hpack.Encoder // HPACK encoder
framer *framer
hBuf *bytes.Buffer // the buffer for HPACK encoding
hEnc *hpack.Encoder // HPACK encoder
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
@@ -96,8 +90,6 @@ type http2Server struct {
initialWindowSize int32
bdpEst *bdpEstimator

outQuotaVersion uint32

mu sync.Mutex // guard the following

// drainChan is initialized when drain(...) is called the first time.
@@ -112,7 +104,7 @@ type http2Server struct {
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
// idle is the time instant when the connection went idle.
// This is either the begining of the connection or when the number of
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
@@ -121,7 +113,15 @@ type http2Server struct {
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
framer := newFramer(conn)
writeBufSize := defaultWriteBufSize
if config.WriteBufferSize > 0 {
writeBufSize = config.WriteBufferSize
}
readBufSize := defaultReadBufSize
if config.ReadBufferSize > 0 {
readBufSize = config.ReadBufferSize
}
framer := newFramer(conn, writeBufSize, readBufSize)
// Send initial settings as connection preface to client.
var isettings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
@@ -151,12 +151,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
ID: http2.SettingInitialWindowSize,
Val: uint32(iwz)})
}
if err := framer.writeSettings(true, isettings...); err != nil {
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
@@ -183,8 +183,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
kep.MinTime = defaultKeepalivePolicyMinTime
}
var buf bytes.Buffer
ctx, cancel := context.WithCancel(context.Background())
t := &http2Server{
ctx: context.Background(),
ctx: ctx,
cancel: cancel,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@@ -198,8 +200,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
fc: &inFlow{limit: uint32(icwz)},
sendQuotaPool: newQuotaPool(defaultWindowSize),
state: reachable,
writableChan: make(chan int, 1),
shutdownChan: make(chan struct{}),
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
@@ -222,9 +222,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
go t.controller()
t.framer.writer.Flush()
go func() {
loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
t.Close()
}()
go t.keepalive()
t.writableChan <- 0
return t, nil
}

@@ -313,6 +316,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.maxStreamID = streamID
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
s.localSendQuota = newQuotaPool(defaultLocalSendQuota)
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
@@ -366,7 +370,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
return
}

frame, err := t.framer.readFrame()
frame, err := t.framer.fr.ReadFrame()
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
return
@@ -386,7 +390,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
t.handleSettings(sf)

for {
frame, err := t.framer.readFrame()
frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
@@ -457,9 +461,9 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) {
}
if w := s.fc.maybeAdjust(n); w > 0 {
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
t.controlBuf.put(&windowUpdate{0, cw})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}

@@ -474,9 +478,9 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
}
if w := s.fc.onRead(n); w > 0 {
if cw := t.fc.resetPendingUpdate(); cw > 0 {
t.controlBuf.put(&windowUpdate{0, cw, false})
t.controlBuf.put(&windowUpdate{0, cw})
}
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}

@@ -490,7 +494,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
t.initialWindowSize = int32(n)
t.mu.Unlock()
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n), false})
t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
t.controlBuf.put(&settings{
ack: false,
ss: []http2.Setting{
@@ -521,7 +525,9 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// Furthermore, if a bdpPing is being sent out we can piggyback
// connection's window update for the bytes we just received.
if sendBDPPing {
t.controlBuf.put(&windowUpdate{0, uint32(size), false})
if size != 0 { // Could be an empty frame.
t.controlBuf.put(&windowUpdate{0, uint32(size)})
}
t.controlBuf.put(bdpPing)
} else {
if err := t.fc.onData(uint32(size)); err != nil {
@@ -530,7 +536,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
return
}
if w := t.fc.onRead(uint32(size)); w > 0 {
t.controlBuf.put(&windowUpdate{0, w, true})
t.controlBuf.put(&windowUpdate{0, w})
}
}
// Select the right stream to dispatch.
@@ -552,7 +558,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&windowUpdate{s.id, w, true})
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
s.mu.Unlock()
@@ -593,10 +599,23 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
ss = append(ss, s)
return nil
})
// The settings will be applied once the ack is sent.
t.controlBuf.put(&settings{ack: true, ss: ss})
}

func (t *http2Server) applySettings(ss []http2.Setting) {
for _, s := range ss {
if s.ID == http2.SettingInitialWindowSize {
t.mu.Lock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
}

}
}

const (
maxPingStrikes = 2
defaultPingTimeout = 2 * time.Hour
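applySettings above shows how a new SETTINGS_INITIAL_WINDOW_SIZE is applied retroactively: every active stream's send quota is adjusted by the delta between the new and old value, which can be negative. A tiny worked sketch of the delta arithmetic, with illustrative types only:

package main

import "fmt"

func main() {
	oldInitial := uint32(65535)
	newInitial := uint32(16384) // peer shrank the window
	streamQuota := map[uint32]int{1: 1000, 3: 70000}

	// Mirror the hunk: add (new - old) to every active stream; quotas may go
	// negative until the receiver frees bytes.
	delta := int(newInitial) - int(oldInitial)
	for id := range streamQuota {
		streamQuota[id] += delta
	}
	fmt.Println(streamQuota[1], streamQuota[3]) // -48151 20849
}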
@@ -634,7 +653,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
t.mu.Unlock()
if ns < 1 && !t.kep.PermitWithoutStream {
// Keepalive shouldn't be active thus, this new ping should
// have come after atleast defaultPingTimeout.
// have come after at least defaultPingTimeout.
if t.lastPingAt.Add(defaultPingTimeout).After(now) {
t.pingStrikes++
}
@@ -647,6 +666,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {

if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
errorf("transport: Got to too many pings from the client, closing the connection.")
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
@@ -663,47 +683,16 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
}
}

func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
first := true
endHeaders := false
var err error
defer func() {
if err == nil {
// Reset ping strikes when seding headers since that might cause the
// peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
}()
// Sends the headers in a single batch.
for !endHeaders {
size := t.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
if first {
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: b.Next(size),
EndStream: endStream,
EndHeaders: endHeaders,
}
err = t.framer.writeHeaders(endHeaders, p)
first = false
} else {
err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
}
if err != nil {
t.Close()
return connectionErrorf(true, err, "transport: %v", err)
}
}
return nil
}

// WriteHeader sends the header metedata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
select {
case <-s.ctx.Done():
return ContextErr(s.ctx.Err())
case <-t.ctx.Done():
return ErrConnClosing
default:
}

s.mu.Lock()
if s.headerOk || s.state == streamDone {
s.mu.Unlock()
@@ -719,14 +708,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
md = s.header
s.mu.Unlock()
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
for k, vv := range md {
if isReservedHeader(k) {
@@ -734,20 +722,20 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
continue
}
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err
}
t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
})
if t.stats != nil {
outHeader := &stats.OutHeader{
WireLength: bufLen,
//WireLength: // TODO(mmukhi): Revisit this later, if needed.
}
t.stats.HandleRPC(s.Context(), outHeader)
}
t.writableChan <- 0
return nil
}

@@ -756,6 +744,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
select {
case <-t.ctx.Done():
return ErrConnClosing
default:
}

var headersSent, hasHeader bool
s.mu.Lock()
if s.state == streamDone {
@@ -775,25 +769,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headersSent = true
}

// Always write a status regardless of context cancellation unless the stream
// is terminated (e.g. by a RST_STREAM, GOAWAY, or transport error). The
// server's application code is already done so it is fine to ignore s.ctx.
select {
case <-t.shutdownChan:
return ErrConnClosing
case <-t.writableChan:
}
t.hBuf.Reset()
// TODO(mmukhi): Benchmark if the perfomance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
if !headersSent {
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
}
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
Value: strconv.Itoa(int(st.Code())),
})
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})

if p := st.Proto(); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
@@ -802,7 +786,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
panic(err)
}

t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}

// Attach the trailer metadata.
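WriteStatus above assembles the trailers-only view of a gRPC response: grpc-status, grpc-message, and optionally grpc-status-details-bin carrying a base64-encoded serialized status proto. A hedged sketch of that encoding with the proto marshalling replaced by raw bytes; encodeGrpcMessage's real escaping lives in this package, and percent-encoding here is only a stand-in to keep the sketch self-contained:

package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

// encodeMessage stands in for the transport's grpc-message escaping.
func encodeMessage(msg string) string { return url.QueryEscape(msg) }

// Binary trailer values ("-bin" suffix) are base64-encoded without padding.
func encodeBin(b []byte) string { return base64.RawStdEncoding.EncodeToString(b) }

func main() {
	code := 3 // InvalidArgument
	details := []byte("a serialized google.rpc.Status would go here")

	trailers := map[string]string{
		"grpc-status":             fmt.Sprintf("%d", code),
		"grpc-message":            encodeMessage("name must not be empty"),
		"grpc-status-details-bin": encodeBin(details),
	}
	for k, v := range trailers {
		fmt.Printf("%s: %s\n", k, v)
	}
}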
@@ -812,36 +796,32 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
continue
}
for _, v := range vv {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close()
return err
}
t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: true,
})
if t.stats != nil {
outTrailer := &stats.OutTrailer{
WireLength: bufLen,
}
t.stats.HandleRPC(s.Context(), outTrailer)
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
t.closeStream(s)
t.writableChan <- 0
return nil
}

// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (err error) {
// TODO(zhaoq): Support multi-writers for a single stream.
secondStart := http2MaxFrameLen - len(hdr)%http2MaxFrameLen
if len(data) < secondStart {
secondStart = len(data)
select {
case <-s.ctx.Done():
return ContextErr(s.ctx.Err())
case <-t.ctx.Done():
return ErrConnClosing
default:
}
hdr = append(hdr, data[:secondStart]...)
data = data[secondStart:]
isLastSlice := (len(data) == 0)

var writeHeaderFrame bool
s.mu.Lock()
if s.state == streamDone {
@@ -855,124 +835,74 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) (
if writeHeaderFrame {
t.WriteHeader(s, nil)
}
r := bytes.NewBuffer(hdr)
var (
p []byte
oqv uint32
)
for {
if r.Len() == 0 && p == nil {
return nil
}
oqv = atomic.LoadUint32(&t.outQuotaVersion)
size := http2MaxFrameLen
// Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil {
return err
}
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil {
return err
}
if sq < size {
size = sq
}
if tq < size {
size = tq
}
if p == nil {
p = r.Next(size)
}
ps := len(p)
if ps < sq {
// Overbooked stream quota. Return it back.
s.sendQuotaPool.add(sq - ps)
}
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
t.framer.adjustNumWriters(1)
// Got some quota. Try to acquire writing privilege on the
// transport.
if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
if _, ok := err.(StreamError); ok {
// Return the connection quota back.
t.sendQuotaPool.add(ps)
// Add data to header frame so that we can equally distribute data across frames.
emptyLen := http2MaxFrameLen - len(hdr)
if emptyLen > len(data) {
emptyLen = len(data)
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
for _, r := range [][]byte{hdr, data} {
for len(r) > 0 {
size := http2MaxFrameLen
// Wait until the stream has some quota to send the data.
quotaChan, quotaVer := s.sendQuotaPool.acquireWithVersion()
sq, err := wait(s.ctx, t.ctx, nil, nil, quotaChan)
if err != nil {
return err
}
if t.framer.adjustNumWriters(-1) == 0 {
// This writer is the last one in this batch and has the
// responsibility to flush the buffered frames. It queues
// a flush request to controlBuf instead of flushing directly
// in order to avoid the race with other writing or flushing.
t.controlBuf.put(&flushIO{})
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, t.ctx, nil, nil, t.sendQuotaPool.acquire())
if err != nil {
return err
}
return err
}
select {
case <-s.ctx.Done():
t.sendQuotaPool.add(ps)
if t.framer.adjustNumWriters(-1) == 0 {
t.controlBuf.put(&flushIO{})
if sq < size {
size = sq
}
t.writableChan <- 0
return ContextErr(s.ctx.Err())
default:
}
if oqv != atomic.LoadUint32(&t.outQuotaVersion) {
// InitialWindowSize settings frame must have been received after we
// acquired send quota but before we got the writable channel.
// We must forsake this write.
t.sendQuotaPool.add(ps)
s.sendQuotaPool.add(ps)
if t.framer.adjustNumWriters(-1) == 0 {
t.controlBuf.put(&flushIO{})
if tq < size {
size = tq
}
t.writableChan <- 0
continue
}
var forceFlush bool
if r.Len() == 0 {
if isLastSlice {
if t.framer.adjustNumWriters(0) == 1 && !opts.Last {
forceFlush = true
if size > len(r) {
size = len(r)
}
p := r[:size]
ps := len(p)
if ps < tq {
// Overbooked transport quota. Return it back.
t.sendQuotaPool.add(tq - ps)
}
// Acquire local send quota to be able to write to the controlBuf.
ltq, err := wait(s.ctx, t.ctx, nil, nil, s.localSendQuota.acquire())
if err != nil {
if _, ok := err.(ConnectionError); !ok {
t.sendQuotaPool.add(ps)
}
} else {
r = bytes.NewBuffer(data)
isLastSlice = true
return err
}
s.localSendQuota.add(ltq - ps) // It's ok we make this negative.
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
success := func() {
t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
s.localSendQuota.add(ps)
}})
if ps < sq {
// Overbooked stream quota. Return it back.
s.sendQuotaPool.lockedAdd(sq - ps)
}
r = r[ps:]
}
failure := func() {
s.sendQuotaPool.lockedAdd(sq)
}
if !s.sendQuotaPool.compareAndExecute(quotaVer, success, failure) {
t.sendQuotaPool.add(ps)
s.localSendQuota.add(ps)
}
}
// Reset ping strikes when sending data since this might cause
// the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
t.Close()
return connectionErrorf(true, err, "transport: %v", err)
}
p = nil
if t.framer.adjustNumWriters(-1) == 0 {
t.framer.flushWrite()
}
t.writableChan <- 0
}

}

func (t *http2Server) applySettings(ss []http2.Setting) {
for _, s := range ss {
if s.ID == http2.SettingInitialWindowSize {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.add(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
atomic.AddUint32(&t.outQuotaVersion, 1)
}

}
return nil
}

// keepalive running in a separate goroutine does the following:
@@ -988,7 +918,7 @@ func (t *http2Server) keepalive() {
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
keepalive := time.NewTimer(t.kp.Time)
// NOTE: All exit paths of this function should reset their
// respecitve timers. A failure to do so will cause the
// respective timers. A failure to do so will cause the
// following clean-up to deadlock and eventually leak.
defer func() {
if !maxIdle.Stop() {
@@ -1031,7 +961,7 @@ func (t *http2Server) keepalive() {
t.Close()
// Reseting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
case <-t.shutdownChan:
case <-t.ctx.Done():
}
return
case <-keepalive.C:
@@ -1049,7 +979,7 @@ func (t *http2Server) keepalive() {
pingSent = true
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
case <-t.shutdownChan:
case <-t.ctx.Done():
return
}
}
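Both keepalive hunks drain a fired timer before reusing it (if !timer.Stop() { <-timer.C }), the standard idiom for stopping a time.Timer whose channel may already hold a tick. A short sketch of the idiom in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let the timer fire

	// Stop reports false when the timer already fired; the pending tick must
	// be drained, or a later receive could observe the stale value.
	if !timer.Stop() {
		<-timer.C
	}
	timer.Reset(10 * time.Millisecond)
	<-timer.C
	fmt.Println("reset timer fired cleanly")
}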
@@ -1057,93 +987,129 @@ func (t *http2Server) keepalive() {

var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}

// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Server) controller() {
for {
select {
case i := <-t.controlBuf.get():
t.controlBuf.load()
// TODO(mmukhi): A lot of this code(and code in other places in the tranpsort layer)
// is duplicated between the client and the server.
// The transport layer needs to be refactored to take care of this.
func (t *http2Server) itemHandler(i item) error {
switch i := i.(type) {
case *dataFrame:
if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
return err
}
i.f()
return nil
case *headerFrame:
t.hBuf.Reset()
for _, f := range i.hf {
t.hEnc.WriteField(f)
}
first := true
endHeaders := false
for !endHeaders {
size := t.hBuf.Len()
if size > http2MaxFrameLen {
size = http2MaxFrameLen
} else {
endHeaders = true
}
var err error
if first {
first = false
err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
StreamID: i.streamID,
BlockFragment: t.hBuf.Next(size),
EndStream: i.endStream,
EndHeaders: endHeaders,
})
} else {
err = t.framer.fr.WriteContinuation(
i.streamID,
endHeaders,
t.hBuf.Next(size),
)
}
if err != nil {
return err
}
}
atomic.StoreUint32(&t.resetPingStrikes, 1)
return nil
case *windowUpdate:
return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
case *settings:
if i.ack {
t.applySettings(i.ss)
return t.framer.fr.WriteSettingsAck()
}
return t.framer.fr.WriteSettings(i.ss...)
case *resetStream:
return t.framer.fr.WriteRSTStream(i.streamID, i.code)
case *goAway:
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
// The transport is closing.
return fmt.Errorf("transport: Connection closing")
}
sid := t.maxStreamID
if !i.headsUp {
// Stop accepting more streams now.
t.state = draining
t.mu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
return err
}
if i.closeConn {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return fmt.Errorf("transport: Connection closing")
}
return nil
}
t.mu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
return err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
return err
}
go func() {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.writableChan:
switch i := i.(type) {
case *windowUpdate:
t.framer.writeWindowUpdate(i.flush, i.streamID, i.increment)
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
t.applySettings(i.ss)
} else {
t.framer.writeSettings(true, i.ss...)
}
case *resetStream:
t.framer.writeRSTStream(true, i.streamID, i.code)
case *goAway:
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
// The transport is closing.
return
}
sid := t.maxStreamID
if !i.headsUp {
// Stop accepting more streams now.
t.state = draining
activeStreams := len(t.activeStreams)
t.mu.Unlock()
t.framer.writeGoAway(true, sid, i.code, i.debugData)
if i.closeConn || activeStreams == 0 {
// Abruptly close the connection following the GoAway.
t.Close()
}
t.writableChan <- 0
continue
}
t.mu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
t.framer.writeGoAway(true, math.MaxUint32, http2.ErrCodeNo, []byte{})
t.framer.writePing(true, false, goAwayPing.data)
go func() {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.drainChan:
case <-timer.C:
case <-t.shutdownChan:
return
}
t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData})
}()
case *flushIO:
t.framer.flushWrite()
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
}
t.framer.writePing(true, i.ack, i.data)
default:
errorf("transport: http2Server.controller got unexpected item type %v\n", i)
}
t.writableChan <- 0
continue
case <-t.shutdownChan:
case <-t.drainChan:
case <-timer.C:
case <-t.ctx.Done():
return
}
case <-t.shutdownChan:
return
t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData})
}()
return nil
case *flushIO:
return t.framer.writer.Flush()
case *ping:
if !i.ack {
t.bdpEst.timesnap(i.data)
}
return t.framer.fr.WritePing(i.ack, i.data)
default:
err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i)
errorf("%v", err)
return err
}
}

// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() (err error) {
func (t *http2Server) Close() error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
@ -1153,8 +1119,8 @@ func (t *http2Server) Close() (err error) {
|
|||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
t.mu.Unlock()
|
||||
close(t.shutdownChan)
|
||||
err = t.conn.Close()
|
||||
t.cancel()
|
||||
err := t.conn.Close()
|
||||
// Cancel all active streams.
|
||||
for _, s := range streams {
|
||||
s.cancel()
|
||||
|
|
@ -1163,7 +1129,7 @@ func (t *http2Server) Close() (err error) {
|
|||
connEnd := &stats.ConnEnd{}
|
||||
t.stats.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// closeStream clears the footprint of a stream when the stream is not needed

139
vendor/google.golang.org/grpc/transport/http_util.go
generated
vendored

@@ -28,7 +28,6 @@ import (
"net/http"
"strconv"
"strings"
"sync/atomic"
"time"

"github.com/golang/protobuf/proto"

@@ -45,7 +44,8 @@ const (
// http://http2.github.io/http2-spec/#SettingValues
http2InitHeaderTableSize = 4096
// http2IOBufSize specifies the buffer size for sending frames.
http2IOBufSize = 32 * 1024
defaultWriteBufSize = 32 * 1024
defaultReadBufSize = 32 * 1024
)

var (

@@ -475,10 +475,10 @@ type framer struct {
fr *http2.Framer
}

func newFramer(conn net.Conn) *framer {
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
f := &framer{
reader: bufio.NewReaderSize(conn, http2IOBufSize),
writer: bufio.NewWriterSize(conn, http2IOBufSize),
reader: bufio.NewReaderSize(conn, readBufferSize),
writer: bufio.NewWriterSize(conn, writeBufferSize),
}
f.fr = http2.NewFramer(f.writer, f.reader)
// Opt-in to Frame reuse API on framer to reduce garbage.

@@ -487,132 +487,3 @@ func newFramer(conn net.Conn) *framer {
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
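
Editor's note: the fixed http2IOBufSize gives way to caller-supplied sizes, which flow in through the new WriteBufferSize/ReadBufferSize fields on ServerConfig and ConnectOptions (see the transport.go hunks below). A rough sketch of the same construction, using this file's 32 KB values as assumed fallbacks:

package sketch

import (
	"bufio"
	"net"

	"golang.org/x/net/http2"
)

const (
	defaultWriteBufSize = 32 * 1024
	defaultReadBufSize  = 32 * 1024
)

// newBufferedFramer mirrors the reworked newFramer: one bufio.Reader and
// one bufio.Writer per connection, each independently sized.
func newBufferedFramer(conn net.Conn, writeBufSize, readBufSize int) *http2.Framer {
	if writeBufSize <= 0 {
		writeBufSize = defaultWriteBufSize // assumption: fall back to this file's defaults
	}
	if readBufSize <= 0 {
		readBufSize = defaultReadBufSize
	}
	r := bufio.NewReaderSize(conn, readBufSize)
	w := bufio.NewWriterSize(conn, writeBufSize)
	// Frames land in w and only hit the wire on w.Flush(), which is
	// what lets the write loop batch many frames per syscall.
	return http2.NewFramer(w, r)
}
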

func (f *framer) adjustNumWriters(i int32) int32 {
return atomic.AddInt32(&f.numWriters, i)
}

// The following writeXXX functions can only be called when the caller gets
// unblocked from writableChan channel (i.e., owns the privilege to write).

func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
if err := f.fr.WriteData(streamID, endStream, data); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
if err := f.fr.WriteHeaders(p); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
if err := f.fr.WritePing(ack, data); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
if err := f.fr.WritePriority(streamID, p); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
if err := f.fr.WritePushPromise(p); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
if err := f.fr.WriteRSTStream(streamID, code); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
if err := f.fr.WriteSettings(settings...); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeSettingsAck(forceFlush bool) error {
if err := f.fr.WriteSettingsAck(); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
return err
}
if forceFlush {
return f.writer.Flush()
}
return nil
}

func (f *framer) flushWrite() error {
return f.writer.Flush()
}

func (f *framer) readFrame() (http2.Frame, error) {
return f.fr.ReadFrame()
}

func (f *framer) errorDetail() error {
return f.fr.ErrorDetail()
}

117
vendor/google.golang.org/grpc/transport/transport.go
generated
vendored

@@ -21,10 +21,12 @@
package transport // import "google.golang.org/grpc/transport"

import (
stdctx "context"
"fmt"
"io"
"net"
"sync"
"time"

"golang.org/x/net/context"
"golang.org/x/net/http2"

@@ -67,20 +69,20 @@ func newRecvBuffer() *recvBuffer {

func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.backlog) == 0 {
select {
case b.c <- r:
b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, r)
b.mu.Unlock()
}

func (b *recvBuffer) load() {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:

@@ -89,6 +91,7 @@ func (b *recvBuffer) load() {
default:
}
}
b.mu.Unlock()
}
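
Editor's note: both recvBuffer here and controlBuffer below trade defer b.mu.Unlock() for explicit unlocks; these methods sit on the per-message hot path, and on the Go releases this vendoring targets, defer carried measurable overhead. The underlying pattern is an unbounded queue built from a 1-slot channel plus a slice backlog. A self-contained sketch with an int payload standing in for recvMsg/item:

package sketch

import "sync"

type buffer struct {
	c       chan int
	mu      sync.Mutex
	backlog []int
}

func newBuffer() *buffer { return &buffer{c: make(chan int, 1)} }

func (b *buffer) put(v int) {
	b.mu.Lock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // fast path: hand directly to a waiting reader
			b.mu.Unlock()
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
	b.mu.Unlock()
}

// load moves the next backlog item into the channel; the consumer calls
// it after every receive from get().
func (b *buffer) load() {
	b.mu.Lock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
	b.mu.Unlock()
}

func (b *buffer) get() <-chan int { return b.c }
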

// get returns the channel that receives a recvMsg in the buffer.

@@ -164,20 +167,20 @@ func newControlBuffer() *controlBuffer {

func (b *controlBuffer) put(r item) {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.backlog) == 0 {
select {
case b.c <- r:
b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, r)
b.mu.Unlock()
}

func (b *controlBuffer) load() {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:

@@ -186,6 +189,7 @@ func (b *controlBuffer) load() {
default:
}
}
b.mu.Unlock()
}

// get returns the channel that receives an item in the buffer.

@@ -235,7 +239,8 @@ type Stream struct {
// is used to adjust flow control, if need be.
requestRead func(int)

sendQuotaPool *quotaPool
sendQuotaPool *quotaPool
localSendQuota *quotaPool
// Close headerChan to indicate the end of reception of header metadata.
headerChan chan struct{}
// header caches the received header metadata.

@@ -313,8 +318,9 @@ func (s *Stream) Header() (metadata.MD, error) {
// side only.
func (s *Stream) Trailer() metadata.MD {
s.mu.RLock()
defer s.mu.RUnlock()
return s.trailer.Copy()
c := s.trailer.Copy()
s.mu.RUnlock()
return c
}

// ServerTransport returns the underlying ServerTransport for the stream.

@@ -342,14 +348,16 @@ func (s *Stream) Status() *status.Status {
// Server side only.
func (s *Stream) SetHeader(md metadata.MD) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.headerOk || s.state == streamDone {
s.mu.Unlock()
return ErrIllegalHeaderWrite
}
if md.Len() == 0 {
s.mu.Unlock()
return nil
}
s.header = metadata.Join(s.header, md)
s.mu.Unlock()
return nil
}

@@ -360,8 +368,8 @@ func (s *Stream) SetTrailer(md metadata.MD) error {
return nil
}
s.mu.Lock()
defer s.mu.Unlock()
s.trailer = metadata.Join(s.trailer, md)
s.mu.Unlock()
return nil
}

@@ -412,15 +420,17 @@ func (s *Stream) finish(st *status.Status) {
// BytesSent indicates whether any bytes have been sent on this stream.
func (s *Stream) BytesSent() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.bytesSent
bs := s.bytesSent
s.mu.Unlock()
return bs
}

// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.bytesReceived
br := s.bytesReceived
s.mu.Unlock()
return br
}

// GoString is implemented by Stream so context.String() won't

@@ -449,7 +459,6 @@ type transportState int

const (
reachable transportState = iota
unreachable
closing
draining
)

@@ -464,6 +473,8 @@ type ServerConfig struct {
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
InitialConnWindowSize int32
WriteBufferSize int
ReadBufferSize int
}

// NewServerTransport creates a ServerTransport with conn or non-nil error

@@ -491,10 +502,14 @@ type ConnectOptions struct {
KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
StatsHandler stats.Handler
// InitialWindowSize sets the intial window size for a stream.
// InitialWindowSize sets the initial window size for a stream.
InitialWindowSize int32
// InitialConnWindowSize sets the intial window size for a connection.
// InitialConnWindowSize sets the initial window size for a connection.
InitialConnWindowSize int32
// WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire.
WriteBufferSize int
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int
}

// TargetInfo contains the information of the target such as network address and metadata.

@@ -505,8 +520,8 @@ type TargetInfo struct {

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) {
return newHTTP2Client(ctx, target, opts)
func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions, timeout time.Duration) (ClientTransport, error) {
return newHTTP2Client(ctx, target, opts, timeout)
}

// Options provides additional hints and information for message

@@ -518,7 +533,7 @@ type Options struct {

// Delay is a hint to the transport implementation for whether
// the data could be buffered for a batching write. The
// Transport implementation may ignore the hint.
// transport implementation may ignore the hint.
Delay bool
}

@@ -688,34 +703,33 @@ func (e StreamError) Error() string {
return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
}

// wait blocks until it can receive from ctx.Done, closing, or proceed.
// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
// it return the StreamError for ctx.Err.
// If it receives from goAway, it returns 0, ErrStreamDrain.
// If it receives from closing, it returns 0, ErrConnClosing.
// If it receives from proceed, it returns the received integer, nil.
func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
// wait blocks until it can receive from one of the provided contexts or channels
func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
select {
case <-ctx.Done():
return 0, ContextErr(ctx.Err())
case <-done:
// User cancellation has precedence.
select {
case <-ctx.Done():
return 0, ContextErr(ctx.Err())
default:
}
return 0, io.EOF
case <-goAway:
return 0, ErrStreamDrain
case <-closing:
case <-tctx.Done():
return 0, ErrConnClosing
case i := <-proceed:
return i, nil
}
}
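
Editor's note: the reworked wait drops the dedicated closing channel in favor of a second context, tctx, scoped to the transport's lifetime, so teardown is observed as tctx.Done(). A simplified stand-alone version, with plain errors standing in for the transport's StreamError values:

package sketch

import (
	"context"
	"errors"
	"io"
)

var (
	errStreamDrain = errors.New("transport: draining")
	errConnClosing = errors.New("transport: closing")
)

func wait(ctx, tctx context.Context, done, goAway <-chan struct{}, proceed <-chan int) (int, error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-done:
		// User cancellation takes precedence over stream completion.
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		default:
		}
		return 0, io.EOF
	case <-goAway:
		return 0, errStreamDrain
	case <-tctx.Done(): // transport shutdown replaces the old closing channel
		return 0, errConnClosing
	case i := <-proceed:
		return i, nil
	}
}

Callers that only care about one condition pass context.Background() and nil channels for the rest, as the updated tests do: wait(ctx, context.Background(), nil, nil, quota.acquire()).
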

// ContextErr converts the error from context package into a StreamError.
func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, stdctx.DeadlineExceeded:
return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, stdctx.Canceled:
return streamErrorf(codes.Canceled, "%v", err)
}
return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}

// GoAwayReason contains the reason for the GoAway frame received.
type GoAwayReason uint8

@@ -725,6 +739,39 @@ const (
// NoReason is the default value when GoAway frame is received.
NoReason GoAwayReason = 1
// TooManyPings indicates that a GoAway frame with ErrCodeEnhanceYourCalm
// was recieved and that the debug data said "too_many_pings".
// was received and that the debug data said "too_many_pings".
TooManyPings GoAwayReason = 2
)

// loopyWriter is run in a separate go routine. It is the single code path that will
// write data on wire.
func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
for {
select {
case i := <-cbuf.get():
cbuf.load()
if err := handler(i); err != nil {
return
}
case <-ctx.Done():
return
}
hasData:
for {
select {
case i := <-cbuf.get():
cbuf.load()
if err := handler(i); err != nil {
return
}
case <-ctx.Done():
return
default:
if err := handler(&flushIO{}); err != nil {
return
}
break hasData
}
}
}
}
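
Editor's note: loopyWriter is the batching half of the new write path. It drains queued items as fast as the handler can encode them and only injects a flushIO once the buffer goes momentarily idle, so one Flush (and typically one syscall) covers many frames. A self-contained sketch of the drain-then-flush loop, with a plain channel standing in for controlBuffer:

package sketch

import "context"

type item interface{}
type flush struct{} // stand-in for the transport's flushIO item

func loopy(ctx context.Context, ch <-chan item, handle func(item) error) {
	for {
		// Block until there is at least one item to write.
		select {
		case i := <-ch:
			if handle(i) != nil {
				return
			}
		case <-ctx.Done():
			return
		}
	hasData:
		// Keep writing without flushing while more items are queued.
		for {
			select {
			case i := <-ch:
				if handle(i) != nil {
					return
				}
			case <-ctx.Done():
				return
			default:
				// Queue drained: flush the batched frames once.
				if handle(flush{}) != nil {
					return
				}
				break hasData
			}
		}
	}
}
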

208
vendor/google.golang.org/grpc/transport/transport_test.go
generated
vendored

@@ -49,6 +49,7 @@ type server struct {
startedErr chan error // error (or nil) with server start value
mu sync.Mutex
conns map[ServerTransport]bool
h *testStreamHandler
}

var (

@@ -60,7 +61,8 @@ var (
)

type testStreamHandler struct {
t *http2Server
t *http2Server
notify chan struct{}
}

type hType int

@@ -68,6 +70,7 @@ type hType int
const (
normal hType = iota
suspended
notifyCall
misbehaved
encodingRequiredStatus
invalidHeaderField

@@ -76,6 +79,19 @@ const (
pingpong
)

func (h *testStreamHandler) handleStreamAndNotify(s *Stream) {
if h.notify == nil {
return
}
go func() {
select {
case <-h.notify:
default:
close(h.notify)
}
}()
}
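
Editor's note: handleStreamAndNotify signals "a stream reached the server" by closing h.notify at most once; the select with a default arm distinguishes an already-closed channel from one that still needs closing. A sketch of the idiom and the caveat it relies on:

package sketch

import "sync"

// notifyOnce is safe only while a single goroutine can reach the close,
// as in the test above; two concurrent callers could both take the
// default arm, and the second close would panic.
func notifyOnce(ch chan struct{}) {
	select {
	case <-ch: // already closed
	default:
		close(ch)
	}
}

// For genuinely concurrent signallers, sync.Once is the robust variant.
type onceChan struct {
	once sync.Once
	ch   chan struct{}
}

func (o *onceChan) close() { o.once.Do(func() { close(o.ch) }) }
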

func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
req := expectedRequest
resp := expectedResponse

@@ -92,7 +108,7 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) {
t.Fatalf("handleStream got %v, want %v", p, req)
}
// send a response back to the client.
h.t.Write(s, resp, nil, &Options{})
h.t.Write(s, nil, resp, &Options{})
// send the trailer to end the stream.
h.t.WriteStatus(s, status.New(codes.OK, ""))
}

@@ -112,7 +128,7 @@ func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) {
buf[0] = byte(0)
binary.BigEndian.PutUint32(buf[1:], uint32(sz))
copy(buf[5:], msg)
h.t.Write(s, buf, nil, &Options{})
h.t.Write(s, nil, buf, &Options{})
}
}

@@ -124,7 +140,6 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
var sent int
p := make([]byte, http2MaxFrameLen)
for sent < initialWindowSize {
<-conn.writableChan
n := initialWindowSize - sent
// The last message may be smaller than http2MaxFrameLen
if n <= http2MaxFrameLen {

@@ -137,11 +152,7 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) {
p = make([]byte, n+1)
}
}
if err := conn.framer.writeData(true, s.id, false, p); err != nil {
conn.writableChan <- 0
break
}
conn.writableChan <- 0
conn.controlBuf.put(&dataFrame{s.id, false, p, func() {}})
sent += len(p)
}
}

@@ -152,13 +163,13 @@ func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *
}

func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) {
<-h.t.writableChan
h.t.hBuf.Reset()
h.t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
if err := h.t.writeHeaders(s, h.t.hBuf, false); err != nil {
t.Fatalf("Failed to write headers: %v", err)
}
h.t.writableChan <- 0
headerFields := []hpack.HeaderField{}
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField})
h.t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
})
}

func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {

@@ -183,7 +194,7 @@ func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) {
t.Fatalf("handleStream got %v, want %v", p, req)
}
// send a response back to the client.
h.t.Write(s, resp, nil, &Options{})
h.t.Write(s, nil, resp, &Options{})
// send the trailer to end the stream.
h.t.WriteStatus(s, status.New(codes.OK, ""))
}

@@ -208,7 +219,7 @@ func (h *testStreamHandler) handleStreamDelayWrite(t *testing.T, s *Stream) {
// Wait before sending. Give time to client to start reading
// before server starts sending.
time.Sleep(2 * time.Second)
h.t.Write(s, resp, nil, &Options{})
h.t.Write(s, nil, resp, &Options{})
// send the trailer to end the stream.
h.t.WriteStatus(s, status.New(codes.OK, ""))
}

@@ -249,9 +260,15 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT
return
}
s.conns[transport] = true
h := &testStreamHandler{t: transport.(*http2Server)}
s.h = h
s.mu.Unlock()
h := &testStreamHandler{transport.(*http2Server)}
switch ht {
case notifyCall:
go transport.HandleStreams(h.handleStreamAndNotify,
func(ctx context.Context, _ string) context.Context {
return ctx
})
case suspended:
go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream.
func(ctx context.Context, method string) context.Context {

@@ -340,7 +357,7 @@ func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hTy
target := TargetInfo{
Addr: addr,
}
ct, connErr = NewClientTransport(context.Background(), target, copts)
ct, connErr = NewClientTransport(context.Background(), target, copts, 2*time.Second)
if connErr != nil {
t.Fatalf("failed to create transport: %v", connErr)
}

@@ -363,7 +380,7 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, done chan net.Con
}
done <- conn
}()
tr, err := NewClientTransport(context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts)
tr, err := NewClientTransport(context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts, 2*time.Second)
if err != nil {
// Server clean-up.
lis.Close()

@@ -801,7 +818,7 @@ func TestClientSendAndReceive(t *testing.T) {
Last: true,
Delay: false,
}
if err := ct.Write(s1, expectedRequest, nil, &opts); err != nil && err != io.EOF {
if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF {
t.Fatalf("failed to send data: %v", err)
}
p := make([]byte, len(expectedResponse))

@@ -838,7 +855,7 @@ func performOneRPC(ct ClientTransport) {
Last: true,
Delay: false,
}
if err := ct.Write(s, expectedRequest, nil, &opts); err == nil || err == io.EOF {
if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF {
time.Sleep(5 * time.Millisecond)
// The following s.Recv()'s could error out because the
// underlying transport is gone.

@@ -882,7 +899,7 @@ func TestLargeMessage(t *testing.T) {
if err != nil {
t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
}
if err := ct.Write(s, expectedRequestLarge, nil, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
}
p := make([]byte, len(expectedResponseLarge))

@@ -914,7 +931,7 @@ func TestLargeMessageWithDelayRead(t *testing.T) {
if err != nil {
t.Errorf("%v.NewStream(_, _) = _, %v, want _, <nil>", ct, err)
}
if err := ct.Write(s, expectedRequestLarge, nil, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
}
p := make([]byte, len(expectedResponseLarge))

@@ -952,7 +969,7 @@ func TestLargeMessageDelayWrite(t *testing.T) {

// Give time to server to start reading before client starts sending.
time.Sleep(2 * time.Second)
if err := ct.Write(s, expectedRequestLarge, nil, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
t.Errorf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
}
p := make([]byte, len(expectedResponseLarge))

@@ -998,7 +1015,7 @@ func TestGracefulClose(t *testing.T) {
Delay: false,
}
// The stream which was created before graceful close can still proceed.
if err := ct.Write(s, expectedRequest, nil, &opts); err != nil && err != io.EOF {
if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF {
t.Fatalf("%v.Write(_, _, _) = %v, want <nil>", ct, err)
}
p := make([]byte, len(expectedResponse))

@@ -1028,7 +1045,7 @@ func TestLargeMessageSuspension(t *testing.T) {
}
// Write should not be done successfully due to flow control.
msg := make([]byte, initialWindowSize*8)
err = ct.Write(s, msg, nil, &Options{Last: true, Delay: false})
err = ct.Write(s, nil, msg, &Options{Last: true, Delay: false})
expectedErr := streamErrorf(codes.DeadlineExceeded, "%v", context.DeadlineExceeded)
if err != expectedErr {
t.Fatalf("Write got %v, want %v", err, expectedErr)

@@ -1150,12 +1167,7 @@ func TestServerContextCanceledOnClosedConnection(t *testing.T) {
if err != nil {
t.Fatalf("Failed to open stream: %v", err)
}
// Make sure the headers frame is flushed out.
<-cc.writableChan
if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil {
t.Fatalf("Failed to write data: %v", err)
}
cc.writableChan <- 0
cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}})
// Loop until the server side stream is created.
var ss *Stream
for {

@@ -1186,7 +1198,7 @@ func TestClientConnDecoupledFromApplicationRead(t *testing.T) {
InitialWindowSize: defaultWindowSize,
InitialConnWindowSize: defaultWindowSize,
}
server, client := setUpWithOptions(t, 0, &ServerConfig{}, suspended, connectOptions)
server, client := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions)
defer server.stop()
defer client.Close()

@@ -1205,66 +1217,56 @@
for k := range server.conns {
st = k.(*http2Server)
}
notifyChan := make(chan struct{})
server.h.notify = notifyChan
server.mu.Unlock()
cstream1, err := client.NewStream(context.Background(), &CallHdr{Flush: true})
if err != nil {
t.Fatalf("Client failed to create first stream. Err: %v", err)
}

<-notifyChan
var sstream1 *Stream
// Access stream on the server.
waitWhileTrue(t, func() (bool, error) {
st.mu.Lock()
defer st.mu.Unlock()

if len(st.activeStreams) != 1 {
return true, fmt.Errorf("timed-out while waiting for server to have created a stream")
}
for _, v := range st.activeStreams {
st.mu.Lock()
for _, v := range st.activeStreams {
if v.id == cstream1.id {
sstream1 = v
}
return false, nil
})

}
st.mu.Unlock()
if sstream1 == nil {
t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id)
}
// Exhaust client's connection window.
<-st.writableChan
if err := st.framer.writeData(true, sstream1.id, true, make([]byte, defaultWindowSize)); err != nil {
st.writableChan <- 0
if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
t.Fatalf("Server failed to write data. Err: %v", err)
}
st.writableChan <- 0
notifyChan = make(chan struct{})
server.mu.Lock()
server.h.notify = notifyChan
server.mu.Unlock()
// Create another stream on client.
cstream2, err := client.NewStream(context.Background(), &CallHdr{Flush: true})
if err != nil {
t.Fatalf("Client failed to create second stream. Err: %v", err)
}

<-notifyChan
var sstream2 *Stream
waitWhileTrue(t, func() (bool, error) {
st.mu.Lock()
defer st.mu.Unlock()

if len(st.activeStreams) != 2 {
return true, fmt.Errorf("timed-out while waiting for server to have created the second stream")
st.mu.Lock()
for _, v := range st.activeStreams {
if v.id == cstream2.id {
sstream2 = v
}
for _, v := range st.activeStreams {
if v.id == cstream2.id {
sstream2 = v
}
}
if sstream2 == nil {
return true, fmt.Errorf("didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id)
}
return false, nil
})

}
st.mu.Unlock()
if sstream2 == nil {
t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id)
}
// Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream.
<-st.writableChan
if err := st.framer.writeData(true, sstream2.id, true, make([]byte, defaultWindowSize)); err != nil {
st.writableChan <- 0
if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil {
t.Fatalf("Server failed to write data. Err: %v", err)
}
st.writableChan <- 0

// Client should be able to read data on second stream.
if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil {

@@ -1305,7 +1307,7 @@ func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
t.Fatalf("Failed to create 1st stream. Err: %v", err)
}
// Exhaust server's connection window.
if err := client.Write(cstream1, make([]byte, defaultWindowSize), nil, &Options{Last: true}); err != nil {
if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil {
t.Fatalf("Client failed to write data. Err: %v", err)
}
//Client should be able to create another stream and send data on it.

@@ -1313,7 +1315,7 @@ func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
if err != nil {
t.Fatalf("Failed to create 2nd stream. Err: %v", err)
}
if err := client.Write(cstream2, make([]byte, defaultWindowSize), nil, &Options{}); err != nil {
if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil {
t.Fatalf("Client failed to write data. Err: %v", err)
}
// Get the streams on server.

@@ -1336,11 +1338,7 @@ func TestServerConnDecoupledFromApplicationRead(t *testing.T) {
st.mu.Unlock()
// Trying to write more on a max-ed out stream should result in a RST_STREAM from the server.
ct := client.(*http2Client)
<-ct.writableChan
if err := ct.framer.writeData(true, cstream2.id, true, make([]byte, 1)); err != nil {
t.Fatalf("Client failed to write. Err: %v", err)
}
ct.writableChan <- 0
ct.controlBuf.put(&dataFrame{cstream2.id, true, make([]byte, 1), func() {}})
code := http2ErrConvTab[http2.ErrCodeFlowControl]
waitWhileTrue(t, func() (bool, error) {
cstream2.mu.Lock()

@@ -1397,11 +1395,7 @@ func TestServerWithMisbehavedClient(t *testing.T) {
}
var sent int
// Drain the stream flow control window
<-cc.writableChan
if err = cc.framer.writeData(true, s.id, false, make([]byte, http2MaxFrameLen)); err != nil {
t.Fatalf("Failed to write data: %v", err)
}
cc.writableChan <- 0
cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, http2MaxFrameLen), func() {}})
sent += http2MaxFrameLen
// Wait until the server creates the corresponding stream and receive some data.
var ss *Stream

@@ -1426,11 +1420,7 @@ func TestServerWithMisbehavedClient(t *testing.T) {
}
// Keep sending until the server inbound window is drained for that stream.
for sent <= initialWindowSize {
<-cc.writableChan
if err = cc.framer.writeData(true, s.id, false, make([]byte, 1)); err != nil {
t.Fatalf("Failed to write data: %v", err)
}
cc.writableChan <- 0
cc.controlBuf.put(&dataFrame{s.id, false, make([]byte, 1), func() {}})
sent++
}
// Server sent a resetStream for s already.

@@ -1468,7 +1458,7 @@ func TestClientWithMisbehavedServer(t *testing.T) {
t.Fatalf("Failed to open stream: %v", err)
}
d := make([]byte, 1)
if err := ct.Write(s, d, nil, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
if err := ct.Write(s, nil, d, &Options{Last: true, Delay: false}); err != nil && err != io.EOF {
t.Fatalf("Failed to write: %v", err)
}
// Read without window update.

@@ -1510,7 +1500,7 @@ func TestEncodingRequiredStatus(t *testing.T) {
Last: true,
Delay: false,
}
if err := ct.Write(s, expectedRequest, nil, &opts); err != nil && err != io.EOF {
if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF {
t.Fatalf("Failed to write the request: %v", err)
}
p := make([]byte, http2MaxFrameLen)

@@ -1538,7 +1528,7 @@ func TestInvalidHeaderField(t *testing.T) {
Last: true,
Delay: false,
}
if err := ct.Write(s, expectedRequest, nil, &opts); err != nil && err != io.EOF {
if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != io.EOF {
t.Fatalf("Failed to write the request: %v", err)
}
p := make([]byte, http2MaxFrameLen)

@@ -1690,7 +1680,7 @@ func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) {
})

ctx, cancel := context.WithTimeout(context.Background(), time.Second)
serverSendQuota, err := wait(ctx, nil, nil, nil, st.sendQuotaPool.acquire())
serverSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire())
if err != nil {
t.Fatalf("Error while acquiring sendQuota on server. Err: %v", err)
}

@@ -1712,7 +1702,7 @@ func testAccountCheckWindowSize(t *testing.T, wc windowSizeConfig) {
t.Fatalf("Client transport flow control window size is %v, want %v", limit, connectOptions.InitialConnWindowSize)
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
clientSendQuota, err := wait(ctx, nil, nil, nil, ct.sendQuotaPool.acquire())
clientSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire())
if err != nil {
t.Fatalf("Error while acquiring sendQuota on client. Err: %v", err)
}

@@ -1781,7 +1771,7 @@ func TestAccountCheckExpandingWindow(t *testing.T) {
opts := Options{}
header := make([]byte, 5)
for i := 1; i <= 10; i++ {
if err := ct.Write(cstream, buf, nil, &opts); err != nil {
if err := ct.Write(cstream, nil, buf, &opts); err != nil {
t.Fatalf("Error on client while writing message: %v", err)
}
if _, err := cstream.Read(header); err != nil {

@@ -1848,7 +1838,7 @@ func TestAccountCheckExpandingWindow(t *testing.T) {

// Check flow conrtrol window on client stream is equal to out flow on server stream.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
serverStreamSendQuota, err := wait(ctx, nil, nil, nil, sstream.sendQuotaPool.acquire())
serverStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, sstream.sendQuotaPool.acquire())
cancel()
if err != nil {
return true, fmt.Errorf("error while acquiring server stream send quota. Err: %v", err)

@@ -1863,7 +1853,7 @@ func TestAccountCheckExpandingWindow(t *testing.T) {

// Check flow control window on server stream is equal to out flow on client stream.
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
clientStreamSendQuota, err := wait(ctx, nil, nil, nil, cstream.sendQuotaPool.acquire())
clientStreamSendQuota, err := wait(ctx, context.Background(), nil, nil, cstream.sendQuotaPool.acquire())
cancel()
if err != nil {
return true, fmt.Errorf("error while acquiring client stream send quota. Err: %v", err)

@@ -1878,7 +1868,7 @@ func TestAccountCheckExpandingWindow(t *testing.T) {

// Check flow control window on client transport is equal to out flow of server transport.
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
serverTrSendQuota, err := wait(ctx, nil, nil, nil, st.sendQuotaPool.acquire())
serverTrSendQuota, err := wait(ctx, context.Background(), nil, nil, st.sendQuotaPool.acquire())
cancel()
if err != nil {
return true, fmt.Errorf("error while acquring server transport send quota. Err: %v", err)

@@ -1893,7 +1883,7 @@ func TestAccountCheckExpandingWindow(t *testing.T) {

// Check flow control window on server transport is equal to out flow of client transport.
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
clientTrSendQuota, err := wait(ctx, nil, nil, nil, ct.sendQuotaPool.acquire())
clientTrSendQuota, err := wait(ctx, context.Background(), nil, nil, ct.sendQuotaPool.acquire())
cancel()
if err != nil {
return true, fmt.Errorf("error while acquiring client transport send quota. Err: %v", err)

@@ -1943,15 +1933,12 @@ func writeOneHeader(framer *http2.Framer, sid uint32, httpStatus int) error {
var buf bytes.Buffer
henc := hpack.NewEncoder(&buf)
henc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(httpStatus)})
if err := framer.WriteHeaders(http2.HeadersFrameParam{
return framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: sid,
BlockFragment: buf.Bytes(),
EndStream: true,
EndHeaders: true,
}); err != nil {
return err
}
return nil
})
}

func writeTwoHeaders(framer *http2.Framer, sid uint32, httpStatus int) error {

@@ -1973,15 +1960,12 @@ func writeTwoHeaders(framer *http2.Framer, sid uint32, httpStatus int) error {
Name: ":status",
Value: fmt.Sprint(httpStatus),
})
if err := framer.WriteHeaders(http2.HeadersFrameParam{
return framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: sid,
BlockFragment: buf.Bytes(),
EndStream: true,
EndHeaders: true,
}); err != nil {
return err
}
return nil
})
}
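
Editor's note: both helpers now return framer.WriteHeaders directly instead of the if err != nil { return err } / return nil detour. For reference, the whole hand-rolled response they perform, HPACK-encoding :status and sending it as a single HEADERS frame, looks roughly like this as a stand-alone function:

package sketch

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func writeStatusHeader(fr *http2.Framer, sid uint32, httpStatus int) error {
	// HPACK-encode the pseudo-header into a scratch buffer.
	var buf bytes.Buffer
	henc := hpack.NewEncoder(&buf)
	if err := henc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(httpStatus)}); err != nil {
		return err
	}
	// Send the encoded block as one HEADERS frame that also ends the stream.
	return fr.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      sid,
		BlockFragment: buf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
}
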

type httpServer struct {

@@ -2005,8 +1989,8 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) {
t.Errorf("Error at server-side while reading preface from cleint. Err: %v", err)
return
}
reader := bufio.NewReaderSize(s.conn, http2IOBufSize)
writer := bufio.NewWriterSize(s.conn, http2IOBufSize)
reader := bufio.NewReaderSize(s.conn, defaultWriteBufSize)
writer := bufio.NewWriterSize(s.conn, defaultReadBufSize)
framer := http2.NewFramer(writer, reader)
if err = framer.WriteSettingsAck(); err != nil {
t.Errorf("Error at server-side while sending Settings ack. Err: %v", err)

@@ -2071,7 +2055,7 @@ func setUpHTTPStatusTest(t *testing.T, httpStatus int, wh writeHeaders) (stream
wh: wh,
}
server.start(t, lis)
client, err = newHTTP2Client(context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{})
client, err = newHTTP2Client(context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{}, 2*time.Second)
if err != nil {
t.Fatalf("Error creating client. Err: %v", err)
}

26
vendor/google.golang.org/grpc/vet.sh
generated
vendored

@@ -13,11 +13,8 @@ die() {
if git status --porcelain | read; then
die "Uncommitted or untracked files found; commit changes first"
fi
# Undo any edits made by this script.
cleanup() {
git reset --hard HEAD
}
trap cleanup EXIT

PATH="$GOPATH/bin:$GOROOT/bin:$PATH"

# Check proto in manual runs or cron runs.
if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then

@@ -36,10 +33,12 @@ if [ "$1" = "-install" ]; then
if [[ "$check_proto" = "true" ]]; then
if [[ "$TRAVIS" = "true" ]]; then
PROTOBUF_VERSION=3.3.0
cd /home/travis
wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename-linux-x86_64.zip
unzip $basename-linux-x86_64.zip
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/travis
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
unzip ${PROTOC_FILENAME}
bin/protoc --version
popd
elif ! which protoc > /dev/null; then
die "Please install protoc into your path"
fi

@@ -54,18 +53,23 @@ gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|_string|\.pb)\.go:" || true) | tee /dev/stderr | (! read)

# Undo any edits made by this script.
cleanup() {
git reset --hard HEAD
}
trap cleanup EXIT

# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484).
# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711).
git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
set +o pipefail
# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
# TODO: Remove clientconn exception once go1.6 support is removed.
go tool vet -all . 2>&1 | grep -vE 'clientconn.go:.*cancel' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
set -o pipefail
git reset --hard HEAD

if [[ "$check_proto" = "true" ]]; then
PATH=/home/travis/bin:$PATH make proto && \
PATH="/home/travis/bin:$PATH" make proto && \
git status --porcelain 2>&1 | (! read) || \
(git status; git --no-pager diff; exit 1)
fi