/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"context"
	"errors"
	"io"
	"math"
	"strconv"
	"sync"
	"time"

	"golang.org/x/net/trace"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/internal/balancerload"
	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcutil"
	iresolver "google.golang.org/grpc/internal/resolver"
	"google.golang.org/grpc/internal/serviceconfig"
	"google.golang.org/grpc/internal/transport"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/stats"
	"google.golang.org/grpc/status"
)

// StreamHandler defines the handler called by the gRPC server to complete the
// execution of a streaming RPC. If a StreamHandler returns an error, it
// should be produced by the status package, or else gRPC will use
// codes.Unknown as the status code and err.Error() as the status message
// of the RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error

// StreamDesc represents a streaming RPC service's method specification.
type StreamDesc struct {
	StreamName string
	Handler    StreamHandler

	// At least one of these is true.
	ServerStreams bool
	ClientStreams bool
}

// Stream defines the common interface a client or server stream has to satisfy.
//
// Deprecated: See ClientStream and ServerStream documentation instead.
type Stream interface {
	// Deprecated: See ClientStream and ServerStream documentation instead.
	Context() context.Context
	// Deprecated: See ClientStream and ServerStream documentation instead.
	SendMsg(m interface{}) error
	// Deprecated: See ClientStream and ServerStream documentation instead.
	RecvMsg(m interface{}) error
}

// ClientStream defines the client-side behavior of a streaming RPC.
//
// All errors returned from ClientStream methods are compatible with the
// status package.
type ClientStream interface {
	// Header returns the header metadata received from the server if there
	// is any. It blocks if the metadata is not ready to read.
	Header() (metadata.MD, error)
	// Trailer returns the trailer metadata from the server, if there is any.
	// It must only be called after stream.CloseAndRecv has returned, or
	// stream.Recv has returned a non-nil error (including io.EOF).
	Trailer() metadata.MD
	// CloseSend closes the send direction of the stream. It closes the stream
	// when a non-nil error is encountered. It is also not safe to call CloseSend
	// concurrently with SendMsg.
	CloseSend() error
	// Context returns the context for this stream.
	//
	// It should not be called until after Header or RecvMsg has returned. Once
	// called, subsequent client-side retries are disabled.
	Context() context.Context
	// SendMsg is generally called by generated code. On error, SendMsg aborts
	// the stream. If the error was generated by the client, the status is
	// returned directly; otherwise, io.EOF is returned and the status of
	// the stream may be discovered using RecvMsg.
	//
	// SendMsg blocks until:
	// - There is sufficient flow control to schedule m with the transport, or
	// - The stream is done, or
	// - The stream breaks.
	//
	// SendMsg does not wait until the message is received by the server. An
	// untimely stream closure may result in lost messages. To ensure delivery,
	// users should ensure the RPC completed successfully using RecvMsg.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines. It is also
	// not safe to call CloseSend concurrently with SendMsg.
	SendMsg(m interface{}) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the stream completes successfully. On
	// any other error, the stream is aborted and the error contains the RPC
	// status.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
}

// NewStream creates a new Stream for the client side. This is typically
// called by generated code. ctx is used for the lifetime of the stream.
//
// To ensure resources are not leaked due to the stream returned, one of the following
// actions must be performed:
//
// 1. Call Close on the ClientConn.
// 2. Cancel the context provided.
// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
//    client-streaming RPC, for instance, might use the helper function
//    CloseAndRecv (note that CloseSend does not Recv, therefore is not
//    guaranteed to release all resources).
// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
//
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
	// Allow the interceptor to see all applicable call options, which means
	// those configured as defaults from dial options as well as per-call options.
	opts = combine(cc.dopts.callOptions, opts)

	if cc.dopts.streamInt != nil {
		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
	}
	return newClientStream(ctx, desc, cc, method, opts...)
}
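
// exampleClientStreamUsage is an illustrative sketch, not part of the
// original file: it shows one way to satisfy the resource-release rules
// documented on NewStream above by calling RecvMsg until it returns a
// non-nil error. The method name, descriptor, and untyped messages are
// hypothetical placeholders; real callers pass generated proto messages.
func exampleClientStreamUsage(ctx context.Context, cc *ClientConn, req, reply interface{}) error {
	desc := &StreamDesc{StreamName: "Record", ClientStreams: true}
	cs, err := cc.NewStream(ctx, desc, "/example.Service/Record")
	if err != nil {
		return err
	}
	// Send the request(s), then half-close the send direction.
	if err := cs.SendMsg(req); err != nil && err != io.EOF {
		// io.EOF here means the stream ended; the real status comes from RecvMsg.
		return err
	}
	if err := cs.CloseSend(); err != nil {
		return err
	}
	// Drain the stream so the goroutine and context are released; io.EOF
	// indicates a clean end of stream.
	if err := cs.RecvMsg(reply); err != nil && err != io.EOF {
		return err
	}
	return nil
}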

// NewClientStream is a wrapper for ClientConn.NewStream.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
	return cc.NewStream(ctx, desc, method, opts...)
}

func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	if channelz.IsOn() {
		cc.incrCallsStarted()
		defer func() {
			if err != nil {
				cc.incrCallsFailed()
			}
		}()
	}
	c := defaultCallInfo()
	// Provide an opportunity for the first RPC to see the first service config
	// provided by the resolver.
	if err := cc.waitForResolvedAddrs(ctx); err != nil {
		return nil, err
	}

	var mc serviceconfig.MethodConfig
	var onCommit func()
	rpcConfig, err := cc.safeConfigSelector.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: method})
	if err != nil {
		return nil, status.Convert(err).Err()
	}
	if rpcConfig != nil {
		if rpcConfig.Context != nil {
			ctx = rpcConfig.Context
		}
		mc = rpcConfig.MethodConfig
		onCommit = rpcConfig.OnCommitted
	}

	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
	}

	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	var cancel context.CancelFunc
	if mc.Timeout != nil && *mc.Timeout >= 0 {
		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}

	callHdr := &transport.CallHdr{
		Host:           cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
	}

	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cp = cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}
	var trInfo *traceInfo
	if EnableTracing {
		trInfo = &traceInfo{
			tr: trace.New("grpc.Sent."+methodFamily(method), method),
			firstLine: firstLine{
				client: true,
			},
		}
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = time.Until(deadline)
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
	}
	ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
	sh := cc.dopts.copts.StatsHandler
	var beginTime time.Time
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
		beginTime = time.Now()
		begin := &stats.Begin{
			Client:    true,
			BeginTime: beginTime,
			FailFast:  c.failFast,
		}
		sh.HandleRPC(ctx, begin)
	}

	cs := &clientStream{
		callHdr:      callHdr,
		ctx:          ctx,
		methodConfig: &mc,
		opts:         opts,
		callInfo:     c,
		cc:           cc,
		desc:         desc,
		codec:        c.codec,
		cp:           cp,
		comp:         comp,
		cancel:       cancel,
		beginTime:    beginTime,
		firstAttempt: true,
		onCommit:     onCommit,
	}
	if !cc.dopts.disableRetry {
		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
	}
	cs.binlog = binarylog.GetMethodLogger(method)

	// Only this initial attempt has stats/tracing.
	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
		cs.finish(err)
		return nil, err
	}

	op := func(a *csAttempt) error { return a.newStream() }
	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
		cs.finish(err)
		return nil, err
	}

	if cs.binlog != nil {
		md, _ := metadata.FromOutgoingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			OnClientSide: true,
			Header:       md,
			MethodName:   method,
			Authority:    cs.cc.authority,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		cs.binlog.Log(logEntry)
	}

	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-cc.ctx.Done():
				cs.finish(ErrClientConnClosing)
			case <-ctx.Done():
				cs.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return cs, nil
}

// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
	newAttempt := &csAttempt{
		cs:           cs,
		dc:           cs.cc.dopts.dc,
		statsHandler: sh,
		trInfo:       trInfo,
	}
	defer func() {
		if retErr != nil {
			// This attempt is not set in the clientStream, so its finish won't
			// be called. Call it here for stats and trace in case they are not
			// nil.
			newAttempt.finish(retErr)
		}
	}()

	if err := cs.ctx.Err(); err != nil {
		return toRPCErr(err)
	}

	ctx := cs.ctx
	if cs.cc.parsedTarget.Scheme == "xds" {
		// Add extra metadata (metadata that will be added by transport) to context
		// so the balancer can see them.
		ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs(
			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
		))
	}
	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
	if err != nil {
		return err
	}
	if trInfo != nil {
		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
	}
	newAttempt.t = t
	newAttempt.done = done
	cs.attempt = newAttempt
	return nil
}

func (a *csAttempt) newStream() error {
	cs := a.cs
	cs.callHdr.PreviousAttempts = cs.numRetries
	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
	if err != nil {
		if _, ok := err.(transport.PerformedIOError); ok {
			// Return without converting to an RPC error so retry code can
			// inspect.
			return err
		}
		return toRPCErr(err)
	}
	cs.attempt.s = s
	cs.attempt.p = &parser{r: s}
	return nil
}

// clientStream implements a client side Stream.
type clientStream struct {
	callHdr  *transport.CallHdr
	opts     []CallOption
	callInfo *callInfo
	cc       *ClientConn
	desc     *StreamDesc

	codec baseCodec
	cp    Compressor
	comp  encoding.Compressor

	cancel context.CancelFunc // cancels all attempts

	sentLast  bool // sent an end stream
	beginTime time.Time

	methodConfig *MethodConfig

	ctx context.Context // the application's context, wrapped by stats/tracing

	retryThrottler *retryThrottler // The throttler active when the RPC began.

	binlog *binarylog.MethodLogger // Binary logger, can be nil.
	// serverHeaderBinlogged is a boolean for whether the server header has
	// been logged. The server header will be logged the first time one of the
	// following happens: stream.Header(), stream.Recv().
	//
	// It's only read and used by Recv() and Header(), so it doesn't need to be
	// synchronized.
	serverHeaderBinlogged bool

	mu                      sync.Mutex
	firstAttempt            bool // if true, transparent retry is valid
	numRetries              int  // exclusive of transparent retry attempt(s)
	numRetriesSincePushback int  // retries since pushback; to reset backoff
	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
	// attempt is the active client stream attempt.
	// The only place where it is written is the newAttemptLocked method, and that method never writes nil.
	// So attempt can be nil only inside the newClientStream function, when the clientStream is first created.
	// One of the first things done after the clientStream's creation is to call newAttemptLocked, which either
	// assigns a non-nil value to attempt or returns an error. If an error is returned from newAttemptLocked,
	// then newClientStream calls finish on the clientStream and returns. So the finish method is the only
	// place where we need to check whether the attempt is nil.
	attempt *csAttempt
	// TODO(hedging): hedging will have multiple attempts simultaneously.
	committed  bool // active attempt committed for retry?
	onCommit   func()
	buffer     []func(a *csAttempt) error // operations to replay on retry
	bufferSize int                        // current size of buffer
}

// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
	cs   *clientStream
	t    transport.ClientTransport
	s    *transport.Stream
	p    *parser
	done func(balancer.DoneInfo)

	finished  bool
	dc        Decompressor
	decomp    encoding.Compressor
	decompSet bool

	mu sync.Mutex // guards trInfo.tr
	// trInfo may be nil (if EnableTracing is false).
	// trInfo.tr is set when created (if EnableTracing is true),
	// and cleared when the finish method is called.
	trInfo *traceInfo

	statsHandler stats.Handler
}

func (cs *clientStream) commitAttemptLocked() {
	if !cs.committed && cs.onCommit != nil {
		cs.onCommit()
	}
	cs.committed = true
	cs.buffer = nil
}

func (cs *clientStream) commitAttempt() {
	cs.mu.Lock()
	cs.commitAttemptLocked()
	cs.mu.Unlock()
}

// shouldRetry returns nil if the RPC should be retried; otherwise it returns
// the error that should be returned by the operation.
func (cs *clientStream) shouldRetry(err error) error {
	unprocessed := false
	if cs.attempt.s == nil {
		pioErr, ok := err.(transport.PerformedIOError)
		if ok {
			// Unwrap error.
			err = toRPCErr(pioErr.Err)
		} else {
			unprocessed = true
		}
		if !ok && !cs.callInfo.failFast {
			// In the event of a non-IO operation error from NewStream, we
			// never attempted to write anything to the wire, so we can retry
			// indefinitely for non-fail-fast RPCs.
			return nil
		}
	}
	if cs.finished || cs.committed {
		// RPC is finished or committed; cannot retry.
		return err
	}
	// Wait for the trailers.
	if cs.attempt.s != nil {
		<-cs.attempt.s.Done()
		unprocessed = cs.attempt.s.Unprocessed()
	}
	if cs.firstAttempt && unprocessed {
		// First attempt, stream unprocessed: transparently retry.
		return nil
	}
	if cs.cc.dopts.disableRetry {
		return err
	}

	pushback := 0
	hasPushback := false
	if cs.attempt.s != nil {
		if !cs.attempt.s.TrailersOnly() {
			return err
		}

		// TODO(retry): Move down if the spec changes to not check server pushback
		// before considering this a failure for throttling.
		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
		if len(sps) == 1 {
			var e error
			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
				channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0])
				cs.retryThrottler.throttle() // This counts as a failure for throttling.
				return err
			}
			hasPushback = true
		} else if len(sps) > 1 {
			channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps)
			cs.retryThrottler.throttle() // This counts as a failure for throttling.
			return err
		}
	}

	var code codes.Code
	if cs.attempt.s != nil {
		code = cs.attempt.s.Status().Code()
	} else {
		code = status.Convert(err).Code()
	}

	rp := cs.methodConfig.RetryPolicy
	if rp == nil || !rp.RetryableStatusCodes[code] {
		return err
	}

	// Note: the ordering here is important; we count this as a failure
	// only if the code matched a retryable code.
	if cs.retryThrottler.throttle() {
		return err
	}
	if cs.numRetries+1 >= rp.MaxAttempts {
		return err
	}

	var dur time.Duration
	if hasPushback {
		dur = time.Millisecond * time.Duration(pushback)
		cs.numRetriesSincePushback = 0
	} else {
		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
		cur := float64(rp.InitialBackoff) * fact
		if max := float64(rp.MaxBackoff); cur > max {
			cur = max
		}
		dur = time.Duration(grpcrand.Int63n(int64(cur)))
		cs.numRetriesSincePushback++
	}

	// TODO(dfawley): we could eagerly fail here if dur puts us past the
	// deadline, but unsure if it is worth doing.
	t := time.NewTimer(dur)
	select {
	case <-t.C:
		cs.numRetries++
		return nil
	case <-cs.ctx.Done():
		t.Stop()
		return status.FromContextError(cs.ctx.Err()).Err()
	}
}
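
// retryBackoffSketch is an illustrative helper, not part of the original
// file: it mirrors the delay computation in shouldRetry above for the case
// where the server sent no pushback. The delay for the n-th retry since the
// last pushback is drawn uniformly from [0, min(initial*multiplier^n, max)).
// The parameter names here are hypothetical; shouldRetry reads the real
// values from the method's RetryPolicy.
func retryBackoffSketch(initial, max time.Duration, multiplier float64, retriesSincePushback int) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(retriesSincePushback))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(grpcrand.Int63n(int64(cur)))
}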

// Returns nil if a retry was performed and succeeded; error otherwise.
func (cs *clientStream) retryLocked(lastErr error) error {
	for {
		cs.attempt.finish(lastErr)
		if err := cs.shouldRetry(lastErr); err != nil {
			cs.commitAttemptLocked()
			return err
		}
		cs.firstAttempt = false
		if err := cs.newAttemptLocked(nil, nil); err != nil {
			return err
		}
		if lastErr = cs.replayBufferLocked(); lastErr == nil {
			return nil
		}
	}
}

func (cs *clientStream) Context() context.Context {
	cs.commitAttempt()
	// No need to lock before using attempt, since we know it is committed and
	// cannot change.
	return cs.attempt.s.Context()
}

func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
	cs.mu.Lock()
	for {
		if cs.committed {
			cs.mu.Unlock()
			return op(cs.attempt)
		}
		a := cs.attempt
		cs.mu.Unlock()
		err := op(a)
		cs.mu.Lock()
		if a != cs.attempt {
			// We started another attempt already.
			continue
		}
		if err == io.EOF {
			<-a.s.Done()
		}
		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
			onSuccess()
			cs.mu.Unlock()
			return err
		}
		if err := cs.retryLocked(err); err != nil {
			cs.mu.Unlock()
			return err
		}
	}
}

func (cs *clientStream) Header() (metadata.MD, error) {
	var m metadata.MD
	err := cs.withRetry(func(a *csAttempt) error {
		var err error
		m, err = a.s.Header()
		return toRPCErr(err)
	}, cs.commitAttemptLocked)
	if err != nil {
		cs.finish(err)
		return nil, err
	}
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Only log if binary log is on and header has not been logged.
		logEntry := &binarylog.ServerHeader{
			OnClientSide: true,
			Header:       m,
			PeerAddr:     nil,
		}
		if peer, ok := peer.FromContext(cs.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		cs.binlog.Log(logEntry)
		cs.serverHeaderBinlogged = true
	}
	return m, err
}

func (cs *clientStream) Trailer() metadata.MD {
	// On RPC failure, we never need to retry, because correct usage requires
	// that RecvMsg() has returned a non-nil error before this function is
	// called. We would have retried earlier if necessary.
	//
	// Commit the attempt anyway, just in case users are not following those
	// directions -- it will prevent races and should not meaningfully impact
	// performance.
	cs.commitAttempt()
	if cs.attempt.s == nil {
		return nil
	}
	return cs.attempt.s.Trailer()
}

func (cs *clientStream) replayBufferLocked() error {
	a := cs.attempt
	for _, f := range cs.buffer {
		if err := f(a); err != nil {
			return err
		}
	}
	return nil
}

func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
	// Note: we still will buffer if retry is disabled (for transparent retries).
	if cs.committed {
		return
	}
	cs.bufferSize += sz
	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
		cs.commitAttemptLocked()
		return
	}
	cs.buffer = append(cs.buffer, op)
}

func (cs *clientStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if err != nil && err != io.EOF {
			// Call finish on the client stream for errors generated by this SendMsg
			// call, as these indicate problems created by this client. (Transport
			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
			// error will be returned from RecvMsg eventually in that case, or be
			// retried.)
			cs.finish(err)
		}
	}()
	if cs.sentLast {
		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
	}
	if !cs.desc.ClientStreams {
		cs.sentLast = true
	}

	// load hdr, payload, data
	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
	if err != nil {
		return err
	}

	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > *cs.callInfo.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
	}
	msgBytes := data // Store the pointer before setting to nil, for binary logging.
	op := func(a *csAttempt) error {
		err := a.sendMsg(m, hdr, payload, data)
		// nil out the message and uncomp when replaying; they are only needed for
		// stats which is disabled for subsequent attempts.
		m, data = nil, nil
		return err
	}
	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ClientMessage{
			OnClientSide: true,
			Message:      msgBytes,
		})
	}
	return
}

func (cs *clientStream) RecvMsg(m interface{}) error {
	if cs.binlog != nil && !cs.serverHeaderBinlogged {
		// Call Header() to binary log header if it's not already logged.
		cs.Header()
	}
	var recvInfo *payloadInfo
	if cs.binlog != nil {
		recvInfo = &payloadInfo{}
	}
	err := cs.withRetry(func(a *csAttempt) error {
		return a.recvMsg(m, recvInfo)
	}, cs.commitAttemptLocked)
	if cs.binlog != nil && err == nil {
		cs.binlog.Log(&binarylog.ServerMessage{
			OnClientSide: true,
			Message:      recvInfo.uncompressedBytes,
		})
	}
	if err != nil || !cs.desc.ServerStreams {
		// err != nil or non-server-streaming indicates end of stream.
		cs.finish(err)

		if cs.binlog != nil {
			// finish will not log Trailer. Log Trailer here.
			logEntry := &binarylog.ServerTrailer{
				OnClientSide: true,
				Trailer:      cs.Trailer(),
				Err:          err,
			}
			if logEntry.Err == io.EOF {
				logEntry.Err = nil
			}
			if peer, ok := peer.FromContext(cs.Context()); ok {
				logEntry.PeerAddr = peer.Addr
			}
			cs.binlog.Log(logEntry)
		}
	}
	return err
}

func (cs *clientStream) CloseSend() error {
	if cs.sentLast {
		// TODO: return an error and finish the stream instead, due to API misuse?
		return nil
	}
	cs.sentLast = true
	op := func(a *csAttempt) error {
		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
		// Always return nil; io.EOF is the only error that might make sense
		// instead, but there is no need to signal the client to call RecvMsg
		// as the only use left for the stream after CloseSend is to call
		// RecvMsg. This also matches historical behavior.
		return nil
	}
	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
	if cs.binlog != nil {
		cs.binlog.Log(&binarylog.ClientHalfClose{
			OnClientSide: true,
		})
	}
	// We never returned an error here for reasons.
	return nil
}

func (cs *clientStream) finish(err error) {
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	cs.mu.Lock()
	if cs.finished {
		cs.mu.Unlock()
		return
	}
	cs.finished = true
	cs.commitAttemptLocked()
	if cs.attempt != nil {
		cs.attempt.finish(err)
		// after functions all rely upon having a stream.
		if cs.attempt.s != nil {
			for _, o := range cs.opts {
				o.after(cs.callInfo, cs.attempt)
			}
		}
	}
	cs.mu.Unlock()
	// For binary logging: only log cancel in finish (it could be caused by the
	// RPC ctx being canceled or the ClientConn being closed). Trailer will be
	// logged in RecvMsg.
	//
	// Only one of cancel or trailer needs to be logged. In the cases where
	// users don't call RecvMsg, users must have already canceled the RPC.
	if cs.binlog != nil && status.Code(err) == codes.Canceled {
		cs.binlog.Log(&binarylog.Cancel{
			OnClientSide: true,
		})
	}
	if err == nil {
		cs.retryThrottler.successfulRPC()
	}
	if channelz.IsOn() {
		if err != nil {
			cs.cc.incrCallsFailed()
		} else {
			cs.cc.incrCallsSucceeded()
		}
	}
	cs.cancel()
}

func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
	cs := a.cs
	if a.trInfo != nil {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
		if !cs.desc.ClientStreams {
			// For non-client-streaming RPCs, we return nil instead of EOF on error
			// because the generated code requires it. finish is not called; RecvMsg()
			// will call it with the stream's status independently.
			return nil
		}
		return io.EOF
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
	}
	if channelz.IsOn() {
		a.t.IncrMsgSent()
	}
	return nil
}

func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
	cs := a.cs
	if a.statsHandler != nil && payInfo == nil {
		payInfo = &payloadInfo{}
	}

	if !a.decompSet {
		// Block until we receive headers containing received message encoding.
		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
			if a.dc == nil || a.dc.Type() != ct {
				// No configured decompressor, or it does not match the incoming
				// message encoding; attempt to find a registered compressor that does.
				a.dc = nil
				a.decomp = encoding.GetCompressor(ct)
			}
		} else {
			// No compression is used; disable our decompressor.
			a.dc = nil
		}
		// Only initialize this state once per stream.
		a.decompSet = true
	}
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
	if err != nil {
		if err == io.EOF {
			if statusErr := a.s.Status().Err(); statusErr != nil {
				return statusErr
			}
			return io.EOF // indicates successful end of stream.
		}
		return toRPCErr(err)
	}
	if a.trInfo != nil {
		a.mu.Lock()
		if a.trInfo.tr != nil {
			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
		}
		a.mu.Unlock()
	}
	if a.statsHandler != nil {
		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
			Client:   true,
			RecvTime: time.Now(),
			Payload:  m,
			// TODO truncate large payload.
			Data:       payInfo.uncompressedBytes,
			WireLength: payInfo.wireLength + headerLen,
			Length:     len(payInfo.uncompressedBytes),
		})
	}
	if channelz.IsOn() {
		a.t.IncrMsgRecv()
	}
	if cs.desc.ServerStreams {
		// Subsequent messages should be received by subsequent RecvMsg calls.
		return nil
	}
	// Special handling for non-server-stream rpcs.
	// This recv expects EOF or errors, so we don't collect inPayload.
	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
	if err == nil {
		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
	}
	if err == io.EOF {
		return a.s.Status().Err() // non-server streaming Recv returns nil on success
	}
	return toRPCErr(err)
}

func (a *csAttempt) finish(err error) {
	a.mu.Lock()
	if a.finished {
		a.mu.Unlock()
		return
	}
	a.finished = true
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	var tr metadata.MD
	if a.s != nil {
		a.t.CloseStream(a.s, err)
		tr = a.s.Trailer()
	}

	if a.done != nil {
		br := false
		if a.s != nil {
			br = a.s.BytesReceived()
		}
		a.done(balancer.DoneInfo{
			Err:           err,
			Trailer:       tr,
			BytesSent:     a.s != nil,
			BytesReceived: br,
			ServerLoad:    balancerload.Parse(tr),
		})
	}
	if a.statsHandler != nil {
		end := &stats.End{
			Client:    true,
			BeginTime: a.cs.beginTime,
			EndTime:   time.Now(),
			Trailer:   tr,
			Error:     err,
		}
		a.statsHandler.HandleRPC(a.cs.ctx, end)
	}
	if a.trInfo != nil && a.trInfo.tr != nil {
		if err == nil {
			a.trInfo.tr.LazyPrintf("RPC: [OK]")
		} else {
			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
			a.trInfo.tr.SetError()
		}
		a.trInfo.tr.Finish()
		a.trInfo.tr = nil
	}
	a.mu.Unlock()
}

// newNonRetryClientStream creates a ClientStream with the specified transport,
// on the given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid a race, the transport is specified separately,
// instead of using ac.transport.
//
// Main differences between this and ClientConn.NewStream:
// - no retry
// - no service config (or wait for service config)
// - no tracing or stats
func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
	if t == nil {
		// TODO: return RPC error here?
		return nil, errors.New("transport provided is nil")
	}
	// defaultCallInfo contains unnecessary info (i.e. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
	c := &callInfo{}

	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}

	callHdr := &transport.CallHdr{
		Host:           ac.cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
	}

	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if ac.cc.dopts.cp != nil {
		callHdr.SendCompress = ac.cc.dopts.cp.Type()
		cp = ac.cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}

	// Use a special addrConnStream to avoid retry.
	as := &addrConnStream{
		callHdr:  callHdr,
		ac:       ac,
		ctx:      ctx,
		cancel:   cancel,
		opts:     opts,
		callInfo: c,
		desc:     desc,
		codec:    c.codec,
		cp:       cp,
		comp:     comp,
		t:        t,
	}

	s, err := as.t.NewStream(as.ctx, as.callHdr)
	if err != nil {
		err = toRPCErr(err)
		return nil, err
	}
	as.s = s
	as.p = &parser{r: s}
	ac.incrCallsStarted()
	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-ac.ctx.Done():
				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
			case <-ctx.Done():
				as.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return as, nil
}

type addrConnStream struct {
	s         *transport.Stream
	ac        *addrConn
	callHdr   *transport.CallHdr
	cancel    context.CancelFunc
	opts      []CallOption
	callInfo  *callInfo
	t         transport.ClientTransport
	ctx       context.Context
	sentLast  bool
	desc      *StreamDesc
	codec     baseCodec
	cp        Compressor
	comp      encoding.Compressor
	decompSet bool
	dc        Decompressor
	decomp    encoding.Compressor
	p         *parser
	mu        sync.Mutex
	finished  bool
}

func (as *addrConnStream) Header() (metadata.MD, error) {
	m, err := as.s.Header()
	if err != nil {
		as.finish(toRPCErr(err))
	}
	return m, err
}

func (as *addrConnStream) Trailer() metadata.MD {
	return as.s.Trailer()
}

func (as *addrConnStream) CloseSend() error {
	if as.sentLast {
		// TODO: return an error and finish the stream instead, due to API misuse?
		return nil
	}
	as.sentLast = true

	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
	// Always return nil; io.EOF is the only error that might make sense
	// instead, but there is no need to signal the client to call RecvMsg
	// as the only use left for the stream after CloseSend is to call
	// RecvMsg. This also matches historical behavior.
	return nil
}

func (as *addrConnStream) Context() context.Context {
	return as.s.Context()
}

func (as *addrConnStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if err != nil && err != io.EOF {
			// Call finish on the client stream for errors generated by this SendMsg
			// call, as these indicate problems created by this client. (Transport
			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
			// error will be returned from RecvMsg eventually in that case, or be
			// retried.)
			as.finish(err)
		}
	}()
	if as.sentLast {
		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
	}
	if !as.desc.ClientStreams {
		as.sentLast = true
	}

	// load hdr, payload, data
	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
	if err != nil {
		return err
	}

	// TODO(dfawley): should we be checking len(data) instead?
	if len(payld) > *as.callInfo.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
	}

	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
		if !as.desc.ClientStreams {
			// For non-client-streaming RPCs, we return nil instead of EOF on error
			// because the generated code requires it. finish is not called; RecvMsg()
			// will call it with the stream's status independently.
			return nil
		}
		return io.EOF
	}

	if channelz.IsOn() {
		as.t.IncrMsgSent()
	}
	return nil
}

func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
	defer func() {
		if err != nil || !as.desc.ServerStreams {
			// err != nil or non-server-streaming indicates end of stream.
			as.finish(err)
		}
	}()

	if !as.decompSet {
		// Block until we receive headers containing received message encoding.
		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
			if as.dc == nil || as.dc.Type() != ct {
				// No configured decompressor, or it does not match the incoming
				// message encoding; attempt to find a registered compressor that does.
				as.dc = nil
				as.decomp = encoding.GetCompressor(ct)
			}
		} else {
			// No compression is used; disable our decompressor.
			as.dc = nil
		}
		// Only initialize this state once per stream.
		as.decompSet = true
	}
	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
	if err != nil {
		if err == io.EOF {
			if statusErr := as.s.Status().Err(); statusErr != nil {
				return statusErr
			}
			return io.EOF // indicates successful end of stream.
		}
		return toRPCErr(err)
	}

	if channelz.IsOn() {
		as.t.IncrMsgRecv()
	}
	if as.desc.ServerStreams {
		// Subsequent messages should be received by subsequent RecvMsg calls.
		return nil
	}

	// Special handling for non-server-stream rpcs.
	// This recv expects EOF or errors, so we don't collect inPayload.
	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
	if err == nil {
		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
	}
	if err == io.EOF {
		return as.s.Status().Err() // non-server streaming Recv returns nil on success
	}
	return toRPCErr(err)
}

func (as *addrConnStream) finish(err error) {
	as.mu.Lock()
	if as.finished {
		as.mu.Unlock()
		return
	}
	as.finished = true
	if err == io.EOF {
		// Ending a stream with EOF indicates a success.
		err = nil
	}
	if as.s != nil {
		as.t.CloseStream(as.s, err)
	}

	if err != nil {
		as.ac.incrCallsFailed()
	} else {
		as.ac.incrCallsSucceeded()
	}
	as.cancel()
	as.mu.Unlock()
}

// ServerStream defines the server-side behavior of a streaming RPC.
//
// All errors returned from ServerStream methods are compatible with the
// status package.
type ServerStream interface {
	// SetHeader sets the header metadata. It may be called multiple times.
	// When called multiple times, all the provided metadata will be merged.
	// All the metadata will be sent out when one of the following happens:
	// - ServerStream.SendHeader() is called;
	// - The first response is sent out;
	// - An RPC status is sent out (error or success).
	SetHeader(metadata.MD) error
	// SendHeader sends the header metadata.
	// The provided md and headers set by SetHeader() will be sent.
	// It fails if called multiple times.
	SendHeader(metadata.MD) error
	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
	// When called more than once, all the provided metadata will be merged.
	SetTrailer(metadata.MD)
	// Context returns the context for this stream.
	Context() context.Context
	// SendMsg sends a message. On error, SendMsg aborts the stream and the
	// error is returned directly.
	//
	// SendMsg blocks until:
	// - There is sufficient flow control to schedule m with the transport, or
	// - The stream is done, or
	// - The stream breaks.
	//
	// SendMsg does not wait until the message is received by the client. An
	// untimely stream closure may result in lost messages.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not safe
	// to call SendMsg on the same stream in different goroutines.
	SendMsg(m interface{}) error
	// RecvMsg blocks until it receives a message into m or the stream is
	// done. It returns io.EOF when the client has performed a CloseSend. On
	// any non-EOF error, the stream is aborted and the error contains the
	// RPC status.
	//
	// It is safe to have a goroutine calling SendMsg and another goroutine
	// calling RecvMsg on the same stream at the same time, but it is not
	// safe to call RecvMsg on the same stream in different goroutines.
	RecvMsg(m interface{}) error
}
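
// exampleEchoHandler is an illustrative sketch, not part of the original
// file: a bidirectional-streaming StreamHandler that receives messages until
// the client half-closes (RecvMsg returns io.EOF) and echoes each one back.
// The untyped interface{} messages are hedged placeholders; generated code
// normally wraps a handler like this in typed helpers.
func exampleEchoHandler(srv interface{}, stream ServerStream) error {
	for {
		var req interface{}
		if err := stream.RecvMsg(&req); err != nil {
			if err == io.EOF {
				// The client finished sending; end the RPC with an OK status.
				return nil
			}
			return err
		}
		if err := stream.SendMsg(req); err != nil {
			return err
		}
	}
}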

// serverStream implements a server side Stream.
type serverStream struct {
	ctx   context.Context
	t     transport.ServerTransport
	s     *transport.Stream
	p     *parser
	codec baseCodec

	cp     Compressor
	dc     Decompressor
	comp   encoding.Compressor
	decomp encoding.Compressor

	maxReceiveMessageSize int
	maxSendMessageSize    int
	trInfo                *traceInfo

	statsHandler stats.Handler

	binlog *binarylog.MethodLogger
	// serverHeaderBinlogged indicates whether the server header has been
	// logged. It is set the first time one of the following happens:
	// stream.SendHeader(), stream.Send().
	//
	// It's only checked in send and sendHeader, so it doesn't need to be
	// synchronized.
	serverHeaderBinlogged bool

	mu sync.Mutex // protects trInfo.tr after the service handler runs.
}

func (ss *serverStream) Context() context.Context {
	return ss.ctx
}

func (ss *serverStream) SetHeader(md metadata.MD) error {
	if md.Len() == 0 {
		return nil
	}
	return ss.s.SetHeader(md)
}

func (ss *serverStream) SendHeader(md metadata.MD) error {
	err := ss.t.WriteHeader(ss.s, md)
	if ss.binlog != nil && !ss.serverHeaderBinlogged {
		h, _ := ss.s.Header()
		ss.binlog.Log(&binarylog.ServerHeader{
			Header: h,
		})
		ss.serverHeaderBinlogged = true
	}
	return err
}

func (ss *serverStream) SetTrailer(md metadata.MD) {
	if md.Len() == 0 {
		return
	}
	ss.s.SetTrailer(md)
}

func (ss *serverStream) SendMsg(m interface{}) (err error) {
	defer func() {
		if ss.trInfo != nil {
			ss.mu.Lock()
			if ss.trInfo.tr != nil {
				if err == nil {
					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
				} else {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
			}
			ss.mu.Unlock()
		}
		if err != nil && err != io.EOF {
			st, _ := status.FromError(toRPCErr(err))
			ss.t.WriteStatus(ss.s, st)
			// A non-user-specified status was sent out. This should be an error
			// case (such as a server-side cancellation).
			//
			// This is not handled specifically now. The user will return a final
			// status from the service handler, and we will log that error instead.
			// This behavior is similar to an interceptor.
		}
		if channelz.IsOn() && err == nil {
			ss.t.IncrMsgSent()
		}
	}()

	// load hdr, payload, data
	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
	if err != nil {
		return err
	}

	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > ss.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
	}
	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
		return toRPCErr(err)
	}
	if ss.binlog != nil {
		if !ss.serverHeaderBinlogged {
			h, _ := ss.s.Header()
			ss.binlog.Log(&binarylog.ServerHeader{
				Header: h,
			})
			ss.serverHeaderBinlogged = true
		}
		ss.binlog.Log(&binarylog.ServerMessage{
			Message: data,
		})
	}
	if ss.statsHandler != nil {
		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
	}
	return nil
}

func (ss *serverStream) RecvMsg(m interface{}) (err error) {
	defer func() {
		if ss.trInfo != nil {
			ss.mu.Lock()
			if ss.trInfo.tr != nil {
				if err == nil {
					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
				} else if err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
			}
			ss.mu.Unlock()
		}
		if err != nil && err != io.EOF {
			st, _ := status.FromError(toRPCErr(err))
			ss.t.WriteStatus(ss.s, st)
			// A non-user-specified status was sent out. This should be an error
			// case (such as a server-side cancellation).
			//
			// This is not handled specifically now. The user will return a final
			// status from the service handler, and we will log that error instead.
			// This behavior is similar to an interceptor.
		}
		if channelz.IsOn() && err == nil {
			ss.t.IncrMsgRecv()
		}
	}()
	var payInfo *payloadInfo
	if ss.statsHandler != nil || ss.binlog != nil {
		payInfo = &payloadInfo{}
	}
	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
		if err == io.EOF {
			if ss.binlog != nil {
				ss.binlog.Log(&binarylog.ClientHalfClose{})
			}
			return err
		}
		if err == io.ErrUnexpectedEOF {
			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
		}
		return toRPCErr(err)
	}
	if ss.statsHandler != nil {
		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
			RecvTime: time.Now(),
			Payload:  m,
			// TODO truncate large payload.
			Data:       payInfo.uncompressedBytes,
			WireLength: payInfo.wireLength + headerLen,
			Length:     len(payInfo.uncompressedBytes),
		})
	}
	if ss.binlog != nil {
		ss.binlog.Log(&binarylog.ClientMessage{
			Message: payInfo.uncompressedBytes,
		})
	}
	return nil
}

// MethodFromServerStream returns the method string for the input stream.
// The returned string is in the format of "/service/method".
func MethodFromServerStream(stream ServerStream) (string, bool) {
	return Method(stream.Context())
}

// prepareMsg returns the hdr, payload and data using the compressors passed,
// or using the passed PreparedMsg if m is one.
func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
	if preparedMsg, ok := m.(*PreparedMsg); ok {
		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
	}
	// The input interface is not a prepared msg.
	// Marshal and compress the data at this point.
	data, err = encode(codec, m)
	if err != nil {
		return nil, nil, nil, err
	}
	compData, err := compress(data, cp, comp)
	if err != nil {
		return nil, nil, nil, err
	}
	hdr, payload = msgHeader(data, compData)
	return hdr, payload, data, nil
}
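
// Note (added for clarity, not part of the original file): the hdr returned
// by prepareMsg is the gRPC length-prefixed message header produced by
// msgHeader, i.e. a 1-byte compressed flag followed by a 4-byte big-endian
// length of the payload; payload is the (possibly compressed) message body,
// and data is the uncompressed encoding kept for stats and binary logging.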