3
* Copyright 2014 gRPC authors.
5
* Licensed under the Apache License, Version 2.0 (the "License");
6
* you may not use this file except in compliance with the License.
7
* You may obtain a copy of the License at
9
* http://www.apache.org/licenses/LICENSE-2.0
11
* Unless required by applicable law or agreed to in writing, software
12
* distributed under the License is distributed on an "AS IS" BASIS,
13
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
* See the License for the specific language governing permissions and
15
* limitations under the License.
28
"golang.org/x/net/http2"
29
"golang.org/x/net/http2/hpack"
32
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
33
e.SetMaxDynamicTableSizeLimit(v)
46
func (il *itemList) enqueue(i interface{}) {
49
il.head, il.tail = n, n
56
// peek returns the first item in the list without removing it from the
58
func (il *itemList) peek() interface{} {
62
func (il *itemList) dequeue() interface{} {
67
il.head = il.head.next
74
func (il *itemList) dequeueAll() *itemNode {
76
il.head, il.tail = nil, nil
80
func (il *itemList) isEmpty() bool {
84
// The following defines various control items which could flow through
85
// the control buffer of transport. They represent different aspects of
86
// control tasks, e.g., flow control, settings, streaming resetting, etc.
88
// maxQueuedTransportResponseFrames is the most queued "transport response"
89
// frames we will buffer before preventing new reads from occurring on the
90
// transport. These are control frames sent in response to client requests,
91
// such as RST_STREAM due to bad headers or settings acks.
92
const maxQueuedTransportResponseFrames = 50
94
type cbItem interface {
95
isTransportResponseFrame() bool
98
// registerStream is used to register an incoming stream with loopy writer.
99
type registerStream struct {
104
// isTransportResponseFrame returns false: registering a stream with the
// loopy writer is local bookkeeping, not a frame sent in response to a
// peer-initiated action, so it never counts toward the throttling quota.
func (*registerStream) isTransportResponseFrame() bool {
	return false
}
106
// headerFrame is also used to register stream on the client-side.
107
type headerFrame struct {
109
hf []hpack.HeaderField
110
endStream bool // Valid on server side.
111
initStream func(uint32) error // Used only on the client side.
113
wq *writeQuota // write quota for the stream created.
114
cleanup *cleanupStream // Valid on the server side.
115
onOrphaned func(error) // Valid on client-side
118
func (h *headerFrame) isTransportResponseFrame() bool {
119
return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
122
type cleanupStream struct {
125
rstCode http2.ErrCode
129
// isTransportResponseFrame reports whether this cleanup will put a
// RST_STREAM frame on the wire; that is the case exactly when c.rst is set.
func (c *cleanupStream) isTransportResponseFrame() bool {
	return c.rst
}
131
type dataFrame struct {
136
// onEachWrite is called every time
137
// a part of d is written out.
141
// isTransportResponseFrame returns false: outgoing data is application
// traffic, not a control frame produced in response to the peer, so it is
// never subject to the transport-response throttle.
func (*dataFrame) isTransportResponseFrame() bool {
	return false
}
143
type incomingWindowUpdate struct {
148
// isTransportResponseFrame returns false: an incoming WINDOW_UPDATE only
// replenishes local send quota and requires no frame to be written back.
func (*incomingWindowUpdate) isTransportResponseFrame() bool {
	return false
}
150
type outgoingWindowUpdate struct {
155
func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
156
return false // window updates are throttled by thresholds
159
type incomingSettings struct {
163
// isTransportResponseFrame returns true: every incoming SETTINGS frame
// obligates the transport to write a SETTINGS ACK, so queuing one counts
// toward the transport-response throttling quota.
func (*incomingSettings) isTransportResponseFrame() bool {
	return true
}
165
type outgoingSettings struct {
169
// isTransportResponseFrame returns false: locally originated SETTINGS
// frames are not responses to peer actions.
func (*outgoingSettings) isTransportResponseFrame() bool {
	return false
}
171
type incomingGoAway struct {
174
// isTransportResponseFrame returns false: processing a received GOAWAY
// updates loopy's state but does not emit a response frame.
func (*incomingGoAway) isTransportResponseFrame() bool {
	return false
}
183
// isTransportResponseFrame returns false: an outgoing GOAWAY is initiated
// locally, not produced in response to a peer request.
func (*goAway) isTransportResponseFrame() bool {
	return false
}
190
// isTransportResponseFrame returns true: queued pings count against the
// transport-response quota (a received ping requires an ack on the wire).
func (*ping) isTransportResponseFrame() bool {
	return true
}
192
type outFlowControlSizeRequest struct {
196
// isTransportResponseFrame returns false: this item only reports loopy's
// current send quota to a local caller and writes nothing to the peer.
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool {
	return false
}
198
type outStreamState int
201
active outStreamState = iota
206
type outStream struct {
217
func (s *outStream) deleteSelf() {
224
s.next, s.prev = nil, nil
227
type outStreamList struct {
228
// Following are sentinel objects that mark the
229
// beginning and end of the list. They do not
230
// contain any item lists. All valid objects are
231
// inserted in between them.
232
// This is needed so that an outStream object can
233
// deleteSelf() in O(1) time without knowing which
234
// list it belongs to.
239
func newOutStreamList() *outStreamList {
240
head, tail := new(outStream), new(outStream)
243
return &outStreamList{
249
func (l *outStreamList) enqueue(s *outStream) {
257
// remove from the beginning of the list.
258
func (l *outStreamList) dequeue() *outStream {
267
// controlBuffer is a way to pass information to loopy.
268
// Information is passed as specific struct types called control frames.
269
// A control frame not only represents data, messages or headers to be sent out
270
// but can also be used to instruct loopy to update its internal state.
271
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
272
// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
273
type controlBuffer struct {
281
// transportResponseFrames counts the number of queued items that represent
282
// the response of an action initiated by the peer. trfChan is created
283
// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
284
// closed and nilled when transportResponseFrames drops below the
285
// threshold. Both fields are protected by mu.
286
transportResponseFrames int
287
trfChan atomic.Value // *chan struct{}
290
func newControlBuffer(done <-chan struct{}) *controlBuffer {
291
return &controlBuffer{
292
ch: make(chan struct{}, 1),
298
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
300
func (c *controlBuffer) throttle() {
301
ch, _ := c.trfChan.Load().(*chan struct{})
310
func (c *controlBuffer) put(it cbItem) error {
311
_, err := c.executeAndPut(nil, it)
315
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
323
if !f(it) { // f wasn't successful
328
if c.consumerWaiting {
330
c.consumerWaiting = false
333
if it.isTransportResponseFrame() {
334
c.transportResponseFrames++
335
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
336
// We are adding the frame that puts us over the threshold; create
337
// a throttling channel.
338
ch := make(chan struct{})
345
case c.ch <- struct{}{}:
352
// Note argument f should never be nil.
353
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
359
if !f(it) { // f wasn't successful
367
func (c *controlBuffer) get(block bool) (interface{}, error) {
374
if !c.list.isEmpty() {
375
h := c.list.dequeue().(cbItem)
376
if h.isTransportResponseFrame() {
377
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
378
// We are removing the frame that put us over the
379
// threshold; close and clear the throttling channel.
380
ch := c.trfChan.Load().(*chan struct{})
382
c.trfChan.Store((*chan struct{})(nil))
384
c.transportResponseFrames--
393
c.consumerWaiting = true
399
return nil, ErrConnClosing
404
func (c *controlBuffer) finish() {
410
c.err = ErrConnClosing
411
// There may be headers for streams in the control buffer.
412
// These streams need to be cleaned out since the transport
413
// is still not aware of these yet.
414
for head := c.list.dequeueAll(); head != nil; head = head.next {
415
hdr, ok := head.it.(*headerFrame)
419
if hdr.onOrphaned != nil { // It will be nil on the server-side.
420
hdr.onOrphaned(ErrConnClosing)
429
clientSide side = iota
433
// Loopy receives frames from the control buffer.
434
// Each frame is handled individually; most of the work done by loopy goes
435
// into handling data frames. Loopy maintains a queue of active streams, and each
436
// stream maintains a queue of data frames; as loopy receives data frames
437
// it gets added to the queue of the relevant stream.
438
// Loopy goes over this list of active streams by processing one node every iteration,
439
// thereby closely resemebling to a round-robin scheduling over all streams. While
440
// processing a stream, loopy writes out data bytes from this stream capped by the min
441
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
442
type loopyWriter struct {
446
oiws uint32 // outbound initial window size.
447
// estdStreams is map of all established streams that are not cleaned-up yet.
448
// On client-side, this is all streams whose headers were sent out.
449
// On server-side, this is all streams whose headers were received.
450
estdStreams map[uint32]*outStream // Established streams.
451
// activeStreams is a linked-list of all streams that have data to send and some
452
// stream-level flow control quota.
453
// Each of these streams internally have a list of data items(and perhaps trailers
454
// on the server-side) to be sent out.
455
activeStreams *outStreamList
457
hBuf *bytes.Buffer // The buffer for HPACK encoding.
458
hEnc *hpack.Encoder // HPACK encoder.
462
// Side-specific handlers
463
ssGoAwayHandler func(*goAway) (bool, error)
466
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
471
sendQuota: defaultWindowSize,
472
oiws: defaultWindowSize,
473
estdStreams: make(map[uint32]*outStream),
474
activeStreams: newOutStreamList(),
477
hEnc: hpack.NewEncoder(&buf),
483
const minBatchSize = 1000
485
// run should be run in a separate goroutine.
486
// It reads control frames from controlBuf and processes them by:
487
// 1. Updating loopy's internal state, or/and
488
// 2. Writing out HTTP2 frames on the wire.
490
// Loopy keeps all active streams with data to send in a linked-list.
491
// All streams in the activeStreams linked-list must have both:
492
// 1. Data to send, and
493
// 2. Stream level flow control quota available.
495
// In each iteration of run loop, other than processing the incoming control
496
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
497
// This results in writing of HTTP2 frames into an underlying write buffer.
498
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
499
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
500
// if the batch size is too low to give stream goroutines a chance to fill it up.
501
func (l *loopyWriter) run() (err error) {
503
if err == ErrConnClosing {
504
// Don't log ErrConnClosing as error since it happens
505
// 1. When the connection is closed by some other known issue.
506
// 2. User closed the connection.
507
// 3. A graceful close of connection.
508
if logger.V(logLevel) {
509
logger.Infof("transport: loopyWriter.run returning. %v", err)
515
it, err := l.cbuf.get(true)
519
if err = l.handle(it); err != nil {
522
if _, err = l.processData(); err != nil {
528
it, err := l.cbuf.get(false)
533
if err = l.handle(it); err != nil {
536
if _, err = l.processData(); err != nil {
541
isEmpty, err := l.processData()
550
if l.framer.writer.offset < minBatchSize {
555
l.framer.writer.Flush()
562
func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
563
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
566
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
567
// Otherwise update the quota.
569
l.sendQuota += w.increment
572
// Find the stream and update it.
573
if str, ok := l.estdStreams[w.streamID]; ok {
574
str.bytesOutStanding -= int(w.increment)
575
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
577
l.activeStreams.enqueue(str)
584
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
585
return l.framer.fr.WriteSettings(s.ss...)
588
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
589
if err := l.applySettings(s.ss); err != nil {
592
return l.framer.fr.WriteSettingsAck()
595
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
602
l.estdStreams[h.streamID] = str
606
func (l *loopyWriter) headerHandler(h *headerFrame) error {
607
if l.side == serverSide {
608
str, ok := l.estdStreams[h.streamID]
610
if logger.V(logLevel) {
611
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
615
// Case 1.A: Server is responding back with headers.
617
return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
619
// else: Case 1.B: Server wants to close stream.
621
if str.state != empty { // either active or waiting on stream quota.
622
// add it str's list of items.
626
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
629
return l.cleanupStreamHandler(h.cleanup)
631
// Case 2: Client wants to originate stream.
639
return l.originateStream(str)
642
func (l *loopyWriter) originateStream(str *outStream) error {
643
hdr := str.itl.dequeue().(*headerFrame)
644
if err := hdr.initStream(str.id); err != nil {
645
if err == ErrConnClosing {
648
// Other errors(errStreamDrain) need not close transport.
651
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
654
l.estdStreams[str.id] = str
658
func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
663
for _, f := range hf {
664
if err := l.hEnc.WriteField(f); err != nil {
665
if logger.V(logLevel) {
666
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
672
endHeaders, first bool
677
if size > http2MaxFrameLen {
678
size = http2MaxFrameLen
684
err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
686
BlockFragment: l.hBuf.Next(size),
687
EndStream: endStream,
688
EndHeaders: endHeaders,
691
err = l.framer.fr.WriteContinuation(
704
func (l *loopyWriter) preprocessData(df *dataFrame) error {
705
str, ok := l.estdStreams[df.streamID]
709
// If we got data for a stream it means that
710
// stream was originated and the headers were sent out.
712
if str.state == empty {
714
l.activeStreams.enqueue(str)
719
func (l *loopyWriter) pingHandler(p *ping) error {
721
l.bdpEst.timesnap(p.data)
723
return l.framer.fr.WritePing(p.ack, p.data)
727
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
728
o.resp <- l.sendQuota
732
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
734
if str, ok := l.estdStreams[c.streamID]; ok {
735
// On the server side it could be a trailers-only response or
736
// a RST_STREAM before stream initialization thus the stream might
737
// not be established yet.
738
delete(l.estdStreams, c.streamID)
741
if c.rst { // If RST_STREAM needs to be sent.
742
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
746
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
747
return ErrConnClosing
752
func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
753
if l.side == clientSide {
755
if len(l.estdStreams) == 0 {
756
return ErrConnClosing
762
func (l *loopyWriter) goAwayHandler(g *goAway) error {
763
// Handling of outgoing GoAway is very specific to side.
764
if l.ssGoAwayHandler != nil {
765
draining, err := l.ssGoAwayHandler(g)
769
l.draining = draining
774
func (l *loopyWriter) handle(i interface{}) error {
775
switch i := i.(type) {
776
case *incomingWindowUpdate:
777
return l.incomingWindowUpdateHandler(i)
778
case *outgoingWindowUpdate:
779
return l.outgoingWindowUpdateHandler(i)
780
case *incomingSettings:
781
return l.incomingSettingsHandler(i)
782
case *outgoingSettings:
783
return l.outgoingSettingsHandler(i)
785
return l.headerHandler(i)
786
case *registerStream:
787
return l.registerStreamHandler(i)
789
return l.cleanupStreamHandler(i)
790
case *incomingGoAway:
791
return l.incomingGoAwayHandler(i)
793
return l.preprocessData(i)
795
return l.pingHandler(i)
797
return l.goAwayHandler(i)
798
case *outFlowControlSizeRequest:
799
return l.outFlowControlSizeRequestHandler(i)
801
return fmt.Errorf("transport: unknown control message type %T", i)
805
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
806
for _, s := range ss {
808
case http2.SettingInitialWindowSize:
812
// If the new limit is greater make all depleted streams active.
813
for _, stream := range l.estdStreams {
814
if stream.state == waitingOnStreamQuota {
815
stream.state = active
816
l.activeStreams.enqueue(stream)
820
case http2.SettingHeaderTableSize:
821
updateHeaderTblSize(l.hEnc, s.Val)
827
// processData removes the first stream from active streams, writes out at most 16KB
828
// of its data and then puts it at the end of activeStreams if there's still more data
829
// to be sent and stream has some stream-level flow control.
830
func (l *loopyWriter) processData() (bool, error) {
831
if l.sendQuota == 0 {
834
str := l.activeStreams.dequeue() // Remove the first stream.
838
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
839
// A data item is represented by a dataFrame, since it later translates into
840
// multiple HTTP2 data frames.
841
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
842
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
843
// maximum possilbe HTTP2 frame size.
845
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
846
// Client sends out empty data frame with endStream = true
847
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
850
str.itl.dequeue() // remove the empty data item from stream
851
if str.itl.isEmpty() {
853
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
854
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
857
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
861
l.activeStreams.enqueue(str)
868
// Figure out the maximum size we can send
869
maxSize := http2MaxFrameLen
870
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
871
str.state = waitingOnStreamQuota
873
} else if maxSize > strQuota {
876
if maxSize > int(l.sendQuota) { // connection-level flow control.
877
maxSize = int(l.sendQuota)
879
// Compute how much of the header and data we can send within quota and max frame length
880
hSize := min(maxSize, len(dataItem.h))
881
dSize := min(maxSize-hSize, len(dataItem.d))
886
// We can add some data to grpc message header to distribute bytes more equally across frames.
887
// Copy on the stack to avoid generating garbage
888
var localBuf [http2MaxFrameLen]byte
889
copy(localBuf[:hSize], dataItem.h)
890
copy(localBuf[hSize:], dataItem.d[:dSize])
891
buf = localBuf[:hSize+dSize]
897
size := hSize + dSize
899
// Now that outgoing flow controls are checked we can replenish str's write quota
900
str.wq.replenish(size)
902
// If this is the last data message on this stream and all of it can be written in this iteration.
903
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
906
if dataItem.onEachWrite != nil {
907
dataItem.onEachWrite()
909
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
912
str.bytesOutStanding += size
913
l.sendQuota -= uint32(size)
914
dataItem.h = dataItem.h[hSize:]
915
dataItem.d = dataItem.d[dSize:]
917
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
920
if str.itl.isEmpty() {
922
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
923
if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
926
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
929
} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
930
str.state = waitingOnStreamQuota
931
} else { // Otherwise add it back to the list of active streams.
932
l.activeStreams.enqueue(str)
937
func min(a, b int) int {