-
Notifications
You must be signed in to change notification settings - Fork 84
/
channel.go
922 lines (778 loc) · 27.8 KB
/
channel.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
// Copyright (c) 2015 Uber Technologies, Inc.
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package tchannel
import (
"errors"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/uber/tchannel-go/tnet"
"github.com/opentracing/opentracing-go"
"go.uber.org/atomic"
"golang.org/x/net/context"
)
// Sentinel errors returned by channel lifecycle operations.
var (
	// errAlreadyListening is returned by Serve/ListenAndServe when the
	// channel already has a listener installed.
	errAlreadyListening = errors.New("channel already listening")

	// errInvalidStateForOp is returned when an operation (Serve, Connect,
	// accepting a new connection) is attempted in a channel state that
	// does not permit it.
	errInvalidStateForOp = errors.New("channel is in an invalid state for that method")

	// errMaxIdleTimeNotSet is returned by NewChannel when
	// IdleCheckInterval is set without a corresponding MaxIdleTime.
	errMaxIdleTimeNotSet = errors.New("IdleCheckInterval is set but MaxIdleTime is zero")

	// ErrNoServiceName is returned when no service name is provided when
	// creating a new channel.
	ErrNoServiceName = errors.New("no service name provided")
)

// ephemeralHostPort is the placeholder host:port advertised by client-only
// channels that do not listen for inbound connections.
const ephemeralHostPort = "0.0.0.0:0"
// ChannelOptions are used to control parameters when creating a TChannel.
// The zero value is valid; every field is optional.
type ChannelOptions struct {
	// Default Connection options
	DefaultConnectionOptions ConnectionOptions

	// The name of the process, for logging and reporting to peers
	ProcessName string

	// OnPeerStatusChanged is an optional callback that receives a notification
	// whenever the channel establishes a usable connection to a peer, or loses
	// a connection to a peer.
	OnPeerStatusChanged func(*Peer)

	// The logger to use for this channel
	Logger Logger

	// The host:port selection implementation to use for relaying. This is an
	// unstable API - breaking changes are likely.
	RelayHost RelayHost

	// The list of service names that should be handled locally by this channel.
	// This is an unstable API - breaking changes are likely.
	RelayLocalHandlers []string

	// The maximum allowable timeout for relayed calls (longer timeouts are
	// clamped to this value). Passing zero uses the default of 2m.
	// This is an unstable API - breaking changes are likely.
	RelayMaxTimeout time.Duration

	// If the relay needs to connect while processing a frame, this specifies
	// the max connection timeout used.
	RelayMaxConnectionTimeout time.Duration

	// RelayMaxTombs is the maximum number of timed-out calls that the relay
	// will keep track of per-connection to avoid spurious logs
	// for late-arriving frames.
	// This is an unstable API - breaking changes are likely.
	RelayMaxTombs uint64

	// RelayTimerVerification will disable pooling of relay timers, and instead
	// verify that timers are not used once they are released.
	// This is an unstable API - breaking changes are likely.
	RelayTimerVerification bool

	// The reporter to use for reporting stats for this channel.
	StatsReporter StatsReporter

	// TimeNow is a variable for overriding time.Now in unit tests.
	// Note: This is not a stable part of the API and may change.
	TimeNow func() time.Time

	// TimeTicker is a variable for overriding time.Ticker in unit tests.
	// Note: This is not a stable part of the API and may change.
	TimeTicker func(d time.Duration) *time.Ticker

	// MaxIdleTime controls how long we allow an idle connection to exist
	// before tearing it down. Must be set to non-zero if IdleCheckInterval
	// is set.
	MaxIdleTime time.Duration

	// IdleCheckInterval controls how often the channel runs a sweep over
	// all active connections to see if they can be dropped. Connections that
	// are idle for longer than MaxIdleTime are disconnected. If this is set to
	// zero (the default), idle checking is disabled.
	IdleCheckInterval time.Duration

	// Tracer is an OpenTracing Tracer used to manage distributed tracing spans.
	// If not set, opentracing.GlobalTracer() is used.
	Tracer opentracing.Tracer

	// Handler is an alternate handler for all inbound requests, overriding the
	// default handler that delegates to a subchannel.
	Handler Handler

	// SkipHandlerMethods allow users to configure TChannel server such that
	// requests with specified methods can be ignored by the above passed-in handler
	// and handled natively by TChannel.
	// Requests with other methods will be handled by passed-in handler.
	// Methods should be in the format of Service::Method.
	// This is useful for the gradual migration purpose.
	SkipHandlerMethods []string

	// Dialer is optional factory method which can be used for overriding
	// outbound connections for things like TLS handshake
	Dialer func(ctx context.Context, network, hostPort string) (net.Conn, error)

	// ConnContext runs when a connection is established, which updates
	// the per-connection base context. This context is used as the parent context
	// for incoming calls.
	ConnContext func(ctx context.Context, conn net.Conn) context.Context
}
// ChannelState is the state of a channel. States are ordered: a channel only
// moves forward through them (client -> listening -> closing -> closed).
type ChannelState int

const (
	// ChannelClient is a channel that can be used as a client.
	ChannelClient ChannelState = iota + 1

	// ChannelListening is a channel that is listening for new connections.
	ChannelListening

	// ChannelStartClose is a channel that has received a Close request.
	// The channel is no longer listening, and all new incoming connections are rejected.
	ChannelStartClose

	// ChannelInboundClosed is a channel that has drained all incoming connections, but may
	// have outgoing connections. All incoming calls and new outgoing calls are rejected.
	ChannelInboundClosed

	// ChannelClosed is a channel that has closed completely.
	ChannelClosed
)
//go:generate stringer -type=ChannelState

// A Channel is a bi-directional connection to the peering and routing network.
// Applications can use a Channel to make service calls to remote peers via
// BeginCall, or to listen for incoming calls from peers. Applications that
// want to receive requests should call one of Serve or ListenAndServe
// TODO(prashant): Shutdown all subchannels + peers when channel is closed.
type Channel struct {
	channelConnectionCommon

	// chID is a process-unique ID allocated from _nextChID, used in log fields
	// for debugging.
	chID uint32
	// createdStack presumably records the creation call stack for leak
	// diagnostics — set outside this view (TODO confirm in registerNewChannel).
	createdStack string
	// commonStatsTags are the base tags (app/service/host) copied into every
	// StatsTags() result.
	commonStatsTags     map[string]string
	connectionOptions   ConnectionOptions
	peers               *PeerList
	relayHost           RelayHost
	relayMaxTimeout     time.Duration
	relayMaxConnTimeout time.Duration
	relayMaxTombs       uint64
	relayTimerVerify    bool
	internalHandlers    *handlerMap
	// handler receives all inbound calls; it is either the user-supplied
	// Handler, a skip-wrapper, or the default channelHandler (see NewChannel).
	handler             Handler
	onPeerStatusChanged func(*Peer)
	// dialer creates outbound net.Conns; defaults to dialContext.
	dialer func(ctx context.Context, hostPort string) (net.Conn, error)
	// connContext derives the per-connection base context (see ChannelOptions).
	connContext func(ctx context.Context, conn net.Conn) context.Context
	// closed is closed in onClosed, signaling ClosedChan observers.
	closed chan struct{}

	// mutable contains all the members of Channel which are mutable.
	mutable struct {
		sync.RWMutex // protects members of the mutable struct.
		state        ChannelState
		peerInfo     LocalPeerInfo // May be ephemeral if this is a client only channel
		l            net.Listener  // May be nil if this is a client only channel
		idleSweep    *idleSweep
		conns        map[uint32]*Connection
	}
}
// channelConnectionCommon is the list of common objects that both use
// and can be copied directly from the channel to the connection.
type channelConnectionCommon struct {
	log           Logger
	relayLocal    map[string]struct{} // service names relayed locally (set form of RelayLocalHandlers)
	statsReporter StatsReporter
	tracer        opentracing.Tracer // may be nil; see Tracer()
	subChannels   *subChannelMap
	timeNow       func() time.Time
	timeTicker    func(time.Duration) *time.Ticker
}

// _nextChID is used to allocate unique IDs to every channel for debugging purposes.
var _nextChID atomic.Uint32
// Tracer returns the OpenTracing Tracer for this channel. If no tracer was
// provided in the configuration, returns opentracing.GlobalTracer(). Note that
// this approach allows opentracing.GlobalTracer() to be initialized _after_
// the channel is created.
func (ccc channelConnectionCommon) Tracer() opentracing.Tracer {
	if t := ccc.tracer; t != nil {
		return t
	}
	return opentracing.GlobalTracer()
}
// NewChannel creates a new Channel. The new channel can be used to send outbound requests
// to peers, but will not listen or handle incoming requests until one of ListenAndServe
// or Serve is called. The local service name should be passed to serviceName.
//
// opts may be nil, in which case defaults are used for every option. The
// caller's ChannelOptions struct is treated as read-only.
func NewChannel(serviceName string, opts *ChannelOptions) (*Channel, error) {
	if serviceName == "" {
		return nil, ErrNoServiceName
	}

	if opts == nil {
		opts = &ChannelOptions{}
	}

	processName := opts.ProcessName
	if processName == "" {
		processName = fmt.Sprintf("%s[%d]", filepath.Base(os.Args[0]), os.Getpid())
	}

	logger := opts.Logger
	if logger == nil {
		logger = NullLogger
	}

	statsReporter := opts.StatsReporter
	if statsReporter == nil {
		statsReporter = NullStatsReporter
	}

	timeNow := opts.TimeNow
	if timeNow == nil {
		timeNow = time.Now
	}

	timeTicker := opts.TimeTicker
	if timeTicker == nil {
		timeTicker = time.NewTicker
	}

	chID := _nextChID.Inc()
	logger = logger.WithFields(
		LogField{"serviceName", serviceName},
		LogField{"process", processName},
		LogField{"chID", chID},
	)

	if err := opts.validateIdleCheck(); err != nil {
		return nil, err
	}

	// Default to dialContext if dialer is not passed in as an option
	dialCtx := dialContext
	if opts.Dialer != nil {
		dialCtx = func(ctx context.Context, hostPort string) (net.Conn, error) {
			return opts.Dialer(ctx, "tcp", hostPort)
		}
	}

	// Default ConnContext to the identity function. Use a local rather than
	// assigning into opts: mutating the caller-supplied ChannelOptions is a
	// surprising side effect (the caller may reuse the struct).
	connContext := opts.ConnContext
	if connContext == nil {
		connContext = func(ctx context.Context, conn net.Conn) context.Context {
			return ctx
		}
	}

	ch := &Channel{
		channelConnectionCommon: channelConnectionCommon{
			log:           logger,
			relayLocal:    toStringSet(opts.RelayLocalHandlers),
			statsReporter: statsReporter,
			subChannels:   &subChannelMap{},
			timeNow:       timeNow,
			timeTicker:    timeTicker,
			tracer:        opts.Tracer,
		},

		chID:                chID,
		connectionOptions:   opts.DefaultConnectionOptions.withDefaults(),
		relayHost:           opts.RelayHost,
		relayMaxTimeout:     validateRelayMaxTimeout(opts.RelayMaxTimeout, logger),
		relayMaxConnTimeout: opts.RelayMaxConnectionTimeout,
		relayMaxTombs:       opts.RelayMaxTombs,
		relayTimerVerify:    opts.RelayTimerVerification,
		dialer:              dialCtx,
		connContext:         connContext,
		closed:              make(chan struct{}),
	}
	ch.peers = newRootPeerList(ch, opts.OnPeerStatusChanged).newChild()

	// Select the root handler: user handler with native-skip methods, plain
	// user handler, or the default subchannel-dispatching handler.
	switch {
	case len(opts.SkipHandlerMethods) > 0 && opts.Handler != nil:
		sm, err := toServiceMethodSet(opts.SkipHandlerMethods)
		if err != nil {
			return nil, err
		}

		ch.handler = userHandlerWithSkip{
			localHandler:      channelHandler{ch},
			ignoreUserHandler: sm,
			userHandler:       opts.Handler,
		}
	case opts.Handler != nil:
		ch.handler = opts.Handler
	default:
		ch.handler = channelHandler{ch}
	}

	ch.mutable.peerInfo = LocalPeerInfo{
		PeerInfo: PeerInfo{
			ProcessName: processName,
			HostPort:    ephemeralHostPort,
			IsEphemeral: true,
			Version: PeerVersion{
				Language:        "go",
				LanguageVersion: strings.TrimPrefix(runtime.Version(), "go"),
				TChannelVersion: VersionInfo,
			},
		},
		ServiceName: serviceName,
	}
	ch.mutable.state = ChannelClient
	ch.mutable.conns = make(map[uint32]*Connection)
	ch.createCommonStats()
	ch.internalHandlers = ch.createInternalHandlers()

	registerNewChannel(ch)

	if opts.RelayHost != nil {
		opts.RelayHost.SetChannel(ch)
	}

	// Start the idle connection timer.
	ch.mutable.idleSweep = startIdleSweep(ch, opts)

	return ch, nil
}
// ConnectionOptions returns a pointer to the channel's connection options
// (the defaulted copy made at construction time).
func (ch *Channel) ConnectionOptions() *ConnectionOptions {
	return &ch.connectionOptions
}
// Serve serves incoming requests using the provided listener.
// The local peer info is set synchronously, but the actual socket listening is done in
// a separate goroutine.
//
// Returns errAlreadyListening if the channel already has a listener, and
// errInvalidStateForOp if the channel has started closing.
func (ch *Channel) Serve(l net.Listener) error {
	mutable := &ch.mutable
	mutable.Lock()
	defer mutable.Unlock()

	if mutable.l != nil {
		return errAlreadyListening
	}
	// Validate the channel state before recording the listener, so a failed
	// Serve does not leave the channel holding a listener it never serves
	// from (previously the listener was stored even on this error path).
	if mutable.state != ChannelClient {
		return errInvalidStateForOp
	}

	mutable.l = tnet.Wrap(l)
	mutable.state = ChannelListening

	mutable.peerInfo.HostPort = l.Addr().String()
	mutable.peerInfo.IsEphemeral = false
	ch.log = ch.log.WithFields(LogField{"hostPort", mutable.peerInfo.HostPort})

	ch.log.Info("Channel is listening.")
	go ch.serve()
	return nil
}
// ListenAndServe listens on the given address and serves incoming requests.
// The port may be 0, in which case the channel will use an OS assigned port
// This method does not block as the handling of connections is done in a goroutine.
func (ch *Channel) ListenAndServe(hostPort string) error {
	mutable := &ch.mutable
	mutable.RLock()

	// Fast-path check; Serve re-checks under the write lock, so a race here
	// only costs an extra net.Listen.
	if mutable.l != nil {
		mutable.RUnlock()
		return errAlreadyListening
	}

	l, err := net.Listen("tcp", hostPort)
	if err != nil {
		mutable.RUnlock()
		return err
	}

	mutable.RUnlock()
	if err := ch.Serve(l); err != nil {
		// Serve did not take ownership of the listener (e.g. another
		// goroutine won the race to listen); close it to avoid leaking
		// the socket.
		l.Close()
		return err
	}
	return nil
}
// Registrar is the base interface for registering handlers on either the base
// Channel or the SubChannel
type Registrar interface {
	// ServiceName returns the service name that this Registrar is for.
	ServiceName() string

	// Register registers a handler for ServiceName and the given method.
	Register(h Handler, methodName string)

	// Logger returns the logger for this Registrar.
	Logger() Logger

	// StatsReporter returns the stats reporter for this Registrar
	StatsReporter() StatsReporter

	// StatsTags returns the tags that should be used.
	StatsTags() map[string]string

	// Peers returns the peer list for this Registrar.
	Peers() *PeerList
}
// Register registers a handler for a method.
//
// The handler is registered with the service name used when the Channel was
// created. To register a handler with a different service name, obtain a
// SubChannel for that service with GetSubChannel, and Register a handler
// under that. You may also use SetHandler on a SubChannel to set up a
// catch-all Handler for that service. See the docs for SetHandler for more
// information.
//
// Register panics if the channel was constructed with an alternate root
// handler that does not support Register.
func (ch *Channel) Register(h Handler, methodName string) {
	if r, ok := ch.handler.(registrar); ok {
		r.Register(h, methodName)
		return
	}
	panic("can't register handler when channel configured with alternate root handler without Register method")
}
// PeerInfo returns the current peer info for the channel
func (ch *Channel) PeerInfo() LocalPeerInfo {
	ch.mutable.RLock()
	defer ch.mutable.RUnlock()
	return ch.mutable.peerInfo
}
// createCommonStats populates commonStatsTags with the app/service tags, plus
// the hostname when it can be resolved.
func (ch *Channel) createCommonStats() {
	tags := map[string]string{
		"app":     ch.mutable.peerInfo.ProcessName,
		"service": ch.mutable.peerInfo.ServiceName,
	}
	ch.commonStatsTags = tags

	if host, err := os.Hostname(); err == nil {
		tags["host"] = host
	} else {
		// Best-effort: stats simply omit the host tag.
		ch.log.WithFields(ErrField(err)).Info("Channel creation failed to get host.")
	}

	// TODO(prashant): Allow user to pass extra tags (such as cluster, version).
}
// GetSubChannel returns a SubChannel for the given service name. If the subchannel does not
// exist, it is created. Options are only applied when the subchannel is
// created by this call.
func (ch *Channel) GetSubChannel(serviceName string, opts ...SubChannelOption) *SubChannel {
	sub, created := ch.subChannels.getOrAdd(serviceName, ch)
	if !created {
		return sub
	}
	for _, applyOpt := range opts {
		applyOpt(sub)
	}
	return sub
}
// Peers returns the PeerList for the channel.
func (ch *Channel) Peers() *PeerList {
	return ch.peers
}

// RootPeers returns the root PeerList for the channel, which is the sole place
// new Peers are created. All children of the root list (including ch.Peers())
// automatically re-use peers from the root list and create new peers in the
// root list.
func (ch *Channel) RootPeers() *RootPeerList {
	return ch.peers.parent
}
// BeginCall starts a new call to a remote peer, returning an OutboundCall that can
// be used to write the arguments of the call.
func (ch *Channel) BeginCall(ctx context.Context, hostPort, serviceName, methodName string, callOptions *CallOptions) (*OutboundCall, error) {
	peer := ch.RootPeers().GetOrAdd(hostPort)
	return peer.BeginCall(ctx, serviceName, methodName, callOptions)
}
// serve runs the listener to accept and manage new incoming connections, blocking
// until the channel is closed.
//
// Temporary accept errors are retried with exponential backoff (5ms doubling
// up to 1s); any other accept error terminates the loop.
func (ch *Channel) serve() {
	acceptBackoff := 0 * time.Millisecond

	for {
		netConn, err := ch.mutable.l.Accept()
		if err != nil {
			// Backoff from new accepts if this is a temporary error
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if acceptBackoff == 0 {
					acceptBackoff = 5 * time.Millisecond
				} else {
					acceptBackoff *= 2
				}
				if max := 1 * time.Second; acceptBackoff > max {
					acceptBackoff = max
				}
				ch.log.WithFields(
					ErrField(err),
					LogField{"backoff", acceptBackoff},
				).Warn("Accept error, will wait and retry.")
				time.Sleep(acceptBackoff)
				continue
			} else {
				// Only log an error if this didn't happen due to a Close.
				if ch.State() >= ChannelStartClose {
					return
				}
				ch.log.WithFields(ErrField(err)).Fatal("Unrecoverable accept error, closing server.")
				return
			}
		}

		// A successful accept resets the backoff window.
		acceptBackoff = 0

		// Perform the connection handshake in a background goroutine.
		go func() {
			// Register the connection in the peer once the channel is set up.
			events := connectionEvents{
				OnActive:           ch.inboundConnectionActive,
				OnCloseStateChange: ch.connectionCloseStateChange,
				OnExchangeUpdated:  ch.exchangeUpdated,
			}
			if _, err := ch.inboundHandshake(context.Background(), netConn, events); err != nil {
				netConn.Close()
			}
		}()
	}
}
// Ping sends a ping message to the given hostPort and waits for a response.
func (ch *Channel) Ping(ctx context.Context, hostPort string) error {
	conn, err := ch.RootPeers().GetOrAdd(hostPort).GetConnection(ctx)
	if err != nil {
		return err
	}
	return conn.ping(ctx)
}
// Logger returns the logger for this channel.
func (ch *Channel) Logger() Logger {
	return ch.log
}

// StatsReporter returns the stats reporter for this channel.
func (ch *Channel) StatsReporter() StatsReporter {
	return ch.statsReporter
}
// StatsTags returns the common tags that should be used when reporting stats.
// It returns a new map for each call, so callers may mutate the result freely.
func (ch *Channel) StatsTags() map[string]string {
	// Pre-size to avoid rehashing while copying.
	m := make(map[string]string, len(ch.commonStatsTags))
	for k, v := range ch.commonStatsTags {
		m[k] = v
	}
	return m
}
// ServiceName returns the serviceName that this channel was created for.
func (ch *Channel) ServiceName() string {
	return ch.PeerInfo().ServiceName
}
// Connect creates a new outbound connection to hostPort.
//
// The channel must be usable as a client (ChannelClient or ChannelListening),
// otherwise errInvalidStateForOp is returned. On success the returned
// connection has completed the outbound handshake and has been registered
// with the intended peer.
func (ch *Channel) Connect(ctx context.Context, hostPort string) (*Connection, error) {
	switch state := ch.State(); state {
	case ChannelClient, ChannelListening:
		break
	default:
		ch.log.Debugf("Connect rejecting new connection as state is %v", state)
		return nil, errInvalidStateForOp
	}

	// The context timeout applies to the whole call, but users may want a lower
	// connect timeout (e.g. for streams).
	if params := getTChannelParams(ctx); params != nil && params.connectTimeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, params.connectTimeout)
		defer cancel()
	}

	events := connectionEvents{
		OnActive:           ch.outboundConnectionActive,
		OnCloseStateChange: ch.connectionCloseStateChange,
		OnExchangeUpdated:  ch.exchangeUpdated,
	}

	// Bail out before dialing if the caller's context is already done.
	if err := ctx.Err(); err != nil {
		return nil, GetContextError(err)
	}

	timeout := getTimeout(ctx)
	tcpConn, err := ch.dialer(ctx, hostPort)
	if err != nil {
		// Map dial timeouts and cancellations to the canonical TChannel
		// errors; everything else is returned as-is.
		if ne, ok := err.(net.Error); ok && ne.Timeout() {
			ch.log.WithFields(
				LogField{"remoteHostPort", hostPort},
				LogField{"timeout", timeout},
			).Info("Outbound net.Dial timed out.")
			err = ErrTimeout
		} else if ctx.Err() == context.Canceled {
			ch.log.WithFields(
				LogField{"remoteHostPort", hostPort},
			).Info("Outbound net.Dial was cancelled.")
			err = GetContextError(ErrRequestCancelled)
		} else {
			ch.log.WithFields(
				ErrField(err),
				LogField{"remoteHostPort", hostPort},
			).Info("Outbound net.Dial failed.")
		}
		return nil, err
	}

	conn, err := ch.outboundHandshake(ctx, tcpConn, hostPort, events)
	if conn != nil {
		// It's possible that the connection we just created responds with a host:port
		// that is not what we tried to connect to. E.g., we may have connected to
		// 127.0.0.1:1234, but the returned host:port may be 10.0.0.1:1234.
		// In this case, the connection won't be added to 127.0.0.1:1234 peer
		// and so future calls to that peer may end up creating new connections. To
		// avoid this issue, and to avoid clients being aware of any TCP relays, we
		// add the connection to the intended peer.
		if hostPort != conn.remotePeerInfo.HostPort {
			conn.log.Debugf("Outbound connection host:port mismatch, adding to peer %v", conn.remotePeerInfo.HostPort)
			ch.addConnectionToPeer(hostPort, conn, outbound)
		}
	}

	return conn, err
}
// exchangeUpdated updates the peer heap.
func (ch *Channel) exchangeUpdated(c *Connection) {
	hostPort := c.remotePeerInfo.HostPort
	if hostPort == "" {
		// Hostport is unknown until we get init resp.
		return
	}

	if p, ok := ch.RootPeers().Get(hostPort); ok {
		ch.updatePeer(p)
	}
}
// updatePeer updates the score of the peer and updates its position in the
// heap as well. The completion callback is invoked only after both the peer
// list and the subchannels have observed the change.
func (ch *Channel) updatePeer(p *Peer) {
	ch.peers.onPeerChange(p)
	ch.subChannels.updatePeer(p)
	p.callOnUpdateComplete()
}
// addConnection adds the connection to the channel's list of connections
// if the channel is in a valid state to accept this connection. It returns
// whether the connection was added.
func (ch *Channel) addConnection(c *Connection, direction connectionDirection) bool {
	ch.mutable.Lock()
	defer ch.mutable.Unlock()

	// Only track connections that are still active.
	if c.readState() != connectionActive {
		return false
	}

	// Reject new connections once the channel has started closing.
	if state := ch.mutable.state; state != ChannelClient && state != ChannelListening {
		return false
	}

	ch.mutable.conns[c.connID] = c
	return true
}
// connectionActive records a newly-active connection with the channel and its
// peer, closing it instead if the channel can no longer accept connections.
func (ch *Channel) connectionActive(c *Connection, direction connectionDirection) {
	c.log.Debugf("New active %v connection for peer %v", direction, c.remotePeerInfo.HostPort)

	if !ch.addConnection(c, direction) {
		// The channel isn't in a valid state to accept this connection, close the connection.
		c.close(LogField{"reason", "new active connection on closing channel"})
		return
	}

	ch.addConnectionToPeer(c.remotePeerInfo.HostPort, c, direction)
}
// addConnectionToPeer attaches c to the peer for hostPort (creating the peer
// if needed) and refreshes the peer's score. Failures to attach are logged
// but not propagated.
func (ch *Channel) addConnectionToPeer(hostPort string, c *Connection, direction connectionDirection) {
	peer := ch.RootPeers().GetOrAdd(hostPort)
	err := peer.addConnection(c, direction)
	if err != nil {
		c.log.WithFields(
			LogField{"remoteHostPort", c.remotePeerInfo.HostPort},
			LogField{"direction", direction},
			ErrField(err),
		).Warn("Failed to add connection to peer.")
	}

	ch.updatePeer(peer)
}
// inboundConnectionActive is the OnActive callback for inbound connections.
func (ch *Channel) inboundConnectionActive(c *Connection) {
	ch.connectionActive(c, inbound)
}

// outboundConnectionActive is the OnActive callback for outbound connections.
func (ch *Channel) outboundConnectionActive(c *Connection) {
	ch.connectionActive(c, outbound)
}
// removeClosedConn removes a connection if it's closed.
// Until a connection is fully closed, the channel must keep track of it.
func (ch *Channel) removeClosedConn(c *Connection) {
	if c.readState() != connectionClosed {
		return
	}

	ch.mutable.Lock()
	defer ch.mutable.Unlock()
	delete(ch.mutable.conns, c.connID)
}
// getMinConnectionState returns the smallest connectionState across all
// tracked connections (connectionClosed when there are none). Callers must
// hold ch.mutable's lock.
func (ch *Channel) getMinConnectionState() connectionState {
	minState := connectionClosed
	for _, conn := range ch.mutable.conns {
		state := conn.readState()
		if state < minState {
			minState = state
		}
	}
	return minState
}
// connectionCloseStateChange is called when a connection's close state changes.
//
// It removes fully-closed connections, notifies the affected peer(s), and —
// when the channel is mid-close — advances the channel state
// (ChannelStartClose -> ChannelInboundClosed -> ChannelClosed) based on the
// minimum state across all remaining connections.
func (ch *Channel) connectionCloseStateChange(c *Connection) {
	ch.removeClosedConn(c)
	if peer, ok := ch.RootPeers().Get(c.remotePeerInfo.HostPort); ok {
		peer.connectionCloseStateChange(c)
		ch.updatePeer(peer)
	}
	if c.outboundHP != "" && c.outboundHP != c.remotePeerInfo.HostPort {
		// Outbound connections may be in multiple peers.
		if peer, ok := ch.RootPeers().Get(c.outboundHP); ok {
			peer.connectionCloseStateChange(c)
			ch.updatePeer(peer)
		}
	}

	// State advancement below only applies while the channel is closing.
	chState := ch.State()
	if chState != ChannelStartClose && chState != ChannelInboundClosed {
		return
	}

	ch.mutable.RLock()
	minState := ch.getMinConnectionState()
	ch.mutable.RUnlock()

	var updateTo ChannelState
	if minState >= connectionClosed {
		updateTo = ChannelClosed
	} else if minState >= connectionInboundClosed && chState == ChannelStartClose {
		updateTo = ChannelInboundClosed
	}

	var updatedToState ChannelState
	if updateTo > 0 {
		ch.mutable.Lock()
		// Recheck the state as it's possible another goroutine changed the state
		// from what we expected, and so we might make a stale change.
		if ch.mutable.state == chState {
			ch.mutable.state = updateTo
			updatedToState = updateTo
		}
		ch.mutable.Unlock()
		chState = updateTo
	}

	c.log.Debugf("ConnectionCloseStateChange channel state = %v connection minState = %v",
		chState, minState)

	// Only the goroutine that actually performed the transition to
	// ChannelClosed runs the final cleanup.
	if updatedToState == ChannelClosed {
		ch.onClosed()
	}
}
// onClosed performs the final cleanup once the channel reaches ChannelClosed:
// it deregisters the channel, signals ClosedChan observers, and logs.
func (ch *Channel) onClosed() {
	removeClosedChannel(ch)
	close(ch.closed)
	ch.log.Infof("Channel closed.")
}

// Closed returns whether this channel has been closed with .Close()
func (ch *Channel) Closed() bool {
	return ch.State() == ChannelClosed
}

// ClosedChan returns a channel that will close when the Channel has completely
// closed.
func (ch *Channel) ClosedChan() <-chan struct{} {
	return ch.closed
}

// State returns the current channel state.
func (ch *Channel) State() ChannelState {
	ch.mutable.RLock()
	defer ch.mutable.RUnlock()
	return ch.mutable.state
}
// Close starts a graceful Close for the channel. This does not happen immediately:
// 1. This call closes the Listener and starts closing connections.
// 2. When all incoming connections are drained, the connection blocks new outgoing calls.
// 3. When all connections are drained, the channel's state is updated to Closed.
func (ch *Channel) Close() {
	ch.Logger().Info("Channel.Close called.")
	var connections []*Connection
	var channelClosed bool

	// The lock is held only for the state transition; the connections are
	// closed outside the lock since c.close triggers callbacks
	// (connectionCloseStateChange) that re-acquire ch.mutable.
	func() {
		ch.mutable.Lock()
		defer ch.mutable.Unlock()

		if ch.mutable.state == ChannelClosed {
			ch.Logger().Info("Channel already closed, skipping additional Close() calls")
			return
		}

		if ch.mutable.l != nil {
			ch.mutable.l.Close()
		}

		// Stop the idle connections timer.
		ch.mutable.idleSweep.Stop()

		ch.mutable.state = ChannelStartClose
		// With no connections to drain, jump straight to ChannelClosed.
		if len(ch.mutable.conns) == 0 {
			ch.mutable.state = ChannelClosed
			channelClosed = true
		}
		for _, c := range ch.mutable.conns {
			connections = append(connections, c)
		}
	}()

	for _, c := range connections {
		c.close(LogField{"reason", "channel closing"})
	}

	if channelClosed {
		ch.onClosed()
	}
}
// RelayHost returns the channel's RelayHost, if any.
func (ch *Channel) RelayHost() RelayHost {
	return ch.relayHost
}

// validateIdleCheck ensures MaxIdleTime is configured whenever idle sweeping
// is enabled via IdleCheckInterval.
func (o *ChannelOptions) validateIdleCheck() error {
	if o.IdleCheckInterval <= 0 {
		return nil
	}
	if o.MaxIdleTime > 0 {
		return nil
	}
	return errMaxIdleTimeNotSet
}
// toStringSet converts a slice of strings into a set for O(1) membership
// checks. Duplicates collapse to a single entry; a nil slice yields an
// empty set.
func toStringSet(ss []string) map[string]struct{} {
	result := make(map[string]struct{}, len(ss))
	for _, item := range ss {
		result[item] = struct{}{}
	}
	return result
}
// toServiceMethodSet takes a list of service::method formatted strings and
// builds the map[service::method]struct{} set. An entry that does not split
// into exactly two parts on "::" yields an error.
func toServiceMethodSet(sms []string) (map[string]struct{}, error) {
	set := make(map[string]struct{}, len(sms))
	for _, sm := range sms {
		if parts := strings.Split(sm, "::"); len(parts) != 2 {
			return nil, fmt.Errorf("each %q value should be of service::Method format but got %q", "SkipHandlerMethods", sm)
		}
		set[sm] = struct{}{}
	}
	return set, nil
}