 	"math"
 	"math/rand"
 	"net"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -35,6 +36,7 @@ type ClusterOptions struct {
 	// Enables read-only commands on slave nodes.
 	ReadOnly bool
 	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
 	RouteByLatency bool
 	// Allows routing read-only commands to the random master or slave node.
 	RouteRandomly bool
@@ -150,6 +152,10 @@ func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
 	return &node
 }
 
+func (n *clusterNode) String() string {
+	return n.Client.String()
+}
+
 func (n *clusterNode) Close() error {
 	return n.Client.Close()
 }
@@ -379,15 +385,17 @@ func (c *clusterNodes) Random() (*clusterNode, error) {
 
 type clusterState struct {
 	nodes   *clusterNodes
-	masters []*clusterNode
-	slaves  []*clusterNode
+	Masters []*clusterNode
+	Slaves  []*clusterNode
 
 	slots [][]*clusterNode
 
 	generation uint32
 }
 
-func newClusterState(nodes *clusterNodes, slots []ClusterSlot, origin string) (*clusterState, error) {
+func newClusterState(
+	nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
 	c := clusterState{
 		nodes:      nodes,
 		generation: nodes.NextGeneration(),
@@ -413,9 +421,9 @@ func newClusterState(nodes *clusterNodes, slots []ClusterSlot, origin string) (*
 			nodes = append(nodes, node)
 
 			if i == 0 {
-				c.masters = appendNode(c.masters, node)
+				c.Masters = appendUniqueNode(c.Masters, node)
 			} else {
-				c.slaves = appendNode(c.slaves, node)
+				c.Slaves = appendUniqueNode(c.Slaves, node)
 			}
 		}
 
@@ -497,6 +505,28 @@ func (c *clusterState) slotNodes(slot int) []*clusterNode {
 	return nil
 }
 
+func (c *clusterState) IsConsistent() bool {
+	if len(c.Masters) > len(c.Slaves) {
+		return false
+	}
+
+	for _, master := range c.Masters {
+		s := master.Client.Info("replication").Val()
+		if !strings.Contains(s, "role:master") {
+			return false
+		}
+	}
+
+	for _, slave := range c.Slaves {
+		s := slave.Client.Info("replication").Val()
+		if !strings.Contains(s, "role:slave") {
+			return false
+		}
+	}
+
+	return true
+}
+
 //------------------------------------------------------------------------------
 
 type clusterStateHolder struct {
@@ -516,7 +546,18 @@ func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder
 	}
 }
 
-func (c *clusterStateHolder) Load() (*clusterState, error) {
+func (c *clusterStateHolder) Reload() (*clusterState, error) {
+	state, err := c.reload()
+	if err != nil {
+		return nil, err
+	}
+	if !state.IsConsistent() {
+		c.LazyReload()
+	}
+	return state, nil
+}
+
+func (c *clusterStateHolder) reload() (*clusterState, error) {
 	state, err := c.load()
 	if err != nil {
 		c.lastErrMu.Lock()
@@ -535,9 +576,15 @@ func (c *clusterStateHolder) LazyReload() {
 	go func() {
 		defer atomic.StoreUint32(&c.reloading, 0)
 
-		_, err := c.Load()
-		if err == nil {
-			time.Sleep(time.Second)
+		for {
+			state, err := c.reload()
+			if err != nil {
+				return
+			}
+			time.Sleep(100 * time.Millisecond)
+			if state.IsConsistent() {
+				return
+			}
 		}
 	}()
 }
@@ -596,7 +643,7 @@ func NewClusterClient(opt *ClusterOptions) *ClusterClient {
 
 	c.cmdable.setProcessor(c.Process)
 
-	_, _ = c.state.Load()
+	_, _ = c.state.Reload()
 	if opt.IdleCheckFrequency > 0 {
 		go c.reaper(opt.IdleCheckFrequency)
 	}
@@ -890,7 +937,7 @@ func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
-	for _, master := range state.masters {
+	for _, master := range state.Masters {
 		wg.Add(1)
 		go func(node *clusterNode) {
 			defer wg.Done()
@@ -923,7 +970,7 @@ func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
 
 	var wg sync.WaitGroup
 	errCh := make(chan error, 1)
-	for _, slave := range state.slaves {
+	for _, slave := range state.Slaves {
 		wg.Add(1)
 		go func(node *clusterNode) {
 			defer wg.Done()
@@ -967,11 +1014,11 @@ func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
 		}
 	}
 
-	for _, node := range state.masters {
+	for _, node := range state.Masters {
 		wg.Add(1)
 		go worker(node)
 	}
-	for _, node := range state.slaves {
+	for _, node := range state.Slaves {
 		wg.Add(1)
 		go worker(node)
 	}
@@ -994,7 +1041,7 @@ func (c *ClusterClient) PoolStats() *PoolStats {
 		return &acc
 	}
 
-	for _, node := range state.masters {
+	for _, node := range state.Masters {
 		s := node.Client.connPool.Stats()
 		acc.Hits += s.Hits
 		acc.Misses += s.Misses
@@ -1005,7 +1052,7 @@ func (c *ClusterClient) PoolStats() *PoolStats {
 		acc.StaleConns += s.StaleConns
 	}
 
-	for _, node := range state.slaves {
+	for _, node := range state.Slaves {
 		s := node.Client.connPool.Stats()
 		acc.Hits += s.Hits
 		acc.Misses += s.Misses
@@ -1438,7 +1485,7 @@ func isLoopbackAddr(addr string) bool {
 	return ip.IsLoopback()
 }
 
-func appendNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
 	for _, n := range nodes {
 		if n == node {
 			return nodes