scheduler: consider leader score when evict leader #8912

Closed
wants to merge 5 commits
4 changes: 2 additions & 2 deletions pkg/schedule/checker/rule_checker.go
@@ -312,7 +312,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
if region.GetLeader().GetId() != peer.GetId() && rf.Rule.Role == placement.Leader {
ruleCheckerFixLeaderRoleCounter.Inc()
if c.allowLeader(fit, peer) {
return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), []uint64{}, 0)
return operator.CreateTransferLeaderOperator("fix-leader-role", c.cluster, region, peer.GetStoreId(), 0)
}
ruleCheckerNotAllowLeaderCounter.Inc()
return nil, errs.ErrPeerCannotBeLeader
@@ -321,7 +321,7 @@ func (c *RuleChecker) fixLooseMatchPeer(region *core.RegionInfo, fit *placement.
ruleCheckerFixFollowerRoleCounter.Inc()
for _, p := range region.GetPeers() {
if c.allowLeader(fit, p) {
return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), []uint64{}, 0)
return operator.CreateTransferLeaderOperator("fix-follower-role", c.cluster, region, p.GetStoreId(), 0)
}
}
ruleCheckerNoNewLeaderCounter.Inc()
1 change: 1 addition & 0 deletions pkg/schedule/filter/comparer.go
@@ -27,6 +27,7 @@ type StoreComparer func(a, b *core.StoreInfo) int
// score.
func RegionScoreComparer(conf config.SharedConfigProvider) StoreComparer {
return func(a, b *core.StoreInfo) int {
// TODO: we should use the real time delta data to calculate the score.
sa := a.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0)
sb := b.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0)
switch {
45 changes: 45 additions & 0 deletions pkg/schedule/filter/comparer_test.go
@@ -0,0 +1,45 @@
// Copyright 2025 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package filter

import (
"math/rand"
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/pingcap/kvproto/pkg/metapb"

"github.com/tikv/pd/pkg/core"
"github.com/tikv/pd/pkg/mock/mockconfig"
)

func TestRegionCompare(t *testing.T) {
re := require.New(t)
ids := []uint64{1, 2, 3, 4, 5}
stores := make([]*core.StoreInfo, 0, len(ids))
for _, id := range ids {
stores = append(stores, core.NewStoreInfo(
&metapb.Store{Id: id},
core.SetRegionSize(int64(6-id)*1000),
))
}
cs := NewCandidates(rand.New(rand.NewSource(time.Now().UnixNano())), stores)
cfg := mockconfig.NewTestOptions()
re.Equal(uint64(1), cs.PickFirst().GetID())
cs.Sort(RegionScoreComparer(cfg))
re.Equal(uint64(5), cs.PickFirst().GetID())
}
2 changes: 1 addition & 1 deletion pkg/schedule/handler/handler.go
@@ -420,7 +420,7 @@ func (h *Handler) AddTransferLeaderOperator(regionID uint64, storeID uint64) err
return errors.Errorf("region has no voter in store %v", storeID)
}

op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), []uint64{}, operator.OpAdmin)
op, err := operator.CreateTransferLeaderOperator("admin-transfer-leader", c, region, newLeader.GetStoreId(), operator.OpAdmin)
if err != nil {
log.Debug("fail to create transfer leader operator", errs.ZapError(err))
return err
19 changes: 0 additions & 19 deletions pkg/schedule/operator/builder.go
@@ -297,25 +297,6 @@ func (b *Builder) SetLeader(storeID uint64) *Builder {
return b
}

// SetLeaders records all valid target leaders in Builder.
func (b *Builder) SetLeaders(storeIDs []uint64) *Builder {
if b.err != nil {
return b
}
sort.Slice(storeIDs, func(i, j int) bool { return storeIDs[i] < storeIDs[j] })
for _, storeID := range storeIDs {
peer := b.targetPeers[storeID]
if peer == nil || core.IsLearner(peer) || b.unhealthyPeers[storeID] != nil {
continue
}
b.targetLeaderStoreIDs = append(b.targetLeaderStoreIDs, storeID)
}
// Don't need to check if there's valid target, because `targetLeaderStoreIDs`
// can be empty if this is not a multi-target evict leader operation. Besides,
// `targetLeaderStoreID` must be valid and there must be at least one valid target.
return b
}

// SetPeers resets the target peer list.
//
// If peer's ID is 0, the builder will allocate a new ID later. If current
3 changes: 1 addition & 2 deletions pkg/schedule/operator/create_operator.go
@@ -80,10 +80,9 @@ func CreateRemovePeerOperator(desc string, ci sche.SharedCluster, kind OpKind, r
}

// CreateTransferLeaderOperator creates an operator that transfers the leader from a source store to a target store.
func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, targetStoreIDs []uint64, kind OpKind) (*Operator, error) {
func CreateTransferLeaderOperator(desc string, ci sche.SharedCluster, region *core.RegionInfo, targetStoreID uint64, kind OpKind) (*Operator, error) {
return NewBuilder(desc, ci, region, SkipOriginJointStateCheck).
SetLeader(targetStoreID).
SetLeaders(targetStoreIDs).

Member: Why remove it?

Contributor Author: We no longer need targetStoreIDs; it was previously only used in evict_leader.go.

Member: It's an optimization, so why don't we need it?

Contributor Author: SetLeaders checks for learners and unhealthy peers (https://github.com/tikv/pd/blob/master/pkg%2Fschedule%2Foperator%2Fbuilder.go#L301-L301), and SetLeader performs the same checks (https://github.com/tikv/pd/blob/master/pkg%2Fschedule%2Foperator%2Fbuilder.go#L284-L284). SetLeaders sorts the target stores by store ID, while this PR sorts them by score.

Member: targetLeaderStoreIDs was used previously, but is removed by this PR?

Contributor Author: Yes, we previously used it in the evict leader scheduler to select targets; this PR replaces it.

Member (@rleungx, Jan 9, 2025): We allowed multiple targets in the op step before; this PR changes that, which might be slower?

Contributor Author: It was only used in evict leader, and after this PR no other scheduler uses it, so I removed it. If we need it in the future, I will bring it back.

Contributor Author: It was O(n) previously and is O(n log n) now. Since the number of stores being sorted is usually two (three replicas) or four (five replicas), the difference is not significant.

Build(kind)
}
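
As the thread above notes, the evict-leader path now orders candidate stores by leader score adjusted by pending operator influence, instead of by store ID. Below is a minimal, self-contained sketch of that ordering; the store type, leaderScore helper, and pending map are simplified assumptions for illustration, not the pd API.

package main

import (
	"fmt"
	"sort"
)

// store is a simplified stand-in for core.StoreInfo.
type store struct {
	id          uint64
	leaderCount int64
}

// leaderScore mimics "leader score plus operator influence": the count-based
// score plus the delta contributed by in-flight operators.
func leaderScore(s store, pendingDelta int64) float64 {
	return float64(s.leaderCount + pendingDelta)
}

func main() {
	candidates := []store{
		{id: 1, leaderCount: 30},
		{id: 2, leaderCount: 20},
		{id: 3, leaderCount: 10},
	}
	// Hypothetical pending influence: store 3 is already receiving leaders
	// from queued operators, so its effective score is higher.
	pending := map[uint64]int64{3: 25}

	// Prefer the store with the lowest adjusted leader score.
	sort.Slice(candidates, func(i, j int) bool {
		return leaderScore(candidates[i], pending[candidates[i].id]) <
			leaderScore(candidates[j], pending[candidates[j].id])
	})
	fmt.Println("preferred target:", candidates[0].id) // store 2 (score 20)
}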

2 changes: 1 addition & 1 deletion pkg/schedule/operator/create_operator_test.go
@@ -425,7 +425,7 @@ func (suite *createOperatorTestSuite) TestCreateTransferLeaderOperator() {
}
for _, testCase := range testCases {
region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: testCase.originPeers}, testCase.originPeers[0])
op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, []uint64{}, 0)
op, err := CreateTransferLeaderOperator("test", suite.cluster, region, testCase.targetLeaderStoreID, 0)

if testCase.isErr {
re.Error(err)
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/balance_leader.go
@@ -537,7 +537,7 @@ func (s *balanceLeaderScheduler) createOperator(solver *solver, collector *plan.
}
solver.Step++
defer func() { solver.Step-- }()
op, err := operator.CreateTransferLeaderOperator(s.GetName(), solver, solver.Region, solver.targetStoreID(), []uint64{}, operator.OpLeader)
op, err := operator.CreateTransferLeaderOperator(s.GetName(), solver, solver.Region, solver.targetStoreID(), operator.OpLeader)
if err != nil {
log.Debug("fail to create balance leader operator", errs.ZapError(err))
if collector != nil {
47 changes: 33 additions & 14 deletions pkg/schedule/schedulers/evict_leader.go
@@ -17,6 +17,7 @@
import (
"math/rand"
"net/http"
"sort"
"strconv"

"github.com/gorilla/mux"
@@ -288,7 +289,7 @@
// Schedule implements the Scheduler interface.
func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*operator.Operator, []plan.Plan) {
evictLeaderCounter.Inc()
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf), nil
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController), nil
}

func uniqueAppendOperator(dst []*operator.Operator, src ...*operator.Operator) []*operator.Operator {
@@ -312,11 +313,11 @@
getBatch() int
}

func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
func scheduleEvictLeaderBatch(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, opController *operator.Controller) []*operator.Operator {
var ops []*operator.Operator
batchSize := conf.getBatch()
for range batchSize {
once := scheduleEvictLeaderOnce(r, name, cluster, conf)
once := scheduleEvictLeaderOnce(r, name, cluster, conf, opController)
// no more regions
if len(once) == 0 {
break
@@ -330,7 +331,7 @@
return ops
}

func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf) []*operator.Operator {
func scheduleEvictLeaderOnce(r *rand.Rand, name string, cluster sche.SchedulerCluster, conf evictLeaderStoresConf, opController *operator.Controller) []*operator.Operator {
stores := conf.getStores()
ops := make([]*operator.Operator, 0, len(stores))
for _, storeID := range stores {
@@ -363,19 +364,12 @@
filters = append(filters, &filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
candidates := filter.NewCandidates(r, cluster.GetFollowerStores(region)).
FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...)
// Compatible with old TiKV transfer leader logic.
target := candidates.RandomPick()
targets := candidates.PickAll()
// `targets` MUST contains `target`, so only needs to check if `target` is nil here.
if target == nil {

if len(candidates.Stores) == 0 {
evictLeaderNoTargetStoreCounter.Inc()
continue
}
targetIDs := make([]uint64, 0, len(targets))
for _, t := range targets {
targetIDs = append(targetIDs, t.GetID())
}
op, err := operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpLeader)
op, err := createOperatorWithSort(name, cluster, candidates, region, opController)
if err != nil {
log.Debug("fail to create evict leader operator", errs.ZapError(err))
continue
@@ -387,6 +381,31 @@
return ops
}

func createOperatorWithSort(name string, cluster sche.SchedulerCluster, candidates *filter.StoreCandidates, region *core.RegionInfo, opController *operator.Controller) (*operator.Operator, error) {
// we will pick low leader score store firstly.
targets := candidates.Stores
sort.Slice(targets, func(i, j int) bool {
leaderSchedulePolicy := cluster.GetSchedulerConfig().GetLeaderSchedulePolicy()
opInfluence := opController.GetOpInfluence(cluster.GetBasicCluster())
kind := constant.NewScheduleKind(constant.LeaderKind, leaderSchedulePolicy)
iOp := opInfluence.GetStoreInfluence(targets[i].GetID()).ResourceProperty(kind)
jOp := opInfluence.GetStoreInfluence(targets[j].GetID()).ResourceProperty(kind)
return targets[i].LeaderScore(leaderSchedulePolicy, iOp) <
targets[j].LeaderScore(leaderSchedulePolicy, jOp)
})
var (
op *operator.Operator
err error
)
for _, target := range targets {
op, err = operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), operator.OpLeader)
if op != nil && err == nil {
return op, err
}
}
return op, err

Codecov / codecov/patch warning: added line pkg/schedule/schedulers/evict_leader.go#L406 was not covered by tests.
}
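
After sorting, createOperatorWithSort tries the candidates in ascending score order and returns the first transfer-leader operator it can actually build, falling back to the next store when creation fails. A stripped-down sketch of that fallback loop follows; buildOp is a hypothetical stand-in for operator.CreateTransferLeaderOperator.

package main

import (
	"errors"
	"fmt"
)

type op struct{ target uint64 }

// buildOp is a stand-in: creating an operator can fail for a given target
// (for example, the target peer cannot become leader), in which case the
// caller should try the next candidate.
func buildOp(target uint64) (*op, error) {
	if target == 3 {
		return nil, errors.New("store 3 cannot accept the leader")
	}
	return &op{target: target}, nil
}

// firstBuildable returns the first operator that can be created for the
// already score-sorted targets, or the last error if none can be created.
func firstBuildable(sortedTargets []uint64) (*op, error) {
	var (
		o   *op
		err error
	)
	for _, t := range sortedTargets {
		o, err = buildOp(t)
		if o != nil && err == nil {
			return o, nil
		}
	}
	return o, err
}

func main() {
	o, err := firstBuildable([]uint64{3, 2, 1}) // already sorted by score
	if err != nil {
		fmt.Println("no viable target:", err)
		return
	}
	fmt.Println("transfer leader to store", o.target) // store 2
}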

type evictLeaderHandler struct {
rd *render.Render
config *evictLeaderSchedulerConfig
30 changes: 30 additions & 0 deletions pkg/schedule/schedulers/evict_leader_test.go
@@ -137,3 +137,33 @@ func TestBatchEvict(t *testing.T) {
return len(ops) == 5
})
}

func TestEvictLeaderSelectsLowScoreStore(t *testing.T) {
re := require.New(t)
cancel, _, tc, oc := prepareSchedulersTest()
defer cancel()

// Add stores with different scores
tc.AddLeaderStore(1, 30) // store 1
tc.AddLeaderStore(2, 20) // store 2
tc.AddLeaderStore(3, 10) // store 3

// Add regions 1, 2, 3 with leaders in stores 1, 2, 3
tc.AddLeaderRegion(1, 1, 2, 3)

// Create EvictLeader scheduler
sl, err := CreateScheduler(types.EvictLeaderScheduler, oc, storage.NewStorageWithMemoryBackend(), ConfigSliceDecoder(types.EvictLeaderScheduler, []string{"1"}), func(string) error { return nil })
re.NoError(err)
re.True(sl.IsScheduleAllowed(tc))

// Schedule the operator and it should select store 3 to evict the leader, because it has the lowest score with 10.
ops, _ := sl.Schedule(tc, false)
re.Len(ops, 1)
operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{3})

// Schedule the operator and it should select store 2 to evict the leader, because it has the lowest score with 5.
tc.AddLeaderStore(2, 5)
ops, _ = sl.Schedule(tc, false)
re.Len(ops, 1)
operatorutil.CheckMultiTargetTransferLeader(re, ops[0], operator.OpLeader, 1, []uint64{2})
}
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/evict_slow_store.go
@@ -244,7 +244,7 @@ func (s *evictSlowStoreScheduler) cleanupEvictLeader(cluster sche.SchedulerClust
}

func (s *evictSlowStoreScheduler) schedulerEvictLeader(cluster sche.SchedulerCluster) []*operator.Operator {
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf)
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController)
}

// IsScheduleAllowed implements the Scheduler interface.
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/evict_slow_trend.go
@@ -357,7 +357,7 @@ func (s *evictSlowTrendScheduler) scheduleEvictLeader(cluster sche.SchedulerClus
return nil
}
storeSlowTrendEvictedStatusGauge.WithLabelValues(store.GetAddress(), strconv.FormatUint(store.GetID(), 10)).Set(1)
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf)
return scheduleEvictLeaderBatch(s.R, s.GetName(), cluster, s.conf, s.OpController)
}

// IsScheduleAllowed implements the Scheduler interface.
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/grant_hot_region.go
@@ -316,7 +316,7 @@
dstStore := &metapb.Peer{StoreId: destStoreIDs[i]}

if isLeader {
op, err = operator.CreateTransferLeaderOperator(s.GetName()+"-leader", cluster, srcRegion, dstStore.StoreId, []uint64{}, operator.OpLeader)
op, err = operator.CreateTransferLeaderOperator(s.GetName()+"-leader", cluster, srcRegion, dstStore.StoreId, operator.OpLeader)

Codecov / codecov/patch warning: added line pkg/schedule/schedulers/grant_hot_region.go#L319 was not covered by tests.
} else {
op, err = operator.CreateMovePeerOperator(s.GetName()+"-move", cluster, srcRegion, operator.OpRegion|operator.OpLeader, srcStore.GetID(), dstStore)
}
1 change: 0 additions & 1 deletion pkg/schedule/schedulers/hot_region.go
@@ -1475,7 +1475,6 @@ func (bs *balanceSolver) createOperator(region *core.RegionInfo, srcStoreID, dst
bs,
region,
dstStoreID,
[]uint64{},
operator.OpHotRegion)
} else {
srcPeer := region.GetStorePeer(srcStoreID) // checked in `filterHotPeers`
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/hot_region_test.go
@@ -150,7 +150,7 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) {
case movePeer:
op, err = operator.CreateMovePeerOperator("move-peer-test", tc, region, operator.OpAdmin, 2, &metapb.Peer{Id: region.GetID()*10000 + 1, StoreId: 4})
case transferLeader:
op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, []uint64{}, operator.OpAdmin)
op, err = operator.CreateTransferLeaderOperator("transfer-leader-test", tc, region, 2, operator.OpAdmin)
}
re.NoError(err)
re.NotNil(op)
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/label.go
@@ -102,7 +102,7 @@ func (s *labelScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) ([]*ope
continue
}

op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), []uint64{}, operator.OpLeader)
op, err := operator.CreateTransferLeaderOperator("label-reject-leader", cluster, region, target.GetID(), operator.OpLeader)
if err != nil {
log.Debug("fail to create transfer label reject leader operator", errs.ZapError(err))
return nil, nil
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/shuffle_leader.go
@@ -89,7 +89,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool)
shuffleLeaderNoFollowerCounter.Inc()
return nil, nil
}
op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, targetStore.GetID(), []uint64{}, operator.OpAdmin)
op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, targetStore.GetID(), operator.OpAdmin)
if err != nil {
log.Debug("fail to create shuffle leader operator", errs.ZapError(err))
return nil, nil
21 changes: 12 additions & 9 deletions pkg/schedule/schedulers/transfer_witness_leader.go
@@ -99,19 +99,22 @@
filters = append(filters, filter.NewExcludedFilter(name, nil, unhealthyPeerStores),
&filter.StoreStateFilter{ActionScope: name, TransferLeader: true, OperatorLevel: constant.Urgent})
candidates := filter.NewCandidates(r, cluster.GetFollowerStores(region)).FilterTarget(cluster.GetSchedulerConfig(), nil, nil, filters...)
// Compatible with old TiKV transfer leader logic.
target := candidates.RandomPick()
targets := candidates.PickAll()
// `targets` MUST contains `target`, so only needs to check if `target` is nil here.
if target == nil {
if len(candidates.Stores) == 0 {
transferWitnessLeaderNoTargetStoreCounter.Inc()
return nil, errors.New("no target store to schedule")
}
targetIDs := make([]uint64, 0, len(targets))
for _, t := range targets {
targetIDs = append(targetIDs, t.GetID())
// TODO: also add sort such as evict leader
var (
op *operator.Operator
err error
)
for _, target := range candidates.Stores {
op, err = operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), operator.OpLeader)
if op != nil && err == nil {
return op, err
}
}
return operator.CreateTransferLeaderOperator(name, cluster, region, target.GetID(), targetIDs, operator.OpWitnessLeader)
return op, err

Codecov / codecov/patch warning: added line pkg/schedule/schedulers/transfer_witness_leader.go#L117 was not covered by tests.
}

// RecvRegionInfo receives a checked region from coordinator
2 changes: 1 addition & 1 deletion pkg/utils/operatorutil/operator_check.go
@@ -53,7 +53,7 @@ func CheckMultiTargetTransferLeader(re *require.Assertions, op *operator.Operato
re.Equal(1, op.Len())
expectedOps := make([]any, 0, len(targetIDs))
for _, targetID := range targetIDs {
expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID, ToStores: targetIDs})
expectedOps = append(expectedOps, operator.TransferLeader{FromStore: sourceID, ToStore: targetID})
}
re.Contains(expectedOps, op.Step(0))
kind |= operator.OpLeader
2 changes: 1 addition & 1 deletion plugin/scheduler_example/evict_leader.go
@@ -230,7 +230,7 @@ func (s *evictLeaderScheduler) Schedule(cluster sche.SchedulerCluster, _ bool) (
if target == nil {
continue
}
op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, target.GetID(), []uint64{}, operator.OpLeader)
op, err := operator.CreateTransferLeaderOperator(s.GetName(), cluster, region, target.GetID(), operator.OpLeader)
if err != nil {
log.Debug("fail to create evict leader operator", errs.ZapError(err))
continue