YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/integration-tests/load_balancer_placement_policy-test.cc
Line
Count
Source (jump to first uncovered line)
1
// Copyright (c) YugaByte, Inc.
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
4
// in compliance with the License.  You may obtain a copy of the License at
5
//
6
// http://www.apache.org/licenses/LICENSE-2.0
7
//
8
// Unless required by applicable law or agreed to in writing, software distributed under the License
9
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
10
// or implied.  See the License for the specific language governing permissions and limitations
11
// under the License.
12
//
13
14
#include <gtest/gtest.h>
15
16
#include "yb/client/client.h"
17
#include "yb/client/schema.h"
18
#include "yb/client/table.h"
19
#include "yb/client/table_creator.h"
20
#include "yb/client/yb_table_name.h"
21
22
#include "yb/integration-tests/cluster_verifier.h"
23
#include "yb/integration-tests/external_mini_cluster.h"
24
#include "yb/integration-tests/test_workload.h"
25
#include "yb/integration-tests/yb_table_test_base.h"
26
27
#include "yb/master/master_client.proxy.h"
28
29
#include "yb/tools/yb-admin_client.h"
30
31
#include "yb/util/monotime.h"
32
#include "yb/util/net/net_fwd.h"
33
#include "yb/util/result.h"
34
#include "yb/util/test_macros.h"
35
36
using namespace std::literals;
37
38
namespace yb {
39
namespace integration_tests {
40
41
const auto kDefaultTimeout = 30000ms;
42
43
class LoadBalancerPlacementPolicyTest : public YBTableTestBase {
44
 protected:
45
5
  void SetUp() override {
46
5
    YBTableTestBase::SetUp();
47
48
5
    yb_admin_client_ = std::make_unique<tools::enterprise::ClusterAdminClient>(
49
5
        external_mini_cluster()->GetMasterAddresses(), kDefaultTimeout);
50
51
5
    ASSERT_OK(yb_admin_client_->Init());
52
5
  }
53
54
99
  bool use_external_mini_cluster() override { return true; }
55
56
14
  int num_tablets() override {
57
14
    return 4;
58
14
  }
59
60
5
  bool enable_ysql() override {
61
    // Do not create the transaction status table.
62
5
    return false;
63
5
  }
64
65
  void GetLoadOnTservers(const string tablename,
66
                         size_t num_tservers,
67
8
                         vector<int> *const out_load_per_tserver) {
68
8
    out_load_per_tserver->clear();
69
40
    for (size_t i = 0; i < num_tservers; ++i) {
70
32
      const int count = ASSERT_RESULT(GetLoadOnTserver(
71
32
          external_mini_cluster()->tablet_server(i), tablename));
72
32
      out_load_per_tserver->emplace_back(count);
73
32
    }
74
8
  }
75
76
32
  Result<uint32_t> GetLoadOnTserver(ExternalTabletServer* server, const string tablename) {
77
32
    auto proxy = GetMasterLeaderProxy<master::MasterClientProxy>();
78
32
    master::GetTableLocationsRequestPB req;
79
32
    req.mutable_table()->set_table_name(tablename);
80
32
    req.mutable_table()->mutable_namespace_()->set_name(table_name().namespace_name());
81
32
    master::GetTableLocationsResponsePB resp;
82
83
32
    rpc::RpcController rpc;
84
32
    rpc.set_timeout(kDefaultTimeout);
85
32
    RETURN_NOT_OK(proxy.GetTableLocations(req, &resp, &rpc));
86
87
32
    uint32_t count = 0;
88
32
    std::vector<string> replicas;
89
128
    for (const auto& loc : resp.tablet_locations()) {
90
432
      for (const auto& replica : loc.replicas()) {
91
432
        if (replica.ts_info().permanent_uuid() == server->instance_id().permanent_uuid()) {
92
104
          replicas.push_back(loc.tablet_id());
93
104
          count++;
94
104
        }
95
432
      }
96
128
    }
97
32
    LOG(INFO) << Format("For ts $0, table name $1 tablet count $2",
98
32
                        server->instance_id().permanent_uuid(), tablename, count);
99
32
    return count;
100
32
  }
101
102
5
  void CustomizeExternalMiniCluster(ExternalMiniClusterOptions* opts) override {
103
5
    opts->extra_tserver_flags.push_back("--placement_cloud=c");
104
5
    opts->extra_tserver_flags.push_back("--placement_region=r");
105
5
    opts->extra_tserver_flags.push_back("--placement_zone=z${index}");
106
5
    opts->extra_master_flags.push_back("--tserver_unresponsive_timeout_ms=5000");
107
5
  }
108
109
6
  void WaitForLoadBalancer() {
110
6
    ASSERT_OK(WaitFor([&]() -> Result<bool> {
111
6
      bool is_idle = VERIFY_RESULT(client_->IsLoadBalancerIdle());
112
6
      return !is_idle;
113
6
    },  kDefaultTimeout * 2, "IsLoadBalancerActive"));
114
115
6
    ASSERT_OK(WaitFor([&]() -> Result<bool> {
116
6
      return client_->IsLoadBalancerIdle();
117
6
    },  kDefaultTimeout * 4, "IsLoadBalancerIdle"));
118
6
  }
119
120
  void AddNewTserverToZone(
121
    const string& zone,
122
    const size_t expected_num_tservers,
123
5
    const string& placement_uuid = "") {
124
125
5
    std::vector<std::string> extra_opts;
126
5
    extra_opts.push_back("--placement_cloud=c");
127
5
    extra_opts.push_back("--placement_region=r");
128
5
    extra_opts.push_back("--placement_zone=" + zone);
129
130
5
    if (!placement_uuid.empty()) {
131
2
      extra_opts.push_back("--placement_uuid=" + placement_uuid);
132
2
    }
133
134
5
    ASSERT_OK(external_mini_cluster()->AddTabletServer(true, extra_opts));
135
3
    ASSERT_OK(
136
3
        external_mini_cluster()->WaitForTabletServerCount(expected_num_tservers, kDefaultTimeout));
137
3
  }
138
139
  void AddNewTserverToLocation(const string& cloud, const string& region,
140
                              const string& zone, const int expected_num_tservers,
141
3
                              const string& placement_uuid = "") {
142
143
3
    std::vector<std::string> extra_opts;
144
3
    extra_opts.push_back("--placement_cloud=" + cloud);
145
3
    extra_opts.push_back("--placement_region=" + region);
146
3
    extra_opts.push_back("--placement_zone=" + zone);
147
148
3
    if (!placement_uuid.empty()) {
149
0
      extra_opts.push_back("--placement_uuid=" + placement_uuid);
150
0
    }
151
152
3
    ASSERT_OK(external_mini_cluster()->AddTabletServer(true, extra_opts));
153
2
    ASSERT_OK(external_mini_cluster()->WaitForTabletServerCount(expected_num_tservers,
154
2
      kDefaultTimeout));
155
2
  }
156
157
  std::unique_ptr<tools::enterprise::ClusterAdminClient> yb_admin_client_;
158
};
159
160
1
TEST_F(LoadBalancerPlacementPolicyTest, CreateTableWithPlacementPolicyTest) {
  // Set cluster placement policy.
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z0,c.r.z1,c.r.z2", 3, ""));

  const string& create_custom_policy_table = "creation-placement-test";
  const yb::client::YBTableName placement_table(
    YQL_DATABASE_CQL, table_name().namespace_name(), create_custom_policy_table);

  yb::client::YBSchema schema;
  yb::client::YBSchemaBuilder schema_builder;
  schema_builder.AddColumn("k")->Type(BINARY)->NotNull()->HashPrimaryKey();
  ASSERT_OK(schema_builder.Build(&schema));

  // Give the new table a placement policy that differs from the cluster-wide
  // one: RF 2, with one replica each in z1 and z2 (nothing in z0).
  master::ReplicationInfoPB replication_info;
  replication_info.mutable_live_replicas()->set_num_replicas(2);
  auto add_zone_block = [&replication_info](const string& zone) {
    auto* placement_block = replication_info.mutable_live_replicas()->add_placement_blocks();
    auto* cloud_info = placement_block->mutable_cloud_info();
    cloud_info->set_placement_cloud("c");
    cloud_info->set_placement_region("r");
    cloud_info->set_placement_zone(zone);
    placement_block->set_min_num_replicas(1);
  };
  add_zone_block("z1");
  add_zone_block("z2");

  ASSERT_OK(NewTableCreator()->table_name(placement_table).schema(&schema).replication_info(
    replication_info).Create());

  int64 num_tservers = num_tablet_servers();
  vector<int> loads;
  GetLoadOnTservers(create_custom_policy_table, num_tservers, &loads);
  // The z0 tserver must hold no tablets of this table.
  ASSERT_EQ(loads[0], 0);
  // The z1 and z2 tservers must each hold all 4 tablets.
  ASSERT_EQ(loads[1], 4);
  ASSERT_EQ(loads[2], 4);

  // Modifying the placement info of a table that already carries a custom
  // placement policy should work as well.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(
    placement_table, "c.r.z0,c.r.z1,c.r.z2", 3, ""));
  WaitForLoadBalancer();

  // RF was raised to 3 and the policy now spans all three zones, so every
  // tserver should end up with all 4 tablets.
  GetLoadOnTservers(create_custom_policy_table, num_tservers, &loads);
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 4);
  }
}
215
216
1
TEST_F(LoadBalancerPlacementPolicyTest, PlacementPolicyTest) {
  // Set cluster placement policy.
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z0,c.r.z1,c.r.z2", 3, ""));

  // Bring up one extra tserver in zone 1.
  auto num_tservers = num_tablet_servers() + 1;
  AddNewTserverToZone("z1", num_tservers);

  WaitForLoadBalancer();

  // Create a second table that will get its own custom placement policy.
  const string& custom_policy_table = "placement-test";
  const yb::client::YBTableName placement_table(
    YQL_DATABASE_CQL, table_name().namespace_name(), custom_policy_table);
  ASSERT_OK(client_->CreateNamespaceIfNotExists(
    placement_table.namespace_name(),
    placement_table.namespace_type()));

  yb::client::YBSchema schema;
  yb::client::YBSchemaBuilder schema_builder;
  schema_builder.AddColumn("k")->Type(BINARY)->NotNull()->HashPrimaryKey();
  schema_builder.AddColumn("v")->Type(BINARY)->NotNull();
  ASSERT_OK(schema_builder.Build(&schema));

  ASSERT_OK(NewTableCreator()->table_name(placement_table).schema(&schema).Create());

  WaitForLoadBalancer();

  // Restrict the new table to zones z1 and z2 only.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(placement_table, "c.r.z1,c.r.z2", 3, ""));

  WaitForLoadBalancer();

  // Test 1: the modified placement must be reflected in replica placement.
  vector<int> loads;
  GetLoadOnTservers(custom_policy_table, num_tservers, &loads);
  // ts0 lives in c.r.z0, so it must carry nothing for this table.
  ASSERT_EQ(loads[0], 0);
  // The remaining tservers share the replicas evenly.
  ASSERT_EQ(loads[1], loads[2]);
  ASSERT_EQ(loads[2], loads[3]);

  // The table under the cluster-wide policy still spans every tserver.
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);
  for (size_t i = 0; i < num_tservers; ++i) {
    ASSERT_GT(loads[i], 0);
  }

  // Test 2: the custom policy must be honored as tservers are added.
  // Bring up one tserver each in z0 and z2.
  ++num_tservers;
  AddNewTserverToZone("z0", num_tservers);

  ++num_tservers;
  AddNewTserverToZone("z2", num_tservers);

  WaitForLoadBalancer();

  GetLoadOnTservers(custom_policy_table, num_tservers, &loads);
  for (size_t i = 0; i < num_tservers; ++i) {
    if (i == 0 || i == 4) {
      // The z0 tservers (ts0 and ts4) must stay empty for the custom table.
      ASSERT_EQ(loads[i], 0);
      continue;
    }
    // Every other tserver should carry some replicas.
    ASSERT_GT(loads[i], 0);
  }

  // Test 3: the custom policy must be honored as tservers are removed.
  ASSERT_OK(external_mini_cluster()->AddTServerToBlacklist(
    external_mini_cluster()->master(),
    external_mini_cluster()->tablet_server(4)));
  ASSERT_OK(external_mini_cluster()->AddTServerToBlacklist(
    external_mini_cluster()->master(),
    external_mini_cluster()->tablet_server(5)));
  WaitForLoadBalancer();

  num_tservers -= 2;
  GetLoadOnTservers(custom_policy_table, num_tservers, &loads);
  // ts0 in c.r.z0 must remain empty.
  ASSERT_EQ(loads[0], 0);
  // The rest share the replicas evenly.
  ASSERT_EQ(loads[1], loads[2]);
  ASSERT_EQ(loads[2], loads[3]);

  // The cluster-policy table still has replicas on all surviving tservers.
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);
  for (size_t i = 0; i < num_tservers; ++i) {
    ASSERT_GT(loads[i], 0);
  }
}
309
310
1
TEST_F(LoadBalancerPlacementPolicyTest, AlterPlacementDataConsistencyTest) {
  // Set cluster placement policy.
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z0,c.r.z1", 2, ""));

  // Run a sequential-write workload against a fresh table while placement
  // changes happen underneath it.
  const string& table = "placement-data-consistency-test";
  const yb::client::YBTableName placement_table(
    YQL_DATABASE_CQL, table_name().namespace_name(), table);

  TestWorkload workload(external_mini_cluster());
  workload.set_table_name(placement_table);
  workload.set_sequential_write(true);
  workload.Setup();
  workload.Start();

  // Widen the table's placement to add replicas on additional tservers.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(
      placement_table, "c.r.z0,c.r.z1,c.r.z2", 3, ""));
  WaitForLoadBalancer();

  // Each of the three tservers should now host exactly one replica.
  vector<int> loads;
  GetLoadOnTservers(table, num_tablet_servers(), &loads);
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 1);
  }

  // Now shrink the placement to fewer replicas on fewer tservers.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(placement_table, "c.r.z0", 1, ""));
  WaitForLoadBalancer();

  // The table is RF1 confined to zone 0: ts0 keeps one tablet, the other two
  // tservers must be empty.
  GetLoadOnTservers(table, num_tablet_servers(), &loads);
  ASSERT_EQ(loads[0], 1);
  ASSERT_EQ(loads[1], 0);
  ASSERT_EQ(loads[2], 0);

  // The data written while replicas moved around must still be intact.
  workload.StopAndJoin();
  auto rows_inserted = workload.rows_inserted();
  LOG(INFO) << "Number of rows inserted: " << rows_inserted;

  // Cross-check cluster consistency and the exact row count.
  ClusterVerifier cluster_verifier(external_mini_cluster());
  ASSERT_NO_FATALS(cluster_verifier.CheckCluster());
  ASSERT_NO_FATALS(cluster_verifier.CheckRowCount(
    placement_table, ClusterVerifier::EXACTLY, rows_inserted));
}
361
362
1
TEST_F(LoadBalancerPlacementPolicyTest, ModifyPlacementUUIDTest) {
  // Set cluster placement policy.
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z0,c.r.z1,c.r.z2", 3, ""));

  // Bring up 2 extra tservers that carry a custom placement uuid.
  auto num_tservers = num_tablet_servers() + 1;
  const string& random_placement_uuid = "19dfa091-2b53-434f-b8dc-97280a5f8831";
  AddNewTserverToZone("z1", num_tservers, random_placement_uuid);
  AddNewTserverToZone("z2", ++num_tservers, random_placement_uuid);

  vector<int> loads;
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  // Only the original 3 tservers (default placement uuid) should carry load;
  // the uuid-tagged newcomers get nothing.
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 4);
  }
  ASSERT_EQ(loads[3], 0);
  ASSERT_EQ(loads[4], 0);

  // Repoint the cluster config at the custom placement uuid, matching the two
  // new tservers.
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z1,c.r.z2", 2, random_placement_uuid));

  // Move the table onto the uuid-tagged tservers and verify the move happened.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(
    table_name(), "c.r.z1,c.r.z2", 2, random_placement_uuid));
  WaitForLoadBalancer();

  // The uuid-tagged tservers must now host the tablets.
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);
  ASSERT_EQ(loads[3], 4);
  ASSERT_EQ(loads[4], 4);

  // Same placement uuid, but narrower zones and lower replication factor.
  ASSERT_OK(yb_admin_client_->ModifyTablePlacementInfo(
    table_name(), "c.r.z2", 1, random_placement_uuid));
  WaitForLoadBalancer();

  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);
  // ts3 is in zone1 so it drops to 0 tablets; ts4 is in zone2 and keeps all 4.
  ASSERT_EQ(loads[3], 0);
  ASSERT_EQ(loads[4], 4);

}
410
411
1
TEST_F(LoadBalancerPlacementPolicyTest, PrefixPlacementTest) {
  int num_tservers = 3;

  // Test 1.
  // Set a prefix (cloud.region) placement policy for this region.
  LOG(INFO) << "With c.r,c.r,c.r and num_replicas=3 as placement.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r,c.r,c.r", 3, ""));
  // No movement is expected, so no need to wait for the load balancer.

  // Validate that min_num_replicas was recorded correctly.
  int min_num_replicas;
  ASSERT_OK(external_mini_cluster()->GetMinReplicaCountForPlacementBlock(
    external_mini_cluster()->master(), "c", "r", "", &min_num_replicas));

  ASSERT_EQ(min_num_replicas, 3);

  // Load should spread evenly over the 3 TS in z0, z1 and z2: with 4 tablets
  // at RF 3, each TS hosts 4 replicas.
  vector<int> loads;
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 4);
  }

  // Bring up 3 tservers in a second region (c.r2.z0, c.r2.z1, c.r2.z2).
  string cloud = "c", region = "r2", zone = "z0";
  AddNewTserverToLocation(cloud, region, zone, ++num_tservers);

  zone = "z1";
  AddNewTserverToLocation(cloud, region, zone, ++num_tservers);

  zone = "z2";
  AddNewTserverToLocation(cloud, region, zone, ++num_tservers);
  // No movement anticipated yet, so skip the load-balancer wait.
  LOG(INFO) << "Added 3 TS to Region r2.";

  // Test 2.
  // Shift the whole load to region r2. From the code's perspective this
  // exercises HandleAddIfMissingPlacement() and HandleRemoveReplica(): each
  // replica in r is first re-created in r2 and then removed from r.
  LOG(INFO) << "With c.r2,c.r2,c.r2 and num_replicas=3 as placement.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r2,c.r2,c.r2", 3, ""));
  WaitForLoadBalancer();

  // All load moves to the 3 TS in r2 (4 replicas each); the TS in r are empty.
  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 4);
  }

  // Test 3.
  // Shift everything back to region r, with min_num_replicas=1 while keeping
  // 3 total replicas. This exercises HandleAddIfMissingPlacement(),
  // HandleAddIfWrongPlacement() and HandleRemoveReplica(): the second and
  // third replicas move to r not because of a missing placement (min is 1)
  // but because of a wrong placement.
  LOG(INFO) << "With c.r and num_replicas=3 as placement.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r", 3, ""));
  WaitForLoadBalancer();

  // All load is back on the 3 TS in r (4 replicas each); r2 is empty.
  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(loads[i], 4);
  }

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  // Test 4.
  // Drop num_replicas to 2 under the same placement. This exercises the
  // over-replication handling: one replica per tablet is removed.
  LOG(INFO) << "With c.r,c.r and num_replicas=2 as placement.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r,c.r", 2, ""));
  WaitForLoadBalancer();

  // 2*4 = 8 replicas total, split across the 3 TS in r as some permutation of
  // 3+3+2; r2 stays empty.
  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  int total_load = 0;
  for (int i = 0; i < 3; ++i) {
    ASSERT_GE(loads[i], 2);
    total_load += loads[i];
  }

  ASSERT_EQ(total_load, 8);

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  // Test 5.
  // Blacklist one TS in region r. This exercises the blacklist branch of
  // CanSelectWrongReplicaToMove().
  LOG(INFO) << "With c.r,c.r and num_replicas=2 as placement and a TS in region r blacklisted.";
  ASSERT_OK(external_mini_cluster()->AddTServerToBlacklist(
                                          external_mini_cluster()->master(),
                                          external_mini_cluster()->tablet_server(2)));

  WaitForLoadBalancer();
  LOG(INFO) << "Successfully blacklisted ts3.";

  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  // The 8 replicas now sit 4+4 on the two remaining TS in r; r2 stays empty.
  for (int i = 0; i < 2; ++i) {
    ASSERT_EQ(loads[i], 4);
  }

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  // Test 6.
  // Add a TS in region r, zone 2.
  LOG(INFO) << "With c.r,c.r and num_replicas=2 as placement, " <<
                "a blacklisted TS in region r and a new TS added in region r.";

  cloud = "c", region = "r", zone = "z2";
  AddNewTserverToLocation(cloud, region, zone, ++num_tservers);
  WaitForLoadBalancer();
  LOG(INFO) << "Successfully added a TS in region r.";

  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  // The 8 replicas re-split as some permutation of 3+3+2; r2 stays empty.
  total_load = 0;
  for (int i = 0; i < 2; ++i) {
    total_load += loads[i];
    ASSERT_GE(loads[i], 2);
  }

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  ASSERT_GE(loads[6], 2);

  total_load += loads[6];
  ASSERT_EQ(total_load, 8);

  // Test 7.
  // Raise RF back to 3 with the same placement. A replica gets added even
  // though no placement is missing.
  LOG(INFO) << "With c.r,c.r and num_replicas=3 as placement, " <<
                "a blacklisted TS in region r and a new TS added in region r.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r,c.r", 3, ""));
  WaitForLoadBalancer();

  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  // 3*4 = 12 replicas split 4+4+4 across the non-blacklisted TS in r.
  for (int i = 0; i < 2; ++i) {
    ASSERT_EQ(loads[i], 4);
  }

  for (int i = 3; i < 6; ++i) {
    ASSERT_EQ(loads[i], 0);
  }

  ASSERT_EQ(loads[6], 4);

  // Test 8.
  // Loosen the placement to the cloud alone (c.*.*).
  LOG(INFO) << "With c,c,c and num_replicas=3 as placement, " <<
                "a blacklisted TS in region r and a new TS added in region r.";

  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c,c,c", 3, ""));
  WaitForLoadBalancer();

  loads.clear();
  GetLoadOnTservers(table_name().table_name(), num_tservers, &loads);

  // 3*4 = 12 replicas over the 6 usable TS (3 in r minus the blacklisted one,
  // plus the new one, plus 3 in r2) land 2 apiece.
  for (int i = 0; i < 2; ++i) {
    ASSERT_EQ(loads[i], 2);
  }

  for (int i = 3; i <= 6; ++i) {
    ASSERT_EQ(loads[i], 2);
  }

  // Some cheap tests for validating user input.
  // Test 9 - Only prefixes allowed.
  LOG(INFO) << "With c..z0,c.r.z0,c.r2.z1 as placement";
  ASSERT_NOK(yb_admin_client_->ModifyPlacementInfo("c..z0,c.r.z0,c.r2.z1", 3, ""));

  // Test 10 - No two prefixes should overlap (-ve test case).
  LOG(INFO) << "With c.r2,c.r2.z0,c.r as placement";
  ASSERT_NOK(yb_admin_client_->ModifyPlacementInfo("c.r2,c.r2.z0,c.r", 3, ""));

  // Test 11 - No two prefixes should overlap (-ve test case).
  LOG(INFO) << "With c,c.r2.z0,c.r as placement";
  ASSERT_NOK(yb_admin_client_->ModifyPlacementInfo("c,c.r2.z0,c.r", 3, ""));

  // Test 12 - No two prefixes should overlap (+ve test case).
  LOG(INFO) << "With c.r.z0,c.r2.z0,c.r.z2 as placement";
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo("c.r.z0,c.r2.z0,c.r.z2", 3, ""));

  // Test 13 - All CRZ empty allowed.
  LOG(INFO) << "With ,, as placement";
  ASSERT_OK(yb_admin_client_->ModifyPlacementInfo(",,", 3, ""));

  // FIN: Thank you all for watching, have a great day ahead!
}
649
650
} // namespace integration_tests
651
} // namespace yb