YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/master/catalog_manager-test.cc
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//

#include "yb/master/catalog_manager-test_base.h"
#include "yb/master/master_client.pb.h"

namespace yb {
namespace master {

using std::shared_ptr;
using std::make_shared;
using strings::Substitute;

class TestLoadBalancerCommunity : public TestLoadBalancerBase<ClusterLoadBalancerMocked> {
  typedef TestLoadBalancerBase<ClusterLoadBalancerMocked> super;
 public:
  TestLoadBalancerCommunity(ClusterLoadBalancerMocked* cb, const string& table_id) :
      super(cb, table_id) {}

  void TestAlgorithm() {
    super::TestAlgorithm();
  }
};

// Test of the tablet assignment algorithm for splits done at table creation time.
// This tests that when we define a split, the tablet lands on the expected
// side of the split, i.e. it's a closed interval on the start key and an open
// interval on the end key (non-inclusive).
TEST(TableInfoTest, TestAssignmentRanges) {
  const string table_id = CURRENT_TEST_NAME();
  scoped_refptr<TableInfo> table(new TableInfo(table_id));
  vector<scoped_refptr<TabletInfo>> tablets;

  // Define & create the splits.
  vector<string> split_keys = {"a", "b", "c"};  // The keys we split on.
  const size_t kNumSplits = split_keys.size();
  const int kNumReplicas = 1;

  CreateTable(split_keys, kNumReplicas, true, table.get(), &tablets);

  ASSERT_EQ(table->LockForRead()->pb.replication_info().live_replicas().num_replicas(),
            kNumReplicas) << "Invalid replicas for created table.";

  // Ensure they give us what we are expecting.
  for (size_t i = 0; i <= kNumSplits; i++) {
    // Calculate the tablet id and start key.
    const string& start_key = (i == 0) ? "" : split_keys[i - 1];
    const string& end_key = (i == kNumSplits) ? "" : split_keys[i];
    string tablet_id = Substitute("tablet-$0-$1", start_key, end_key);

    // Query using the start key.
    GetTableLocationsRequestPB req;
    req.set_max_returned_locations(1);
    req.mutable_table()->mutable_table_name()->assign(table_id);
    req.mutable_partition_key_start()->assign(start_key);
    vector<scoped_refptr<TabletInfo> > tablets_in_range;
    table->GetTabletsInRange(&req, &tablets_in_range);

    // Only one tablet should own this key.
    ASSERT_EQ(1, tablets_in_range.size());
    // The tablet with range start key matching 'start_key' should be the owner.
    ASSERT_EQ(tablet_id, (*tablets_in_range.begin())->tablet_id());
    LOG(INFO) << "Key " << start_key << " found in tablet " << tablet_id;
  }

  for (const TabletInfoPtr& tablet : tablets) {
    auto lock = tablet->LockForWrite();
    ASSERT_TRUE(table->RemoveTablet(tablet->id()));
  }
}

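// Verify that a tablet server's recent-replica-creation load starts near 1.0 after an
// increment and decays toward zero over time.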
TEST(TestTSDescriptor, TestReplicaCreationsDecay) {
  TSDescriptor ts("test");
  ASSERT_EQ(0, ts.RecentReplicaCreations());
  ts.IncrementRecentReplicaCreations();

  // The load should start at close to 1.0.
  double val_a = ts.RecentReplicaCreations();
  ASSERT_NEAR(1.0, val_a, 0.05);

  // After 10ms it should have dropped a bit, but still be close to 1.0.
  SleepFor(MonoDelta::FromMilliseconds(10));
  double val_b = ts.RecentReplicaCreations();
  ASSERT_LT(val_b, val_a);
  ASSERT_NEAR(0.99, val_a, 0.05);

  if (AllowSlowTests()) {
    // After 10 seconds, we should have dropped to 0.5^(10/60) = 0.891
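    // (The 0.5^(t / 60) form implies a 60-second half-life: the expected value is
    // load(0) * 0.5^(t / 60), and 0.5^(10 / 60) ≈ 0.891.)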
    SleepFor(MonoDelta::FromSeconds(10));
    ASSERT_NEAR(0.891, ts.RecentReplicaCreations(), 0.05);
  }
}

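// Run the shared load balancer algorithm test (TestLoadBalancerBase::TestAlgorithm) against the
// mocked cluster load balancer.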
TEST(TestLoadBalancerCommunity, TestLoadBalancerAlgorithm) {
  const TableId table_id = CURRENT_TEST_NAME();
  auto options = make_shared<yb::master::Options>();
  auto cb = make_shared<ClusterLoadBalancerMocked>(options.get());
  auto lb = make_shared<TestLoadBalancerCommunity>(cb.get(), table_id);
  lb->TestAlgorithm();
}

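// Five tablet servers spread across three zones (three in "a", one each in "b" and "c"):
// GetPerZoneTSDesc should group them by zone and IsLoadBalanced should pass.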
TEST(TestCatalogManager, TestLoadCountMultiAZ) {
  std::shared_ptr<TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr<TSDescriptor> ts1 = SetupTS("1111", "b");
  std::shared_ptr<TSDescriptor> ts2 = SetupTS("2222", "c");
  std::shared_ptr<TSDescriptor> ts3 = SetupTS("3333", "a");
  std::shared_ptr<TSDescriptor> ts4 = SetupTS("4444", "a");
  ts0->set_num_live_replicas(6);
  ts1->set_num_live_replicas(17);
  ts2->set_num_live_replicas(19);
  ts3->set_num_live_replicas(6);
  ts4->set_num_live_replicas(6);
  TSDescriptorVector ts_descs = {ts0, ts1, ts2, ts3, ts4};

  ZoneToDescMap zone_to_ts;
  ASSERT_OK(CatalogManagerUtil::GetPerZoneTSDesc(ts_descs, &zone_to_ts));
  ASSERT_EQ(3, zone_to_ts.size());
  ASSERT_EQ(3, zone_to_ts.find("aws:us-west-1:a")->second.size());
  ASSERT_EQ(1, zone_to_ts.find("aws:us-west-1:b")->second.size());
  ASSERT_EQ(1, zone_to_ts.find("aws:us-west-1:c")->second.size());

  ASSERT_OK(CatalogManagerUtil::IsLoadBalanced(ts_descs));
}

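// Five tablet servers in a single zone with 4-6 replicas each: all land in one zone group and
// IsLoadBalanced should pass.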
TEST(TestCatalogManager, TestLoadCountSingleAZ) {
  std::shared_ptr<TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr<TSDescriptor> ts1 = SetupTS("1111", "a");
  std::shared_ptr<TSDescriptor> ts2 = SetupTS("2222", "a");
  std::shared_ptr<TSDescriptor> ts3 = SetupTS("3333", "a");
  std::shared_ptr<TSDescriptor> ts4 = SetupTS("4444", "a");
  ts0->set_num_live_replicas(4);
  ts1->set_num_live_replicas(5);
  ts2->set_num_live_replicas(6);
  ts3->set_num_live_replicas(5);
  ts4->set_num_live_replicas(4);
  TSDescriptorVector ts_descs = {ts0, ts1, ts2, ts3, ts4};

  ZoneToDescMap zone_to_ts;
  ASSERT_OK(CatalogManagerUtil::GetPerZoneTSDesc(ts_descs, &zone_to_ts));
  ASSERT_EQ(1, zone_to_ts.size());
  ASSERT_EQ(5, zone_to_ts.find("aws:us-west-1:a")->second.size());

  ASSERT_OK(CatalogManagerUtil::IsLoadBalanced(ts_descs));
}

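// Replica counts of 4, 50 and 16 should make IsLoadBalanced fail.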
TEST(TestCatalogManager, TestLoadNotBalanced) {
  std::shared_ptr <TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr <TSDescriptor> ts1 = SetupTS("1111", "a");
  std::shared_ptr <TSDescriptor> ts2 = SetupTS("2222", "c");
  ts0->set_num_live_replicas(4);
  ts1->set_num_live_replicas(50);
  ts2->set_num_live_replicas(16);
  TSDescriptorVector ts_descs = {ts0, ts1, ts2};

  ASSERT_NOK(CatalogManagerUtil::IsLoadBalanced(ts_descs));
}

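// Replication factor greater than the number of zones (three tablet servers in two zones),
// with equal load on every server: IsLoadBalanced should still pass.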
TEST(TestCatalogManager, TestLoadBalancedRFgtAZ) {
  std::shared_ptr <TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr <TSDescriptor> ts1 = SetupTS("1111", "b");
  std::shared_ptr <TSDescriptor> ts2 = SetupTS("2222", "b");
  ts0->set_num_live_replicas(8);
  ts1->set_num_live_replicas(8);
  ts2->set_num_live_replicas(8);
  TSDescriptorVector ts_descs = {ts0, ts1, ts2};

  ZoneToDescMap zone_to_ts;
  ASSERT_OK(CatalogManagerUtil::GetPerZoneTSDesc(ts_descs, &zone_to_ts));
  ASSERT_EQ(2, zone_to_ts.size());
  ASSERT_EQ(1, zone_to_ts.find("aws:us-west-1:a")->second.size());
  ASSERT_EQ(2, zone_to_ts.find("aws:us-west-1:b")->second.size());

  ASSERT_OK(CatalogManagerUtil::IsLoadBalanced(ts_descs));
}

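// One tablet server in zone "a" (32 replicas) and three in zone "b" (21-22 each):
// load is compared within each zone, so IsLoadBalanced should still pass.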
TEST(TestCatalogManager, TestLoadBalancedPerAZ) {
  std::shared_ptr <TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr <TSDescriptor> ts1 = SetupTS("1111", "b");
  std::shared_ptr <TSDescriptor> ts2 = SetupTS("2222", "b");
  std::shared_ptr <TSDescriptor> ts3 = SetupTS("3333", "b");
  ts0->set_num_live_replicas(32);
  ts1->set_num_live_replicas(22);
  ts2->set_num_live_replicas(21);
  ts3->set_num_live_replicas(21);
  TSDescriptorVector ts_descs = {ts0, ts1, ts2, ts3};

  ZoneToDescMap zone_to_ts;
  ASSERT_OK(CatalogManagerUtil::GetPerZoneTSDesc(ts_descs, &zone_to_ts));
  ASSERT_EQ(2, zone_to_ts.size());
  ASSERT_EQ(1, zone_to_ts.find("aws:us-west-1:a")->second.size());
  ASSERT_EQ(3, zone_to_ts.find("aws:us-west-1:b")->second.size());

  ASSERT_OK(CatalogManagerUtil::IsLoadBalanced(ts_descs));
}

TEST(TestCatalogManager, TestLeaderLoadBalanced) {
  // AreLeadersOnPreferredOnly should always return true.
  // Note that this is essentially using transaction_tables_use_preferred_zones = true
  ReplicationInfoPB replication_info;
  SetupClusterConfig({"a", "b", "c"}, &replication_info);

  std::shared_ptr<TSDescriptor> ts0 = SetupTS("0000", "a");
  std::shared_ptr<TSDescriptor> ts1 = SetupTS("1111", "b");
  std::shared_ptr<TSDescriptor> ts2 = SetupTS("2222", "c");

  ASSERT_TRUE(ts0->IsAcceptingLeaderLoad(replication_info));
  ASSERT_TRUE(ts1->IsAcceptingLeaderLoad(replication_info));
  ASSERT_TRUE(ts2->IsAcceptingLeaderLoad(replication_info));

  TSDescriptorVector ts_descs = {ts0, ts1, ts2};

  ts0->set_leader_count(24);
  ts1->set_leader_count(0);
  ts2->set_leader_count(0);
  ASSERT_OK(CatalogManagerUtil::AreLeadersOnPreferredOnly(ts_descs, replication_info));

  ts0->set_leader_count(10);
  ts1->set_leader_count(8);
  ts2->set_leader_count(6);
  ASSERT_OK(CatalogManagerUtil::AreLeadersOnPreferredOnly(ts_descs, replication_info));

  ts0->set_leader_count(9);
  ts1->set_leader_count(8);
  ts2->set_leader_count(7);
  ASSERT_OK(CatalogManagerUtil::AreLeadersOnPreferredOnly(ts_descs, replication_info));

  ts0->set_leader_count(8);
  ts1->set_leader_count(8);
  ts2->set_leader_count(8);
  ASSERT_OK(CatalogManagerUtil::AreLeadersOnPreferredOnly(ts_descs, replication_info));
}

TEST(TestCatalogManager, TestGetPlacementUuidFromRaftPeer) {
  // Test that a voter peer is assigned the live placement.
  ReplicationInfoPB replication_info;
  SetupClusterConfigWithReadReplicas({"a", "b", "c"}, {{"d"}}, &replication_info);
  consensus::RaftPeerPB raft_peer;
  SetupRaftPeer(consensus::PeerMemberType::VOTER, "a", &raft_peer);
  ASSERT_EQ(kLivePlacementUuid, ASSERT_RESULT(
      CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer)));
  SetupRaftPeer(consensus::PeerMemberType::PRE_VOTER, "b", &raft_peer);
  ASSERT_EQ(kLivePlacementUuid, ASSERT_RESULT(
      CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer)));

  // Test that an observer peer is assigned to the rr placement.
  SetupRaftPeer(consensus::PeerMemberType::OBSERVER, "d", &raft_peer);
  ASSERT_EQ(Format(kReadReplicaPlacementUuidPrefix, 0), ASSERT_RESULT(
      CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer)));

  // Now test multiple rr placements.
  SetupClusterConfigWithReadReplicas({"a", "b", "c"}, {{"d"}, {"e"}}, &replication_info);
  SetupRaftPeer(consensus::PeerMemberType::PRE_OBSERVER, "d", &raft_peer);
  ASSERT_EQ(Format(kReadReplicaPlacementUuidPrefix, 0), ASSERT_RESULT(
      CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer)));
  SetupRaftPeer(consensus::PeerMemberType::OBSERVER, "e", &raft_peer);
  ASSERT_EQ(Format(kReadReplicaPlacementUuidPrefix, 1), ASSERT_RESULT(
      CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer)));

  // Test that a peer with invalid cloud info returns an error.
  SetupRaftPeer(consensus::PeerMemberType::PRE_OBSERVER, "c", &raft_peer);
  ASSERT_NOK(CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer));

  // Test that a cluster config with rr placements sharing the same cloud info returns an error.
  SetupClusterConfigWithReadReplicas({"a", "b", "c"}, {{"d"}, {"d"}}, &replication_info);
  SetupRaftPeer(consensus::PeerMemberType::OBSERVER, "d", &raft_peer);
  ASSERT_NOK(CatalogManagerUtil::GetPlacementUuidFromRaftPeer(replication_info, raft_peer));
}

namespace {

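// Set the tablet's SysTabletsEntryPB state under a write lock and commit the change.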
void SetTabletState(TabletInfo* tablet, const SysTabletsEntryPB::State& state) {
  auto lock = tablet->LockForWrite();
  lock.mutable_data()->pb.set_state(state);
  lock.Commit();
}

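// Produce a key that sorts strictly between start_key and end_key: append half of end_key's
// next byte when start_key is shorter, otherwise append "m"; the CHECKs enforce the ordering.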
const std::string GetSplitKey(const std::string& start_key, const std::string& end_key) {
  const auto split_key = start_key.length() < end_key.length()
      ? start_key + static_cast<char>(end_key[start_key.length()] >> 1)
      : start_key + "m";

  CHECK_LT(start_key, split_key) << " end_key: " << end_key;
  if (!end_key.empty()) {
    CHECK_LT(split_key, end_key) << " start_key: " << start_key;
  }

  return split_key;
}

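// Split source_tablet's partition at GetSplitKey() and return the two child TabletInfos,
// each with split_depth incremented by one.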
std::array<scoped_refptr<TabletInfo>, kNumSplitParts> SplitTablet(
    const scoped_refptr<TabletInfo>& source_tablet) {
  auto lock = source_tablet->LockForRead();
  const auto partition = lock->pb.partition();

  const auto split_key =
      GetSplitKey(partition.partition_key_start(), partition.partition_key_end());

  auto child1 = CreateTablet(
      source_tablet->table(), source_tablet->tablet_id() + ".1", partition.partition_key_start(),
      split_key, lock->pb.split_depth() + 1);
  auto child2 = CreateTablet(
      source_tablet->table(), source_tablet->tablet_id() + ".2", split_key,
      partition.partition_key_end(), lock->pb.split_depth() + 1);
  return { child1, child2 };
}

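// Split each tablet in tablets_to_split, appending the children to post_split_tablets.
// A parent is deletable only once its children are RUNNING, and not after it is already DELETED.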
void SplitAndDeleteTablets(const TabletInfos& tablets_to_split, TabletInfos* post_split_tablets) {
  for (const auto& source_tablet : tablets_to_split) {
    ASSERT_NOK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(source_tablet));
    auto child_tablets = SplitTablet(source_tablet);
    for (const auto& child : child_tablets) {
      LOG(INFO) << "Child tablet " << child->tablet_id()
                << " partition: " << AsString(child->LockForRead()->pb.partition())
                << " state: "
                << SysTabletsEntryPB_State_Name(child->LockForRead()->pb.state());
      post_split_tablets->push_back(child);
    }
    SetTabletState(child_tablets[1].get(), SysTabletsEntryPB::CREATING);
    // We shouldn't be able to delete the source tablet while its 2nd child is still creating.
    ASSERT_NOK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(source_tablet));
    SetTabletState(child_tablets[1].get(), SysTabletsEntryPB::RUNNING);
    ASSERT_OK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(source_tablet));
    SetTabletState(source_tablet.get(), SysTabletsEntryPB::DELETED);
    ASSERT_NOK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(source_tablet));
  }
}

} // namespace

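// End-to-end check of CheckIfCanDeleteSingleTablet across two levels of tablet splits.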
TEST(TestCatalogManager, CheckIfCanDeleteSingleTablet) {
  const string table_id = CURRENT_TEST_NAME();
  scoped_refptr<TableInfo> table(new TableInfo(table_id));
  TabletInfos pre_split_tablets;

  const std::vector<std::string> pre_split_keys = {"a", "b", "c"};
  const int kNumReplicas = 1;

  CreateTable(pre_split_keys, kNumReplicas, true, table.get(), &pre_split_tablets);

  for (const auto& tablet : pre_split_tablets) {
    ASSERT_NOK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(tablet));
  }

  TabletInfos first_level_splits;
  NO_FATALS(SplitAndDeleteTablets(pre_split_tablets, &first_level_splits));

  for (const auto& tablet : pre_split_tablets) {
    SetTabletState(tablet.get(), SysTabletsEntryPB::RUNNING);
  }

  TabletInfos second_level_splits;
  NO_FATALS(SplitAndDeleteTablets(first_level_splits, &second_level_splits));

  // We should be able to delete pre split tablets covered by 2nd level split tablets.
  for (const auto& source_tablet : pre_split_tablets) {
    ASSERT_OK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(source_tablet));
  }
}

} // namespace master
} // namespace yb