YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/tools/yb-admin-test.cc
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied.  See the License for the specific language governing permissions and limitations
// under the License.
//
// Tests for the yb-admin command-line tool.

#include <regex>

#include <boost/algorithm/string.hpp>
#include <gtest/gtest.h>

#include "yb/client/client.h"
#include "yb/client/schema.h"
#include "yb/client/table_creator.h"

#include "yb/common/json_util.h"

#include "yb/gutil/map-util.h"
#include "yb/gutil/strings/substitute.h"

#include "yb/integration-tests/cluster_verifier.h"
#include "yb/integration-tests/cql_test_util.h"
#include "yb/integration-tests/external_mini_cluster.h"
#include "yb/integration-tests/cluster_itest_util.h"
#include "yb/integration-tests/test_workload.h"

#include "yb/master/catalog_entity_info.h"
#include "yb/master/master_defaults.h"
#include "yb/master/master_client.pb.h"

#include "yb/tools/admin-test-base.h"

#include "yb/util/format.h"
#include "yb/util/jsonreader.h"
#include "yb/util/net/net_util.h"
#include "yb/util/port_picker.h"
#include "yb/util/random_util.h"
#include "yb/util/status_format.h"
#include "yb/util/string_util.h"
#include "yb/util/subprocess.h"
#include "yb/util/test_util.h"

using namespace std::literals;

namespace yb {
namespace tools {

using client::YBClient;
using client::YBClientBuilder;
using client::YBTableName;
using client::YBSchema;
using client::YBSchemaBuilder;
using client::YBTableCreator;
using client::YBTableType;
using std::shared_ptr;
using std::vector;
using std::string;
using std::unordered_map;
using itest::TabletServerMap;
using itest::TServerDetails;
using strings::Substitute;

namespace {

//  Helper to check hosts list by requesting cluster config via yb-admin and parsing its output:
//
//  Config:
//  version: 1
//  server_blacklist {
//    hosts {
//      host: "node1"
//      port: 9100
//    }
//    hosts {
//      host: "node2"
//      port: 9100
//    }
//    initial_replica_load: 0
//  }
//
class BlacklistChecker {
 public:
  BlacklistChecker(const string& yb_admin_exe, const string& master_address) :
      args_{yb_admin_exe, "-master_addresses", master_address, "get_universe_config"} {
  }

  CHECKED_STATUS operator()(const vector<HostPort>& servers) const {
    string out;
    RETURN_NOT_OK(Subprocess::Call(args_, &out));
    boost::erase_all(out, "\n");
    JsonReader reader(out);

    vector<const rapidjson::Value *> blacklistEntries;
    const rapidjson::Value *blacklistRoot;
    RETURN_NOT_OK(reader.Init());
    RETURN_NOT_OK(
        reader.ExtractObject(reader.root(), "serverBlacklist", &blacklistRoot));
    RETURN_NOT_OK(
        reader.ExtractObjectArray(blacklistRoot, "hosts", &blacklistEntries));

    for (const rapidjson::Value *entry : blacklistEntries) {
      std::string host;
      int32_t port;
      RETURN_NOT_OK(reader.ExtractString(entry, "host", &host));
      RETURN_NOT_OK(reader.ExtractInt32(entry, "port", &port));
      HostPort blacklistServer(host, port);
      if (std::find(servers.begin(), servers.end(), blacklistServer) ==
          servers.end()) {
        return STATUS_FORMAT(NotFound,
                             "Item $0 not found in list of expected hosts $1",
                             blacklistServer, servers);
      }
    }

    if (blacklistEntries.size() != servers.size()) {
      return STATUS_FORMAT(NotFound, "$0 items expected but $1 found",
                           servers.size(), blacklistEntries.size());
    }

    return Status::OK();
  }

 private:
  vector<string> args_;
};

} // namespace

class AdminCliTest : public AdminTestBase {
};

// Test yb-admin config change while running a workload.
// 1. Instantiate external mini cluster with 3 TS.
// 2. Create table with 2 replicas.
// 3. Use the yb-admin CLI to invoke a config change.
// 4. Wait until the new server bootstraps.
// 5. Profit!
TEST_F(AdminCliTest, TestChangeConfig) {
  FLAGS_num_tablet_servers = 3;
  FLAGS_num_replicas = 2;

  std::vector<std::string> master_flags = {
    "--catalog_manager_wait_for_new_tablets_to_elect_leader=false"s,
    "--replication_factor=2"s,
    "--use_create_table_leader_hint=false"s,
  };
  std::vector<std::string> ts_flags = {
    "--enable_leader_failure_detection=false"s,
  };
  BuildAndStart(ts_flags, master_flags);

  vector<TServerDetails*> tservers = TServerDetailsVector(tablet_servers_);
  ASSERT_EQ(FLAGS_num_tablet_servers, tservers.size());

  itest::TabletServerMapUnowned active_tablet_servers;
  auto iter = tablet_replicas_.find(tablet_id_);
  TServerDetails* leader = iter->second;
  TServerDetails* follower = (++iter)->second;
  InsertOrDie(&active_tablet_servers, leader->uuid(), leader);
  InsertOrDie(&active_tablet_servers, follower->uuid(), follower);

  TServerDetails* new_node = nullptr;
  for (TServerDetails* ts : tservers) {
    if (!ContainsKey(active_tablet_servers, ts->uuid())) {
      new_node = ts;
      break;
    }
  }
  ASSERT_TRUE(new_node != nullptr);

  int cur_log_index = 0;
  // Elect the leader (still only a consensus config size of 2).
  ASSERT_OK(StartElection(leader, tablet_id_, MonoDelta::FromSeconds(10)));
  ASSERT_OK(WaitUntilCommittedOpIdIndexIs(++cur_log_index, leader, tablet_id_,
                                          MonoDelta::FromSeconds(30)));
  ASSERT_OK(WaitForServersToAgree(MonoDelta::FromSeconds(30), active_tablet_servers,
                                  tablet_id_, 1));

  TestWorkload workload(cluster_.get());
  workload.set_table_name(kTableName);
  workload.set_timeout_allowed(true);
  workload.set_write_timeout_millis(10000);
  workload.set_num_write_threads(1);
  workload.set_write_batch_size(1);
  workload.set_sequential_write(true);
  workload.Setup();
  workload.Start();

  // Wait until the Master knows about the leader tserver.
  TServerDetails* master_observed_leader;
  ASSERT_OK(GetLeaderReplicaWithRetries(tablet_id_, &master_observed_leader));
  ASSERT_EQ(leader->uuid(), master_observed_leader->uuid());

  LOG(INFO) << "Adding tserver with uuid " << new_node->uuid() << " as PRE_VOTER ...";
  string exe_path = GetAdminToolPath();
  ASSERT_OK(CallAdmin("change_config", tablet_id_, "ADD_SERVER", new_node->uuid(), "PRE_VOTER"));

  InsertOrDie(&active_tablet_servers, new_node->uuid(), new_node);
  ASSERT_OK(WaitUntilCommittedConfigNumVotersIs(active_tablet_servers.size(),
                                                leader, tablet_id_,
                                                MonoDelta::FromSeconds(10)));

  workload.StopAndJoin();
  auto num_batches = workload.batches_completed();

  LOG(INFO) << "Waiting for replicas to agree...";
  // Wait for all servers to replicate everything up through the last write op.
  // Since we don't batch, there should be at least # rows inserted log entries,
  // plus the initial leader's no-op, plus 1 for
  // the added replica for a total == #rows + 2.
  auto min_log_index = num_batches + 2;
  ASSERT_OK(WaitForServersToAgree(MonoDelta::FromSeconds(30),
                                  active_tablet_servers, tablet_id_,
                                  min_log_index));

  auto rows_inserted = workload.rows_inserted();
  LOG(INFO) << "Number of rows inserted: " << rows_inserted;

  ClusterVerifier cluster_verifier(cluster_.get());
  ASSERT_NO_FATALS(cluster_verifier.CheckCluster());
  ASSERT_NO_FATALS(cluster_verifier.CheckRowCount(
      kTableName, ClusterVerifier::AT_LEAST, rows_inserted));

  // Now remove the server once again.
  LOG(INFO) << "Removing tserver with uuid " << new_node->uuid() << " from the config...";
  ASSERT_OK(CallAdmin("change_config", tablet_id_, "REMOVE_SERVER", new_node->uuid()));

  ASSERT_EQ(1, active_tablet_servers.erase(new_node->uuid()));
  ASSERT_OK(WaitUntilCommittedConfigNumVotersIs(active_tablet_servers.size(),
                                                leader, tablet_id_,
                                                MonoDelta::FromSeconds(10)));
}
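
// Verify that deleting the only user table via yb-admin delete_table leaves just the system tables.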
TEST_F(AdminCliTest, TestDeleteTable) {
  FLAGS_num_tablet_servers = 1;
  FLAGS_num_replicas = 1;

  vector<string> ts_flags, master_flags;
  master_flags.push_back("--replication_factor=1");
  BuildAndStart(ts_flags, master_flags);
  string master_address = ToString(cluster_->master()->bound_rpc_addr());

  auto client = ASSERT_RESULT(YBClientBuilder()
      .add_master_server_addr(master_address)
      .Build());

  // Default table that gets created.
  string table_name = kTableName.table_name();
  string keyspace = kTableName.namespace_name();

  string exe_path = GetAdminToolPath();
  ASSERT_OK(CallAdmin("delete_table", keyspace, table_name));

  const auto tables = ASSERT_RESULT(client->ListTables(/* filter */ "", /* exclude_ysql */ true));
  ASSERT_EQ(master::kNumSystemTables, tables.size());
}
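
// Create an index on the default table, then verify that yb-admin delete_index and delete_table
// remove the expected tables.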
TEST_F(AdminCliTest, TestDeleteIndex) {
  FLAGS_num_tablet_servers = 1;
  FLAGS_num_replicas = 1;

  vector<string> ts_flags, master_flags;
  master_flags.push_back("--replication_factor=1");
  ts_flags.push_back("--index_backfill_upperbound_for_user_enforced_txn_duration_ms=12000");
  BuildAndStart(ts_flags, master_flags);
  string master_address = ToString(cluster_->master()->bound_rpc_addr());

  auto client = ASSERT_RESULT(YBClientBuilder()
      .add_master_server_addr(master_address)
      .Build());

  // Default table that gets created.
  string table_name = kTableName.table_name();
  string keyspace = kTableName.namespace_name();
  string index_name = table_name + "-index";

  auto tables = ASSERT_RESULT(client->ListTables(/* filter */ table_name));
  ASSERT_EQ(1, tables.size());
  const auto table_id = tables.front().table_id();

  YBSchema index_schema;
  YBSchemaBuilder b;
  b.AddColumn("C$_key")->Type(INT32)->NotNull()->HashPrimaryKey();
  ASSERT_OK(b.Build(&index_schema));

  // Create index.
  shared_ptr<YBTableCreator> table_creator(client->NewTableCreator());

  IndexInfoPB *index_info = table_creator->mutable_index_info();
  index_info->set_indexed_table_id(table_id);
  index_info->set_is_local(false);
  index_info->set_is_unique(false);
  index_info->set_hash_column_count(1);
  index_info->set_range_column_count(0);
  index_info->set_use_mangled_column_name(true);
  index_info->add_indexed_hash_column_ids(10);

  auto *col = index_info->add_columns();
  col->set_column_name("C$_key");
  col->set_indexed_column_id(10);

  Status s = table_creator->table_name(YBTableName(YQL_DATABASE_CQL, keyspace, index_name))
                 .table_type(YBTableType::YQL_TABLE_TYPE)
                 .schema(&index_schema)
                 .indexed_table_id(table_id)
                 .is_local_index(false)
                 .is_unique_index(false)
                 .timeout(MonoDelta::FromSeconds(60))
                 .Create();
  ASSERT_OK(s);

  tables = ASSERT_RESULT(client->ListTables(/* filter */ "", /* exclude_ysql */ true));
  ASSERT_EQ(2 + master::kNumSystemTables, tables.size());

  // Delete index.
  string exe_path = GetAdminToolPath();
  LOG(INFO) << "Delete index via yb-admin: " << keyspace << "." << index_name;
  ASSERT_OK(CallAdmin("delete_index", keyspace, index_name));

  tables = ASSERT_RESULT(client->ListTables(/* filter */ "", /* exclude_ysql */ true));
  ASSERT_EQ(1 + master::kNumSystemTables, tables.size());

  // Delete table.
  LOG(INFO) << "Delete table via yb-admin: " << keyspace << "." << table_name;
  ASSERT_OK(CallAdmin("delete_table", keyspace, table_name));

  tables = ASSERT_RESULT(client->ListTables(/* filter */ "", /* exclude_ysql */ true));
  ASSERT_EQ(master::kNumSystemTables, tables.size());
}
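
// Verify that yb-admin change_blacklist ADD/REMOVE is reflected in the universe config.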
TEST_F(AdminCliTest, BlackList) {
  BuildAndStart();
  const auto master_address = ToString(cluster_->master()->bound_rpc_addr());
  const auto exe_path = GetAdminToolPath();
  const auto default_port = 9100;
  vector<HostPort> hosts{{"node1", default_port}, {"node2", default_port}, {"node3", default_port}};
  ASSERT_OK(CallAdmin("change_blacklist", "ADD", unpack(hosts)));
  const BlacklistChecker checker(exe_path, master_address);
  ASSERT_OK(checker(hosts));
  ASSERT_OK(CallAdmin("change_blacklist", "REMOVE", hosts.back()));
  hosts.pop_back();
  ASSERT_OK(checker(hosts));
}
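
// Verify that yb-admin fails with a helpful error message when pointed at an unreachable master.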
TEST_F(AdminCliTest, InvalidMasterAddresses) {
  int port = AllocateFreePort();
  string unreachable_host = Substitute("127.0.0.1:$0", port);
  std::string error_string;
  ASSERT_NOK(Subprocess::Call(ToStringVector(
      GetAdminToolPath(), "-master_addresses", unreachable_host,
      "-timeout_ms", "1000", "list_tables"), &error_string, StdFdTypes{StdFdType::kErr}));
  ASSERT_STR_CONTAINS(error_string, "verify the addresses");
}
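
// Exercise the tableid.<id> argument form of list_tablets, including bad optional arguments,
// multiple table arguments, and a wrong table id.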
TEST_F(AdminCliTest, CheckTableIdUsage) {
  BuildAndStart();
  const auto master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder().add_master_server_addr(master_address).Build());
  const auto tables = ASSERT_RESULT(client->ListTables(kTableName.table_name(),
                                                       /* exclude_ysql */ true));
  ASSERT_EQ(1, tables.size());
  const auto exe_path = GetAdminToolPath();
  const auto table_id = tables.front().table_id();
  const auto table_id_arg = Format("tableid.$0", table_id);
  auto args = ToStringVector(
      exe_path, "-master_addresses", master_address, "list_tablets", table_id_arg);
  const auto args_size = args.size();
  ASSERT_OK(Subprocess::Call(args));
  // Check good optional integer argument.
  args.push_back("1");
  ASSERT_OK(Subprocess::Call(args));
  // Check bad optional integer argument.
  args.resize(args_size);
  args.push_back("bad");
  std::string output;
  ASSERT_NOK(Subprocess::Call(args, &output, StdFdTypes{StdFdType::kErr}));
  // Due to the greedy parsing algorithm, all bad arguments are treated as a table identifier.
  ASSERT_NE(output.find("Namespace 'bad' of type 'ycql' not found"), std::string::npos);
  // Check multiple tables when single one is expected.
  args.resize(args_size);
  args.push_back(table_id_arg);
  ASSERT_NOK(Subprocess::Call(args, &output, StdFdTypes{StdFdType::kErr}));
  ASSERT_NE(output.find("Single table expected, 2 found"), std::string::npos);
  // Check wrong table id.
  args.resize(args_size - 1);
  const auto bad_table_id = table_id + "_bad";
  args.push_back(Format("tableid.$0", bad_table_id));
  ASSERT_NOK(Subprocess::Call(args, &output, StdFdTypes{StdFdType::kErr}));
  ASSERT_NE(
      output.find(Format("Table with id '$0' not found", bad_table_id)), std::string::npos);
}
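
// Verify snapshot creation for user tables and redis system tables, and that it is rejected
// for YCQL system tables.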
TEST_F(AdminCliTest, TestSnapshotCreation) {
  BuildAndStart();
  const auto extra_table = YBTableName(YQLDatabase::YQL_DATABASE_CQL,
                                       kTableName.namespace_name(),
                                       "extra-table");
  YBSchemaBuilder schemaBuilder;
  schemaBuilder.AddColumn("k")->HashPrimaryKey()->Type(yb::BINARY)->NotNull();
  schemaBuilder.AddColumn("v")->Type(yb::BINARY)->NotNull();
  YBSchema schema;
  ASSERT_OK(schemaBuilder.Build(&schema));
  ASSERT_OK(client_->NewTableCreator()->table_name(extra_table)
      .schema(&schema).table_type(yb::client::YBTableType::YQL_TABLE_TYPE).Create());
  const auto tables = ASSERT_RESULT(client_->ListTables(kTableName.table_name(),
      /* exclude_ysql */ true));
  ASSERT_EQ(1, tables.size());
  std::string output = ASSERT_RESULT(CallAdmin(
      "create_snapshot", Format("tableid.$0", tables.front().table_id()),
      extra_table.namespace_name(), extra_table.table_name()));
  ASSERT_NE(output.find("Started snapshot creation"), string::npos);

  output = ASSERT_RESULT(CallAdmin("list_snapshots", "SHOW_DETAILS"));
  ASSERT_NE(output.find(extra_table.table_name()), string::npos);
  ASSERT_NE(output.find(kTableName.table_name()), string::npos);

  // Snapshot creation should be blocked for CQL system tables (which are virtual) but not for
  // redis system tables (which are not).
  const auto result = CallAdmin("create_snapshot", "system", "peers");
  ASSERT_FALSE(result.ok());
  ASSERT_TRUE(result.status().IsRuntimeError());
  ASSERT_NE(result.status().ToUserMessage().find(
      "Error running create_snapshot: Invalid argument"), std::string::npos);
  ASSERT_NE(result.status().ToUserMessage().find(
      "Cannot create snapshot of YCQL system table: peers"), std::string::npos);

  ASSERT_OK(CallAdmin("setup_redis_table"));
  ASSERT_OK(CallAdmin("create_snapshot", "system_redis", "redis"));
}
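
// Verify that deleting a table does not cause get_is_load_balancer_idle to report an active
// load balancer.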
TEST_F(AdminCliTest, GetIsLoadBalancerIdle) {
  const MonoDelta kWaitTime = 20s;
  std::string output;
  std::vector<std::string> master_flags;
  std::vector<std::string> ts_flags;
  master_flags.push_back("--enable_load_balancing=true");
  BuildAndStart(ts_flags, master_flags);

  const std::string master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder()
      .add_master_server_addr(master_address)
      .Build());

  // Load balancer IsIdle() logic has been changed to the following: unless a task was explicitly
  // triggered by the load balancer (AsyncAddServerTask / AsyncRemoveServerTask / AsyncTryStepDown)
  // it does not count towards determining whether the load balancer is active. If no
  // pending LB tasks of the aforementioned types exist, the load balancer will report idle.

  // Delete table should not activate the load balancer.
  ASSERT_OK(client->DeleteTable(kTableName, false /* wait */));
  // This should time out.
  Status s = WaitFor(
      [&]() -> Result<bool> {
        auto output = VERIFY_RESULT(CallAdmin("get_is_load_balancer_idle"));
        return output.compare("Idle = 0\n") == 0;
      },
      kWaitTime,
      "wait for load balancer to stay idle");

  ASSERT_FALSE(s.ok());
}
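
// Verify that yb-admin leader_stepdown moves tablet leadership to the requested follower.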
TEST_F(AdminCliTest, TestLeaderStepdown) {
  BuildAndStart();
  std::string out;
  auto regex_fetch_first = [&out](const std::string& exp) -> Result<std::string> {
    std::smatch match;
    if (!std::regex_search(out.cbegin(), out.cend(), match, std::regex(exp)) || match.size() != 2) {
      return STATUS_FORMAT(NotFound, "No pattern in '$0'", out);
    }
    return match[1];
  };

  out = ASSERT_RESULT(CallAdmin(
      "list_tablets", kTableName.namespace_name(), kTableName.table_name()));
  const auto tablet_id = ASSERT_RESULT(regex_fetch_first(R"(\s+([a-z0-9]{32})\s+)"));
  out = ASSERT_RESULT(CallAdmin("list_tablet_servers", tablet_id));
  const auto tserver_id = ASSERT_RESULT(regex_fetch_first(R"(\s+([a-z0-9]{32})\s+\S+\s+FOLLOWER)"));
  ASSERT_OK(CallAdmin("leader_stepdown", tablet_id, tserver_id));

  ASSERT_OK(WaitFor([&]() -> Result<bool> {
    out = VERIFY_RESULT(CallAdmin("list_tablet_servers", tablet_id));
    return tserver_id == VERIFY_RESULT(regex_fetch_first(R"(\s+([a-z0-9]{32})\s+\S+\s+LEADER)"));
  }, 5s, "Leader stepdown"));
}

namespace {
Result<string> RegexFetchFirst(const string out, const string &exp) {
  std::smatch match;
  if (!std::regex_search(out.cbegin(), out.cend(), match, std::regex(exp)) || match.size() != 2) {
    return STATUS_FORMAT(NotFound, "No pattern in '$0'", out);
  }
  return match[1];
}
} // namespace

// Use list_tablets to list a single tablet for the table.
// Parse out the tablet from the output.
// Use list_tablet_servers to list the tablet's leader and follower details.
// Get the leader uuid and host:port from this.
// Also create an unordered_map of all followers' host_port to uuid.
// Verify that these are present in the appropriate locations in the output of list_tablets.
// Use list_tablets with the json option to get the json output.
// Parse the json output, extract data for the specific tablet and compare the leader and
// follower details with the values extracted previously.
TEST_F(AdminCliTest, TestFollowersTableList) {
  BuildAndStart();

  string lt_out = ASSERT_RESULT(CallAdmin(
      "list_tablets", kTableName.namespace_name(), kTableName.table_name(),
      "1", "include_followers"));
  const auto tablet_id = ASSERT_RESULT(RegexFetchFirst(lt_out, R"(\s+([a-z0-9]{32})\s+)"));
  ASSERT_FALSE(tablet_id.empty());

  master::TabletLocationsPB locs_pb;
  ASSERT_OK(itest::GetTabletLocations(
        cluster_.get(), tablet_id, MonoDelta::FromSeconds(10), &locs_pb));
  ASSERT_EQ(3, locs_pb.replicas().size());
  string leader_uuid;
  string leader_host_port;
  unordered_map<string, string> follower_hp_to_uuid_map;
  for (const auto& replica : locs_pb.replicas()) {
    if (replica.role() == PeerRole::LEADER) {
      leader_host_port = HostPortPBToString(replica.ts_info().private_rpc_addresses(0));
      leader_uuid = replica.ts_info().permanent_uuid();
    } else {
      follower_hp_to_uuid_map[HostPortPBToString(replica.ts_info().private_rpc_addresses(0))] =
          replica.ts_info().permanent_uuid();
    }
  }
  ASSERT_FALSE(leader_uuid.empty());
  ASSERT_FALSE(leader_host_port.empty());
  ASSERT_EQ(2, follower_hp_to_uuid_map.size());

  const auto follower_list_str = ASSERT_RESULT(RegexFetchFirst(
        lt_out, R"(\s+)" + leader_host_port + R"(\s+)" + leader_uuid + R"(\s+(\S+))"));

  vector<string> followers;
  boost::split(followers, follower_list_str, boost::is_any_of(","));
  ASSERT_EQ(2, followers.size());
  for (const string &follower_host_port : followers) {
      auto got = follower_hp_to_uuid_map.find(follower_host_port);
      ASSERT_TRUE(got != follower_hp_to_uuid_map.end());
  }

  string lt_json_out = ASSERT_RESULT(CallAdmin(
      "list_tablets", kTableName.namespace_name(), kTableName.table_name(),
      "json", "include_followers"));
  boost::erase_all(lt_json_out, "\n");
  JsonReader reader(lt_json_out);

  ASSERT_OK(reader.Init());
  vector<const rapidjson::Value *> tablets;
  ASSERT_OK(reader.ExtractObjectArray(reader.root(), "tablets", &tablets));

  for (const rapidjson::Value *entry : tablets) {
    string tid;
    ASSERT_OK(reader.ExtractString(entry, "id", &tid));
    // Testing only for the tablet received in list_tablets <table id> 1 include_followers.
    if (tid != tablet_id) {
      continue;
    }
    const rapidjson::Value *leader;
    ASSERT_OK(reader.ExtractObject(entry, "leader", &leader));
    string lhp;
    string luuid;
    ASSERT_OK(reader.ExtractString(leader, "endpoint", &lhp));
    ASSERT_OK(reader.ExtractString(leader, "uuid", &luuid));
    ASSERT_STR_EQ(lhp, leader_host_port);
    ASSERT_STR_EQ(luuid, leader_uuid);

    vector<const rapidjson::Value *> follower_json;
    ASSERT_OK(reader.ExtractObjectArray(entry, "followers", &follower_json));
    for (const rapidjson::Value *f : follower_json) {
      string fhp;
      string fuuid;
      ASSERT_OK(reader.ExtractString(f, "endpoint", &fhp));
      ASSERT_OK(reader.ExtractString(f, "uuid", &fuuid));
      auto got = follower_hp_to_uuid_map.find(fhp);
      ASSERT_TRUE(got != follower_hp_to_uuid_map.end());
      ASSERT_STR_EQ(got->second, fuuid);
    }
  }
}
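
// Verify that get_load_balancer_state and set_load_balancer_enabled round-trip between
// ENABLED and DISABLED.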
TEST_F(AdminCliTest, TestGetClusterLoadBalancerState) {
  std::string output;
  std::vector<std::string> master_flags;
  std::vector<std::string> ts_flags;
  master_flags.push_back("--enable_load_balancing=true");
  BuildAndStart(ts_flags, master_flags);

  const std::string master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder()
                                  .add_master_server_addr(master_address)
                                  .Build());
  output = ASSERT_RESULT(CallAdmin("get_load_balancer_state"));

  ASSERT_NE(output.find("ENABLED"), std::string::npos);

  output = ASSERT_RESULT(CallAdmin("set_load_balancer_enabled", "0"));

  ASSERT_EQ(output.find("Unable to change load balancer state"), std::string::npos);

  output = ASSERT_RESULT(CallAdmin("get_load_balancer_state"));

  ASSERT_NE(output.find("DISABLED"), std::string::npos);

  output = ASSERT_RESULT(CallAdmin("set_load_balancer_enabled", "1"));

  ASSERT_EQ(output.find("Unable to change load balancer state"), std::string::npos);

  output = ASSERT_RESULT(CallAdmin("get_load_balancer_state"));

  ASSERT_NE(output.find("ENABLED"), std::string::npos);
}
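
// Verify that modify_placement_info merges duplicate placement blocks in the universe config.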
TEST_F(AdminCliTest, TestModifyPlacementPolicy) {
  BuildAndStart();

  // Modify the cluster placement policy to consist of 2 zones.
  ASSERT_OK(CallAdmin("modify_placement_info", "c.r.z0,c.r.z1:2,c.r.z0:2", 5, ""));

  auto output = ASSERT_RESULT(CallAdmin("get_universe_config"));

  std::string expected_placement_blocks =
      "[{\"cloudInfo\":{\"placementCloud\":\"c\",\"placementRegion\":\"r\","
      "\"placementZone\":\"z1\"},\"minNumReplicas\":2},{\"cloudInfo\":{\"placementCloud\":\"c\","
      "\"placementRegion\":\"r\",\"placementZone\":\"z0\"},\"minNumReplicas\":3}]";

  ASSERT_NE(output.find(expected_placement_blocks), string::npos);
}
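
// Verify that modify_table_placement_info sets a table-level placement policy, by table name
// and by table id, and rejects a mismatched placement_uuid.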
TEST_F(AdminCliTest, TestModifyTablePlacementPolicy) {
  // Start a cluster with 3 tservers, each corresponding to a different zone.
  FLAGS_num_tablet_servers = 3;
  FLAGS_num_replicas = 2;
  std::vector<std::string> master_flags;
  master_flags.push_back("--enable_load_balancing=true");
  master_flags.push_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
  std::vector<std::string> ts_flags;
  ts_flags.push_back("--placement_cloud=c");
  ts_flags.push_back("--placement_region=r");
  ts_flags.push_back("--placement_zone=z${index}");
  BuildAndStart(ts_flags, master_flags);

  const std::string& master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder()
      .add_master_server_addr(master_address)
      .Build());

  // Modify the cluster placement policy to consist of 2 zones.
  ASSERT_OK(CallAdmin("modify_placement_info", "c.r.z0,c.r.z1", 2, ""));

  // Create a new table.
  const auto extra_table = YBTableName(YQLDatabase::YQL_DATABASE_CQL,
                                       kTableName.namespace_name(),
                                       "extra-table");
  // Start a workload.
  TestWorkload workload(cluster_.get());
  workload.set_table_name(extra_table);
  workload.set_timeout_allowed(true);
  workload.set_sequential_write(true);
  workload.Setup();
  workload.Start();

  // Verify that the table has no custom placement policy set for it.
  std::shared_ptr<client::YBTable> table;
  ASSERT_OK(client->OpenTable(extra_table, &table));
  ASSERT_FALSE(table->replication_info());

  // Use yb-admin_cli to set a custom placement policy different from that of
  // the cluster placement policy for the new table.
  ASSERT_OK(CallAdmin(
      "modify_table_placement_info", kTableName.namespace_name(), "extra-table",
      "c.r.z0,c.r.z1,c.r.z2", 3, ""));

  // Verify that changing the placement_uuid for a table fails if the
  // placement_uuid does not match the cluster live placement_uuid.
  const string& random_placement_uuid = "19dfa091-2b53-434f-b8dc-97280a5f8831";
  ASSERT_NOK(CallAdmin(
      "modify_table_placement_info", kTableName.namespace_name(), "extra-table",
      "c.r.z0,c.r.z1,c.r.z2", 3, random_placement_uuid));

  ASSERT_OK(client->OpenTable(extra_table, &table));
  ASSERT_TRUE(table->replication_info().get().live_replicas().placement_uuid().empty());

  // Fetch the placement policy for the table and verify that it matches
  // the custom info set previously.
  ASSERT_OK(client->OpenTable(extra_table, &table));
  vector<bool> found_zones;
  found_zones.assign(3, false);
  ASSERT_EQ(table->replication_info().get().live_replicas().placement_blocks_size(), 3);
  for (int ii = 0; ii < 3; ++ii) {
    auto pb = table->replication_info().get().live_replicas().placement_blocks(ii).cloud_info();
    ASSERT_EQ(pb.placement_cloud(), "c");
    ASSERT_EQ(pb.placement_region(), "r");
    if (pb.placement_zone() == "z0") {
      found_zones[0] = true;
    } else if (pb.placement_zone() == "z1") {
      found_zones[1] = true;
    } else {
      ASSERT_EQ(pb.placement_zone(), "z2");
      found_zones[2] = true;
    }
  }
  for (const bool found : found_zones) {
    ASSERT_TRUE(found);
  }

  // Perform the same test, but use the table-id instead of table name to set the
  // custom placement policy.
  std::string table_id = "tableid." + table->id();
  ASSERT_OK(CallAdmin("modify_table_placement_info", table_id, "c.r.z1", 1, ""));

  // Verify that changing the placement_uuid for a table fails if the
  // placement_uuid does not match the cluster live placement_uuid.
  ASSERT_NOK(CallAdmin(
      "modify_table_placement_info", table_id, "c.r.z1", 1, random_placement_uuid));

  ASSERT_OK(client->OpenTable(extra_table, &table));
  ASSERT_TRUE(table->replication_info().get().live_replicas().placement_uuid().empty());

  // Fetch the placement policy for the table and verify that it matches
  // the custom info set previously.
  ASSERT_OK(client->OpenTable(extra_table, &table));
  ASSERT_EQ(table->replication_info().get().live_replicas().placement_blocks_size(), 1);
  auto pb = table->replication_info().get().live_replicas().placement_blocks(0).cloud_info();
  ASSERT_EQ(pb.placement_cloud(), "c");
  ASSERT_EQ(pb.placement_region(), "r");
  ASSERT_EQ(pb.placement_zone(), "z1");

  // Stop the workload.
  workload.StopAndJoin();
  auto rows_inserted = workload.rows_inserted();
  LOG(INFO) << "Number of rows inserted: " << rows_inserted;

  sleep(5);

  // Verify that there was no data loss.
  ClusterVerifier cluster_verifier(cluster_.get());
  ASSERT_NO_FATALS(cluster_verifier.CheckCluster());
  ASSERT_NO_FATALS(cluster_verifier.CheckRowCount(
    extra_table, ClusterVerifier::EXACTLY, rows_inserted));
}
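
// Create per-zone transaction status tables with their own placements and verify that no data
// is lost after adding and blacklisting tservers.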
TEST_F(AdminCliTest, TestCreateTransactionStatusTablesWithPlacements) {
  // Start a cluster with 3 tservers, each corresponding to a different zone.
  FLAGS_num_tablet_servers = 3;
  FLAGS_num_replicas = 3;
  std::vector<std::string> master_flags;
  master_flags.push_back("--enable_load_balancing=true");
  master_flags.push_back("--catalog_manager_wait_for_new_tablets_to_elect_leader=false");
  std::vector<std::string> ts_flags;
  ts_flags.push_back("--placement_cloud=c");
  ts_flags.push_back("--placement_region=r");
  ts_flags.push_back("--placement_zone=z${index}");
  BuildAndStart(ts_flags, master_flags);

  // Create a new table.
  const auto extra_table = YBTableName(YQLDatabase::YQL_DATABASE_CQL,
                                       kTableName.namespace_name(),
                                       "extra-table");
  // Start a workload.
  TestWorkload workload(cluster_.get());
  workload.set_table_name(extra_table);
  workload.set_timeout_allowed(true);
  workload.set_sequential_write(true);
  workload.Setup();
  workload.Start();

  const std::string& master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder()
      .add_master_server_addr(master_address)
      .Build());

  // Create transaction tables for each zone.
  for (int i = 0; i < 3; ++i) {
    string table_name = Substitute("transactions_z$0", i);
    string placement = Substitute("c.r.z$0", i);
    ASSERT_OK(CallAdmin("create_transaction_table", table_name));
    ASSERT_OK(CallAdmin("modify_table_placement_info", "system", table_name, placement, 1));
  }

  // Verify that the tables are all transaction status tables in the right zone.
  std::shared_ptr<client::YBTable> table;
  for (int i = 0; i < 3; ++i) {
    const auto table_name = YBTableName(YQLDatabase::YQL_DATABASE_CQL,
                                        "system",
                                        Substitute("transactions_z$0", i));
    ASSERT_OK(client->OpenTable(table_name, &table));
    ASSERT_EQ(table->table_type(), YBTableType::TRANSACTION_STATUS_TABLE_TYPE);
    ASSERT_EQ(table->replication_info().get().live_replicas().placement_blocks_size(), 1);
    auto pb = table->replication_info().get().live_replicas().placement_blocks(0).cloud_info();
    ASSERT_EQ(pb.placement_zone(), Substitute("z$0", i));
  }

  // Add two new tservers, to zone3 and an unused zone.
  std::vector<std::string> existing_zone_ts_flags;
  existing_zone_ts_flags.push_back("--placement_cloud=c");
  existing_zone_ts_flags.push_back("--placement_region=r");
  existing_zone_ts_flags.push_back("--placement_zone=z3");
  ASSERT_OK(cluster_->AddTabletServer(true, existing_zone_ts_flags));

  std::vector<std::string> new_zone_ts_flags;
  new_zone_ts_flags.push_back("--placement_cloud=c");
  new_zone_ts_flags.push_back("--placement_region=r");
  new_zone_ts_flags.push_back("--placement_zone=z4");
  ASSERT_OK(cluster_->AddTabletServer(true, new_zone_ts_flags));

  ASSERT_OK(cluster_->WaitForTabletServerCount(5, 5s));

  // Blacklist the original zone3 tserver.
  ASSERT_OK(cluster_->AddTServerToBlacklist(cluster_->master(), cluster_->tablet_server(2)));

  // Stop the workload.
  workload.StopAndJoin();
  auto rows_inserted = workload.rows_inserted();
  LOG(INFO) << "Number of rows inserted: " << rows_inserted;

  sleep(5);

  // Verify that there was no data loss.
  ClusterVerifier cluster_verifier(cluster_.get());
  ASSERT_NO_FATALS(cluster_verifier.CheckCluster());
  ASSERT_NO_FATALS(cluster_verifier.CheckRowCountWithRetries(
    extra_table, ClusterVerifier::EXACTLY, rows_inserted, 20s));
}
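
// Verify that clear_placement_info removes the replication info set by modify_placement_info.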
TEST_F(AdminCliTest, TestClearPlacementPolicy) {
  // Start a cluster with 3 tservers.
  FLAGS_num_tablet_servers = 3;
  FLAGS_num_replicas = 2;
  std::vector<std::string> master_flags;
  master_flags.push_back("--enable_load_balancing=true");
  std::vector<std::string> ts_flags;
  ts_flags.push_back("--placement_cloud=c");
  ts_flags.push_back("--placement_region=r");
  ts_flags.push_back("--placement_zone=z");
  BuildAndStart(ts_flags, master_flags);

  // Create the placement config.
  ASSERT_OK(CallAdmin("modify_placement_info", "c.r.z", 3, ""));

  // Ensure that the universe config has placement information.
  auto output = ASSERT_RESULT(CallAdmin("get_universe_config"));
  ASSERT_TRUE(output.find("replicationInfo") != std::string::npos);

  // Clear the placement config.
  ASSERT_OK(CallAdmin("clear_placement_info"));

  // Ensure that the placement config is absent.
  output = ASSERT_RESULT(CallAdmin("get_universe_config"));
  ASSERT_TRUE(output.find("replicationInfo") == std::string::npos);
}
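
// Verify that the ddl_log command reports the YCQL DDL operations performed on a table.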
TEST_F(AdminCliTest, DdlLog) {
  const std::string kNamespaceName = "test_namespace";
  const std::string kTableName = "test_table";
  BuildAndStart({}, {});

  auto session = ASSERT_RESULT(CqlConnect());
  ASSERT_OK(session.ExecuteQueryFormat(
      "CREATE KEYSPACE IF NOT EXISTS $0", kNamespaceName));

  ASSERT_OK(session.ExecuteQueryFormat("USE $0", kNamespaceName));

  ASSERT_OK(session.ExecuteQueryFormat(
      "CREATE TABLE $0 (key INT PRIMARY KEY, text_column TEXT) "
      "WITH transactions = { 'enabled' : true }", kTableName));

  ASSERT_OK(session.ExecuteQueryFormat(
      "CREATE INDEX test_idx ON $0 (text_column)", kTableName));

  ASSERT_OK(session.ExecuteQueryFormat(
      "ALTER TABLE $0 ADD int_column INT", kTableName));

  ASSERT_OK(session.ExecuteQuery("DROP INDEX test_idx"));

  ASSERT_OK(session.ExecuteQueryFormat("ALTER TABLE $0 DROP text_column", kTableName));

  auto document = ASSERT_RESULT(CallJsonAdmin("ddl_log"));

  auto log = ASSERT_RESULT(Get(document, "log")).get().GetArray();
  ASSERT_EQ(log.Size(), 3);
  std::vector<std::string> actions;
  for (const auto& entry : log) {
    LOG(INFO) << "Entry: " << common::PrettyWriteRapidJsonToString(entry);
    TableType type;
    bool parse_result = TableType_Parse(
        ASSERT_RESULT(Get(entry, "table_type")).get().GetString(), &type);
    ASSERT_TRUE(parse_result);
    ASSERT_EQ(type, TableType::YQL_TABLE_TYPE);
    auto namespace_name = ASSERT_RESULT(Get(entry, "namespace")).get().GetString();
    ASSERT_EQ(namespace_name, kNamespaceName);
    auto table_name = ASSERT_RESULT(Get(entry, "table")).get().GetString();
    ASSERT_EQ(table_name, kTableName);
    actions.emplace_back(ASSERT_RESULT(Get(entry, "action")).get().GetString());
  }
  ASSERT_EQ(actions[0], "Drop column text_column");
  ASSERT_EQ(actions[1], "Drop index test_idx");
  ASSERT_EQ(actions[2], "Add column int_column[int32 NULLABLE NOT A PARTITION KEY]");
}
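
// Smoke test for the flush_sys_catalog command.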
TEST_F(AdminCliTest, FlushSysCatalog) {
  BuildAndStart();
  string master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder().add_master_server_addr(master_address).Build());
  ASSERT_OK(CallAdmin("flush_sys_catalog"));
}
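
// Smoke test for the compact_sys_catalog command.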
TEST_F(AdminCliTest, CompactSysCatalog) {
  BuildAndStart();
  string master_address = ToString(cluster_->master()->bound_rpc_addr());
  auto client = ASSERT_RESULT(YBClientBuilder().add_master_server_addr(master_address).Build());
  ASSERT_OK(CallAdmin("compact_sys_catalog"));
}

// A simple smoke test to ensure it is working and
// nothing is broken by future changes.
TEST_F(AdminCliTest, SetWalRetentionSecsTest) {
  constexpr auto WAL_RET_TIME_SEC = 100ul;
  constexpr auto UNEXPECTED_ARG = 911ul;

  BuildAndStart();
  const auto master_address = ToString(cluster_->master()->bound_rpc_addr());

  // Default table that gets created.
  const auto& table_name = kTableName.table_name();
  const auto& keyspace = kTableName.namespace_name();

  // No WAL retention time found.
  {
    const auto output = ASSERT_RESULT(CallAdmin("get_wal_retention_secs", keyspace, table_name));
    ASSERT_NE(output.find("not set"), std::string::npos);
  }

  // Successfully set the WAL retention time and verify it via the getter.
  {
    ASSERT_OK(CallAdmin("set_wal_retention_secs", keyspace, table_name, WAL_RET_TIME_SEC));
    const auto output = ASSERT_RESULT(CallAdmin("get_wal_retention_secs", keyspace, table_name));
    ASSERT_TRUE(
        output.find(std::to_string(WAL_RET_TIME_SEC)) != std::string::npos &&
        output.find(table_name) != std::string::npos);
  }

  // Too many args in input.
  {
    const auto output_setter =
        CallAdmin("set_wal_retention_secs", keyspace, table_name, WAL_RET_TIME_SEC, UNEXPECTED_ARG);
    ASSERT_FALSE(output_setter.ok());
    ASSERT_TRUE(output_setter.status().IsRuntimeError());
    ASSERT_TRUE(
        output_setter.status().ToUserMessage().find("Invalid argument") != std::string::npos);

    const auto output_getter =
        CallAdmin("get_wal_retention_secs", keyspace, table_name, UNEXPECTED_ARG);
    ASSERT_FALSE(output_getter.ok());
    ASSERT_TRUE(output_getter.status().IsRuntimeError());
    ASSERT_TRUE(
        output_getter.status().ToUserMessage().find("Invalid argument") != std::string::npos);
  }

  // Out-of-range arg in input.
  {
    const auto output_setter =
        CallAdmin("set_wal_retention_secs", keyspace, table_name, -WAL_RET_TIME_SEC);
    ASSERT_FALSE(output_setter.ok());
    ASSERT_TRUE(output_setter.status().IsRuntimeError());

    const auto output_getter =
        CallAdmin("get_wal_retention_secs", keyspace, table_name + "NoTable");
    ASSERT_FALSE(output_getter.ok());
    ASSERT_TRUE(output_getter.status().IsRuntimeError());
  }
}

}  // namespace tools
}  // namespace yb