YugabyteDB (2.13.0.0-b42, bfc6a6643e7399ac8a0e81d06a3ee6d6571b33ab)

Coverage Report

Created: 2022-03-09 17:30

/Users/deen/code/yugabyte-db/src/yb/master/mini_master.cc
Line
Count
Source (jump to first uncovered line)
1
// Licensed to the Apache Software Foundation (ASF) under one
2
// or more contributor license agreements.  See the NOTICE file
3
// distributed with this work for additional information
4
// regarding copyright ownership.  The ASF licenses this file
5
// to you under the Apache License, Version 2.0 (the
6
// "License"); you may not use this file except in compliance
7
// with the License.  You may obtain a copy of the License at
8
//
9
//   http://www.apache.org/licenses/LICENSE-2.0
10
//
11
// Unless required by applicable law or agreed to in writing,
12
// software distributed under the License is distributed on an
13
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
// KIND, either express or implied.  See the License for the
15
// specific language governing permissions and limitations
16
// under the License.
17
//
18
// The following only applies to changes made to this file as part of YugaByte development.
19
//
20
// Portions Copyright (c) YugaByte, Inc.
21
//
22
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
23
// in compliance with the License.  You may obtain a copy of the License at
24
//
25
// http://www.apache.org/licenses/LICENSE-2.0
26
//
27
// Unless required by applicable law or agreed to in writing, software distributed under the License
28
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
29
// or implied.  See the License for the specific language governing permissions and limitations
30
// under the License.
31
//
32
33
#include "yb/master/mini_master.h"
34
35
#include <string>
36
37
#include <glog/logging.h>
38
39
#include "yb/master/catalog_manager.h"
40
#include "yb/master/master.h"
41
42
#include "yb/rpc/messenger.h"
43
44
#include "yb/util/net/net_util.h"
45
#include "yb/util/net/sockaddr.h"
46
#include "yb/util/net/tunnel.h"
47
#include "yb/util/status.h"
48
49
using strings::Substitute;
50
51
DECLARE_bool(TEST_simulate_fs_create_failure);
52
DECLARE_bool(rpc_server_allow_ephemeral_ports);
53
DECLARE_double(leader_failure_max_missed_heartbeat_periods);
54
DECLARE_int32(TEST_nodes_per_cloud);
55
DECLARE_bool(durable_wal_write);
56
57
namespace yb {
58
namespace master {
59
60
// Constructs a MiniMaster for use in tests. No services are started here;
// call Start() / StartDistributedMaster*() to actually bring the master up.
//
// 'index' is the zero-based position of this master within a mini-cluster.
// It is stored one-based (index + 1); the server::TEST_RpcBindEndpoint() /
// TEST_RpcAddress() helpers below are invoked with this one-based value.
MiniMaster::MiniMaster(Env* env, string fs_root, uint16_t rpc_port, uint16_t web_port, int index)
    : running_(false),
      env_(env),
      fs_root_(std::move(fs_root)),
      rpc_port_(rpc_port),
      web_port_(web_port),
      index_(index + 1) {}
67
68
81
// Warns (but does not crash) if the master is destroyed while still running;
// tests are expected to call Shutdown() before letting a MiniMaster go out
// of scope.
MiniMaster::~MiniMaster() {
  if (running_) {
    LOG(WARNING) << "MiniMaster destructor called without clean shutdown for: "
                 << bound_rpc_addr_str();
  }
}
74
75
55
// Brings the master up on the ports chosen at construction time and blocks
// until its catalog manager has finished initializing.
//
// 'TEST_simulate_fs_create_failure' is forwarded into the corresponding
// gflag so tests can exercise the filesystem-creation failure path.
Status MiniMaster::Start(bool TEST_simulate_fs_create_failure) {
  CHECK(!running_);
  FLAGS_TEST_simulate_fs_create_failure = TEST_simulate_fs_create_failure;
  // Allow binding to ephemeral ports in tests.
  FLAGS_rpc_server_allow_ephemeral_ports = true;
  // Disable WAL fsync for tests.
  FLAGS_durable_wal_write = false;
  RETURN_NOT_OK(StartOnPorts(rpc_port_, web_port_));
  return master_->WaitForCatalogManagerInit();
}
84
85
86
464
// Starts this master as one member of a distributed (multi-master) cluster.
// 'peer_ports' lists the RPC ports of the masters forming the cluster.
// Unlike Start(), does not wait for catalog manager initialization.
Status MiniMaster::StartDistributedMaster(const vector<uint16_t>& peer_ports) {
  CHECK(!running_);
  return StartDistributedMasterOnPorts(rpc_port_, web_port_, peer_ports);
}
90
91
93
// Shuts down the tunnel and then the master, releasing both. Idempotent:
// safe to call when the master was never started or was already shut down.
void MiniMaster::Shutdown() {
  // Stop forwarding traffic before taking the master itself down.
  if (tunnel_) {
    tunnel_->Shutdown();
  }
  if (running_) {
    master_->Shutdown();
  }
  // Release resources only after both components are quiesced.
  tunnel_.reset();
  running_ = false;
  master_.reset();
}
102
103
55
// Builds MasterOptions for a single-node master and delegates to the
// three-argument StartOnPorts() overload. On failure the error is logged
// here and returned to the caller, which may retry.
Status MiniMaster::StartOnPorts(uint16_t rpc_port, uint16_t web_port) {
  CHECK(!running_);
  CHECK(!master_);

  auto addresses = std::make_shared<server::MasterAddresses>();
  if (pass_master_addresses_) {
    // Register our own bind endpoint as the sole master address.
    HostPort self;
    RETURN_NOT_OK(self.ParseString(
        server::TEST_RpcBindEndpoint(index_, rpc_port), rpc_port));
    addresses->push_back({self});
  }
  MasterOptions opts(addresses);

  Status status = StartOnPorts(rpc_port, web_port, &opts);
  if (!status.ok()) {
    LOG(ERROR) << "MiniMaster failed to start on RPC port " << rpc_port
               << ", web port " << web_port << ": " << status;
    // Don't crash here. Handle the error in the caller (e.g. could retry there).
  }
  return status;
}
124
125
// Configures 'opts' for this mini-master (bind address, web port, data/WAL
// paths, placement), starts the master asynchronously, and opens a tunnel
// forwarding the public broadcast endpoint to the private RPC bind endpoint.
Status MiniMaster::StartOnPorts(uint16_t rpc_port, uint16_t web_port,
                                MasterOptions* opts) {
  opts->rpc_opts.rpc_bind_addresses = server::TEST_RpcBindEndpoint(index_, rpc_port);
  opts->webserver_opts.port = web_port;
  // WAL and data both live under the single test-provided root directory.
  opts->fs_opts.wal_paths = { fs_root_ };
  opts->fs_opts.data_paths = { fs_root_ };
  // A.B.C.D.xip.io resolves to A.B.C.D so it is very useful for testing.
  opts->broadcast_addresses = {
      HostPort(server::TEST_RpcAddress(index_, server::Private::kFalse), rpc_port) };

  if (!opts->has_placement_cloud()) {
    // Assign synthetic cloud/rack/zone placement derived from our index.
    opts->SetPlacement(
        Format("cloud$0", (index_ + 1) / FLAGS_TEST_nodes_per_cloud),
        Format("rack$0", index_), "zone");
  }

  std::unique_ptr<Master> new_master = std::make_unique<enterprise::Master>(*opts);
  RETURN_NOT_OK(new_master->Init());

  server::TEST_SetupConnectivity(new_master->messenger(), index_);

  RETURN_NOT_OK(new_master->StartAsync());

  master_.swap(new_master);

  // Forward connections arriving at the broadcast (public) endpoint to the
  // endpoint the RPC server actually bound to.
  tunnel_ = std::make_unique<Tunnel>(&master_->messenger()->io_service());
  std::vector<Endpoint> resolved;
  RETURN_NOT_OK(opts->broadcast_addresses[0].ResolveAddresses(&resolved));
  Endpoint bind_endpoint = VERIFY_RESULT(ParseEndpoint(opts->rpc_opts.rpc_bind_addresses, 0));
  RETURN_NOT_OK(tunnel_->Start(resolved.front(), bind_endpoint));

  running_ = true;

  return Status::OK();
}
160
161
// Starts this master as a member of a multi-master cluster. Each peer is
// registered under both its private bind endpoint and its public test
// address; the TEST_* endpoint helpers are called with one-based indices.
Status MiniMaster::StartDistributedMasterOnPorts(uint16_t rpc_port, uint16_t web_port,
                                                 const vector<uint16_t>& peer_ports) {
  CHECK(!running_);
  CHECK(!master_);

  auto peers = std::make_shared<server::MasterAddresses>();
  if (pass_master_addresses_) {
    peers->resize(peer_ports.size());

    int peer_idx = 0;
    for (const uint16_t peer_port : peer_ports) {
      auto& host_ports = (*peers)[peer_idx];
      // Advance before use: the TEST_* helpers take one-based indices.
      ++peer_idx;
      host_ports.push_back(VERIFY_RESULT(HostPort::FromString(
          server::TEST_RpcBindEndpoint(peer_idx, peer_port), peer_port)));
      host_ports.push_back(VERIFY_RESULT(HostPort::FromString(
          server::TEST_RpcAddress(peer_idx, server::Private::kFalse), peer_port)));
    }
  }
  MasterOptions opts(peers);

  return StartOnPorts(rpc_port, web_port, &opts);
}
184
185
7
// Restarts a running master in place, reusing its previous RPC/HTTP ports
// and master-address list, then waits for catalog manager initialization.
Status MiniMaster::Restart() {
  CHECK(running_);

  // Capture everything needed to rebuild the options before shutting down.
  const auto old_rpc = bound_rpc_addr();
  const Endpoint old_http = bound_http_addr();
  auto addresses = master_->opts().GetMasterAddresses();
  Shutdown();

  MasterOptions opts(addresses);
  RETURN_NOT_OK(StartOnPorts(old_rpc.port(), old_http.port(), &opts));
  CHECK(running_);
  return WaitForCatalogManagerInit();
}
198
199
372
// Blocks until the catalog manager's test worker pool has drained and the
// master reports catalog manager initialization complete, in that order.
Status MiniMaster::WaitForCatalogManagerInit() {
  RETURN_NOT_OK(master_->catalog_manager()->WaitForWorkerPoolTests());
  return master_->WaitForCatalogManagerInit();
}
203
204
0
// Blocks until this master's catalog manager is the leader and ready to
// serve requests in tests. Thin forwarder to the underlying Master.
Status MiniMaster::WaitUntilCatalogManagerIsLeaderAndReadyForTests() {
  return master_->WaitUntilCatalogManagerIsLeaderAndReadyForTests();
}
207
208
912
// Returns the RPC endpoint the running master actually bound to.
// Requires the master to be running.
HostPort MiniMaster::bound_rpc_addr() const {
  CHECK(running_);
  const auto endpoint = master_->first_rpc_address();
  return HostPort::FromBoundEndpoint(endpoint);
}
212
213
9
// Returns the web-server (HTTP) endpoint of the running master.
// Requires the master to be running.
Endpoint MiniMaster::bound_http_addr() const {
  CHECK(running_);
  const auto endpoint = master_->first_http_address();
  return endpoint;
}
217
218
0
std::string MiniMaster::permanent_uuid() const {
219
0
  CHECK(master_);
220
0
  return DCHECK_NOTNULL(master_->fs_manager())->uuid();
221
0
}
222
223
69
std::string MiniMaster::bound_rpc_addr_str() const {
224
69
  return bound_rpc_addr().ToString();
225
69
}
226
227
67
// Returns the catalog manager interface of the wrapped master.
CatalogManagerIf& MiniMaster::catalog_manager() const {
  auto* cm = master_->catalog_manager();
  return *cm;
}
230
231
19
// Returns the concrete CatalogManager (rather than the CatalogManagerIf
// interface) for tests needing implementation-specific access.
CatalogManager& MiniMaster::catalog_manager_impl() const {
  auto* impl = master_->catalog_manager_impl();
  return *impl;
}
234
235
2
// Returns the catalog manager's tablet peer.
tablet::TabletPeerPtr MiniMaster::tablet_peer() const {
  auto& cm = catalog_manager();
  return cm.tablet_peer();
}
238
239
0
// Returns the RPC messenger of the wrapped master.
rpc::Messenger& MiniMaster::messenger() const {
  auto* msgr = master_->messenger();
  return *msgr;
}
242
243
0
// Returns the sys catalog table owned by the catalog manager.
master::SysCatalogTable& MiniMaster::sys_catalog() const {
  auto* table = catalog_manager().sys_catalog();
  return *table;
}
246
247
6.21k
// Returns the tablet-server manager of the wrapped master.
master::TSManager& MiniMaster::ts_manager() const {
  auto* manager = master_->ts_manager();
  return *manager;
}
250
251
0
// Returns the flush manager of the wrapped master.
master::FlushManager& MiniMaster::flush_manager() const {
  auto* manager = master_->flush_manager();
  return *manager;
}
254
255
0
// Returns the filesystem manager of the wrapped master.
FsManager& MiniMaster::fs_manager() const {
  auto* manager = master_->fs_manager();
  return *manager;
}
258
259
} // namespace master
260
} // namespace yb