/Users/deen/code/yugabyte-db/src/yb/tools/yb-admin_client.cc
Line | Count | Source (jump to first uncovered line) |
1 | | // Licensed to the Apache Software Foundation (ASF) under one |
2 | | // or more contributor license agreements. See the NOTICE file |
3 | | // distributed with this work for additional information |
4 | | // regarding copyright ownership. The ASF licenses this file |
5 | | // to you under the Apache License, Version 2.0 (the |
6 | | // "License"); you may not use this file except in compliance |
7 | | // with the License. You may obtain a copy of the License at |
8 | | // |
9 | | // http://www.apache.org/licenses/LICENSE-2.0 |
10 | | // |
11 | | // Unless required by applicable law or agreed to in writing, |
12 | | // software distributed under the License is distributed on an |
13 | | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
14 | | // KIND, either express or implied. See the License for the |
15 | | // specific language governing permissions and limitations |
16 | | // under the License. |
17 | | // |
18 | | // The following only applies to changes made to this file as part of YugaByte development. |
19 | | // |
20 | | // Portions Copyright (c) YugaByte, Inc. |
21 | | // |
22 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
23 | | // in compliance with the License. You may obtain a copy of the License at |
24 | | // |
25 | | // http://www.apache.org/licenses/LICENSE-2.0 |
26 | | // |
27 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
28 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
29 | | // or implied. See the License for the specific language governing permissions and limitations |
30 | | // under the License. |
31 | | // |
32 | | |
33 | | #include "yb/tools/yb-admin_client.h" |
34 | | |
35 | | #include <sstream> |
36 | | #include <type_traits> |
37 | | |
38 | | #include <boost/multi_index/composite_key.hpp> |
39 | | #include <boost/multi_index/global_fun.hpp> |
40 | | #include <boost/multi_index/ordered_index.hpp> |
41 | | #include <boost/multi_index_container.hpp> |
42 | | #include <boost/tti/has_member_function.hpp> |
43 | | #include <google/protobuf/util/json_util.h> |
44 | | #include <gtest/gtest.h> |
45 | | |
46 | | #include "yb/client/client.h" |
47 | | #include "yb/client/table.h" |
48 | | #include "yb/client/table_creator.h" |
49 | | #include "yb/client/table_alterer.h" |
50 | | #include "yb/client/table_info.h" |
51 | | |
52 | | #include "yb/common/json_util.h" |
53 | | #include "yb/common/redis_constants_common.h" |
54 | | #include "yb/common/transaction.h" |
55 | | #include "yb/common/wire_protocol.h" |
56 | | |
57 | | #include "yb/consensus/consensus.proxy.h" |
58 | | |
59 | | #include "yb/gutil/strings/join.h" |
60 | | #include "yb/gutil/strings/numbers.h" |
61 | | #include "yb/gutil/strings/split.h" |
62 | | |
63 | | #include "yb/master/master_admin.proxy.h" |
64 | | #include "yb/master/master_backup.proxy.h" |
65 | | #include "yb/master/master_client.proxy.h" |
66 | | #include "yb/master/master_cluster.proxy.h" |
67 | | #include "yb/master/master_ddl.proxy.h" |
68 | | #include "yb/master/master_encryption.proxy.h" |
69 | | #include "yb/master/master_replication.proxy.h" |
70 | | #include "yb/master/master_defaults.h" |
71 | | #include "yb/master/sys_catalog.h" |
72 | | |
73 | | #include "yb/rpc/messenger.h" |
74 | | #include "yb/rpc/proxy.h" |
75 | | |
76 | | #include "yb/tserver/tserver_admin.proxy.h" |
77 | | #include "yb/tserver/tserver_service.proxy.h" |
78 | | |
79 | | #include "yb/util/format.h" |
80 | | #include "yb/util/net/net_util.h" |
81 | | #include "yb/util/protobuf_util.h" |
82 | | #include "yb/util/random_util.h" |
83 | | #include "yb/util/status_format.h" |
84 | | #include "yb/util/stol_utils.h" |
85 | | #include "yb/util/string_case.h" |
86 | | #include "yb/util/string_util.h" |
87 | | |
88 | | DEFINE_bool(wait_if_no_leader_master, false, |
89 | | "When yb-admin connects to the cluster and no leader master is present, " |
90 | | "this flag determines if yb-admin should wait for the entire duration of timeout or" |
91 | | "in case a leader master appears in that duration or return error immediately."); |
92 | | |
93 | | DEFINE_string(certs_dir_name, "", |
94 | | "Directory with certificates to use for secure server connection."); |
95 | | |
96 | | DEFINE_string(client_node_name, "", "Client node name."); |
97 | | |
98 | | DEFINE_bool( |
99 | | disable_graceful_transition, false, |
100 | | "During a leader stepdown, disable graceful leadership transfer " |
101 | | "to an up to date peer"); |
102 | | |
103 | | // Maximum number of elements to dump on unexpected errors. |
104 | | static constexpr int MAX_NUM_ELEMENTS_TO_SHOW_ON_ERROR = 10; |
105 | | |
106 | | PB_ENUM_FORMATTERS(yb::PeerRole); |
107 | | PB_ENUM_FORMATTERS(yb::AppStatusPB::ErrorCode); |
108 | | PB_ENUM_FORMATTERS(yb::tablet::RaftGroupStatePB); |
109 | | |
110 | | namespace yb { |
111 | | namespace tools { |
112 | | |
113 | | using namespace std::literals; |
114 | | |
115 | | using std::cout; |
116 | | using std::endl; |
117 | | |
118 | | using google::protobuf::RepeatedPtrField; |
119 | | using google::protobuf::util::MessageToJsonString; |
120 | | |
121 | | using client::YBClientBuilder; |
122 | | using client::YBTableName; |
123 | | using rpc::MessengerBuilder; |
124 | | using rpc::RpcController; |
125 | | using strings::Substitute; |
126 | | using tserver::TabletServerServiceProxy; |
127 | | using tserver::TabletServerAdminServiceProxy; |
128 | | using tserver::UpgradeYsqlRequestPB; |
129 | | using tserver::UpgradeYsqlResponsePB; |
130 | | |
131 | | using consensus::ConsensusServiceProxy; |
132 | | using consensus::LeaderStepDownRequestPB; |
133 | | using consensus::LeaderStepDownResponsePB; |
134 | | using consensus::RaftPeerPB; |
135 | | using consensus::RunLeaderElectionRequestPB; |
136 | | using consensus::RunLeaderElectionResponsePB; |
137 | | |
138 | | using master::ListMastersRequestPB; |
139 | | using master::ListMastersResponsePB; |
140 | | using master::ListMasterRaftPeersRequestPB; |
141 | | using master::ListMasterRaftPeersResponsePB; |
142 | | using master::ListTabletServersRequestPB; |
143 | | using master::ListTabletServersResponsePB; |
144 | | using master::ListLiveTabletServersRequestPB; |
145 | | using master::ListLiveTabletServersResponsePB; |
146 | | using master::TabletLocationsPB; |
147 | | using master::TSInfoPB; |
148 | | |
149 | | namespace { |
150 | | |
151 | | static constexpr const char* kRpcHostPortHeading = "RPC Host/Port"; |
152 | | static constexpr const char* kDBTypePrefixUnknown = "unknown"; |
153 | | static constexpr const char* kDBTypePrefixCql = "ycql"; |
154 | | static constexpr const char* kDBTypePrefixYsql = "ysql"; |
155 | | static constexpr const char* kDBTypePrefixRedis = "yedis"; |
156 | | static constexpr const char* kTableIDPrefix = "tableid"; |
157 | | |
158 | | string FormatFirstHostPort( |
159 | 0 | const RepeatedPtrField<HostPortPB>& rpc_addresses) { |
160 | 0 | if (rpc_addresses.empty()) { |
161 | 0 | return "N/A"; |
162 | 0 | } else { |
163 | 0 | return HostPortPBToString(rpc_addresses.Get(0)); |
164 | 0 | } |
165 | 0 | } |
166 | | |
167 | 0 | string FormatDouble(double d, int precision = 2) { |
168 | 0 | std::ostringstream op_stream; |
169 | 0 | op_stream << std::fixed << std::setprecision(precision); |
170 | 0 | op_stream << d; |
171 | 0 | return op_stream.str(); |
172 | 0 | } |
173 | | |
174 | | const int kPartitionRangeColWidth = 56; |
175 | | const int kHostPortColWidth = 20; |
176 | | const int kTableNameColWidth = 48; |
177 | | const int kNumCharactersInUuid = 32; |
178 | | const int kLongColWidth = 15; |
179 | | const int kSmallColWidth = 8; |
180 | | const int kSleepTimeSec = 1; |
181 | | const int kNumberOfTryouts = 30; |
182 | | |
183 | | BOOST_TTI_HAS_MEMBER_FUNCTION(has_error) |
184 | | template<typename T> |
185 | | constexpr bool HasMemberFunctionHasError = has_member_function_has_error<const T, bool>::value; |
186 | | |
187 | | template<class Response> |
188 | | Result<Response> ResponseResult(Response&& response, |
189 | 48 | typename std::enable_if<HasMemberFunctionHasError<Response>, void*>::type = nullptr) { |
190 | | // Response has has_error method, use status from it |
191 | 48 | if(response.has_error()) { |
192 | 0 | return StatusFromPB(response.error().status()); |
193 | 0 | } |
194 | 48 | return std::move(response); |
195 | 48 | } Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master21ListMastersResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_9consensus27RunLeaderElectionResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_9consensus22ChangeConfigResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master25DumpMasterStateResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master28GetLoadMovePercentResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master28GetTabletLocationsResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Line | Count | Source | 189 | 15 | typename std::enable_if<HasMemberFunctionHasError<Response>, void*>::type = nullptr) { | 190 | | // Response has has_error method, use status from it | 191 | 15 | if(response.has_error()) { | 192 | 0 | return StatusFromPB(response.error().status()); | 193 | 0 | } | 194 | 15 | return std::move(response); | 195 | 15 | } |
Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master27ListTabletServersResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master37LaunchBackfillIndexForTableResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Line | Count | Source | 189 | 1 | typename std::enable_if<HasMemberFunctionHasError<Response>, void*>::type = nullptr) { | 190 | | // Response has has_error method, use status from it | 191 | 1 | if(response.has_error()) { | 192 | 0 | return StatusFromPB(response.error().status()); | 193 | 0 | } | 194 | 1 | return std::move(response); | 195 | 1 | } |
Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master33ChangeLoadBalancerStateResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master30GetLoadBalancerStateResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master25FlushSysCatalogResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master27CompactSysCatalogResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master35ChangeMasterClusterConfigResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Line | Count | Source | 189 | 16 | typename std::enable_if<HasMemberFunctionHasError<Response>, void*>::type = nullptr) { | 190 | | // Response has has_error method, use status from it | 191 | 16 | if(response.has_error()) { | 192 | 0 | return StatusFromPB(response.error().status()); | 193 | 0 | } | 194 | 16 | return std::move(response); | 195 | 16 | } |
Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master22IsInitDbDoneResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_7tserver21UpgradeYsqlResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master32GetMasterClusterConfigResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE Line | Count | Source | 189 | 16 | typename std::enable_if<HasMemberFunctionHasError<Response>, void*>::type = nullptr) { | 190 | | // Response has has_error method, use status from it | 191 | 16 | if(response.has_error()) { | 192 | 0 | return StatusFromPB(response.error().status()); | 193 | 0 | } | 194 | 16 | return std::move(response); | 195 | 16 | } |
Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_6master21SplitTabletResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIX25HasMemberFunctionHasErrorIS6_EEPvE4typeE |
196 | | |
197 | | template<class Response> |
198 | | Result<Response> ResponseResult(Response&& response, |
199 | 0 | typename std::enable_if<!HasMemberFunctionHasError<Response>, void*>::type = nullptr) { |
200 | | // Response has no has_error method, nothing to check |
201 | 0 | return std::move(response); |
202 | 0 | } Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_7tserver24GetLogLocationResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIXnt25HasMemberFunctionHasErrorIS6_EEPvE4typeE Unexecuted instantiation: yb-admin_client.cc:_ZN2yb5tools12_GLOBAL__N_114ResponseResultINS_7tserver36ListTabletsForTabletServerResponsePBEEENS_6ResultIT_EEOS6_NSt3__19enable_ifIXnt25HasMemberFunctionHasErrorIS6_EEPvE4typeE |
203 | | |
204 | 0 | const char* DatabasePrefix(YQLDatabase db) { |
205 | 0 | switch(db) { |
206 | 0 | case YQL_DATABASE_UNKNOWN: break; |
207 | 0 | case YQL_DATABASE_CQL: return kDBTypePrefixCql; |
208 | 0 | case YQL_DATABASE_PGSQL: return kDBTypePrefixYsql; |
209 | 0 | case YQL_DATABASE_REDIS: return kDBTypePrefixRedis; |
210 | 0 | } |
211 | 0 | CHECK(false) << "Unexpected db type " << db; |
212 | 0 | return kDBTypePrefixUnknown; |
213 | 0 | } |
214 | | |
215 | | Result<TypedNamespaceName> ResolveNamespaceName( |
216 | | const Slice& prefix, |
217 | | const Slice& name, |
218 | 1 | const YQLDatabase default_if_no_prefix = YQL_DATABASE_CQL) { |
219 | 1 | auto db_type = YQL_DATABASE_UNKNOWN; |
220 | 1 | if (!prefix.empty()) { |
221 | 1 | static const std::array<pair<const char*, YQLDatabase>, 3> type_prefixes{ |
222 | 1 | make_pair(kDBTypePrefixCql, YQL_DATABASE_CQL), |
223 | 1 | make_pair(kDBTypePrefixYsql, YQL_DATABASE_PGSQL), |
224 | 1 | make_pair(kDBTypePrefixRedis, YQL_DATABASE_REDIS)}; |
225 | 2 | for (const auto& p : type_prefixes) { |
226 | 2 | if (prefix == p.first) { |
227 | 1 | db_type = p.second; |
228 | 1 | break; |
229 | 1 | } |
230 | 2 | } |
231 | | |
232 | 1 | if (db_type == YQL_DATABASE_UNKNOWN) { |
233 | 0 | return STATUS_FORMAT(InvalidArgument, "Invalid db type name '$0'", prefix); |
234 | 0 | } |
235 | 0 | } else { |
236 | 0 | db_type = (name == common::kRedisKeyspaceName ? YQL_DATABASE_REDIS : default_if_no_prefix); |
237 | 0 | } |
238 | 1 | return TypedNamespaceName{.db_type = db_type, .name = name.cdata()}; |
239 | 1 | } |
240 | | |
241 | 0 | Slice GetTableIdAsSlice(const YBTableName& table_name) { |
242 | 0 | return table_name.table_id(); |
243 | 0 | } |
244 | | |
245 | 0 | Slice GetNamespaceIdAsSlice(const YBTableName& table_name) { |
246 | 0 | return table_name.namespace_id(); |
247 | 0 | } |
248 | | |
249 | 0 | Slice GetTableNameAsSlice(const YBTableName& table_name) { |
250 | 0 | return table_name.table_name(); |
251 | 0 | } |
252 | | |
253 | 0 | std::string FullNamespaceName(const master::NamespaceIdentifierPB& ns) { |
254 | 0 | return Format("$0.$1", DatabasePrefix(ns.database_type()), ns.name()); |
255 | 0 | } |
256 | | |
257 | | struct NamespaceKey { |
258 | | explicit NamespaceKey(const master::NamespaceIdentifierPB& ns) |
259 | 0 | : db_type(ns.database_type()), name(ns.name()) { |
260 | 0 | } |
261 | | |
262 | | NamespaceKey(YQLDatabase d, const Slice& n) |
263 | 0 | : db_type(d), name(n) { |
264 | 0 | } |
265 | | |
266 | | YQLDatabase db_type; |
267 | | Slice name; |
268 | | }; |
269 | | |
270 | | struct NamespaceComparator { |
271 | | using is_transparent = void; |
272 | | |
273 | | bool operator()(const master::NamespaceIdentifierPB& lhs, |
274 | 0 | const master::NamespaceIdentifierPB& rhs) const { |
275 | 0 | return (*this)(NamespaceKey(lhs), NamespaceKey(rhs)); |
276 | 0 | } |
277 | | |
278 | 0 | bool operator()(const master::NamespaceIdentifierPB& lhs, const NamespaceKey& rhs) const { |
279 | 0 | return (*this)(NamespaceKey(lhs), rhs); |
280 | 0 | } |
281 | | |
282 | 0 | bool operator()(const NamespaceKey& lhs, const master::NamespaceIdentifierPB& rhs) const { |
283 | 0 | return (*this)(lhs, NamespaceKey(rhs)); |
284 | 0 | } |
285 | | |
286 | 0 | bool operator()(const NamespaceKey& lhs, const NamespaceKey& rhs) const { |
287 | 0 | return lhs.db_type < rhs.db_type || |
288 | 0 | (lhs.db_type == rhs.db_type && lhs.name.compare(rhs.name) < 0); |
289 | 0 | } |
290 | | }; |
291 | | |
292 | | struct DotStringParts { |
293 | | Slice prefix; |
294 | | Slice value; |
295 | | }; |
296 | | |
297 | 1 | DotStringParts SplitByDot(const std::string& str) { |
298 | 1 | const size_t dot_pos = str.find('.'); |
299 | 1 | DotStringParts result{.prefix = Slice(), .value = str}; |
300 | 1 | if (dot_pos != string::npos) { |
301 | 1 | result.prefix = Slice(str.data(), dot_pos); |
302 | 1 | result.value.remove_prefix(dot_pos + 1); |
303 | 1 | } |
304 | 1 | return result; |
305 | 1 | } |
306 | | |
307 | | } // anonymous namespace |
308 | | |
309 | | class TableNameResolver::Impl { |
310 | | public: |
311 | | struct TableIdTag; |
312 | | struct TableNameTag; |
313 | | using Values = std::vector<client::YBTableName>; |
314 | | |
315 | | Impl(std::vector<YBTableName> tables, vector<master::NamespaceIdentifierPB> namespaces) |
316 | 0 | : current_namespace_(nullptr) { |
317 | 0 | std::move(tables.begin(), tables.end(), std::inserter(tables_, tables_.end())); |
318 | 0 | std::move(namespaces.begin(), namespaces.end(), std::inserter(namespaces_, namespaces_.end())); |
319 | 0 | } |
320 | | |
321 | 0 | Result<bool> Feed(const std::string& str) { |
322 | 0 | const auto result = FeedImpl(str); |
323 | 0 | if (!result.ok()) { |
324 | 0 | current_namespace_ = nullptr; |
325 | 0 | } |
326 | 0 | return result; |
327 | 0 | } |
328 | | |
329 | 0 | Values& values() { |
330 | 0 | return values_; |
331 | 0 | } |
332 | | |
333 | 0 | master::NamespaceIdentifierPB last_namespace() { |
334 | 0 | if (!current_namespace_) { |
335 | 0 | return master::NamespaceIdentifierPB(); |
336 | 0 | } |
337 | 0 | return *current_namespace_; |
338 | 0 | } |
339 | | |
340 | | private: |
341 | 0 | Result<bool> FeedImpl(const std::string& str) { |
342 | 0 | auto parts = SplitByDot(str); |
343 | 0 | if (parts.prefix == kTableIDPrefix) { |
344 | 0 | RETURN_NOT_OK(ProcessTableId(parts.value)); |
345 | 0 | return true; |
346 | 0 | } else { |
347 | 0 | if (!current_namespace_) { |
348 | 0 | RETURN_NOT_OK(ProcessNamespace(parts.prefix, parts.value)); |
349 | 0 | } else { |
350 | 0 | if (parts.prefix.empty()) { |
351 | 0 | RETURN_NOT_OK(ProcessTableName(parts.value)); |
352 | 0 | return true; |
353 | 0 | } |
354 | 0 | return STATUS(InvalidArgument, "Wrong table name " + str); |
355 | 0 | } |
356 | 0 | } |
357 | 0 | return false; |
358 | 0 | } |
359 | | |
360 | 0 | CHECKED_STATUS ProcessNamespace(const Slice& prefix, const Slice& value) { |
361 | 0 | DCHECK(!current_namespace_); |
362 | 0 | const auto ns = VERIFY_RESULT(ResolveNamespaceName(prefix, value)); |
363 | 0 | const auto i = namespaces_.find(NamespaceKey(ns.db_type, ns.name)); |
364 | 0 | if (i != namespaces_.end()) { |
365 | 0 | current_namespace_ = &*i; |
366 | 0 | return Status::OK(); |
367 | 0 | } |
368 | 0 | return STATUS_FORMAT( |
369 | 0 | InvalidArgument, "Namespace '$0' of type '$1' not found", |
370 | 0 | ns.name, DatabasePrefix(ns.db_type)); |
371 | 0 | } |
372 | | |
373 | 0 | CHECKED_STATUS ProcessTableId(const Slice& table_id) { |
374 | 0 | const auto& idx = tables_.get<TableIdTag>(); |
375 | 0 | const auto i = idx.find(table_id); |
376 | 0 | if (i == idx.end()) { |
377 | 0 | return STATUS_FORMAT(InvalidArgument, "Table with id '$0' not found", table_id); |
378 | 0 | } |
379 | 0 | if (current_namespace_ && current_namespace_->id() != i->namespace_id()) { |
380 | 0 | return STATUS_FORMAT( |
381 | 0 | InvalidArgument, "Table with id '$0' belongs to different namespace '$1'", |
382 | 0 | table_id, FullNamespaceName(*current_namespace_)); |
383 | 0 | } |
384 | 0 | AppendTable(*i); |
385 | 0 | return Status::OK(); |
386 | 0 | } |
387 | | |
388 | 0 | CHECKED_STATUS ProcessTableName(const Slice& table_name) { |
389 | 0 | DCHECK(current_namespace_); |
390 | 0 | const auto& idx = tables_.get<TableNameTag>(); |
391 | 0 | const auto key = boost::make_tuple(Slice(current_namespace_->id()), table_name); |
392 | | // For some reason idx.equal_range(key) failed to compile. |
393 | 0 | const auto range = std::make_pair(idx.lower_bound(key), idx.upper_bound(key)); |
394 | 0 | switch (std::distance(range.first, range.second)) { |
395 | 0 | case 0: |
396 | 0 | return STATUS_FORMAT( |
397 | 0 | InvalidArgument, "Table with name '$0' not found in namespace '$1'", |
398 | 0 | table_name, FullNamespaceName(*current_namespace_)); |
399 | 0 | case 1: |
400 | 0 | AppendTable(*range.first); |
401 | 0 | return Status::OK(); |
402 | 0 | default: |
403 | 0 | return STATUS_FORMAT( |
404 | 0 | InvalidArgument, |
405 | 0 | "Namespace '$0' has multiple tables named '$1', specify table id instead", |
406 | 0 | FullNamespaceName(*current_namespace_), table_name); |
407 | 0 | } |
408 | 0 | } |
409 | | |
410 | 0 | void AppendTable(const YBTableName& table) { |
411 | 0 | current_namespace_ = nullptr; |
412 | 0 | values_.push_back(table); |
413 | 0 | } |
414 | | |
415 | | using TableContainer = boost::multi_index_container<YBTableName, |
416 | | boost::multi_index::indexed_by< |
417 | | boost::multi_index::ordered_unique< |
418 | | boost::multi_index::tag<TableIdTag>, |
419 | | boost::multi_index::global_fun<const YBTableName&, Slice, &GetTableIdAsSlice>, |
420 | | Slice::Comparator |
421 | | >, |
422 | | boost::multi_index::ordered_non_unique< |
423 | | boost::multi_index::tag<TableNameTag>, |
424 | | boost::multi_index::composite_key< |
425 | | YBTableName, |
426 | | boost::multi_index::global_fun<const YBTableName&, Slice, &GetNamespaceIdAsSlice>, |
427 | | boost::multi_index::global_fun<const YBTableName&, Slice, &GetTableNameAsSlice> |
428 | | >, |
429 | | boost::multi_index::composite_key_compare< |
430 | | Slice::Comparator, |
431 | | Slice::Comparator |
432 | | > |
433 | | > |
434 | | > |
435 | | >; |
436 | | |
437 | | TableContainer tables_; |
438 | | std::set<master::NamespaceIdentifierPB, NamespaceComparator> namespaces_; |
439 | | const master::NamespaceIdentifierPB* current_namespace_; |
440 | | Values values_; |
441 | | }; |
442 | | |
443 | | TableNameResolver::TableNameResolver(std::vector<client::YBTableName> tables, |
444 | | std::vector<master::NamespaceIdentifierPB> namespaces) |
445 | 0 | : impl_(new Impl(std::move(tables), std::move(namespaces))) { |
446 | 0 | } |
447 | | |
448 | 0 | TableNameResolver::TableNameResolver(TableNameResolver&&) = default; |
449 | | |
450 | 0 | TableNameResolver::~TableNameResolver() = default; |
451 | | |
452 | 0 | Result<bool> TableNameResolver::Feed(const std::string& value) { |
453 | 0 | return impl_->Feed(value); |
454 | 0 | } |
455 | | |
456 | 0 | std::vector<client::YBTableName>& TableNameResolver::values() { |
457 | 0 | return impl_->values(); |
458 | 0 | } |
459 | | |
460 | 0 | master::NamespaceIdentifierPB TableNameResolver::last_namespace() { |
461 | 0 | return impl_->last_namespace(); |
462 | 0 | } |
463 | | |
464 | | ClusterAdminClient::ClusterAdminClient(string addrs, MonoDelta timeout) |
465 | | : master_addr_list_(std::move(addrs)), |
466 | | timeout_(timeout), |
467 | 27 | initted_(false) {} |
468 | | |
469 | | ClusterAdminClient::ClusterAdminClient(const HostPort& init_master_addr, MonoDelta timeout) |
470 | | : init_master_addr_(init_master_addr), |
471 | | timeout_(timeout), |
472 | 0 | initted_(false) {} |
473 | | |
474 | 22 | ClusterAdminClient::~ClusterAdminClient() { |
475 | 22 | if (messenger_) { |
476 | 22 | messenger_->Shutdown(); |
477 | 22 | } |
478 | 22 | } |
479 | | |
480 | | Status ClusterAdminClient::DiscoverAllMasters( |
481 | | const HostPort& init_master_addr, |
482 | 0 | std::string* all_master_addrs) { |
483 | |
|
484 | 0 | master::MasterClusterProxy proxy(proxy_cache_.get(), init_master_addr); |
485 | |
|
486 | 0 | VLOG(0) << "Initializing master leader list from single master at " |
487 | 0 | << init_master_addr.ToString(); |
488 | 0 | const auto list_resp = VERIFY_RESULT(InvokeRpc( |
489 | 0 | &master::MasterClusterProxy::ListMasters, proxy, ListMastersRequestPB())); |
490 | 0 | if (list_resp.masters().empty()) { |
491 | 0 | return STATUS(NotFound, "no masters found"); |
492 | 0 | } |
493 | | |
494 | 0 | std::vector<std::string> addrs; |
495 | 0 | for (const auto& master : list_resp.masters()) { |
496 | 0 | if (!master.has_registration()) { |
497 | 0 | LOG(WARNING) << master.instance_id().permanent_uuid() << " has no registration."; |
498 | 0 | continue; |
499 | 0 | } |
500 | | |
501 | 0 | if (master.registration().broadcast_addresses_size() > 0) { |
502 | 0 | addrs.push_back(FormatFirstHostPort(master.registration().broadcast_addresses())); |
503 | 0 | } else if (master.registration().private_rpc_addresses_size() > 0) { |
504 | 0 | addrs.push_back(FormatFirstHostPort(master.registration().private_rpc_addresses())); |
505 | 0 | } else { |
506 | 0 | LOG(WARNING) << master.instance_id().permanent_uuid() << " has no rpc/broadcast address."; |
507 | 0 | continue; |
508 | 0 | } |
509 | 0 | } |
510 | |
|
511 | 0 | if (addrs.empty()) { |
512 | 0 | return STATUS(NotFound, "no masters found"); |
513 | 0 | } |
514 | | |
515 | 0 | JoinStrings(addrs, ",", all_master_addrs); |
516 | 0 | VLOG(0) << "Discovered full master list: " << *all_master_addrs; |
517 | 0 | return Status::OK(); |
518 | 0 | } |
519 | | |
520 | 27 | Status ClusterAdminClient::Init() { |
521 | 27 | CHECK(!initted_); |
522 | | |
523 | | // Check if caller will initialize the client and related parts. |
524 | 27 | rpc::MessengerBuilder messenger_builder("yb-admin"); |
525 | 27 | if (!FLAGS_certs_dir_name.empty()) { |
526 | 0 | LOG(INFO) << "Built secure client using certs dir " << FLAGS_certs_dir_name; |
527 | 0 | const auto& cert_name = FLAGS_client_node_name; |
528 | 0 | secure_context_ = VERIFY_RESULT(server::CreateSecureContext( |
529 | 0 | FLAGS_certs_dir_name, server::UseClientCerts(!cert_name.empty()), cert_name)); |
530 | 0 | server::ApplySecureContext(secure_context_.get(), &messenger_builder); |
531 | 0 | } |
532 | | |
533 | 27 | messenger_ = VERIFY_RESULT(messenger_builder.Build()); |
534 | 27 | proxy_cache_ = std::make_unique<rpc::ProxyCache>(messenger_.get()); |
535 | | |
536 | 27 | if (!init_master_addr_.host().empty()) { |
537 | 0 | RETURN_NOT_OK(DiscoverAllMasters(init_master_addr_, &master_addr_list_)); |
538 | 0 | } |
539 | | |
540 | 27 | yb_client_ = VERIFY_RESULT(YBClientBuilder() |
541 | 27 | .add_master_server_addr(master_addr_list_) |
542 | 27 | .default_admin_operation_timeout(timeout_) |
543 | 27 | .wait_for_leader_election_on_init(FLAGS_wait_if_no_leader_master) |
544 | 27 | .Build(messenger_.get())); |
545 | | |
546 | 27 | ResetMasterProxy(); |
547 | | |
548 | 27 | initted_ = true; |
549 | 27 | return Status::OK(); |
550 | 27 | } |
551 | | |
552 | 27 | void ClusterAdminClient::ResetMasterProxy(const HostPort& leader_addr) { |
553 | | // Find the leader master's socket info to set up the master proxy. |
554 | 27 | if (leader_addr.host().empty()) { |
555 | 27 | leader_addr_ = yb_client_->GetMasterLeaderAddress(); |
556 | 0 | } else { |
557 | 0 | leader_addr_ = leader_addr; |
558 | 0 | } |
559 | | |
560 | 27 | master_admin_proxy_ = std::make_unique<master::MasterAdminProxy>( |
561 | 27 | proxy_cache_.get(), leader_addr_); |
562 | | |
563 | 27 | master_backup_proxy_ = std::make_unique<master::MasterBackupProxy>( |
564 | 27 | proxy_cache_.get(), leader_addr_); |
565 | | |
566 | 27 | master_client_proxy_ = std::make_unique<master::MasterClientProxy>( |
567 | 27 | proxy_cache_.get(), leader_addr_); |
568 | | |
569 | 27 | master_cluster_proxy_ = std::make_unique<master::MasterClusterProxy>( |
570 | 27 | proxy_cache_.get(), leader_addr_); |
571 | | |
572 | 27 | master_ddl_proxy_ = std::make_unique<master::MasterDdlProxy>( |
573 | 27 | proxy_cache_.get(), leader_addr_); |
574 | | |
575 | 27 | master_encryption_proxy_ = std::make_unique<master::MasterEncryptionProxy>( |
576 | 27 | proxy_cache_.get(), leader_addr_); |
577 | | |
578 | 27 | master_replication_proxy_ = std::make_unique<master::MasterReplicationProxy>( |
579 | 27 | proxy_cache_.get(), leader_addr_); |
580 | 27 | } |
581 | | |
582 | | Status ClusterAdminClient::MasterLeaderStepDown( |
583 | | const string& leader_uuid, |
584 | 0 | const string& new_leader_uuid) { |
585 | 0 | auto master_proxy = std::make_unique<ConsensusServiceProxy>(proxy_cache_.get(), leader_addr_); |
586 | |
|
587 | 0 | return LeaderStepDown(leader_uuid, yb::master::kSysCatalogTabletId, |
588 | 0 | new_leader_uuid, &master_proxy); |
589 | 0 | } |
590 | | |
591 | | CHECKED_STATUS ClusterAdminClient::LeaderStepDownWithNewLeader( |
592 | | const std::string& tablet_id, |
593 | 0 | const std::string& dest_ts_uuid) { |
594 | 0 | return LeaderStepDown( |
595 | 0 | /* leader_uuid */ std::string(), |
596 | 0 | tablet_id, |
597 | 0 | dest_ts_uuid, |
598 | 0 | /* leader_proxy */ nullptr); |
599 | 0 | } |
600 | | |
601 | | Status ClusterAdminClient::LeaderStepDown( |
602 | | const PeerId& leader_uuid, |
603 | | const TabletId& tablet_id, |
604 | | const PeerId& new_leader_uuid, |
605 | 0 | std::unique_ptr<ConsensusServiceProxy>* leader_proxy) { |
606 | 0 | LeaderStepDownRequestPB req; |
607 | 0 | req.set_tablet_id(tablet_id); |
608 | 0 | if (!new_leader_uuid.empty()) { |
609 | 0 | req.set_new_leader_uuid(new_leader_uuid); |
610 | 0 | } else { |
611 | 0 | if (FLAGS_disable_graceful_transition) { |
612 | 0 | req.set_disable_graceful_transition(true); |
613 | 0 | } |
614 | 0 | } |
615 | | // The API for InvokeRpcNoResponseCheck requires a raw pointer to a ConsensusServiceProxy, so |
616 | | // cache it outside, if we are creating a new proxy to a previously unknown leader. |
617 | 0 | std::unique_ptr<ConsensusServiceProxy> new_proxy; |
618 | 0 | if (!leader_uuid.empty()) { |
619 | | // TODO: validate leader_proxy ? |
620 | 0 | req.set_dest_uuid(leader_uuid); |
621 | 0 | } else { |
622 | | // Look up the location of the tablet leader from the Master. |
623 | 0 | HostPort leader_addr; |
624 | 0 | string lookup_leader_uuid; |
625 | 0 | RETURN_NOT_OK(SetTabletPeerInfo(tablet_id, LEADER, &lookup_leader_uuid, &leader_addr)); |
626 | 0 | req.set_dest_uuid(lookup_leader_uuid); |
627 | 0 | new_proxy = std::make_unique<ConsensusServiceProxy>(proxy_cache_.get(), leader_addr); |
628 | 0 | } |
629 | 0 | VLOG(2) << "Sending request " << req.DebugString() << " to node with uuid [" << leader_uuid |
630 | 0 | << "]"; |
631 | 0 | const auto resp = VERIFY_RESULT(InvokeRpcNoResponseCheck(&ConsensusServiceProxy::LeaderStepDown, |
632 | 0 | *(new_proxy ? new_proxy.get() : leader_proxy->get()), |
633 | 0 | req)); |
634 | 0 | if (resp.has_error()) { |
635 | 0 | LOG(ERROR) << "LeaderStepDown for " << leader_uuid << "received error " |
636 | 0 | << resp.error().ShortDebugString(); |
637 | 0 | return StatusFromPB(resp.error().status()); |
638 | 0 | } |
639 | 0 | return Status::OK(); |
640 | 0 | } |
641 | | |
642 | | // Force start an election on a randomly chosen non-leader peer of this tablet's raft quorum. |
643 | 0 | Status ClusterAdminClient::StartElection(const TabletId& tablet_id) { |
644 | 0 | HostPort non_leader_addr; |
645 | 0 | string non_leader_uuid; |
646 | 0 | RETURN_NOT_OK(SetTabletPeerInfo(tablet_id, FOLLOWER, &non_leader_uuid, &non_leader_addr)); |
647 | 0 | ConsensusServiceProxy non_leader_proxy(proxy_cache_.get(), non_leader_addr); |
648 | 0 | RunLeaderElectionRequestPB req; |
649 | 0 | req.set_dest_uuid(non_leader_uuid); |
650 | 0 | req.set_tablet_id(tablet_id); |
651 | 0 | return ResultToStatus(InvokeRpc( |
652 | 0 | &ConsensusServiceProxy::RunLeaderElection, non_leader_proxy, req)); |
653 | 0 | } |
654 | | |
655 | | // Look up the location of the tablet server leader or non-leader peer from the leader master |
656 | | Status ClusterAdminClient::SetTabletPeerInfo( |
657 | | const TabletId& tablet_id, |
658 | | PeerMode mode, |
659 | | PeerId* peer_uuid, |
660 | 0 | HostPort* peer_addr) { |
661 | 0 | TSInfoPB peer_ts_info; |
662 | 0 | RETURN_NOT_OK(GetTabletPeer(tablet_id, mode, &peer_ts_info)); |
663 | 0 | auto rpc_addresses = peer_ts_info.private_rpc_addresses(); |
664 | 0 | CHECK_GT(rpc_addresses.size(), 0) << peer_ts_info |
665 | 0 | .ShortDebugString(); |
666 | |
|
667 | 0 | *peer_addr = HostPortFromPB(rpc_addresses.Get(0)); |
668 | 0 | *peer_uuid = peer_ts_info.permanent_uuid(); |
669 | 0 | return Status::OK(); |
670 | 0 | } |
671 | | |
672 | | CHECKED_STATUS ClusterAdminClient::SetWalRetentionSecs( |
673 | | const YBTableName& table_name, |
674 | 0 | const uint32_t wal_ret_secs) { |
675 | 0 | auto alterer = yb_client_->NewTableAlterer(table_name); |
676 | 0 | RETURN_NOT_OK(alterer->SetWalRetentionSecs(wal_ret_secs)->Alter()); |
677 | 0 | cout << "Set table " << table_name.table_name() << " WAL retention time to " << wal_ret_secs |
678 | 0 | << " seconds." << endl; |
679 | 0 | return Status::OK(); |
680 | 0 | } |
681 | | |
682 | 0 | CHECKED_STATUS ClusterAdminClient::GetWalRetentionSecs(const YBTableName& table_name) { |
683 | 0 | const auto info = VERIFY_RESULT(yb_client_->GetYBTableInfo(table_name)); |
684 | 0 | if (!info.wal_retention_secs) { |
685 | 0 | cout << "WAL retention time not set for table " << table_name.table_name() << endl; |
686 | 0 | } else { |
687 | 0 | cout << "Found WAL retention time for table " << table_name.table_name() << ": " |
688 | 0 | << info.wal_retention_secs.get() << " seconds" << endl; |
689 | 0 | } |
690 | 0 | return Status::OK(); |
691 | 0 | } |
692 | | |
693 | | Status ClusterAdminClient::ParseChangeType( |
694 | | const string& change_type, |
695 | 0 | consensus::ChangeConfigType* cc_type) { |
696 | 0 | consensus::ChangeConfigType cctype = consensus::UNKNOWN_CHANGE; |
697 | 0 | *cc_type = cctype; |
698 | 0 | string uppercase_change_type; |
699 | 0 | ToUpperCase(change_type, &uppercase_change_type); |
700 | 0 | if (!consensus::ChangeConfigType_Parse(uppercase_change_type, &cctype) || |
701 | 0 | cctype == consensus::UNKNOWN_CHANGE) { |
702 | 0 | return STATUS(InvalidArgument, "Unsupported change_type", change_type); |
703 | 0 | } |
704 | | |
705 | 0 | *cc_type = cctype; |
706 | |
|
707 | 0 | return Status::OK(); |
708 | 0 | } |
709 | | |
// Apply a raft config change (e.g. ADD_SERVER / REMOVE_SERVER) for
// |peer_uuid| on the raft group of |tablet_id|.
//
// For ADD_SERVER, |member_type| is required and must be PRE_VOTER or
// PRE_OBSERVER, and the peer's RPC address is resolved via the master.
// If the peer being removed is the current leader, it is first asked to step
// down and the request is sent to the newly elected leader instead.
Status ClusterAdminClient::ChangeConfig(
    const TabletId& tablet_id,
    const string& change_type,
    const PeerId& peer_uuid,
    const boost::optional<string>& member_type) {
  CHECK(initted_);

  consensus::ChangeConfigType cc_type;
  RETURN_NOT_OK(ParseChangeType(change_type, &cc_type));

  RaftPeerPB peer_pb;
  peer_pb.set_permanent_uuid(peer_uuid);

  // Parse the optional fields.
  if (member_type) {
    consensus::PeerMemberType member_type_val;
    string uppercase_member_type;
    ToUpperCase(*member_type, &uppercase_member_type);
    if (!PeerMemberType_Parse(uppercase_member_type, &member_type_val)) {
      return STATUS(InvalidArgument, "Unrecognized member_type", *member_type);
    }
    // Only the transitional PRE_* roles may be requested; raft itself promotes
    // them to VOTER/OBSERVER once they are caught up.
    if (member_type_val != consensus::PeerMemberType::PRE_VOTER &&
        member_type_val != consensus::PeerMemberType::PRE_OBSERVER) {
      return STATUS(InvalidArgument, "member_type should be PRE_VOTER or PRE_OBSERVER");
    }
    peer_pb.set_member_type(member_type_val);
  }

  // Validate the existence of the optional fields.
  if (!member_type && cc_type == consensus::ADD_SERVER) {
    return STATUS(InvalidArgument, "Must specify member_type when adding a server.");
  }

  // Look up RPC address of peer if adding as a new server.
  if (cc_type == consensus::ADD_SERVER) {
    HostPort host_port = VERIFY_RESULT(GetFirstRpcAddressForTS(peer_uuid));
    HostPortToPB(host_port, peer_pb.mutable_last_known_private_addr()->Add());
  }

  // Look up the location of the tablet leader from the Master.
  HostPort leader_addr;
  string leader_uuid;
  RETURN_NOT_OK(SetTabletPeerInfo(tablet_id, LEADER, &leader_uuid, &leader_addr));

  auto consensus_proxy = std::make_unique<ConsensusServiceProxy>(proxy_cache_.get(), leader_addr);
  // If removing the leader ts, then first make it step down and that
  // starts an election and gets a new leader ts.
  if (cc_type == consensus::REMOVE_SERVER &&
      leader_uuid == peer_uuid) {
    string old_leader_uuid = leader_uuid;
    RETURN_NOT_OK(LeaderStepDown(
        leader_uuid, tablet_id, /* new_leader_uuid */ std::string(), &consensus_proxy));
    sleep(5);  // TODO - election completion timing is not known accurately
    // Re-resolve the leader; it must have changed for the removal to proceed.
    RETURN_NOT_OK(SetTabletPeerInfo(tablet_id, LEADER, &leader_uuid, &leader_addr));
    if (leader_uuid == old_leader_uuid) {
      return STATUS(ConfigurationError,
                    "Old tablet server leader same as new even after re-election!");
    }
    // Point the proxy at the newly elected leader before sending the change.
    consensus_proxy.reset(new ConsensusServiceProxy(proxy_cache_.get(), leader_addr));
  }

  consensus::ChangeConfigRequestPB req;
  req.set_dest_uuid(leader_uuid);
  req.set_tablet_id(tablet_id);
  req.set_type(cc_type);
  *req.mutable_server() = peer_pb;
  return ResultToStatus(InvokeRpc(
      &ConsensusServiceProxy::ChangeConfig, *consensus_proxy, req));
}
779 | | |
780 | 0 | Result<std::string> ClusterAdminClient::GetMasterLeaderUuid() { |
781 | 0 | std::string leader_uuid; |
782 | 0 | const auto list_resp = VERIFY_RESULT_PREPEND( |
783 | 0 | InvokeRpc( |
784 | 0 | &master::MasterClusterProxy::ListMasters, *master_cluster_proxy_, |
785 | 0 | ListMastersRequestPB()), |
786 | 0 | "Could not locate master leader"); |
787 | 0 | for (const auto& master : list_resp.masters()) { |
788 | 0 | if (master.role() == PeerRole::LEADER) { |
789 | 0 | SCHECK( |
790 | 0 | leader_uuid.empty(), ConfigurationError, "Found two LEADER's in the same raft config."); |
791 | 0 | leader_uuid = master.instance_id().permanent_uuid(); |
792 | 0 | } |
793 | 0 | } |
794 | 0 | SCHECK(!leader_uuid.empty(), ConfigurationError, "Could not locate master leader!"); |
795 | 0 | return std::move(leader_uuid); |
796 | 0 | } |
797 | | |
798 | 0 | Status ClusterAdminClient::DumpMasterState(bool to_console) { |
799 | 0 | CHECK(initted_); |
800 | 0 | master::DumpMasterStateRequestPB req; |
801 | 0 | req.set_peers_also(true); |
802 | 0 | req.set_on_disk(true); |
803 | 0 | req.set_return_dump_as_string(to_console); |
804 | |
|
805 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
806 | 0 | &master::MasterClusterProxy::DumpState, *master_cluster_proxy_, req)); |
807 | |
|
808 | 0 | if (to_console) { |
809 | 0 | cout << resp.dump() << endl; |
810 | 0 | } else { |
811 | 0 | cout << "Master state dump has been completed and saved into " |
812 | 0 | "the master respective log files." << endl; |
813 | 0 | } |
814 | 0 | return Status::OK(); |
815 | 0 | } |
816 | | |
817 | 0 | Status ClusterAdminClient::GetLoadMoveCompletion() { |
818 | 0 | CHECK(initted_); |
819 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
820 | 0 | &master::MasterClusterProxy::GetLoadMoveCompletion, *master_cluster_proxy_, |
821 | 0 | master::GetLoadMovePercentRequestPB())); |
822 | 0 | cout << "Percent complete = " << resp.percent() << " : " |
823 | 0 | << resp.remaining() << " remaining out of " << resp.total() << endl; |
824 | 0 | return Status::OK(); |
825 | 0 | } |
826 | | |
827 | 0 | Status ClusterAdminClient::GetLeaderBlacklistCompletion() { |
828 | 0 | CHECK(initted_); |
829 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
830 | 0 | &master::MasterClusterProxy::GetLeaderBlacklistCompletion, *master_cluster_proxy_, |
831 | 0 | master::GetLeaderBlacklistPercentRequestPB())); |
832 | 0 | cout << "Percent complete = " << resp.percent() << " : " |
833 | 0 | << resp.remaining() << " remaining out of " << resp.total() << endl; |
834 | 0 | return Status::OK(); |
835 | 0 | } |
836 | | |
837 | 0 | Status ClusterAdminClient::GetIsLoadBalancerIdle() { |
838 | 0 | CHECK(initted_); |
839 | |
|
840 | 0 | const bool is_idle = VERIFY_RESULT(yb_client_->IsLoadBalancerIdle()); |
841 | 0 | cout << "Idle = " << is_idle << endl; |
842 | 0 | return Status::OK(); |
843 | 0 | } |
844 | | |
845 | 0 | Status ClusterAdminClient::ListLeaderCounts(const YBTableName& table_name) { |
846 | 0 | std::unordered_map<string, int> leader_counts = VERIFY_RESULT(GetLeaderCounts(table_name)); |
847 | 0 | int total_leader_count = 0; |
848 | 0 | for (const auto& lc : leader_counts) { total_leader_count += lc.second; } |
849 | | |
850 | | // Calculate the standard deviation and adjusted deviation percentage according to the best and |
851 | | // worst-case scenarios. Best-case distribution is when leaders are evenly distributed and |
852 | | // worst-case is when leaders are all on one tablet server. |
853 | | // For example, say we have 16 leaders on 3 tablet servers: |
854 | | // Leader distribution: 7 5 4 |
855 | | // Best-case scenario: 6 5 5 |
856 | | // Worst-case scenario: 12 0 0 |
857 | | // Standard deviation: 1.24722 |
858 | | // Adjusted deviation %: 10.9717% |
859 | 0 | vector<double> leader_dist, best_case, worst_case; |
860 | 0 | cout << RightPadToUuidWidth("Server UUID") << kColumnSep << "Leader Count" << endl; |
861 | 0 | for (const auto& leader_count : leader_counts) { |
862 | 0 | cout << leader_count.first << kColumnSep << leader_count.second << endl; |
863 | 0 | leader_dist.push_back(leader_count.second); |
864 | 0 | } |
865 | |
|
866 | 0 | if (!leader_dist.empty()) { |
867 | 0 | for (size_t i = 0; i < leader_dist.size(); ++i) { |
868 | 0 | best_case.push_back(total_leader_count / leader_dist.size()); |
869 | 0 | worst_case.push_back(0); |
870 | 0 | } |
871 | 0 | for (size_t i = 0; i < total_leader_count % leader_dist.size(); ++i) { |
872 | 0 | ++best_case[i]; |
873 | 0 | } |
874 | 0 | worst_case[0] = total_leader_count; |
875 | |
|
876 | 0 | double stdev = yb::standard_deviation(leader_dist); |
877 | 0 | double best_stdev = yb::standard_deviation(best_case); |
878 | 0 | double worst_stdev = yb::standard_deviation(worst_case); |
879 | 0 | double percent_dev = (stdev - best_stdev) / (worst_stdev - best_stdev) * 100.0; |
880 | 0 | cout << "Standard deviation: " << stdev << endl; |
881 | 0 | cout << "Adjusted deviation percentage: " << percent_dev << "%" << endl; |
882 | 0 | } |
883 | |
|
884 | 0 | return Status::OK(); |
885 | 0 | } |
886 | | |
// Return a map from tablet server UUID to the number of tablet leaders of
// |table_name| hosted there. Servers hosting only followers appear in the
// map with a count of 0 so every server carrying replicas is accounted for.
Result<std::unordered_map<string, int>> ClusterAdminClient::GetLeaderCounts(
    const client::YBTableName& table_name) {
  vector<string> tablet_ids, ranges;
  // Second argument is max_tablets; 0 presumably means "no limit" — confirm
  // against YBClient::GetTablets if this ever needs to change.
  RETURN_NOT_OK(yb_client_->GetTablets(table_name, 0, &tablet_ids, &ranges));
  master::GetTabletLocationsRequestPB req;
  for (const auto& tablet_id : tablet_ids) {
    req.add_tablet_ids(tablet_id);
  }
  const auto resp = VERIFY_RESULT(InvokeRpc(
      &master::MasterClientProxy::GetTabletLocations, *master_client_proxy_, req));

  std::unordered_map<string, int> leader_counts;
  for (const auto& locs : resp.tablet_locations()) {
    for (const auto& replica : locs.replicas()) {
      const auto uuid = replica.ts_info().permanent_uuid();
      switch(replica.role()) {
        case PeerRole::LEADER:
          // If this is a leader, increment leader counts.
          ++leader_counts[uuid];
          break;
        case PeerRole::FOLLOWER:
          // If this is a follower, touch the leader count entry also so that tablet server with
          // followers only and 0 leader will be accounted for still.
          // (operator[] default-inserts 0 — the "no-op" expression is the point.)
          leader_counts[uuid];
          break;
        default:
          break;
      }
    }
  }

  return leader_counts;
}
920 | | |
921 | 0 | Status ClusterAdminClient::SetupRedisTable() { |
922 | 0 | const YBTableName table_name( |
923 | 0 | YQL_DATABASE_REDIS, common::kRedisKeyspaceName, common::kRedisTableName); |
924 | 0 | RETURN_NOT_OK(yb_client_->CreateNamespaceIfNotExists(common::kRedisKeyspaceName, |
925 | 0 | YQLDatabase::YQL_DATABASE_REDIS)); |
926 | | // Try to create the table. |
927 | 0 | std::unique_ptr<yb::client::YBTableCreator> table_creator(yb_client_->NewTableCreator()); |
928 | 0 | Status s = table_creator->table_name(table_name) |
929 | 0 | .table_type(yb::client::YBTableType::REDIS_TABLE_TYPE) |
930 | 0 | .Create(); |
931 | | // If we could create it, then all good! |
932 | 0 | if (s.ok()) { |
933 | 0 | LOG(INFO) << "Table '" << table_name.ToString() << "' created."; |
934 | | // If the table was already there, also not an error... |
935 | 0 | } else if (s.IsAlreadyPresent()) { |
936 | 0 | LOG(INFO) << "Table '" << table_name.ToString() << "' already exists"; |
937 | 0 | } else { |
938 | | // If any other error, report that! |
939 | 0 | LOG(ERROR) << s; |
940 | 0 | RETURN_NOT_OK(s); |
941 | 0 | } |
942 | 0 | return Status::OK(); |
943 | 0 | } |
944 | | |
945 | 0 | Status ClusterAdminClient::DropRedisTable() { |
946 | 0 | const YBTableName table_name( |
947 | 0 | YQL_DATABASE_REDIS, common::kRedisKeyspaceName, common::kRedisTableName); |
948 | 0 | Status s = yb_client_->DeleteTable(table_name, true /* wait */); |
949 | 0 | if (s.ok()) { |
950 | 0 | LOG(INFO) << "Table '" << table_name.ToString() << "' deleted."; |
951 | 0 | } else if (s.IsNotFound()) { |
952 | 0 | LOG(INFO) << "Table '" << table_name.ToString() << "' does not exist."; |
953 | 0 | } else { |
954 | 0 | RETURN_NOT_OK(s); |
955 | 0 | } |
956 | 0 | return Status::OK(); |
957 | 0 | } |
958 | | |
// Apply a raft config change (ADD_SERVER / REMOVE_SERVER) to the master
// quorum for the peer identified by |peer_host|:|peer_port| (and optionally
// |given_uuid|).
//
// For ADD_SERVER, the target's UUID is fetched from the server itself and
// checked against |given_uuid| when one was supplied. For REMOVE_SERVER the
// UUID is taken on faith, since the server may be unreachable. If the master
// being removed is the current leader, it is asked to step down first and
// the proxy is re-pointed at the new leader before the change is sent.
Status ClusterAdminClient::ChangeMasterConfig(
    const string& change_type,
    const string& peer_host,
    uint16_t peer_port,
    const string& given_uuid) {
  CHECK(initted_);

  consensus::ChangeConfigType cc_type;
  RETURN_NOT_OK(ParseChangeType(change_type, &cc_type));

  string peer_uuid;
  if (cc_type == consensus::ADD_SERVER) {
    VLOG(1) << "ChangeMasterConfig: attempt to get UUID for changed host: " << peer_host << ":"
            << peer_port;
    RETURN_NOT_OK(yb_client_->GetMasterUUID(peer_host, peer_port, &peer_uuid));
    if (!given_uuid.empty() && given_uuid != peer_uuid) {
      return STATUS_FORMAT(
          InvalidArgument, "Specified uuid $0. But the server has uuid $1", given_uuid, peer_uuid);
    }
  } else {
    // Do not verify uuid for REMOVE_SERVER, as the server may not be accessible.
    peer_uuid = given_uuid;
  }
  VLOG(1) << "ChangeMasterConfig: " << change_type << " | " << peer_host << ":" << peer_port
          << " uuid : " << peer_uuid;

  auto leader_uuid = VERIFY_RESULT(GetMasterLeaderUuid());

  // If removing the leader master, then first make it step down and that
  // starts an election and gets a new leader master.
  const HostPort changed_master_addr(peer_host, peer_port);
  if (cc_type == consensus::REMOVE_SERVER && leader_addr_ == changed_master_addr) {
    VLOG(1) << "ChangeMasterConfig: request leader " << leader_addr_
            << " to step down before removal.";
    string old_leader_uuid = leader_uuid;
    RETURN_NOT_OK(MasterLeaderStepDown(leader_uuid));
    sleep(5);  // TODO - wait for exactly the time needed for new leader to get elected.
    // Reget the leader master's socket info to set up the proxy
    ResetMasterProxy(VERIFY_RESULT(yb_client_->RefreshMasterLeaderAddress()));
    leader_uuid = VERIFY_RESULT(GetMasterLeaderUuid());
    if (leader_uuid == old_leader_uuid) {
      return STATUS(ConfigurationError,
          Substitute("Old master leader uuid $0 same as new one even after stepdown!",
                     leader_uuid));
    }
    // Go ahead below and send the actual config change message to the new master
  }

  std::unique_ptr<consensus::ConsensusServiceProxy> leader_proxy(
      new consensus::ConsensusServiceProxy(proxy_cache_.get(), leader_addr_));
  consensus::ChangeConfigRequestPB req;

  RaftPeerPB peer_pb;
  if (!peer_uuid.empty()) {
    peer_pb.set_permanent_uuid(peer_uuid);
  }

  if (cc_type == consensus::ADD_SERVER) {
    // New masters always join as PRE_VOTER; raft promotes them once caught up.
    peer_pb.set_member_type(consensus::PeerMemberType::PRE_VOTER);
  } else {  // REMOVE_SERVER
    // With no UUID supplied, the leader matches the peer by host/port instead.
    req.set_use_host(peer_uuid.empty());
  }
  HostPortPB *peer_host_port = peer_pb.mutable_last_known_private_addr()->Add();
  peer_host_port->set_port(peer_port);
  peer_host_port->set_host(peer_host);
  req.set_dest_uuid(leader_uuid);
  req.set_tablet_id(yb::master::kSysCatalogTabletId);
  req.set_type(cc_type);
  *req.mutable_server() = peer_pb;

  VLOG(1) << "ChangeMasterConfig: ChangeConfig for tablet id " << yb::master::kSysCatalogTabletId
          << " to host " << leader_addr_;
  RETURN_NOT_OK(InvokeRpc(&consensus::ConsensusServiceProxy::ChangeConfig, *leader_proxy, req));

  // Keep the local client's view of the master quorum in sync with the change
  // we just applied.
  VLOG(1) << "ChangeMasterConfig: update yb client to reflect config change.";
  if (cc_type == consensus::ADD_SERVER) {
    RETURN_NOT_OK(yb_client_->AddMasterToClient(changed_master_addr));
  } else {
    RETURN_NOT_OK(yb_client_->RemoveMasterFromClient(changed_master_addr));
  }

  return Status::OK();
}
1041 | | |
1042 | | Status ClusterAdminClient::GetTabletLocations(const TabletId& tablet_id, |
1043 | 0 | TabletLocationsPB* locations) { |
1044 | 0 | master::GetTabletLocationsRequestPB req; |
1045 | 0 | req.add_tablet_ids(tablet_id); |
1046 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
1047 | 0 | &master::MasterClientProxy::GetTabletLocations, *master_client_proxy_, req)); |
1048 | |
|
1049 | 0 | if (resp.errors_size() > 0) { |
1050 | | // This tool only needs to support one-by-one requests for tablet |
1051 | | // locations, so we only look at the first error. |
1052 | 0 | return StatusFromPB(resp.errors(0).status()); |
1053 | 0 | } |
1054 | | |
1055 | | // Same as above, no batching, and we already got past the error checks. |
1056 | 0 | CHECK_EQ(1, resp.tablet_locations_size()) << resp.ShortDebugString(); |
1057 | |
|
1058 | 0 | *locations = resp.tablet_locations(0); |
1059 | 0 | return Status::OK(); |
1060 | 0 | } |
1061 | | |
1062 | | Status ClusterAdminClient::GetTabletPeer(const TabletId& tablet_id, |
1063 | | PeerMode mode, |
1064 | 0 | TSInfoPB* ts_info) { |
1065 | 0 | TabletLocationsPB locations; |
1066 | 0 | RETURN_NOT_OK(GetTabletLocations(tablet_id, &locations)); |
1067 | 0 | CHECK_EQ(tablet_id, locations.tablet_id()) << locations.ShortDebugString(); |
1068 | 0 | bool found = false; |
1069 | 0 | for (const TabletLocationsPB::ReplicaPB& replica : locations.replicas()) { |
1070 | 0 | if (mode == LEADER && replica.role() == PeerRole::LEADER) { |
1071 | 0 | *ts_info = replica.ts_info(); |
1072 | 0 | found = true; |
1073 | 0 | break; |
1074 | 0 | } |
1075 | 0 | if (mode == FOLLOWER && replica.role() != PeerRole::LEADER) { |
1076 | 0 | *ts_info = replica.ts_info(); |
1077 | 0 | found = true; |
1078 | 0 | break; |
1079 | 0 | } |
1080 | 0 | } |
1081 | |
|
1082 | 0 | if (!found) { |
1083 | 0 | return STATUS(NotFound, |
1084 | 0 | Substitute("No peer replica found in $0 mode for tablet $1", mode, tablet_id)); |
1085 | 0 | } |
1086 | | |
1087 | 0 | return Status::OK(); |
1088 | 0 | } |
1089 | | |
1090 | | Status ClusterAdminClient::ListTabletServers( |
1091 | 0 | RepeatedPtrField<ListTabletServersResponsePB::Entry>* servers) { |
1092 | 0 | auto resp = VERIFY_RESULT(InvokeRpc( |
1093 | 0 | &master::MasterClusterProxy::ListTabletServers, *master_cluster_proxy_, |
1094 | 0 | ListTabletServersRequestPB())); |
1095 | 0 | *servers = std::move(*resp.mutable_servers()); |
1096 | 0 | return Status::OK(); |
1097 | 0 | } |
1098 | | |
1099 | 0 | Result<HostPort> ClusterAdminClient::GetFirstRpcAddressForTS(const PeerId& uuid) { |
1100 | 0 | RepeatedPtrField<ListTabletServersResponsePB::Entry> servers; |
1101 | 0 | RETURN_NOT_OK(ListTabletServers(&servers)); |
1102 | 0 | for (const ListTabletServersResponsePB::Entry& server : servers) { |
1103 | 0 | if (server.instance_id().permanent_uuid() == uuid) { |
1104 | 0 | if (!server.has_registration() || |
1105 | 0 | server.registration().common().private_rpc_addresses().empty()) { |
1106 | 0 | break; |
1107 | 0 | } |
1108 | 0 | return HostPortFromPB(server.registration().common().private_rpc_addresses(0)); |
1109 | 0 | } |
1110 | 0 | } |
1111 | |
|
1112 | 0 | return STATUS_FORMAT( |
1113 | 0 | NotFound, "Server with UUID $0 has no RPC address registered with the Master", uuid); |
1114 | 0 | } |
1115 | | |
// Print a formatted table of all tablet servers known to the master: UUID,
// RPC endpoint, heartbeat delay, liveness status, read/write throughput,
// uptime and storage/memory metrics. When |exclude_dead| is set, servers the
// master reports as not alive are skipped.
Status ClusterAdminClient::ListAllTabletServers(bool exclude_dead) {
  RepeatedPtrField<ListTabletServersResponsePB::Entry> servers;
  RETURN_NOT_OK(ListTabletServers(&servers));
  char kSpaceSep = ' ';

  // Header row; column widths must match the per-server rows below.
  cout << RightPadToUuidWidth("Tablet Server UUID") << kSpaceSep
       << kRpcHostPortHeading << kSpaceSep
       << RightPadToWidth("Heartbeat delay", kLongColWidth) << kSpaceSep
       << RightPadToWidth("Status", kSmallColWidth) << kSpaceSep
       << RightPadToWidth("Reads/s", kSmallColWidth) << kSpaceSep
       << RightPadToWidth("Writes/s", kSmallColWidth) << kSpaceSep
       << RightPadToWidth("Uptime", kSmallColWidth) << kSpaceSep
       << RightPadToWidth("SST total size", kLongColWidth) << kSpaceSep
       << RightPadToWidth("SST uncomp size", kLongColWidth) << kSpaceSep
       << RightPadToWidth("SST #files", kLongColWidth) << kSpaceSep
       << RightPadToWidth("Memory", kSmallColWidth) << kSpaceSep
       << endl;
  for (const ListTabletServersResponsePB::Entry& server : servers) {
    if (exclude_dead && server.has_alive() && !server.alive()) {
      continue;
    }
    // Format the heartbeat delay in seconds with two decimals; missing
    // heartbeat info is shown as 0.00s.
    std::stringstream time_str;
    auto heartbeat_delay_ms = server.has_millis_since_heartbeat() ?
                              server.millis_since_heartbeat() : 0;
    time_str << std::fixed << std::setprecision(2) << (heartbeat_delay_ms/1000.0) << "s";
    auto status_str = server.has_alive() ? (server.alive() ? "ALIVE" : "DEAD") : "UNKNOWN";
    cout << server.instance_id().permanent_uuid() << kSpaceSep
         << FormatFirstHostPort(server.registration().common().private_rpc_addresses())
         << kSpaceSep
         << RightPadToWidth(time_str.str(), kLongColWidth) << kSpaceSep
         << RightPadToWidth(status_str, kSmallColWidth) << kSpaceSep
         << RightPadToWidth(FormatDouble(server.metrics().read_ops_per_sec()), kSmallColWidth)
         << kSpaceSep
         << RightPadToWidth(FormatDouble(server.metrics().write_ops_per_sec()), kSmallColWidth)
         << kSpaceSep
         << RightPadToWidth(server.metrics().uptime_seconds(), kSmallColWidth) << kSpaceSep
         << RightPadToWidth(HumanizeBytes(server.metrics().total_sst_file_size()), kLongColWidth)
         << kSpaceSep
         << RightPadToWidth(HumanizeBytes(server.metrics().uncompressed_sst_file_size()),
                            kLongColWidth)
         << kSpaceSep
         << RightPadToWidth(server.metrics().num_sst_files(), kLongColWidth) << kSpaceSep
         << RightPadToWidth(HumanizeBytes(server.metrics().total_ram_usage()), kSmallColWidth)
         << kSpaceSep
         << endl;
  }

  return Status::OK();
}
1165 | | |
// Print a formatted table of all masters: UUID, RPC endpoint, liveness state
// and raft role. Fields missing from the ListMasters response are shown as
// UNKNOWN placeholders.
Status ClusterAdminClient::ListAllMasters() {
  const auto lresp = VERIFY_RESULT(InvokeRpc(
      &master::MasterClusterProxy::ListMasters, *master_cluster_proxy_,
      ListMastersRequestPB()));

  if (lresp.has_error()) {
    LOG(ERROR) << "Error: querying leader master for live master info : "
               << lresp.error().DebugString() << endl;
    return STATUS(RemoteError, lresp.error().DebugString());
  }

  // Header row; widths must match the per-master rows below.
  cout << RightPadToUuidWidth("Master UUID") << kColumnSep
       << RightPadToWidth(kRpcHostPortHeading, kHostPortColWidth) << kColumnSep
       << RightPadToWidth("State", kSmallColWidth) << kColumnSep
       << "Role" << endl;

  for (const auto& master : lresp.masters()) {
    const auto master_reg = master.has_registration() ? &master.registration() : nullptr;
    cout << (master.has_instance_id() ? master.instance_id().permanent_uuid()
                                      : RightPadToUuidWidth("UNKNOWN_UUID")) << kColumnSep;
    cout << RightPadToWidth(
                master_reg ? FormatFirstHostPort(master_reg->private_rpc_addresses())
                           : "UNKNOWN", kHostPortColWidth)
         << kColumnSep;
    // A master that reported an error is shown with that error code as its
    // state; otherwise it is considered ALIVE.
    cout << RightPadToWidth((master.has_error() ?
                             PBEnumToString(master.error().code()) : "ALIVE"),
                            kSmallColWidth)
         << kColumnSep;
    cout << (master.has_role() ? PBEnumToString(master.role()) : "UNKNOWN") << endl;
  }

  return Status::OK();
}
1199 | | |
1200 | 0 | Status ClusterAdminClient::ListTabletServersLogLocations() { |
1201 | 0 | RepeatedPtrField<ListTabletServersResponsePB::Entry> servers; |
1202 | 0 | RETURN_NOT_OK(ListTabletServers(&servers)); |
1203 | |
|
1204 | 0 | if (!servers.empty()) { |
1205 | 0 | cout << RightPadToUuidWidth("TS UUID") << kColumnSep |
1206 | 0 | << kRpcHostPortHeading << kColumnSep |
1207 | 0 | << "LogLocation" |
1208 | 0 | << endl; |
1209 | 0 | } |
1210 | |
|
1211 | 0 | for (const ListTabletServersResponsePB::Entry& server : servers) { |
1212 | 0 | auto ts_uuid = server.instance_id().permanent_uuid(); |
1213 | |
|
1214 | 0 | HostPort ts_addr = VERIFY_RESULT(GetFirstRpcAddressForTS(ts_uuid)); |
1215 | |
|
1216 | 0 | TabletServerServiceProxy ts_proxy(proxy_cache_.get(), ts_addr); |
1217 | |
|
1218 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
1219 | 0 | &TabletServerServiceProxy::GetLogLocation, ts_proxy, tserver::GetLogLocationRequestPB())); |
1220 | 0 | cout << ts_uuid << kColumnSep |
1221 | 0 | << ts_addr << kColumnSep |
1222 | 0 | << resp.log_location() << endl; |
1223 | 0 | } |
1224 | |
|
1225 | 0 | return Status::OK(); |
1226 | 0 | } |
1227 | | |
1228 | | Status ClusterAdminClient::ListTables(bool include_db_type, |
1229 | | bool include_table_id, |
1230 | 0 | bool include_table_type) { |
1231 | 0 | const auto tables = VERIFY_RESULT(yb_client_->ListTables()); |
1232 | 0 | const auto& namespace_metadata = VERIFY_RESULT_REF(GetNamespaceMap()); |
1233 | 0 | vector<string> names; |
1234 | 0 | for (const auto& table : tables) { |
1235 | 0 | std::stringstream str; |
1236 | 0 | if (include_db_type) { |
1237 | 0 | const auto db_type_iter = namespace_metadata.find(table.namespace_id()); |
1238 | 0 | if (db_type_iter != namespace_metadata.end()) { |
1239 | 0 | str << DatabasePrefix(db_type_iter->second.database_type()) << '.'; |
1240 | 0 | } else { |
1241 | 0 | LOG(WARNING) << "Table in unknown namespace found " << table.ToString(); |
1242 | 0 | continue; |
1243 | 0 | } |
1244 | 0 | } |
1245 | 0 | str << table.ToString(); |
1246 | 0 | if (include_table_id) { |
1247 | 0 | str << ' ' << table.table_id(); |
1248 | 0 | } |
1249 | 0 | if (include_table_type) { |
1250 | 0 | boost::optional<master::RelationType> relation_type = table.relation_type(); |
1251 | 0 | switch (relation_type.get()) { |
1252 | 0 | case master::SYSTEM_TABLE_RELATION: |
1253 | 0 | str << " catalog"; |
1254 | 0 | break; |
1255 | 0 | case master::USER_TABLE_RELATION: |
1256 | 0 | str << " table"; |
1257 | 0 | break; |
1258 | 0 | case master::INDEX_TABLE_RELATION: |
1259 | 0 | str << " index"; |
1260 | 0 | break; |
1261 | 0 | default: |
1262 | 0 | str << " other"; |
1263 | 0 | } |
1264 | 0 | } |
1265 | 0 | names.push_back(str.str()); |
1266 | 0 | } |
1267 | 0 | sort(names.begin(), names.end()); |
1268 | 0 | copy(names.begin(), names.end(), std::ostream_iterator<string>(cout, "\n")); |
1269 | 0 | return Status::OK(); |
1270 | 0 | } |
1271 | | |
// Pair of a follower replica's permanent UUID and its "host:port" endpoint
// string, used when listing tablet replicas.
struct FollowerDetails {
  string uuid;
  string host_port;
  FollowerDetails(const string& follower_uuid, const string& follower_host_port)
      : uuid(follower_uuid), host_port(follower_host_port) {}
};
1277 | | |
1278 | | Status ClusterAdminClient::ListTablets( |
1279 | 0 | const YBTableName& table_name, int max_tablets, bool json, bool followers) { |
1280 | 0 | vector<string> tablet_uuids, ranges; |
1281 | 0 | std::vector<master::TabletLocationsPB> locations; |
1282 | 0 | RETURN_NOT_OK(yb_client_->GetTablets( |
1283 | 0 | table_name, max_tablets, &tablet_uuids, &ranges, &locations)); |
1284 | |
|
1285 | 0 | rapidjson::Document document(rapidjson::kObjectType); |
1286 | 0 | rapidjson::Value json_tablets(rapidjson::kArrayType); |
1287 | 0 | CHECK(json_tablets.IsArray()); |
1288 | |
|
1289 | 0 | if (!json) { |
1290 | 0 | cout << RightPadToUuidWidth("Tablet-UUID") << kColumnSep |
1291 | 0 | << RightPadToWidth("Range", kPartitionRangeColWidth) << kColumnSep |
1292 | 0 | << RightPadToWidth("Leader-IP", kLongColWidth) << kColumnSep << "Leader-UUID"; |
1293 | 0 | if (followers) { |
1294 | 0 | cout << kColumnSep << "Followers"; |
1295 | 0 | } |
1296 | 0 | cout << endl; |
1297 | 0 | } |
1298 | |
|
1299 | 0 | for (size_t i = 0; i < tablet_uuids.size(); i++) { |
1300 | 0 | const string& tablet_uuid = tablet_uuids[i]; |
1301 | 0 | string leader_host_port; |
1302 | 0 | string leader_uuid; |
1303 | 0 | string follower_host_port; |
1304 | 0 | vector<FollowerDetails> follower_list; |
1305 | 0 | string follower_list_str; |
1306 | 0 | const auto& locations_of_this_tablet = locations[i]; |
1307 | 0 | for (const auto& replica : locations_of_this_tablet.replicas()) { |
1308 | 0 | if (replica.role() == PeerRole::LEADER) { |
1309 | 0 | if (leader_host_port.empty()) { |
1310 | 0 | leader_host_port = HostPortPBToString(replica.ts_info().private_rpc_addresses(0)); |
1311 | 0 | leader_uuid = replica.ts_info().permanent_uuid(); |
1312 | 0 | } else { |
1313 | 0 | LOG(ERROR) << "Multiple leader replicas found for tablet " << tablet_uuid |
1314 | 0 | << ": " << locations_of_this_tablet.ShortDebugString(); |
1315 | 0 | } |
1316 | 0 | } else { |
1317 | 0 | if (followers) { |
1318 | 0 | string follower_host_port = |
1319 | 0 | HostPortPBToString(replica.ts_info().private_rpc_addresses(0)); |
1320 | 0 | if (json) { |
1321 | 0 | follower_list.push_back( |
1322 | 0 | FollowerDetails(replica.ts_info().permanent_uuid(), follower_host_port)); |
1323 | 0 | } else { |
1324 | 0 | if (!follower_list_str.empty()) { |
1325 | 0 | follower_list_str += ","; |
1326 | 0 | } |
1327 | 0 | follower_list_str += follower_host_port; |
1328 | 0 | } |
1329 | 0 | } |
1330 | 0 | } |
1331 | 0 | } |
1332 | |
|
1333 | 0 | if (json) { |
1334 | 0 | rapidjson::Value json_tablet(rapidjson::kObjectType); |
1335 | 0 | AddStringField("id", tablet_uuid, &json_tablet, &document.GetAllocator()); |
1336 | 0 | const auto& partition = locations_of_this_tablet.partition(); |
1337 | 0 | AddStringField("partition_key_start", |
1338 | 0 | Slice(partition.partition_key_start()).ToDebugHexString(), &json_tablet, |
1339 | 0 | &document.GetAllocator()); |
1340 | 0 | AddStringField("partition_key_end", |
1341 | 0 | Slice(partition.partition_key_end()).ToDebugHexString(), &json_tablet, |
1342 | 0 | &document.GetAllocator()); |
1343 | 0 | rapidjson::Value json_leader(rapidjson::kObjectType); |
1344 | 0 | AddStringField("uuid", leader_uuid, &json_leader, &document.GetAllocator()); |
1345 | 0 | AddStringField("endpoint", leader_host_port, &json_leader, &document.GetAllocator()); |
1346 | 0 | json_tablet.AddMember(rapidjson::StringRef("leader"), json_leader, document.GetAllocator()); |
1347 | 0 | if (followers) { |
1348 | 0 | rapidjson::Value json_followers(rapidjson::kArrayType); |
1349 | 0 | CHECK(json_followers.IsArray()); |
1350 | 0 | for (const FollowerDetails &follower : follower_list) { |
1351 | 0 | rapidjson::Value json_follower(rapidjson::kObjectType); |
1352 | 0 | AddStringField("uuid", follower.uuid, &json_follower, &document.GetAllocator()); |
1353 | 0 | AddStringField("endpoint", follower.host_port, &json_follower, &document.GetAllocator()); |
1354 | 0 | json_followers.PushBack(json_follower, document.GetAllocator()); |
1355 | 0 | } |
1356 | 0 | json_tablet.AddMember(rapidjson::StringRef("followers"), json_followers, |
1357 | 0 | document.GetAllocator()); |
1358 | 0 | } |
1359 | 0 | json_tablets.PushBack(json_tablet, document.GetAllocator()); |
1360 | 0 | } else { |
1361 | 0 | cout << tablet_uuid << kColumnSep << RightPadToWidth(ranges[i], kPartitionRangeColWidth) |
1362 | 0 | << kColumnSep << RightPadToWidth(leader_host_port, kLongColWidth) << kColumnSep |
1363 | 0 | << leader_uuid; |
1364 | 0 | if (followers) { |
1365 | 0 | cout << kColumnSep << follower_list_str; |
1366 | 0 | } |
1367 | 0 | cout << endl; |
1368 | 0 | } |
1369 | 0 | } |
1370 | |
|
1371 | 0 | if (json) { |
1372 | 0 | document.AddMember("tablets", json_tablets, document.GetAllocator()); |
1373 | 0 | std::cout << common::PrettyWriteRapidJsonToString(document) << std::endl; |
1374 | 0 | } |
1375 | |
|
1376 | 0 | return Status::OK(); |
1377 | 0 | } |
1378 | | |
1379 | 1 | Status ClusterAdminClient::LaunchBackfillIndexForTable(const YBTableName& table_name) { |
1380 | 1 | master::LaunchBackfillIndexForTableRequestPB req; |
1381 | 1 | table_name.SetIntoTableIdentifierPB(req.mutable_table_identifier()); |
1382 | 1 | const auto resp = VERIFY_RESULT(InvokeRpc( |
1383 | 1 | &master::MasterDdlProxy::LaunchBackfillIndexForTable, *master_ddl_proxy_, req)); |
1384 | 1 | if (resp.has_error()) { |
1385 | 0 | return STATUS(RemoteError, resp.error().DebugString()); |
1386 | 0 | } |
1387 | 1 | return Status::OK(); |
1388 | 1 | } |
1389 | | |
// Prints, for a single tablet, one row per replica: tablet server UUID,
// RPC host:port, and the replica's role (LEADER/FOLLOWER/...).
// Returns IllegalState if the master does not report exactly one location
// for the given tablet id, dumping up to MAX_NUM_ELEMENTS_TO_SHOW_ON_ERROR
// of the unexpected locations to stderr first.
Status ClusterAdminClient::ListPerTabletTabletServers(const TabletId& tablet_id) {
  master::GetTabletLocationsRequestPB req;
  req.add_tablet_ids(tablet_id);
  const auto resp = VERIFY_RESULT(InvokeRpc(
      &master::MasterClientProxy::GetTabletLocations, *master_client_proxy_, req));

  // Exactly one location is expected since exactly one tablet id was requested.
  if (resp.tablet_locations_size() != 1) {
    if (resp.tablet_locations_size() > 0) {
      std::cerr << "List of all incorrect locations - " << resp.tablet_locations_size()
                << " : " << endl;
      const auto limit = std::min(resp.tablet_locations_size(), MAX_NUM_ELEMENTS_TO_SHOW_ON_ERROR);
      for (int i = 0; i < limit; ++i) {
        std::cerr << i << " : " << resp.tablet_locations(i).DebugString();
      }
      std::cerr << endl;
    }
    return STATUS_FORMAT(IllegalState,
                         "Incorrect number of locations $0 for tablet $1.",
                         resp.tablet_locations_size(), tablet_id);
  }

  TabletLocationsPB locs = resp.tablet_locations(0);
  // Only print the header when there is at least one replica to list.
  if (!locs.replicas().empty()) {
    cout << RightPadToUuidWidth("Server UUID") << kColumnSep
         << RightPadToWidth(kRpcHostPortHeading, kHostPortColWidth) << kColumnSep
         << "Role" << endl;
  }
  for (const auto& replica : locs.replicas()) {
    cout << replica.ts_info().permanent_uuid() << kColumnSep
         << RightPadToWidth(HostPortPBToString(replica.ts_info().private_rpc_addresses(0)),
                            kHostPortColWidth) << kColumnSep
         << PBEnumToString(replica.role()) << endl;
  }

  return Status::OK();
}
1426 | | |
1427 | 0 | Status ClusterAdminClient::DeleteTable(const YBTableName& table_name) { |
1428 | 0 | RETURN_NOT_OK(yb_client_->DeleteTable(table_name)); |
1429 | 0 | cout << "Deleted table " << table_name.ToString() << endl; |
1430 | 0 | return Status::OK(); |
1431 | 0 | } |
1432 | | |
1433 | 0 | Status ClusterAdminClient::DeleteTableById(const TableId& table_id) { |
1434 | 0 | RETURN_NOT_OK(yb_client_->DeleteTable(table_id)); |
1435 | 0 | cout << "Deleted table " << table_id << endl; |
1436 | 0 | return Status::OK(); |
1437 | 0 | } |
1438 | | |
1439 | 0 | Status ClusterAdminClient::DeleteIndex(const YBTableName& table_name) { |
1440 | 0 | YBTableName indexed_table_name; |
1441 | 0 | RETURN_NOT_OK(yb_client_->DeleteIndexTable(table_name, &indexed_table_name)); |
1442 | 0 | cout << "Deleted index " << table_name.ToString() << " from table " << |
1443 | 0 | indexed_table_name.ToString() << endl; |
1444 | 0 | return Status::OK(); |
1445 | 0 | } |
1446 | | |
1447 | 0 | Status ClusterAdminClient::DeleteIndexById(const TableId& table_id) { |
1448 | 0 | YBTableName indexed_table_name; |
1449 | 0 | RETURN_NOT_OK(yb_client_->DeleteIndexTable(table_id, &indexed_table_name)); |
1450 | 0 | cout << "Deleted index " << table_id << " from table " << |
1451 | 0 | indexed_table_name.ToString() << endl; |
1452 | 0 | return Status::OK(); |
1453 | 0 | } |
1454 | | |
1455 | 1 | Status ClusterAdminClient::DeleteNamespace(const TypedNamespaceName& namespace_name) { |
1456 | 1 | RETURN_NOT_OK(yb_client_->DeleteNamespace(namespace_name.name, namespace_name.db_type)); |
1457 | 1 | cout << "Deleted namespace " << namespace_name.name << endl; |
1458 | 1 | return Status::OK(); |
1459 | 1 | } |
1460 | | |
1461 | 0 | Status ClusterAdminClient::DeleteNamespaceById(const NamespaceId& namespace_id) { |
1462 | 0 | RETURN_NOT_OK(yb_client_->DeleteNamespace( |
1463 | 0 | std::string() /* name */, boost::none /* database type */, namespace_id)); |
1464 | 0 | cout << "Deleted namespace " << namespace_id << endl; |
1465 | 0 | return Status::OK(); |
1466 | 0 | } |
1467 | | |
// Prints a table of all tablets hosted by the tablet server with the given
// UUID: table name, tablet id, leadership flag, state, and per-tablet storage
// counters (SST files, log segments, intents/regular memtables).
// The server is contacted directly on its first RPC address.
Status ClusterAdminClient::ListTabletsForTabletServer(const PeerId& ts_uuid) {
  auto ts_addr = VERIFY_RESULT(GetFirstRpcAddressForTS(ts_uuid));

  TabletServerServiceProxy ts_proxy(proxy_cache_.get(), ts_addr);

  const auto resp = VERIFY_RESULT(InvokeRpc(
      &TabletServerServiceProxy::ListTabletsForTabletServer, ts_proxy,
      tserver::ListTabletsForTabletServerRequestPB()));

  cout << RightPadToWidth("Table name", kTableNameColWidth) << kColumnSep
       << RightPadToUuidWidth("Tablet ID") << kColumnSep
       << "Is Leader" << kColumnSep
       << "State" << kColumnSep
       << "Num SST Files" << kColumnSep
       << "Num Log Segments" << kColumnSep
       << "Num Memtables (Intents/Regular)" << endl;
  for (const auto& entry : resp.entries()) {
    cout << RightPadToWidth(entry.table_name(), kTableNameColWidth) << kColumnSep
         << RightPadToUuidWidth(entry.tablet_id()) << kColumnSep
         << entry.is_leader() << kColumnSep
         << PBEnumToString(entry.state()) << kColumnSep
         << entry.num_sst_files() << kColumnSep
         << entry.num_log_segments() << kColumnSep
         << entry.num_memtables_intents() << "/" << entry.num_memtables_regular() << endl;
  }
  return Status::OK();
}
1495 | | |
// Enables or disables the cluster-wide load balancer.
// The ChangeLoadBalancerState RPC is deliberately sent to EVERY master, not just
// the leader: the leader is reached through the cached leader proxy, and each
// non-leader master through a proxy built from its first private RPC address,
// so the setting survives leader changes.
Status ClusterAdminClient::SetLoadBalancerEnabled(bool is_enabled) {
  const auto list_resp = VERIFY_RESULT(InvokeRpc(
      &master::MasterClusterProxy::ListMasters, *master_cluster_proxy_,
      ListMastersRequestPB()));

  master::ChangeLoadBalancerStateRequestPB req;
  req.set_is_enabled(is_enabled);
  for (const auto& master : list_resp.masters()) {

    if (master.role() == PeerRole::LEADER) {
      RETURN_NOT_OK(InvokeRpc(
          &master::MasterClusterProxy::ChangeLoadBalancerState, *master_cluster_proxy_,
          req));
    } else {
      HostPortPB hp_pb = master.registration().private_rpc_addresses(0);

      // Build a one-off proxy to reach this non-leader master directly.
      master::MasterClusterProxy proxy(proxy_cache_.get(), HostPortFromPB(hp_pb));
      RETURN_NOT_OK(InvokeRpc(
          &master::MasterClusterProxy::ChangeLoadBalancerState, proxy, req));
    }
  }

  return Status::OK();
}
1520 | | |
1521 | 0 | Status ClusterAdminClient::GetLoadBalancerState() { |
1522 | 0 | const auto list_resp = VERIFY_RESULT(InvokeRpc( |
1523 | 0 | &master::MasterClusterProxy::ListMasters, *master_cluster_proxy_, |
1524 | 0 | ListMastersRequestPB())); |
1525 | |
|
1526 | 0 | if (list_resp.has_error()) { |
1527 | 0 | LOG(ERROR) << "Error: querying leader master for live master info : " |
1528 | 0 | << list_resp.error().DebugString() << endl; |
1529 | 0 | return STATUS(RemoteError, list_resp.error().DebugString()); |
1530 | 0 | } |
1531 | | |
1532 | 0 | cout << RightPadToUuidWidth("Master UUID") << kColumnSep |
1533 | 0 | << RightPadToWidth(kRpcHostPortHeading, kHostPortColWidth) << kColumnSep |
1534 | 0 | << RightPadToWidth("State", kSmallColWidth) << kColumnSep |
1535 | 0 | << RightPadToWidth("Role", kSmallColWidth) << kColumnSep |
1536 | 0 | << "Load Balancer State" << endl; |
1537 | | |
1538 | |
|
1539 | 0 | master::GetLoadBalancerStateRequestPB req; |
1540 | 0 | master::GetLoadBalancerStateResponsePB resp; |
1541 | 0 | string error; |
1542 | 0 | master::MasterClusterProxy* proxy; |
1543 | 0 | for (const auto& master : list_resp.masters()) { |
1544 | 0 | error.clear(); |
1545 | 0 | std::unique_ptr<master::MasterClusterProxy> follower_proxy; |
1546 | 0 | if (master.role() == PeerRole::LEADER) { |
1547 | 0 | proxy = master_cluster_proxy_.get(); |
1548 | 0 | } else { |
1549 | 0 | HostPortPB hp_pb = master.registration().private_rpc_addresses(0); |
1550 | 0 | follower_proxy = std::make_unique<master::MasterClusterProxy>( |
1551 | 0 | proxy_cache_.get(), HostPortFromPB(hp_pb)); |
1552 | 0 | proxy = follower_proxy.get(); |
1553 | 0 | } |
1554 | 0 | auto result = InvokeRpc(&master::MasterClusterProxy::GetLoadBalancerState, *proxy, req); |
1555 | 0 | if (!result) { |
1556 | 0 | error = result.ToString(); |
1557 | 0 | } else { |
1558 | 0 | resp = *result; |
1559 | 0 | if (!resp.has_error()) { |
1560 | 0 | error = resp.error().status().message(); |
1561 | 0 | } |
1562 | 0 | } |
1563 | 0 | const auto master_reg = master.has_registration() ? &master.registration() : nullptr; |
1564 | 0 | cout << (master.has_instance_id() ? master.instance_id().permanent_uuid() |
1565 | 0 | : RightPadToUuidWidth("UNKNOWN_UUID")) << kColumnSep; |
1566 | 0 | cout << RightPadToWidth( |
1567 | 0 | master_reg ? FormatFirstHostPort(master_reg->private_rpc_addresses()) |
1568 | 0 | : "UNKNOWN", kHostPortColWidth) |
1569 | 0 | << kColumnSep; |
1570 | 0 | cout << RightPadToWidth((master.has_error() ? |
1571 | 0 | PBEnumToString(master.error().code()) : "ALIVE"), kSmallColWidth) |
1572 | 0 | << kColumnSep; |
1573 | 0 | cout << RightPadToWidth((master.has_role() ? |
1574 | 0 | PBEnumToString(master.role()) : "UNKNOWN"), kSmallColWidth) |
1575 | 0 | << kColumnSep; |
1576 | 0 | cout << (!error.empty() ? "Error: " + error : (resp.is_enabled() ? "ENABLED" : "DISABLED")) |
1577 | 0 | << std::endl; |
1578 | 0 | } |
1579 | |
|
1580 | 0 | return Status::OK(); |
1581 | 0 | } |
1582 | | |
1583 | | Status ClusterAdminClient::FlushTables(const std::vector<YBTableName>& table_names, |
1584 | | bool add_indexes, |
1585 | | int timeout_secs, |
1586 | 0 | bool is_compaction) { |
1587 | 0 | RETURN_NOT_OK(yb_client_->FlushTables(table_names, add_indexes, timeout_secs, is_compaction)); |
1588 | 0 | cout << (is_compaction ? "Compacted " : "Flushed ") |
1589 | 0 | << ToString(table_names) << " tables" |
1590 | 0 | << (add_indexes ? " and associated indexes." : ".") << endl; |
1591 | 0 | return Status::OK(); |
1592 | 0 | } |
1593 | | |
1594 | | Status ClusterAdminClient::FlushTablesById( |
1595 | | const std::vector<TableId>& table_ids, |
1596 | | bool add_indexes, |
1597 | | int timeout_secs, |
1598 | 0 | bool is_compaction) { |
1599 | 0 | RETURN_NOT_OK(yb_client_->FlushTables(table_ids, add_indexes, timeout_secs, is_compaction)); |
1600 | 0 | cout << (is_compaction ? "Compacted " : "Flushed ") |
1601 | 0 | << ToString(table_ids) << " tables" |
1602 | 0 | << (add_indexes ? " and associated indexes." : ".") << endl; |
1603 | 0 | return Status::OK(); |
1604 | 0 | } |
1605 | | |
1606 | 0 | Status ClusterAdminClient::FlushSysCatalog() { |
1607 | 0 | master::FlushSysCatalogRequestPB req; |
1608 | 0 | auto res = InvokeRpc( |
1609 | 0 | &master::MasterAdminProxy::FlushSysCatalog, *master_admin_proxy_, req); |
1610 | 0 | return res.ok() ? Status::OK() : res.status(); |
1611 | 0 | } |
1612 | | |
1613 | 0 | Status ClusterAdminClient::CompactSysCatalog() { |
1614 | 0 | master::CompactSysCatalogRequestPB req; |
1615 | 0 | auto res = InvokeRpc( |
1616 | 0 | &master::MasterAdminProxy::CompactSysCatalog, *master_admin_proxy_, req); |
1617 | 0 | return res.ok() ? Status::OK() : res.status(); |
1618 | 0 | } |
1619 | | |
1620 | 23 | Status ClusterAdminClient::WaitUntilMasterLeaderReady() { |
1621 | 23 | for(int iter = 0; iter < kNumberOfTryouts; ++iter) { |
1622 | 23 | const auto res_leader_ready = VERIFY_RESULT(InvokeRpcNoResponseCheck( |
1623 | 23 | &master::MasterClusterProxy::IsMasterLeaderServiceReady, |
1624 | 23 | *master_cluster_proxy_, master::IsMasterLeaderReadyRequestPB(), |
1625 | 23 | "MasterServiceImpl::IsMasterLeaderServiceReady call failed.")); |
1626 | 23 | if (!res_leader_ready.has_error()) { |
1627 | 23 | return Status::OK(); |
1628 | 23 | } |
1629 | 0 | sleep(kSleepTimeSec); |
1630 | 0 | } |
1631 | 0 | return STATUS(TimedOut, "ClusterAdminClient::WaitUntilMasterLeaderReady timed out."); |
1632 | 23 | } |
1633 | | |
// Adds a read replica placement to the cluster config; fails if one already
// exists. 'placement_info' has the "cloud.region.zone:rf,..." format parsed by
// FillPlacementInfo. When 'optional_uuid' is non-empty it becomes the placement
// uuid; otherwise a random 16-character human-readable uuid is generated.
Status ClusterAdminClient::AddReadReplicaPlacementInfo(
    const string& placement_info, int replication_factor, const std::string& optional_uuid) {
  RETURN_NOT_OK_PREPEND(WaitUntilMasterLeaderReady(), "Wait for master leader failed!");

  // Get the cluster config from the master leader.
  auto resp_cluster_config = VERIFY_RESULT(GetMasterClusterConfig());

  auto* cluster_config = resp_cluster_config.mutable_cluster_config();
  // Only a single read replica placement is supported.
  if (cluster_config->replication_info().read_replicas_size() > 0) {
    return STATUS(InvalidCommand, "Already have a read replica placement, cannot add another.");
  }
  auto* read_replica_config = cluster_config->mutable_replication_info()->add_read_replicas();

  // If optional_uuid is set, make that the placement info, otherwise generate a random one.
  string uuid_str = optional_uuid;
  if (optional_uuid.empty()) {
    uuid_str = RandomHumanReadableString(16);
  }
  read_replica_config->set_num_replicas(replication_factor);
  read_replica_config->set_placement_uuid(uuid_str);

  // Fill in the placement info with new stuff.
  RETURN_NOT_OK(FillPlacementInfo(read_replica_config, placement_info));

  master::ChangeMasterClusterConfigRequestPB req_new_cluster_config;

  *req_new_cluster_config.mutable_cluster_config() = *cluster_config;

  // Push the modified config back to the master leader.
  RETURN_NOT_OK(InvokeRpc(&master::MasterClusterProxy::ChangeMasterClusterConfig,
                          *master_cluster_proxy_, req_new_cluster_config,
                          "MasterServiceImpl::ChangeMasterClusterConfig call failed."));

  LOG(INFO) << "Created read replica placement with uuid: " << uuid_str;
  return Status::OK();
}
1669 | | |
// Rewrites the existing (single) read replica placement entry in the cluster
// config with a new placement string and replication factor. When
// 'placement_uuid' is empty the previous placement uuid is kept; otherwise it
// is replaced. Fails if no read replica placement exists yet.
CHECKED_STATUS ClusterAdminClient::ModifyReadReplicaPlacementInfo(
    const std::string& placement_uuid, const std::string& placement_info, int replication_factor) {
  RETURN_NOT_OK_PREPEND(WaitUntilMasterLeaderReady(), "Wait for master leader failed!");

  // Get the cluster config from the master leader.
  auto master_resp = VERIFY_RESULT(GetMasterClusterConfig());
  auto* cluster_config = master_resp.mutable_cluster_config();

  auto* replication_info = cluster_config->mutable_replication_info();
  if (replication_info->read_replicas_size() == 0) {
    return STATUS(InvalidCommand, "No read replica placement info to modify.");
  }

  // Only the first (and only) read replica entry is modified.
  auto* read_replica_config = replication_info->mutable_read_replicas(0);

  std::string config_placement_uuid;
  if (placement_uuid.empty())  {
    // If there is no placement_uuid set, use the existing uuid.
    config_placement_uuid = read_replica_config->placement_uuid();
  } else {
    // Otherwise, use the passed in value.
    config_placement_uuid = placement_uuid;
  }

  // Reset the entry before repopulating it from scratch.
  read_replica_config->Clear();

  read_replica_config->set_num_replicas(replication_factor);
  read_replica_config->set_placement_uuid(config_placement_uuid);
  RETURN_NOT_OK(FillPlacementInfo(read_replica_config, placement_info));

  master::ChangeMasterClusterConfigRequestPB req_new_cluster_config;

  *req_new_cluster_config.mutable_cluster_config() = *cluster_config;

  // Push the modified config back to the master leader.
  RETURN_NOT_OK(InvokeRpc(&master::MasterClusterProxy::ChangeMasterClusterConfig,
                          *master_cluster_proxy_, req_new_cluster_config,
                          "MasterServiceImpl::ChangeMasterClusterConfig call failed."));

  LOG(INFO) << "Changed read replica placement.";
  return Status::OK();
}
1711 | | |
1712 | 0 | CHECKED_STATUS ClusterAdminClient::DeleteReadReplicaPlacementInfo() { |
1713 | 0 | RETURN_NOT_OK_PREPEND(WaitUntilMasterLeaderReady(), "Wait for master leader failed!"); |
1714 | |
|
1715 | 0 | auto master_resp = VERIFY_RESULT(GetMasterClusterConfig()); |
1716 | 0 | auto* cluster_config = master_resp.mutable_cluster_config(); |
1717 | |
|
1718 | 0 | auto* replication_info = cluster_config->mutable_replication_info(); |
1719 | 0 | if (replication_info->read_replicas_size() == 0) { |
1720 | 0 | return STATUS(InvalidCommand, "No read replica placement info to delete."); |
1721 | 0 | } |
1722 | | |
1723 | 0 | replication_info->clear_read_replicas(); |
1724 | |
|
1725 | 0 | master::ChangeMasterClusterConfigRequestPB req_new_cluster_config; |
1726 | |
|
1727 | 0 | *req_new_cluster_config.mutable_cluster_config() = *cluster_config; |
1728 | |
|
1729 | 0 | RETURN_NOT_OK(InvokeRpc(&master::MasterClusterProxy::ChangeMasterClusterConfig, |
1730 | 0 | *master_cluster_proxy_, req_new_cluster_config, |
1731 | 0 | "MasterServiceImpl::ChangeMasterClusterConfig call failed.")); |
1732 | |
|
1733 | 0 | LOG(INFO) << "Deleted read replica placement."; |
1734 | 0 | return Status::OK(); |
1735 | 0 | } |
1736 | | |
1737 | | Status ClusterAdminClient::FillPlacementInfo( |
1738 | 1 | master::PlacementInfoPB* placement_info_pb, const string& placement_str) { |
1739 | | |
1740 | 1 | std::vector<std::string> placement_info_split = strings::Split( |
1741 | 1 | placement_str, ",", strings::SkipEmpty()); |
1742 | 1 | if (placement_info_split.size() < 1) { |
1743 | 0 | return STATUS(InvalidCommand, "Cluster config must be a list of " |
1744 | 0 | "placement infos seperated by commas. " |
1745 | 0 | "Format: 'cloud1.region1.zone1:rf,cloud2.region2.zone2:rf, ..." |
1746 | 0 | + std::to_string(placement_info_split.size())); |
1747 | 0 | } |
1748 | | |
1749 | 2 | for (size_t iter = 0; iter < placement_info_split.size(); iter++) { |
1750 | 1 | std::vector<std::string> placement_block = strings::Split(placement_info_split[iter], ":", |
1751 | 1 | strings::SkipEmpty()); |
1752 | | |
1753 | 1 | if (placement_block.size() != 2) { |
1754 | 0 | return STATUS(InvalidCommand, "Each placement info must be in format placement:rf"); |
1755 | 0 | } |
1756 | | |
1757 | 1 | int min_num_replicas = VERIFY_RESULT(CheckedStoInt<int>(placement_block[1])); |
1758 | | |
1759 | 1 | std::vector<std::string> block = strings::Split(placement_block[0], ".", |
1760 | 1 | strings::SkipEmpty()); |
1761 | 1 | if (block.size() != 3) { |
1762 | 0 | return STATUS(InvalidCommand, |
1763 | 0 | "Each placement info must have exactly 3 values seperated" |
1764 | 0 | "by dots that denote cloud, region and zone. Block: " + placement_info_split[iter] |
1765 | 0 | + " is invalid"); |
1766 | 0 | } |
1767 | 1 | auto pb = placement_info_pb->add_placement_blocks(); |
1768 | 1 | pb->mutable_cloud_info()->set_placement_cloud(block[0]); |
1769 | 1 | pb->mutable_cloud_info()->set_placement_region(block[1]); |
1770 | 1 | pb->mutable_cloud_info()->set_placement_zone(block[2]); |
1771 | | |
1772 | 1 | pb->set_min_num_replicas(min_num_replicas); |
1773 | 1 | } |
1774 | | |
1775 | 1 | return Status::OK(); |
1776 | 1 | } |
1777 | | |
1778 | | Status ClusterAdminClient::ModifyTablePlacementInfo( |
1779 | | const YBTableName& table_name, const std::string& placement_info, int replication_factor, |
1780 | 4 | const std::string& optional_uuid) { |
1781 | | |
1782 | 4 | YBTableName global_transactions( |
1783 | 4 | YQL_DATABASE_CQL, master::kSystemNamespaceName, kGlobalTransactionsTableName); |
1784 | 4 | if (table_name == global_transactions) { |
1785 | 0 | return STATUS(InvalidCommand, "Placement cannot be modified for the global transactions table"); |
1786 | 0 | } |
1787 | | |
1788 | 4 | std::vector<std::string> placement_info_split = strings::Split( |
1789 | 4 | placement_info, ",", strings::SkipEmpty()); |
1790 | 4 | if (placement_info_split.size() < 1) { |
1791 | 0 | return STATUS(InvalidCommand, "Table placement config must be a list of " |
1792 | 0 | "placement infos seperated by commas. " |
1793 | 0 | "Format: 'cloud1.region1.zone1,cloud2.region2.zone2,cloud3.region3.zone3 ..." |
1794 | 0 | + std::to_string(placement_info_split.size())); |
1795 | 0 | } |
1796 | | |
1797 | 4 | master::PlacementInfoPB* live_replicas = new master::PlacementInfoPB; |
1798 | 4 | live_replicas->set_num_replicas(replication_factor); |
1799 | | // Iterate over the placement blocks of the placementInfo structure. |
1800 | 12 | for (size_t iter = 0; iter < placement_info_split.size(); iter++) { |
1801 | 8 | std::vector<std::string> block = strings::Split(placement_info_split[iter], ".", |
1802 | 8 | strings::SkipEmpty()); |
1803 | 8 | if (block.size() != 3) { |
1804 | 0 | return STATUS(InvalidCommand, "Each placement info must have exactly 3 values seperated" |
1805 | 0 | "by dots that denote cloud, region and zone. Block: " + placement_info_split[iter] |
1806 | 0 | + " is invalid"); |
1807 | 0 | } |
1808 | 8 | auto pb = live_replicas->add_placement_blocks(); |
1809 | 8 | pb->mutable_cloud_info()->set_placement_cloud(block[0]); |
1810 | 8 | pb->mutable_cloud_info()->set_placement_region(block[1]); |
1811 | 8 | pb->mutable_cloud_info()->set_placement_zone(block[2]); |
1812 | | // TODO: Should this also be passed in as input? |
1813 | 8 | pb->set_min_num_replicas(1); |
1814 | 8 | } |
1815 | | |
1816 | 4 | if (!optional_uuid.empty()) { |
1817 | | // If we have an optional uuid, set it. |
1818 | 2 | live_replicas->set_placement_uuid(optional_uuid); |
1819 | 2 | } |
1820 | | |
1821 | 4 | return yb_client_->ModifyTablePlacementInfo(table_name, live_replicas); |
1822 | 4 | } |
1823 | | |
1824 | | Status ClusterAdminClient::ModifyPlacementInfo( |
1825 | 22 | std::string placement_info, int replication_factor, const std::string& optional_uuid) { |
1826 | | |
1827 | | // Wait to make sure that master leader is ready. |
1828 | 22 | RETURN_NOT_OK_PREPEND(WaitUntilMasterLeaderReady(), "Wait for master leader failed!"); |
1829 | | |
1830 | | // Get the cluster config from the master leader. |
1831 | 15 | auto resp_cluster_config = VERIFY_RESULT(GetMasterClusterConfig()); |
1832 | | |
1833 | | // Create a new cluster config. |
1834 | 15 | std::vector<std::string> placement_info_split = strings::Split( |
1835 | 15 | placement_info, ",", strings::AllowEmpty()); |
1836 | 15 | if (placement_info_split.size() < 1) { |
1837 | 0 | return STATUS( |
1838 | 0 | InvalidCommand, |
1839 | 0 | "Cluster config must be a list of placement infos seperated by commas. Format: " |
1840 | 0 | "cloud1.region1.zone1:[min_replica_count1],cloud2.region2.zone2:[min_replica_count2] ..." |
1841 | 0 | + std::to_string(placement_info_split.size())); |
1842 | 0 | } |
1843 | 15 | master::ChangeMasterClusterConfigRequestPB req_new_cluster_config; |
1844 | 15 | master::SysClusterConfigEntryPB* sys_cluster_config_entry = |
1845 | 15 | resp_cluster_config.mutable_cluster_config(); |
1846 | 15 | master::PlacementInfoPB* live_replicas = new master::PlacementInfoPB; |
1847 | 15 | live_replicas->set_num_replicas(replication_factor); |
1848 | | |
1849 | 15 | int total_min_replica_count = 0; |
1850 | | |
1851 | | // Iterate over the placement blocks of the placementInfo structure. |
1852 | 15 | std::unordered_map<std::string, int> placement_to_min_replicas; |
1853 | 43 | for (const auto& placement_block : placement_info_split) { |
1854 | 43 | std::vector<std::string> placement_info_min_replica_split = |
1855 | 43 | strings::Split(placement_block, ":", strings::AllowEmpty()); |
1856 | | |
1857 | 43 | if (placement_info_min_replica_split.size() == 0 || |
1858 | 43 | placement_info_min_replica_split.size() > 2) { |
1859 | 0 | return STATUS( |
1860 | 0 | InvalidCommand, |
1861 | 0 | "Each placement info must have at most 2 values separated by a colon. " |
1862 | 0 | "Format: cloud.region.zone:[min_replica_count]. Invalid placement info: " |
1863 | 0 | + placement_block); |
1864 | 0 | } |
1865 | | |
1866 | 43 | std::string placement_target = placement_info_min_replica_split[0]; |
1867 | 43 | int placement_min_replica_count = 1; |
1868 | | |
1869 | 43 | if (placement_info_min_replica_split.size() == 2) { |
1870 | 0 | placement_min_replica_count = VERIFY_RESULT(CheckedStoi(placement_info_min_replica_split[1])); |
1871 | 0 | } |
1872 | | |
1873 | 43 | total_min_replica_count += placement_min_replica_count; |
1874 | 43 | placement_to_min_replicas[placement_target] += placement_min_replica_count; |
1875 | 43 | } |
1876 | | |
1877 | 15 | if (total_min_replica_count > replication_factor) { |
1878 | 0 | return STATUS( |
1879 | 0 | InvalidCommand, |
1880 | 0 | "replication_factor should be greater than or equal to the total of replica counts " |
1881 | 0 | "specified in placement_info."); |
1882 | 0 | } |
1883 | | |
1884 | 41 | for (const auto& placement_block : placement_to_min_replicas) { |
1885 | 41 | std::vector<std::string> block = strings::Split(placement_block.first, ".", |
1886 | 41 | strings::AllowEmpty()); |
1887 | 41 | auto pb = live_replicas->add_placement_blocks(); |
1888 | 41 | if (block.size() > 0 && block[0] != "") { |
1889 | 41 | pb->mutable_cloud_info()->set_placement_cloud(block[0]); |
1890 | 41 | } |
1891 | | |
1892 | 41 | if (block.size() > 1 && block[1] != "") { |
1893 | 41 | pb->mutable_cloud_info()->set_placement_region(block[1]); |
1894 | 41 | } |
1895 | | |
1896 | 41 | if (block.size() > 2 && block[2] != "") { |
1897 | 40 | pb->mutable_cloud_info()->set_placement_zone(block[2]); |
1898 | 40 | } |
1899 | | |
1900 | 41 | pb->set_min_num_replicas(placement_block.second); |
1901 | 41 | } |
1902 | | |
1903 | 15 | if (!optional_uuid.empty()) { |
1904 | | // If we have an optional uuid, set it. |
1905 | 2 | live_replicas->set_placement_uuid(optional_uuid); |
1906 | 13 | } else if (sys_cluster_config_entry->replication_info().live_replicas().has_placement_uuid()) { |
1907 | | // Otherwise, if we have an existing placement uuid, use that. |
1908 | 0 | live_replicas->set_placement_uuid( |
1909 | 0 | sys_cluster_config_entry->replication_info().live_replicas().placement_uuid()); |
1910 | 0 | } |
1911 | | |
1912 | 15 | sys_cluster_config_entry->mutable_replication_info()->set_allocated_live_replicas(live_replicas); |
1913 | 15 | req_new_cluster_config.mutable_cluster_config()->CopyFrom(*sys_cluster_config_entry); |
1914 | | |
1915 | 15 | RETURN_NOT_OK(InvokeRpc( |
1916 | 15 | &master::MasterClusterProxy::ChangeMasterClusterConfig, *master_cluster_proxy_, |
1917 | 15 | req_new_cluster_config, "MasterServiceImpl::ChangeMasterClusterConfig call failed.")); |
1918 | | |
1919 | 15 | LOG(INFO) << "Changed master cluster config."; |
1920 | 15 | return Status::OK(); |
1921 | 15 | } |
1922 | | |
1923 | 0 | Status ClusterAdminClient::ClearPlacementInfo() { |
1924 | | // Wait to make sure that master leader is ready. |
1925 | 0 | RETURN_NOT_OK_PREPEND(WaitUntilMasterLeaderReady(), "Wait for master leader failed!"); |
1926 | | |
1927 | | // Get the cluster config from the master leader. |
1928 | 0 | auto resp_cluster_config = VERIFY_RESULT(GetMasterClusterConfig()); |
1929 | |
|
1930 | 0 | master::SysClusterConfigEntryPB* sys_cluster_config_entry = |
1931 | 0 | resp_cluster_config.mutable_cluster_config(); |
1932 | 0 | sys_cluster_config_entry->clear_replication_info(); |
1933 | |
|
1934 | 0 | master::ChangeMasterClusterConfigRequestPB req_new_cluster_config; |
1935 | 0 | req_new_cluster_config.mutable_cluster_config()->CopyFrom(*sys_cluster_config_entry); |
1936 | |
|
1937 | 0 | RETURN_NOT_OK(InvokeRpc( |
1938 | 0 | &master::MasterClusterProxy::ChangeMasterClusterConfig, *master_cluster_proxy_, |
1939 | 0 | req_new_cluster_config, "MasterServiceImpl::ChangeMasterClusterConfig call failed.")); |
1940 | |
|
1941 | 0 | LOG(INFO) << "Cleared master placement info config"; |
1942 | 0 | return Status::OK(); |
1943 | 0 | } |
1944 | | |
1945 | 0 | Status ClusterAdminClient::GetUniverseConfig() { |
1946 | 0 | const auto cluster_config = VERIFY_RESULT(GetMasterClusterConfig()); |
1947 | 0 | std::string output; |
1948 | 0 | MessageToJsonString(cluster_config.cluster_config(), &output); |
1949 | 0 | cout << output << endl; |
1950 | 0 | return Status::OK(); |
1951 | 0 | } |
1952 | | |
1953 | 0 | Status ClusterAdminClient::GetYsqlCatalogVersion() { |
1954 | 0 | uint64_t version = 0; |
1955 | 0 | RETURN_NOT_OK(yb_client_->GetYsqlCatalogMasterVersion(&version)); |
1956 | 0 | cout << "Version: " << version << endl; |
1957 | 0 | return Status::OK(); |
1958 | 0 | } |
1959 | | |
1960 | 0 | Result<rapidjson::Document> ClusterAdminClient::DdlLog() { |
1961 | 0 | RpcController rpc; |
1962 | 0 | rpc.set_timeout(timeout_); |
1963 | 0 | master::DdlLogRequestPB req; |
1964 | 0 | master::DdlLogResponsePB resp; |
1965 | |
|
1966 | 0 | RETURN_NOT_OK(master_admin_proxy_->DdlLog(req, &resp, &rpc)); |
1967 | |
|
1968 | 0 | if (resp.has_error()) { |
1969 | 0 | return StatusFromPB(resp.error().status()); |
1970 | 0 | } |
1971 | | |
1972 | 0 | rapidjson::Document result; |
1973 | 0 | result.SetObject(); |
1974 | 0 | rapidjson::Value json_entries(rapidjson::kArrayType); |
1975 | 0 | for (const auto& entry : resp.entries()) { |
1976 | 0 | rapidjson::Value json_entry(rapidjson::kObjectType); |
1977 | 0 | AddStringField("table_type", TableType_Name(entry.table_type()), &json_entry, |
1978 | 0 | &result.GetAllocator()); |
1979 | 0 | AddStringField("namespace", entry.namespace_name(), &json_entry, &result.GetAllocator()); |
1980 | 0 | AddStringField("table", entry.table_name(), &json_entry, &result.GetAllocator()); |
1981 | 0 | AddStringField("action", entry.action(), &json_entry, &result.GetAllocator()); |
1982 | 0 | AddStringField("time", HybridTimeToString(HybridTime(entry.time())), |
1983 | 0 | &json_entry, &result.GetAllocator()); |
1984 | 0 | json_entries.PushBack(json_entry, result.GetAllocator()); |
1985 | 0 | } |
1986 | 0 | result.AddMember("log", json_entries, result.GetAllocator()); |
1987 | 0 | return result; |
1988 | 0 | } |
1989 | | |
1990 | 0 | Status ClusterAdminClient::UpgradeYsql() { |
1991 | 0 | { |
1992 | 0 | master::IsInitDbDoneRequestPB req; |
1993 | 0 | auto res = InvokeRpc( |
1994 | 0 | &master::MasterAdminProxy::IsInitDbDone, *master_admin_proxy_, req); |
1995 | 0 | if (!res.ok()) { |
1996 | 0 | return res.status(); |
1997 | 0 | } |
1998 | 0 | if (!res->done()) { |
1999 | 0 | cout << "Upgrade is not needed since YSQL is disabled" << endl; |
2000 | 0 | return Status::OK(); |
2001 | 0 | } |
2002 | 0 | if (res->done() && res->has_error()) { |
2003 | 0 | return STATUS_FORMAT(IllegalState, |
2004 | 0 | "YSQL is not ready, initdb finished with an error: $0", |
2005 | 0 | res->error()); |
2006 | 0 | } |
2007 | | // Otherwise, we can proceed. |
2008 | 0 | } |
2009 | | |
2010 | | // Pick some alive TServer. |
2011 | 0 | RepeatedPtrField<ListTabletServersResponsePB::Entry> servers; |
2012 | 0 | RETURN_NOT_OK(ListTabletServers(&servers)); |
2013 | 0 | boost::optional<HostPortPB> ts_rpc_addr; |
2014 | 0 | for (const ListTabletServersResponsePB::Entry& server : servers) { |
2015 | 0 | if (!server.has_alive() || !server.alive()) { |
2016 | 0 | continue; |
2017 | 0 | } |
2018 | | |
2019 | 0 | if (!server.has_registration() || |
2020 | 0 | server.registration().common().private_rpc_addresses().empty()) { |
2021 | 0 | continue; |
2022 | 0 | } |
2023 | | |
2024 | 0 | ts_rpc_addr.emplace(server.registration().common().private_rpc_addresses(0)); |
2025 | 0 | break; |
2026 | 0 | } |
2027 | 0 | if (!ts_rpc_addr.has_value()) { |
2028 | 0 | return STATUS(IllegalState, "Couldn't find alive tablet server to connect to"); |
2029 | 0 | } |
2030 | | |
2031 | 0 | TabletServerAdminServiceProxy ts_admin_proxy(proxy_cache_.get(), HostPortFromPB(*ts_rpc_addr)); |
2032 | |
|
2033 | 0 | UpgradeYsqlRequestPB req; |
2034 | 0 | const auto resp_result = InvokeRpc(&TabletServerAdminServiceProxy::UpgradeYsql, |
2035 | 0 | ts_admin_proxy, req); |
2036 | 0 | if (!resp_result.ok()) { |
2037 | 0 | return resp_result.status(); |
2038 | 0 | } |
2039 | 0 | if (resp_result->has_error()) { |
2040 | 0 | return StatusFromPB(resp_result->error().status()); |
2041 | 0 | } |
2042 | | |
2043 | 0 | cout << "YSQL successfully upgraded to the latest version" << endl; |
2044 | 0 | return Status::OK(); |
2045 | 0 | } |
2046 | | |
2047 | | Status ClusterAdminClient::ChangeBlacklist(const std::vector<HostPort>& servers, bool add, |
2048 | 0 | bool blacklist_leader) { |
2049 | 0 | auto config = VERIFY_RESULT(GetMasterClusterConfig()); |
2050 | 0 | auto& cluster_config = *config.mutable_cluster_config(); |
2051 | 0 | auto& blacklist = (blacklist_leader) ? |
2052 | 0 | *cluster_config.mutable_leader_blacklist() : |
2053 | 0 | *cluster_config.mutable_server_blacklist(); |
2054 | 0 | std::vector<HostPort> result_blacklist; |
2055 | 0 | for (const auto& host : blacklist.hosts()) { |
2056 | 0 | const HostPort hostport(host.host(), host.port()); |
2057 | 0 | if (std::find(servers.begin(), servers.end(), hostport) == servers.end()) { |
2058 | 0 | result_blacklist.emplace_back(host.host(), host.port()); |
2059 | 0 | } |
2060 | 0 | } |
2061 | 0 | if (add) { |
2062 | 0 | result_blacklist.insert(result_blacklist.end(), servers.begin(), servers.end()); |
2063 | 0 | } |
2064 | 0 | auto result_begin = result_blacklist.begin(), result_end = result_blacklist.end(); |
2065 | 0 | std::sort(result_begin, result_end); |
2066 | 0 | result_blacklist.erase(std::unique(result_begin, result_end), result_end); |
2067 | 0 | blacklist.clear_hosts(); |
2068 | 0 | for (const auto& hostport : result_blacklist) { |
2069 | 0 | auto& new_host = *blacklist.add_hosts(); |
2070 | 0 | new_host.set_host(hostport.host()); |
2071 | 0 | new_host.set_port(hostport.port()); |
2072 | 0 | } |
2073 | 0 | master::ChangeMasterClusterConfigRequestPB req_new_cluster_config; |
2074 | 0 | req_new_cluster_config.mutable_cluster_config()->Swap(&cluster_config); |
2075 | 0 | return ResultToStatus(InvokeRpc(&master::MasterClusterProxy::ChangeMasterClusterConfig, |
2076 | 0 | *master_cluster_proxy_, req_new_cluster_config, |
2077 | 0 | "MasterServiceImpl::ChangeMasterClusterConfig call failed.")); |
2078 | 0 | } |
2079 | | |
2080 | | Result<const master::NamespaceIdentifierPB&> ClusterAdminClient::GetNamespaceInfo( |
2081 | 0 | YQLDatabase db_type, const std::string& namespace_name) { |
2082 | 0 | LOG(INFO) << Format( |
2083 | 0 | "Resolving namespace id for '$0' of type '$1'", namespace_name, DatabasePrefix(db_type)); |
2084 | 0 | for (const auto& item : VERIFY_RESULT_REF(GetNamespaceMap())) { |
2085 | 0 | const auto& namespace_info = item.second; |
2086 | 0 | if (namespace_info.database_type() == db_type && namespace_name == namespace_info.name()) { |
2087 | 0 | return namespace_info; |
2088 | 0 | } |
2089 | 0 | } |
2090 | 0 | return STATUS_FORMAT( |
2091 | 0 | NotFound, "Namespace '$0' of type '$1' not found", namespace_name, DatabasePrefix(db_type)); |
2092 | 0 | } |
2093 | | |
2094 | 23 | Result<master::GetMasterClusterConfigResponsePB> ClusterAdminClient::GetMasterClusterConfig() { |
2095 | 23 | return InvokeRpc(&master::MasterClusterProxy::GetMasterClusterConfig, |
2096 | 23 | *master_cluster_proxy_, master::GetMasterClusterConfigRequestPB(), |
2097 | 23 | "MasterServiceImpl::GetMasterClusterConfig call failed."); |
2098 | 23 | } |
2099 | | |
2100 | 0 | CHECKED_STATUS ClusterAdminClient::SplitTablet(const std::string& tablet_id) { |
2101 | 0 | master::SplitTabletRequestPB req; |
2102 | 0 | req.set_tablet_id(tablet_id); |
2103 | 0 | const auto resp = VERIFY_RESULT(InvokeRpc( |
2104 | 0 | &master::MasterAdminProxy::SplitTablet, *master_admin_proxy_, req)); |
2105 | 0 | std::cout << "Response: " << AsString(resp) << std::endl; |
2106 | 0 | return Status::OK(); |
2107 | 0 | } |
2108 | | |
// Creates an additional transactions status table with the given name.
// Thin delegation to the YBClient API, which performs the actual work.
Status ClusterAdminClient::CreateTransactionsStatusTable(const std::string& table_name) {
  return yb_client_->CreateTransactionsStatusTable(table_name);
}
2112 | | |
2113 | | template<class Response, class Request, class Object> |
2114 | | Result<Response> ClusterAdminClient::InvokeRpcNoResponseCheck( |
2115 | | Status (Object::*func)(const Request&, Response*, rpc::RpcController*) const, |
2116 | 78 | const Object& obj, const Request& req, const char* error_message) { |
2117 | 78 | rpc::RpcController rpc; |
2118 | 78 | rpc.set_timeout(timeout_); |
2119 | 78 | Response response; |
2120 | 78 | auto result = (obj.*func)(req, &response, &rpc); |
2121 | 78 | if (error_message) { |
2122 | 62 | RETURN_NOT_OK_PREPEND(result, error_message); |
2123 | 16 | } else { |
2124 | 16 | RETURN_NOT_OK(result); |
2125 | 16 | } |
2126 | 71 | return std::move(response); |
2127 | 78 | } Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master21ListMastersResponsePBENS3_20ListMastersRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_9consensus24LeaderStepDownResponsePBENS3_23LeaderStepDownRequestPBENS3_21ConsensusServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_9consensus27RunLeaderElectionResponsePBENS3_26RunLeaderElectionRequestPBENS3_21ConsensusServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_9consensus22ChangeConfigResponsePBENS3_21ChangeConfigRequestPBENS3_21ConsensusServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master25DumpMasterStateResponsePBENS3_24DumpMasterStateRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master28GetLoadMovePercentResponsePBENS3_27GetLoadMovePercentRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master28GetLoadMovePercentResponsePBENS3_34GetLeaderBlacklistPercentRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc 
_ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master28GetTabletLocationsResponsePBENS3_27GetTabletLocationsRequestPBENS3_17MasterClientProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Line | Count | Source | 2116 | 15 | const Object& obj, const Request& req, const char* error_message) { | 2117 | 15 | rpc::RpcController rpc; | 2118 | 15 | rpc.set_timeout(timeout_); | 2119 | 15 | Response response; | 2120 | 15 | auto result = (obj.*func)(req, &response, &rpc); | 2121 | 15 | if (error_message) { | 2122 | 0 | RETURN_NOT_OK_PREPEND(result, error_message); | 2123 | 15 | } else { | 2124 | 15 | RETURN_NOT_OK(result); | 2125 | 15 | } | 2126 | 15 | return std::move(response); | 2127 | 15 | } |
Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master27ListTabletServersResponsePBENS3_26ListTabletServersRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_7tserver24GetLogLocationResponsePBENS3_23GetLogLocationRequestPBENS3_24TabletServerServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master37LaunchBackfillIndexForTableResponsePBENS3_36LaunchBackfillIndexForTableRequestPBENS3_14MasterDdlProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Line | Count | Source | 2116 | 1 | const Object& obj, const Request& req, const char* error_message) { | 2117 | 1 | rpc::RpcController rpc; | 2118 | 1 | rpc.set_timeout(timeout_); | 2119 | 1 | Response response; | 2120 | 1 | auto result = (obj.*func)(req, &response, &rpc); | 2121 | 1 | if (error_message) { | 2122 | 0 | RETURN_NOT_OK_PREPEND(result, error_message); | 2123 | 1 | } else { | 2124 | 1 | RETURN_NOT_OK(result); | 2125 | 1 | } | 2126 | 1 | return std::move(response); | 2127 | 1 | } |
Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_7tserver36ListTabletsForTabletServerResponsePBENS3_35ListTabletsForTabletServerRequestPBENS3_24TabletServerServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master33ChangeLoadBalancerStateResponsePBENS3_32ChangeLoadBalancerStateRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master30GetLoadBalancerStateResponsePBENS3_29GetLoadBalancerStateRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master25FlushSysCatalogResponsePBENS3_24FlushSysCatalogRequestPBENS3_16MasterAdminProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master27CompactSysCatalogResponsePBENS3_26CompactSysCatalogRequestPBENS3_16MasterAdminProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master29IsMasterLeaderReadyResponsePBENS3_28IsMasterLeaderReadyRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Line | Count | Source | 2116 | 23 | const Object& obj, const Request& req, const char* error_message) { | 2117 | 23 | rpc::RpcController rpc; | 2118 | 23 | rpc.set_timeout(timeout_); | 2119 | 23 | Response response; | 2120 | 23 | auto result = (obj.*func)(req, &response, &rpc); | 2121 | 23 | if (error_message) { | 2122 | 23 | RETURN_NOT_OK_PREPEND(result, error_message); | 2123 | 0 | } else { | 2124 | 0 | 
RETURN_NOT_OK(result); | 2125 | 0 | } | 2126 | 23 | return std::move(response); | 2127 | 23 | } |
_ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master35ChangeMasterClusterConfigResponsePBENS3_34ChangeMasterClusterConfigRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Line | Count | Source | 2116 | 16 | const Object& obj, const Request& req, const char* error_message) { | 2117 | 16 | rpc::RpcController rpc; | 2118 | 16 | rpc.set_timeout(timeout_); | 2119 | 16 | Response response; | 2120 | 16 | auto result = (obj.*func)(req, &response, &rpc); | 2121 | 16 | if (error_message) { | 2122 | 16 | RETURN_NOT_OK_PREPEND(result, error_message); | 2123 | 0 | } else { | 2124 | 0 | RETURN_NOT_OK(result); | 2125 | 0 | } | 2126 | 16 | return std::move(response); | 2127 | 16 | } |
Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master22IsInitDbDoneResponsePBENS3_21IsInitDbDoneRequestPBENS3_16MasterAdminProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_7tserver21UpgradeYsqlResponsePBENS3_20UpgradeYsqlRequestPBENS3_29TabletServerAdminServiceProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master32GetMasterClusterConfigResponsePBENS3_31GetMasterClusterConfigRequestPBENS3_18MasterClusterProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc Line | Count | Source | 2116 | 23 | const Object& obj, const Request& req, const char* error_message) { | 2117 | 23 | rpc::RpcController rpc; | 2118 | 23 | rpc.set_timeout(timeout_); | 2119 | 23 | Response response; | 2120 | 23 | auto result = (obj.*func)(req, &response, &rpc); | 2121 | 23 | if (error_message) { | 2122 | 23 | RETURN_NOT_OK_PREPEND(result, error_message); | 2123 | 0 | } else { | 2124 | 0 | RETURN_NOT_OK(result); | 2125 | 0 | } | 2126 | 16 | return std::move(response); | 2127 | 23 | } |
Unexecuted instantiation: _ZN2yb5tools18ClusterAdminClient24InvokeRpcNoResponseCheckINS_6master21SplitTabletResponsePBENS3_20SplitTabletRequestPBENS3_16MasterAdminProxyEEENS_6ResultIT_EEMT1_KFNS_6StatusERKT0_PS8_PNS_3rpc13RpcControllerEERKSA_SE_PKc |
2128 | | |
// Issues a synchronous RPC like InvokeRpcNoResponseCheck(), then additionally
// converts a response whose embedded error field is set into a non-OK Status
// via ResponseResult(). This is the preferred entry point for master/tserver
// RPCs in this client.
template<class Response, class Request, class Object>
Result<Response> ClusterAdminClient::InvokeRpc(
    Status (Object::*func)(const Request&, Response*, rpc::RpcController*) const,
    const Object& obj, const Request& req, const char* error_message) {
  return ResponseResult(VERIFY_RESULT(InvokeRpcNoResponseCheck(func, obj, req, error_message)));
}
2135 | | |
2136 | 0 | Result<const ClusterAdminClient::NamespaceMap&> ClusterAdminClient::GetNamespaceMap() { |
2137 | 0 | if (namespace_map_.empty()) { |
2138 | 0 | auto v = VERIFY_RESULT(yb_client_->ListNamespaces()); |
2139 | 0 | for (auto& ns : v) { |
2140 | 0 | auto ns_id = ns.id(); |
2141 | 0 | namespace_map_.emplace(std::move(ns_id), std::move(ns)); |
2142 | 0 | } |
2143 | 0 | } |
2144 | 0 | return const_cast<const ClusterAdminClient::NamespaceMap&>(namespace_map_); |
2145 | 0 | } |
2146 | | |
// Builds a TableNameResolver seeded with the current set of tables and
// namespaces fetched from the cluster; either listing call failing fails
// the whole build.
Result<TableNameResolver> ClusterAdminClient::BuildTableNameResolver() {
  return TableNameResolver(VERIFY_RESULT(yb_client_->ListTables()),
                           VERIFY_RESULT(yb_client_->ListNamespaces()));
}
2151 | | |
// Right-pads 's' to the character width of a UUID so columnar output
// containing UUIDs lines up.
string RightPadToUuidWidth(const string &s) {
  return RightPadToWidth(s, kNumCharactersInUuid);
}
2155 | | |
2156 | | Result<TypedNamespaceName> ParseNamespaceName(const std::string& full_namespace_name, |
2157 | 1 | const YQLDatabase default_if_no_prefix) { |
2158 | 1 | const auto parts = SplitByDot(full_namespace_name); |
2159 | 1 | return ResolveNamespaceName(parts.prefix, parts.value, default_if_no_prefix); |
2160 | 1 | } |
2161 | | |
2162 | | void AddStringField( |
2163 | | const char* name, const std::string& value, rapidjson::Value* out, |
2164 | 0 | rapidjson::Value::AllocatorType* allocator) { |
2165 | 0 | rapidjson::Value json_value(value.c_str(), *allocator); |
2166 | 0 | out->AddMember(rapidjson::StringRef(name), json_value, *allocator); |
2167 | 0 | } |
2168 | | |
// Formats the physical-clock component (microseconds) of a HybridTime as a
// human-readable timestamp string; the logical component is ignored.
string HybridTimeToString(HybridTime ht) {
  return Timestamp(ht.GetPhysicalValueMicros()).ToHumanReadableTime();
}
2172 | | |
2173 | | } // namespace tools |
2174 | | } // namespace yb |