/Users/deen/code/yugabyte-db/src/yb/client/table_creator.cc
Line | Count | Source |
1 | | // Copyright (c) YugaByte, Inc. |
2 | | // |
3 | | // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except |
4 | | // in compliance with the License. You may obtain a copy of the License at |
5 | | // |
6 | | // http://www.apache.org/licenses/LICENSE-2.0 |
7 | | // |
8 | | // Unless required by applicable law or agreed to in writing, software distributed under the License |
9 | | // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express |
10 | | // or implied. See the License for the specific language governing permissions and limitations |
11 | | // under the License. |
12 | | // |
13 | | |
14 | | #include "yb/client/table_creator.h" |
15 | | |
16 | | #include "yb/client/client-internal.h" |
17 | | #include "yb/client/client.h" |
18 | | #include "yb/client/table_info.h" |
19 | | |
20 | | #include "yb/common/schema.h" |
21 | | #include "yb/common/transaction.h" |
22 | | #include "yb/common/wire_protocol.h" |
23 | | |
24 | | #include "yb/master/master_ddl.pb.h" |
25 | | |
26 | | #include "yb/util/result.h" |
27 | | #include "yb/util/status_format.h" |
28 | | |
29 | | #include "yb/yql/redis/redisserver/redis_constants.h" |
30 | | |
31 | | DECLARE_bool(client_suppress_created_logs); |
32 | | |
33 | | namespace yb { |
34 | | namespace client { |
35 | | |
36 | | YBTableCreator::YBTableCreator(YBClient* client) |
37 | 7.01k | : client_(client), partition_schema_(new PartitionSchemaPB), index_info_(new IndexInfoPB) { |
38 | 7.01k | } |
39 | | |
40 | 7.01k | YBTableCreator::~YBTableCreator() { |
41 | 7.01k | } |
42 | | |
43 | 7.01k | YBTableCreator& YBTableCreator::table_name(const YBTableName& name) { |
44 | 7.01k | table_name_ = name; |
45 | 7.01k | return *this; |
46 | 7.01k | } |
47 | | |
48 | 6.92k | YBTableCreator& YBTableCreator::table_type(YBTableType table_type) { |
49 | 6.92k | table_type_ = ClientToPBTableType(table_type); |
50 | 6.92k | return *this; |
51 | 6.92k | } |
52 | | |
53 | 1.66k | YBTableCreator& YBTableCreator::creator_role_name(const RoleName& creator_role_name) { |
54 | 1.66k | creator_role_name_ = creator_role_name; |
55 | 1.66k | return *this; |
56 | 1.66k | } |
57 | | |
58 | 5.14k | YBTableCreator& YBTableCreator::table_id(const std::string& table_id) { |
59 | 5.14k | table_id_ = table_id; |
60 | 5.14k | return *this; |
61 | 5.14k | } |
62 | | |
63 | 313 | YBTableCreator& YBTableCreator::is_pg_catalog_table() { |
64 | 313 | is_pg_catalog_table_ = true; |
65 | 313 | return *this; |
66 | 313 | } |
67 | | |
68 | 50 | YBTableCreator& YBTableCreator::is_pg_shared_table() { |
69 | 50 | is_pg_shared_table_ = true; |
70 | 50 | return *this; |
71 | 50 | } |
72 | | |
73 | 4.44k | YBTableCreator& YBTableCreator::hash_schema(YBHashSchema hash_schema) { |
74 | 4.44k | switch (hash_schema) { |
75 | 30 | case YBHashSchema::kMultiColumnHash: |
76 | 30 | partition_schema_->set_hash_schema(PartitionSchemaPB::MULTI_COLUMN_HASH_SCHEMA); |
77 | 30 | break; |
78 | 0 | case YBHashSchema::kRedisHash: |
79 | 0 | partition_schema_->set_hash_schema(PartitionSchemaPB::REDIS_HASH_SCHEMA); |
80 | 0 | break; |
81 | 4.41k | case YBHashSchema::kPgsqlHash: |
82 | 4.41k | partition_schema_->set_hash_schema(PartitionSchemaPB::PGSQL_HASH_SCHEMA); |
83 | 4.41k | break; |
84 | 4.44k | } |
85 | 4.44k | return *this; |
86 | 4.44k | } |
87 | | |
88 | 188 | YBTableCreator& YBTableCreator::num_tablets(int32_t count) { |
89 | 188 | num_tablets_ = count; |
90 | 188 | return *this; |
91 | 188 | } |
92 | | |
93 | 5.05k | YBTableCreator& YBTableCreator::colocated(const bool colocated) { |
94 | 5.05k | colocated_ = colocated; |
95 | 5.05k | return *this; |
96 | 5.05k | } |
97 | | |
98 | 89 | YBTableCreator& YBTableCreator::tablegroup_id(const std::string& tablegroup_id) { |
99 | 89 | tablegroup_id_ = tablegroup_id; |
100 | 89 | return *this; |
101 | 89 | } |
102 | | |
103 | 24 | YBTableCreator& YBTableCreator::colocation_id(ColocationId colocation_id) { |
104 | 24 | colocation_id_ = colocation_id; |
105 | 24 | return *this; |
106 | 24 | } |
107 | | |
108 | 114 | YBTableCreator& YBTableCreator::tablespace_id(const std::string& tablespace_id) { |
109 | 114 | tablespace_id_ = tablespace_id; |
110 | 114 | return *this; |
111 | 114 | } |
112 | | |
113 | 24 | YBTableCreator& YBTableCreator::matview_pg_table_id(const std::string& matview_pg_table_id) { |
114 | 24 | matview_pg_table_id_ = matview_pg_table_id; |
115 | 24 | return *this; |
116 | 24 | } |
117 | | |
118 | 7.01k | YBTableCreator& YBTableCreator::schema(const YBSchema* schema) { |
119 | 7.01k | schema_ = schema; |
120 | 7.01k | return *this; |
121 | 7.01k | } |
122 | | |
123 | 4.78k | YBTableCreator& YBTableCreator::part_of_transaction(const TransactionMetadata* txn) { |
124 | 4.78k | txn_ = txn; |
125 | 4.78k | return *this; |
126 | 4.78k | } |
127 | | |
128 | 2 | YBTableCreator &YBTableCreator::add_partition(const Partition& partition) { |
129 | 2 | partitions_.push_back(partition); |
130 | 2 | return *this; |
131 | 2 | } |
132 | | |
133 | | YBTableCreator& YBTableCreator::add_hash_partitions(const std::vector<std::string>& columns, |
134 | 0 | int32_t num_buckets) { |
135 | 0 | return add_hash_partitions(columns, num_buckets, 0); |
136 | 0 | } |
137 | | |
138 | | YBTableCreator& YBTableCreator::add_hash_partitions(const std::vector<std::string>& columns, |
139 | 0 | int32_t num_buckets, int32_t seed) { |
140 | 0 | PartitionSchemaPB::HashBucketSchemaPB* bucket_schema = |
141 | 0 | partition_schema_->add_hash_bucket_schemas(); |
142 | 0 | for (const string& col_name : columns) { |
143 | 0 | bucket_schema->add_columns()->set_name(col_name); |
144 | 0 | } |
145 | 0 | bucket_schema->set_num_buckets(num_buckets); |
146 | 0 | bucket_schema->set_seed(seed); |
147 | 0 | return *this; |
148 | 0 | } |
149 | | |
150 | | YBTableCreator& YBTableCreator::set_range_partition_columns( |
151 | | const std::vector<std::string>& columns, |
152 | 415 | const std::vector<std::string>& split_rows) { |
153 | 415 | PartitionSchemaPB::RangeSchemaPB* range_schema = |
154 | 415 | partition_schema_->mutable_range_schema(); |
155 | 415 | range_schema->Clear(); |
156 | 660 | for (const string& col_name : columns) { |
157 | 660 | range_schema->add_columns()->set_name(col_name); |
158 | 660 | } |
159 | | |
160 | 415 | for (const auto& row : split_rows) { |
161 | 158 | range_schema->add_splits()->set_column_bounds(row); |
162 | 158 | } |
163 | 415 | return *this; |
164 | 415 | } |
165 | | |
166 | 1 | YBTableCreator& YBTableCreator::replication_info(const master::ReplicationInfoPB& ri) { |
167 | 1 | replication_info_ = std::make_unique<master::ReplicationInfoPB>(ri); |
168 | 1 | return *this; |
169 | 1 | } |
170 | | |
171 | 1.32k | YBTableCreator& YBTableCreator::indexed_table_id(const std::string& id) { |
172 | 1.32k | index_info_->set_indexed_table_id(id); |
173 | 1.32k | return *this; |
174 | 1.32k | } |
175 | | |
176 | 445 | YBTableCreator& YBTableCreator::is_local_index(bool is_local_index) { |
177 | 445 | index_info_->set_is_local(is_local_index); |
178 | 445 | return *this; |
179 | 445 | } |
180 | | |
181 | 810 | YBTableCreator& YBTableCreator::is_unique_index(bool is_unique_index) { |
182 | 810 | index_info_->set_is_unique(is_unique_index); |
183 | 810 | return *this; |
184 | 810 | } |
185 | | |
186 | 445 | YBTableCreator& YBTableCreator::is_backfill_deferred(bool is_backfill_deferred) { |
187 | 445 | index_info_->set_is_backfill_deferred(is_backfill_deferred); |
188 | 445 | return *this; |
189 | 445 | } |
190 | | |
191 | 307 | YBTableCreator& YBTableCreator::skip_index_backfill(const bool skip_index_backfill) { |
192 | 307 | skip_index_backfill_ = skip_index_backfill; |
193 | 307 | return *this; |
194 | 307 | } |
195 | | |
196 | 0 | YBTableCreator& YBTableCreator::use_mangled_column_name(bool value) { |
197 | 0 | index_info_->set_use_mangled_column_name(value); |
198 | 0 | return *this; |
199 | 0 | } |
200 | | |
201 | 5.18k | YBTableCreator& YBTableCreator::timeout(const MonoDelta& timeout) { |
202 | 5.18k | timeout_ = timeout; |
203 | 5.18k | return *this; |
204 | 5.18k | } |
205 | | |
206 | 40 | YBTableCreator& YBTableCreator::wait(bool wait) { |
207 | 40 | wait_ = wait; |
208 | 40 | return *this; |
209 | 40 | } |
210 | | |
211 | 18 | YBTableCreator& YBTableCreator::TEST_use_old_style_create_request() { |
212 | 18 | TEST_use_old_style_create_request_ = true; |
213 | 18 | return *this; |
214 | 18 | } |
215 | | |
216 | 7.01k | Status YBTableCreator::Create() { |
217 | 7.01k | const char *object_type = index_info_->has_indexed_table_id() ? "index" : "table"; |
218 | 7.01k | if (table_name_.table_name().empty()) { |
219 | 0 | return STATUS_SUBSTITUTE(InvalidArgument, "Missing $0 name", object_type); |
220 | 0 | } |
221 | | // For a redis table, no external schema is passed to the TableCreator; we create one here |
222 | | // and manage its memory within this method. |
223 | 7.01k | std::unique_ptr<YBSchema> redis_schema; |
224 | | // We create a dummy schema for the transaction status table; the redis schema is |
225 | | // lightweight enough for this purpose. |
226 | 7.01k | if (table_type_ == TableType::REDIS_TABLE_TYPE || |
227 | 7.01k | table_type_ == TableType::TRANSACTION_STATUS_TABLE_TYPE) { |
228 | 13 | CHECK(!schema_) << "Schema should not be set for redis table creation"; |
229 | 13 | redis_schema.reset(new YBSchema()); |
230 | 13 | YBSchemaBuilder b; |
231 | 13 | b.AddColumn(kRedisKeyColumnName)->Type(BINARY)->NotNull()->HashPrimaryKey(); |
232 | 13 | RETURN_NOT_OK(b.Build(redis_schema.get())); |
233 | 13 | schema(redis_schema.get()); |
234 | 13 | } |
235 | 7.01k | if (!schema_) { |
236 | 0 | return STATUS(InvalidArgument, "Missing schema"); |
237 | 0 | } |
238 | | |
239 | | // Build request. |
240 | 7.01k | master::CreateTableRequestPB req; |
241 | 7.01k | req.set_name(table_name_.table_name()); |
242 | 7.01k | table_name_.SetIntoNamespaceIdentifierPB(req.mutable_namespace_()); |
243 | 7.01k | req.set_table_type(table_type_); |
244 | 7.01k | req.set_colocated(colocated_); |
245 | | |
246 | 7.01k | if (!creator_role_name_.empty()) { |
247 | 242 | req.set_creator_role_name(creator_role_name_); |
248 | 242 | } |
249 | | |
250 | 7.01k | if (!table_id_.empty()) { |
251 | 5.14k | req.set_table_id(table_id_); |
252 | 5.14k | } |
253 | 7.01k | if (is_pg_catalog_table_) { |
254 | 313 | req.set_is_pg_catalog_table(*is_pg_catalog_table_); |
255 | 313 | } |
256 | 7.01k | if (is_pg_shared_table_) { |
257 | 50 | req.set_is_pg_shared_table(*is_pg_shared_table_); |
258 | 50 | } |
259 | | |
260 | 7.01k | if (!tablegroup_id_.empty()) { |
261 | 89 | req.set_tablegroup_id(tablegroup_id_); |
262 | 89 | } |
263 | 7.01k | if (colocation_id_ != kColocationIdNotSet) { |
264 | 24 | req.set_colocation_id(colocation_id_); |
265 | 24 | } |
266 | | |
267 | 7.01k | if (!tablespace_id_.empty()) { |
268 | 114 | req.set_tablespace_id(tablespace_id_); |
269 | 114 | } |
270 | | |
271 | 7.01k | if (!matview_pg_table_id_.empty()) { |
272 | 24 | req.set_matview_pg_table_id(matview_pg_table_id_); |
273 | 24 | } |
274 | | |
275 | | // Note that the check that the sum of min_num_replicas across placement blocks does not exceed |
276 | | // the overall placement info num_replicas is done on the master side, and an error is returned |
277 | | // if the numbers mismatch when creating a table. It is therefore the client's responsibility |
278 | | // to ensure that this does not happen. |
279 | 7.01k | if (replication_info_) { |
280 | 1 | req.mutable_replication_info()->CopyFrom(*replication_info_); |
281 | 1 | } |
282 | | |
283 | 7.01k | SchemaToPB(internal::GetSchema(*schema_), req.mutable_schema()); |
284 | | |
285 | 7.01k | if (txn_) { |
286 | 4.78k | txn_->ToPB(req.mutable_transaction()); |
287 | 4.78k | } |
288 | | |
289 | | // Set up the number of tablets (i.e. the number of splits). |
290 | 7.01k | if (num_tablets_ > 0) { |
291 | 176 | VLOG(1) << "num_tablets: number of tablets explicitly specified: " << num_tablets_; |
292 | 6.84k | } else if (schema_->table_properties().num_tablets() > 0) { |
293 | 368 | VLOG(1) << "num_tablets: number of tablets specified by user: " |
294 | 0 | << schema_->table_properties().num_tablets(); |
295 | 368 | num_tablets_ = schema_->table_properties().num_tablets(); |
296 | 6.47k | } else { |
297 | 6.47k | if (table_name_.is_system()) { |
298 | 0 | num_tablets_ = 1; |
299 | 0 | VLOG(1) << "num_tablets=1: using one tablet for a system table"; |
300 | 6.47k | } else { |
301 | 6.47k | num_tablets_ = VERIFY_RESULT(client_->NumTabletsForUserTable(table_type_)); |
302 | 6.47k | } |
303 | 6.47k | } |
304 | 7.01k | req.set_num_tablets(num_tablets_); |
305 | | |
306 | 7.01k | req.mutable_partition_schema()->CopyFrom(*partition_schema_); |
307 | | |
308 | 7.01k | if (!partitions_.empty()) { |
309 | 2 | for (const auto& p : partitions_) { |
310 | 2 | auto * np = req.add_partitions(); |
311 | 2 | p.ToPB(np); |
312 | 2 | } |
313 | 1 | } |
314 | | |
315 | | // Index mapping with data-table being indexed. |
316 | 7.01k | if (index_info_->has_indexed_table_id()) { |
317 | 1.32k | if (!TEST_use_old_style_create_request_) { |
318 | 1.30k | req.mutable_index_info()->CopyFrom(*index_info_); |
319 | 1.30k | } |
320 | | |
321 | | // For compatibility reasons, set the old fields in case new clients are talking to an old |
322 | | // master server during a rolling upgrade. |
323 | 1.32k | req.set_indexed_table_id(index_info_->indexed_table_id()); |
324 | 1.32k | req.set_is_local_index(index_info_->is_local()); |
325 | 1.32k | req.set_is_unique_index(index_info_->is_unique()); |
326 | 1.32k | req.set_skip_index_backfill(skip_index_backfill_); |
327 | 1.32k | req.set_is_backfill_deferred(index_info_->is_backfill_deferred()); |
328 | 1.32k | } |
329 | | |
330 | 7.01k | auto deadline = CoarseMonoClock::Now() + |
331 | 7.01k | (timeout_.Initialized() ? timeout_ : client_->default_admin_operation_timeout()); |
332 | | |
333 | 7.01k | auto s = client_->data_->CreateTable( |
334 | 7.01k | client_, req, *schema_, deadline, &table_id_); |
335 | | |
336 | 7.01k | if (!s.ok() && !s.IsAlreadyPresent()) { |
337 | 26 | RETURN_NOT_OK_PREPEND(s, strings::Substitute("Error creating $0 $1 on the master", |
338 | 26 | object_type, table_name_.ToString())); |
339 | 0 | } |
340 | | |
341 | | // We are here because the create request succeeded or we received an IsAlreadyPresent error. |
342 | | // The table being present in the catalog manager does not mean that it is ready to |
343 | | // receive requests. So we will call WaitForCreateTableToFinish to ensure that once this |
344 | | // request returns, the client can send operations without receiving a "Table Not Found" |
345 | | // error. |
346 | | |
347 | | // Spin until the table is fully created, if requested. |
348 | 6.99k | if (wait_) { |
349 | 6.97k | RETURN_NOT_OK(client_->data_->WaitForCreateTableToFinish( |
350 | 6.97k | client_, YBTableName(), table_id_, deadline)); |
351 | 6.97k | } |
352 | | |
353 | 6.98k | if (s.ok() && !FLAGS_client_suppress_created_logs) { |
354 | 6.97k | LOG(INFO) << "Created " << object_type << " " << table_name_.ToString() |
355 | 6.97k | << " of type " << TableType_Name(table_type_); |
356 | 6.97k | } |
357 | | |
358 | 6.98k | return s; |
359 | 6.99k | } |
360 | | |
361 | | } // namespace client |
362 | | } // namespace yb |
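
For orientation, the class covered above is a fluent builder: each setter records an option and returns *this, and Create() assembles the master::CreateTableRequestPB and (by default) waits for the table to become usable. Below is a minimal, hypothetical usage sketch of the hot path visible in the counts (table_name, table_type, schema, Create). Anything not shown in table_creator.cc itself — YBClient::NewTableCreator(), the YBTableName(YQL_DATABASE_CQL, ...) constructor, Type(STRING), the include paths, and the column/keyspace/table names — is an assumption about the wider yb::client API, not something confirmed by this file.

#include <memory>

#include "yb/client/client.h"          // assumed: declares YBClient::NewTableCreator()
#include "yb/client/schema.h"          // assumed: YBSchema / YBSchemaBuilder
#include "yb/client/table_creator.h"
#include "yb/client/yb_table_name.h"   // assumed: YBTableName
#include "yb/util/monotime.h"

// Hypothetical sketch, not taken from this file: create a two-column YCQL table.
yb::Status CreateSampleTable(yb::client::YBClient* client) {
  yb::client::YBSchema schema;
  yb::client::YBSchemaBuilder b;
  // Same builder pattern Create() uses internally for the redis schema.
  b.AddColumn("k")->Type(yb::BINARY)->NotNull()->HashPrimaryKey();
  b.AddColumn("v")->Type(yb::STRING);
  RETURN_NOT_OK(b.Build(&schema));

  auto creator = client->NewTableCreator();
  return creator->table_name(
             yb::client::YBTableName(yb::YQL_DATABASE_CQL, "my_keyspace", "sample_table"))
      .table_type(yb::client::YBTableType::YQL_TABLE_TYPE)
      .schema(&schema)
      .num_tablets(3)                           // otherwise NumTabletsForUserTable() decides
      .timeout(yb::MonoDelta::FromSeconds(60))  // otherwise the default admin operation timeout
      .Create();
}

Options that are not set simply keep their defaults, which is why the Create() path above falls back to NumTabletsForUserTable() for the tablet count and to the client's default admin operation timeout for the deadline; index creation follows the same pattern with indexed_table_id() and the other index_info_ setters.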