-
Notifications
You must be signed in to change notification settings - Fork 1k
/
catalog_manager.cc
13975 lines (12138 loc) · 570 KB
/
catalog_manager.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// The following only applies to changes made to this file as part of YugaByte development.
//
// Portions Copyright (c) YugaByte, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations
// under the License.
//
// ================================================================================================
//
// The catalog manager handles the current list of tables
// and tablets in the cluster, as well as their current locations.
// Since most operations in the master go through these data
// structures, locking is carefully managed here to prevent unnecessary
// contention and deadlocks:
//
// - each structure has an internal spinlock used for operations that
// are purely in-memory (eg the current status of replicas)
// - data that is persisted on disk is stored in separate PersistentTable(t)Info
// structs. These are managed using copy-on-write so that writers may block
// writing them back to disk while not impacting concurrent readers.
//
// Usage rules:
// - You may obtain READ locks in any order. READ locks should never block,
// since they only conflict with COMMIT which is a purely in-memory operation.
// Thus they are deadlock-free.
// - If you need a WRITE lock on both a table and one or more of its tablets,
// acquire the lock on the table first. This strict ordering prevents deadlocks.
//
// ================================================================================================
#include "yb/master/catalog_manager.h"
#include <stdlib.h>
#include <algorithm>
#include <atomic>
#include <bitset>
#include <chrono>
#include <functional>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include <boost/optional.hpp>
#include "yb/cdc/cdc_state_table.h"
#include "yb/client/client.h"
#include "yb/client/schema.h"
#include "yb/client/universe_key_client.h"
#include "yb/common/colocated_util.h"
#include "yb/common/common.pb.h"
#include "yb/common/common_flags.h"
#include "yb/common/common_util.h"
#include "yb/common/constants.h"
#include "yb/common/key_encoder.h"
#include "yb/common/pgsql_error.h"
#include "yb/common/pg_catversions.h"
#include "yb/common/ql_type.h"
#include "yb/common/ql_type_util.h"
#include "yb/common/schema_pbutil.h"
#include "yb/common/roles_permissions.h"
#include "yb/common/schema.h"
#include "yb/common/transaction.h"
#include "yb/common/wire_protocol.h"
#include "yb/consensus/consensus.h"
#include "yb/consensus/consensus.pb.h"
#include "yb/consensus/consensus_util.h"
#include "yb/consensus/metadata.pb.h"
#include "yb/consensus/opid_util.h"
#include "yb/consensus/quorum_util.h"
#include "yb/dockv/doc_key.h"
#include "yb/dockv/partial_row.h"
#include "yb/dockv/partition.h"
#include "yb/gutil/atomicops.h"
#include "yb/gutil/bind.h"
#include "yb/gutil/casts.h"
#include "yb/gutil/map-util.h"
#include "yb/gutil/mathlimits.h"
#include "yb/gutil/stl_util.h"
#include "yb/gutil/strings/escaping.h"
#include "yb/gutil/strings/join.h"
#include "yb/gutil/strings/substitute.h"
#include "yb/gutil/sysinfo.h"
#include "yb/gutil/walltime.h"
#include "yb/master/leader_epoch.h"
#include "yb/master/master_fwd.h"
#include "yb/master/async_rpc_tasks.h"
#include "yb/master/backfill_index.h"
#include "yb/master/catalog_entity_info.h"
#include "yb/master/catalog_loaders.h"
#include "yb/master/catalog_manager-internal.h"
#include "yb/master/catalog_manager_bg_tasks.h"
#include "yb/master/catalog_manager_util.h"
#include "yb/master/cluster_balance.h"
#include "yb/master/encryption_manager.h"
#include "yb/master/master.h"
#include "yb/master/master_admin.pb.h"
#include "yb/master/master_client.pb.h"
#include "yb/master/master_cluster.proxy.h"
#include "yb/master/master_dcl.pb.h"
#include "yb/master/master_ddl.pb.h"
#include "yb/master/master_encryption.pb.h"
#include "yb/master/master_error.h"
#include "yb/master/master_heartbeat.pb.h"
#include "yb/master/master_replication.pb.h"
#include "yb/master/master_util.h"
#include "yb/master/permissions_manager.h"
#include "yb/master/post_tablet_create_task_base.h"
#include "yb/master/scoped_leader_shared_lock-internal.h"
#include "yb/master/sys_catalog.h"
#include "yb/master/sys_catalog_constants.h"
#include "yb/master/ts_descriptor.h"
#include "yb/master/xcluster/xcluster_manager.h"
#include "yb/master/yql_aggregates_vtable.h"
#include "yb/master/yql_auth_resource_role_permissions_index.h"
#include "yb/master/yql_auth_role_permissions_vtable.h"
#include "yb/master/yql_auth_roles_vtable.h"
#include "yb/master/yql_columns_vtable.h"
#include "yb/master/yql_empty_vtable.h"
#include "yb/master/yql_functions_vtable.h"
#include "yb/master/yql_indexes_vtable.h"
#include "yb/master/yql_keyspaces_vtable.h"
#include "yb/master/yql_local_vtable.h"
#include "yb/master/yql_partitions_vtable.h"
#include "yb/master/yql_peers_vtable.h"
#include "yb/master/yql_size_estimates_vtable.h"
#include "yb/master/yql_tables_vtable.h"
#include "yb/master/yql_triggers_vtable.h"
#include "yb/master/yql_types_vtable.h"
#include "yb/master/yql_views_vtable.h"
#include "yb/master/ysql_ddl_verification_task.h"
#include "yb/master/ysql_tablegroup_manager.h"
#include "yb/rpc/messenger.h"
#include "yb/rpc/rpc_controller.h"
#include "yb/tablet/operations/change_metadata_operation.h"
#include "yb/tablet/tablet.h"
#include "yb/tablet/tablet_metadata.h"
#include "yb/tablet/tablet_peer.h"
#include "yb/tablet/tablet_retention_policy.h"
#include "yb/tserver/remote_bootstrap_client.h"
#include "yb/tserver/ts_tablet_manager.h"
#include "yb/tserver/tserver_error.h"
#include "yb/util/atomic.h"
#include "yb/util/backoff_waiter.h"
#include "yb/util/countdown_latch.h"
#include "yb/util/debug-util.h"
#include "yb/util/debug/trace_event.h"
#include "yb/util/flags.h"
#include "yb/util/format.h"
#include "yb/util/hash_util.h"
#include "yb/util/is_operation_done_result.h"
#include "yb/util/locks.h"
#include "yb/util/logging.h"
#include "yb/util/math_util.h"
#include "yb/util/metrics.h"
#include "yb/util/monotime.h"
#include "yb/util/net/net_util.h"
#include "yb/util/oid_generator.h"
#include "yb/util/random_util.h"
#include "yb/util/rw_mutex.h"
#include "yb/util/scope_exit.h"
#include "yb/util/semaphore.h"
#include "yb/util/shared_lock.h"
#include "yb/util/size_literals.h"
#include "yb/util/status_format.h"
#include "yb/util/status_log.h"
#include "yb/util/status.h"
#include "yb/util/stopwatch.h"
#include "yb/util/string_case.h"
#include "yb/util/string_util.h"
#include "yb/util/sync_point.h"
#include "yb/util/thread.h"
#include "yb/util/threadpool.h"
#include "yb/util/to_stream.h"
#include "yb/util/trace.h"
#include "yb/util/tsan_util.h"
#include "yb/util/uuid.h"
#include "yb/util/yb_pg_errcodes.h"
#include "yb/yql/pgwrapper/pg_wrapper.h"
#include "yb/yql/redis/redisserver/redis_constants.h"
using namespace std::literals;
using namespace yb::size_literals;
// TODO: Cannot be runtime state due to cdc_client...
DEFINE_NON_RUNTIME_int32(master_ts_rpc_timeout_ms, 30 * 1000, // 30 sec
"Timeout used for the Master->TS async rpc calls.");
TAG_FLAG(master_ts_rpc_timeout_ms, advanced);
// The time is temporarily set to 600 sec to avoid hitting the tablet replacement code inherited from
// Kudu. Removing tablet replacement code will be fixed in GH-6006
DEFINE_RUNTIME_int32(
tablet_creation_timeout_ms, 600 * 1000, // 600 sec
"Timeout used by the master when attempting to create tablet "
"replicas during table creation.");
TAG_FLAG(tablet_creation_timeout_ms, advanced);
DEFINE_test_flag(bool, disable_tablet_deletion, false,
"Whether catalog manager should disable tablet deletion.");
DEFINE_test_flag(bool, get_ysql_catalog_version_from_sys_catalog, false,
"Whether catalog manager should get the ysql catalog version "
"from the sys_catalog.");
// TODO: should this be a test flag?
DEFINE_RUNTIME_bool(catalog_manager_wait_for_new_tablets_to_elect_leader, true,
"Whether the catalog manager should wait for a newly created tablet to "
"elect a leader before considering it successfully created. "
"This is disabled in some tests where we explicitly manage leader "
"election.");
TAG_FLAG(catalog_manager_wait_for_new_tablets_to_elect_leader, hidden);
// TODO: should this be a test flag?
DEFINE_RUNTIME_int32(catalog_manager_inject_latency_in_delete_table_ms, 0,
"Number of milliseconds that the master will sleep in DeleteTable.");
TAG_FLAG(catalog_manager_inject_latency_in_delete_table_ms, hidden);
DECLARE_int32(catalog_manager_bg_task_wait_ms);
DEFINE_RUNTIME_int32(replication_factor, 3,
"Default number of replicas for tables that do not have the num_replicas set. "
"Note: Changing this at runtime will only affect newly created tables.");
TAG_FLAG(replication_factor, advanced);
DEFINE_RUNTIME_int32(max_create_tablets_per_ts, 50,
"The number of tablets per TS that can be requested for a new table.");
TAG_FLAG(max_create_tablets_per_ts, advanced);
DEFINE_RUNTIME_int32(catalog_manager_report_batch_size, 1,
"The max number of tablets evaluated in the heartbeat as a single SysCatalog update.");
TAG_FLAG(catalog_manager_report_batch_size, advanced);
// TODO: Is this code even useful?
DEFINE_RUNTIME_int32(master_failover_catchup_timeout_ms, 30 * 1000 * yb::kTimeMultiplier, // 30 sec
"Amount of time to give a newly-elected leader master to load"
" the previous master's metadata and become active. If this time"
" is exceeded, the node crashes.");
TAG_FLAG(master_failover_catchup_timeout_ms, advanced);
TAG_FLAG(master_failover_catchup_timeout_ms, experimental);
DEFINE_RUNTIME_bool(master_tombstone_evicted_tablet_replicas, true,
"Whether the Master should tombstone (delete) tablet replicas that "
"are no longer part of the latest reported raft config.");
TAG_FLAG(master_tombstone_evicted_tablet_replicas, hidden);
DECLARE_bool(master_ignore_deleted_on_load);
// Temporary. Can be removed after long-run testing.
// TODO: how temporary is this?
DEFINE_RUNTIME_bool(master_ignore_stale_cstate, true,
"Whether Master processes the raft config when the version is lower.");
TAG_FLAG(master_ignore_stale_cstate, hidden);
// TODO: should this be a test flag?
DEFINE_RUNTIME_bool(catalog_manager_check_ts_count_for_create_table, true,
"Whether the master should ensure that there are enough live tablet "
"servers to satisfy the provided replication count before allowing "
"a table to be created.");
TAG_FLAG(catalog_manager_check_ts_count_for_create_table, hidden);
DEFINE_test_flag(bool, catalog_manager_check_yql_partitions_exist_for_is_create_table_done, true,
"Whether the master should ensure that all of a table's tablets are "
"in the YQL system.partitions vtable during the IsCreateTableDone check.");
DEFINE_test_flag(uint64, inject_latency_during_remote_bootstrap_secs, 0,
"Number of seconds to sleep during a remote bootstrap.");
DEFINE_test_flag(uint64, inject_latency_during_tablet_report_ms, 0,
"Number of milliseconds to sleep during the processing of a tablet batch.");
DEFINE_test_flag(bool, catalog_manager_simulate_system_table_create_failure, false,
"This is only used in tests to simulate a failure where the table information is "
"persisted in syscatalog, but the tablet information is not yet persisted and "
"there is a failure.");
DEFINE_test_flag(bool, fail_table_creation_at_preparing_state, false,
"This is only used in tests to simulate a failure that occurs when a table in "
"process of creation is still in PREPARING state.");
DEFINE_test_flag(bool, pause_before_send_hinted_election, false,
"Inside StartElectionIfReady, pause before sending request for hinted election");
// This flag is only used on the first master leader setup, after which we serialize the
// cluster_uuid to disk. So changing this at runtime is meaningless.
DEFINE_NON_RUNTIME_string(cluster_uuid, "", "Cluster UUID to be used by this cluster");
TAG_FLAG(cluster_uuid, hidden);
DEFINE_RUNTIME_int32(transaction_table_num_tablets, 0,
"Number of tablets to use when creating the transaction status table."
"0 to use transaction_table_num_tablets_per_tserver.");
DEFINE_RUNTIME_int32(transaction_table_num_tablets_per_tserver, kAutoDetectNumShardsPerTServer,
"The default number of tablets per tablet server for transaction status table. If the value is "
"-1, the system automatically determines an appropriate value based on number of CPU cores.");
DEFINE_RUNTIME_bool(auto_create_local_transaction_tables, true,
"Whether or not to create local transaction status tables automatically on table "
"creation with a tablespace with placement specified.");
DEFINE_test_flag(bool, name_transaction_tables_with_tablespace_id, false,
"This is only used in tests to make associating automatically created transaction "
"tables with their tablespaces easier, and causes transaction tables created "
"automatically for tablespaces to include the tablespace oid in their names.");
DEFINE_test_flag(bool, consider_all_local_transaction_tables_local, false,
"This is only used in tests, and forces the catalog manager to return all tablets "
"of all transaction tables with placements as placement local, regardless of "
"their placement.");
DEFINE_RUNTIME_bool(master_enable_metrics_snapshotter, false,
"Should metrics snapshotter be enabled");
DEFINE_RUNTIME_int32(metrics_snapshots_table_num_tablets, 0,
"Number of tablets to use when creating the metrics snapshots table."
"0 to use the same default num tablets as for regular tables.");
DEFINE_RUNTIME_bool(disable_index_backfill, false,
"A kill switch to disable multi-stage backfill for YCQL indexes.");
TAG_FLAG(disable_index_backfill, hidden);
DEFINE_RUNTIME_bool(disable_index_backfill_for_non_txn_tables, true,
"A kill switch to disable multi-stage backfill for user enforced YCQL indexes. "
"Note that enabling this feature may cause the create index flow to be slow. "
"This is needed to ensure the safety of the index backfill process. See also "
"index_backfill_upperbound_for_user_enforced_txn_duration_ms");
TAG_FLAG(disable_index_backfill_for_non_txn_tables, hidden);
DEFINE_RUNTIME_bool(enable_transactional_ddl_gc, true,
"A kill switch for transactional DDL GC. Temporary safety measure.");
TAG_FLAG(enable_transactional_ddl_gc, hidden);
// TODO: should this be a test flag?
DEFINE_RUNTIME_bool(hide_pg_catalog_table_creation_logs, false,
"Whether to hide detailed log messages for PostgreSQL catalog table creation. "
"This cuts down test logs significantly.");
TAG_FLAG(hide_pg_catalog_table_creation_logs, hidden);
DEFINE_test_flag(int32, simulate_slow_table_create_secs, 0,
"Simulates a slow table creation by sleeping after the table has been added to memory.");
DEFINE_test_flag(int32, simulate_slow_system_tablet_bootstrap_secs, 0,
"Simulates a slow tablet bootstrap by adding a sleep before system tablet init.");
DEFINE_test_flag(bool, return_error_if_namespace_not_found, false,
"Return an error from ListTables if a namespace id is not found in the map");
DEFINE_test_flag(bool, hang_on_namespace_transition, false,
"Used in tests to simulate a lapse between issuing a namespace op and final processing.");
DEFINE_test_flag(bool, simulate_crash_after_table_marked_deleting, false,
"Crash yb-master after table's state is set to DELETING. This skips tablets deletion.");
DEPRECATE_FLAG(bool, master_drop_table_after_task_response, "11_2022");
DEFINE_test_flag(bool, tablegroup_master_only, false,
"This is only for MasterTest to be able to test tablegroups without the"
" transaction status table being created.");
DEFINE_RUNTIME_bool(enable_register_ts_from_raft, true,
"Whether to register a tserver from the consensus information of a reported tablet.");
DECLARE_int32(tserver_unresponsive_timeout_ms);
DEFINE_RUNTIME_bool(use_create_table_leader_hint, true,
"Whether the Master should hint which replica for each tablet should "
"be leader initially on tablet creation.");
DEFINE_test_flag(bool, create_table_leader_hint_min_lexicographic, false,
"Whether the Master should hint replica with smallest lexicographic rank for each "
"tablet as leader initially on tablet creation.");
DEFINE_RUNTIME_double(heartbeat_safe_deadline_ratio, .20,
"When the heartbeat deadline has this percentage of time remaining, "
"the master should halt tablet report processing so it can respond in time.");
DECLARE_int32(heartbeat_rpc_timeout_ms);
DEFINE_test_flag(int32, num_missing_tablets, 0, "Simulates missing tablets in a table");
DEFINE_RUNTIME_int32(partitions_vtable_cache_refresh_secs, 30,
"Amount of time to wait before refreshing the system.partitions cached vtable. "
"If generate_partitions_vtable_on_changes is true and this flag is > 0, then this background "
"task will update the cached vtable using the internal map. "
"If generate_partitions_vtable_on_changes is false and this flag is > 0, then this background "
"task will be responsible for regenerating and updating the entire cached vtable.");
DEFINE_RUNTIME_bool(invalidate_yql_partitions_cache_on_create_table, true,
"Whether the YCQL system.partitions vtable cache should be invalidated "
"on a create table. Note that this requires "
"partitions_vtable_cache_refresh_secs > 0 and "
"generate_partitions_vtable_on_changes = false in order to take effect. "
"If set to true, then this will ensure that newly created tables will be seen "
"immediately in system.partitions.");
DEFINE_RUNTIME_int32(txn_table_wait_min_ts_count, 1,
"Minimum Number of TS to wait for before creating the transaction status table."
" Default value is 1. We wait for atleast --replication_factor if this value"
" is smaller than that");
TAG_FLAG(txn_table_wait_min_ts_count, advanced);
// TODO (mbautin, 2019-12): switch the default to true after updating all external callers
// (yb-ctl, YugaWare) and unit tests.
DEFINE_RUNTIME_bool(master_auto_run_initdb, false,
"Automatically run initdb on master leader initialization");
DEFINE_RUNTIME_bool(enable_ysql_tablespaces_for_placement, true,
"If set, tablespaces will be used for placement of YSQL tables.");
DEFINE_RUNTIME_int32(ysql_tablespace_info_refresh_secs, 30,
"Frequency at which the table to tablespace information will be updated in master "
"from pg catalog tables. A value of -1 disables the refresh task.");
// Change the default value of this flag to false once we declare Colocation GA.
DEFINE_NON_RUNTIME_bool(ysql_legacy_colocated_database_creation, false,
"Whether to create a legacy colocated database using pre-Colocation GA implementation");
TAG_FLAG(ysql_legacy_colocated_database_creation, advanced);
DEPRECATE_FLAG(int64, tablet_split_size_threshold_bytes, "10_2022");
DEFINE_RUNTIME_int64(tablet_split_low_phase_shard_count_per_node, 1,
"The per-node tablet leader count until which a table is splitting at the phase 1 threshold, "
"as defined by tablet_split_low_phase_size_threshold_bytes.");
DEFINE_RUNTIME_int64(tablet_split_high_phase_shard_count_per_node, 24,
"The per-node tablet leader count until which a table is splitting at the phase 2 threshold, "
"as defined by tablet_split_high_phase_size_threshold_bytes.");
DEFINE_RUNTIME_int64(tablet_split_low_phase_size_threshold_bytes, 128_MB,
"The tablet size threshold at which to split tablets in phase 1. "
"See tablet_split_low_phase_shard_count_per_node.");
DEFINE_RUNTIME_int64(tablet_split_high_phase_size_threshold_bytes, 10_GB,
"The tablet size threshold at which to split tablets in phase 2. "
"See tablet_split_high_phase_shard_count_per_node.");
DEFINE_RUNTIME_int64(tablet_force_split_threshold_bytes, 100_GB,
"The tablet size threshold at which to split tablets regardless of how many tablets "
"exist in the table already. This should be configured to prevent runaway whale "
"tablets from forming in your cluster even if both automatic splitting phases have "
"been finished.");
DEFINE_test_flag(bool, crash_server_on_sys_catalog_leader_affinity_move, false,
"When set, crash the master process if it performs a sys catalog leader affinity "
"move.");
DEFINE_RUNTIME_int32(blacklist_progress_initial_delay_secs, yb::master::kDelayAfterFailoverSecs,
"When a master leader failsover, the time until which the progress of load movement "
"off the blacklisted tservers is reported as 0. This initial delay "
"gives sufficient time for heartbeats so that we don't report"
" a premature incorrect completion.");
DEFINE_test_flag(bool, validate_all_tablet_candidates, false,
"When set to true, consider any tablet a valid candidate for splitting. "
"Specifically this flag ensures that ValidateSplitCandidateTable and "
"ValidateSplitCandidateTablet always return OK and all tablets are considered "
"valid candidates for splitting.");
DEFINE_test_flag(bool, skip_placement_validation_createtable_api, false,
"When set, it skips checking that all the tablets of a table have enough tservers"
" conforming to the table placement policy during CreateTable API call.");
DEFINE_test_flag(int32, slowdown_alter_table_rpcs_ms, 0,
"Slows down the alter table rpc's send and response handler so that the TServer "
"has a heartbeat delay and triggers tablet leader change.");
DEFINE_test_flag(bool, reject_delete_not_serving_tablet_rpc, false,
"Whether to reject DeleteNotServingTablet RPC.");
DEFINE_test_flag(double, crash_after_creating_single_split_tablet, 0.0,
"Crash inside CatalogManager::RegisterNewTabletForSplit after calling Upsert.");
DEFINE_test_flag(bool, error_after_creating_single_split_tablet, false,
"Return an error inside CatalogManager::RegisterNewTabletForSplit "
"after calling Upsert.");
DEFINE_RUNTIME_bool(enable_delete_truncate_xcluster_replicated_table, false,
"When set, enables deleting/truncating YCQL tables currently in xCluster replication. "
"For YSQL tables, deletion is always allowed and TRUNCATE is always disallowed.");
DEFINE_RUNTIME_bool(xcluster_wait_on_ddl_alter, true,
"When xCluster replication sends a DDL change, wait for the user to enter a "
"compatible/matching entry. Note: Can also set at runtime to resume after stall.");
DEFINE_test_flag(bool, sequential_colocation_ids, false,
"When set, colocation IDs will be assigned sequentially (starting from 20001) "
"rather than at random. This is especially useful for making pg_regress "
"tests output consistent and predictable.");
DEFINE_RUNTIME_bool(disable_truncate_table, false,
"When enabled, truncate table will be disallowed");
DEFINE_RUNTIME_bool(enable_truncate_on_pitr_table, false,
"When enabled, truncate table will be allowed on PITR tables in YCQL. For PITR tables in YSQL, "
"truncate is always allowed by default, and it can be turned off by setting the "
"ysql_yb_enable_alter_table_rewrite autoflag to false.");
DEFINE_test_flag(double, fault_crash_after_registering_split_children, 0.0,
"Crash after registering the children for a tablet split.");
DEFINE_test_flag(uint64, delay_sys_catalog_reload_secs, 0,
"Number of seconds to sleep before a sys catalog reload.");
DECLARE_bool(transaction_tables_use_preferred_zones);
DECLARE_string(tmp_dir);
DEFINE_RUNTIME_bool(batch_ysql_system_tables_metadata, true,
"Whether change metadata operation and SysCatalogTable upserts for ysql system tables during a "
"create database is performed one by one or batched together");
DEFINE_test_flag(bool, pause_split_child_registration,
false, "Pause split after registering one child");
DEFINE_test_flag(bool, keep_docdb_table_on_ysql_drop_table, false,
"When enabled does not delete tables from the docdb layer, resulting in YSQL "
"tables only being dropped in the postgres layer.");
DEFINE_RUNTIME_int32(max_concurrent_delete_replica_rpcs_per_ts, 50,
"The maximum number of outstanding DeleteReplica RPCs sent to an individual tserver.");
DEFINE_RUNTIME_bool(
enable_truncate_cdcsdk_table, false,
"When set, enables truncating tables currently part of a CDCSDK Stream");
DEFINE_RUNTIME_AUTO_bool(enable_tablet_split_of_xcluster_replicated_tables, kExternal, false, true,
"When set, it enables automatic tablet splitting for tables that are part of an "
"xCluster replication setup");
DEFINE_RUNTIME_bool(enable_tablet_split_of_xcluster_bootstrapping_tables, false,
"When set, it enables automatic tablet splitting for tables that are part of an "
"xCluster replication setup and are currently being bootstrapped for xCluster.");
DEFINE_RUNTIME_bool(enable_tablet_split_of_cdcsdk_streamed_tables, false,
"When set, it enables automatic tablet splitting for tables that are part of a "
"CDCSDK stream");
METRIC_DEFINE_gauge_uint32(cluster, num_tablet_servers_live,
"Number of live tservers in the cluster", yb::MetricUnit::kUnits,
"The number of tablet servers that have responded or done a heartbeat "
"in the time interval defined by the gflag "
"FLAGS_tserver_unresponsive_timeout_ms.");
METRIC_DEFINE_gauge_uint32(cluster, num_tablet_servers_dead,
"Number of dead tservers in the cluster", yb::MetricUnit::kUnits,
"The number of tablet servers that have not responded or done a "
"heartbeat in the time interval defined by the gflag "
"FLAGS_tserver_unresponsive_timeout_ms.");
METRIC_DEFINE_counter(cluster, create_table_too_many_tablets,
"How many CreateTable requests have failed due to too many tablets", yb::MetricUnit::kRequests,
"The number of CreateTable request errors due to attempting to create too many tablets.");
METRIC_DEFINE_counter(
cluster, split_tablet_too_many_tablets,
"How many SplitTablet operations have failed because the cluster cannot host any more tablets",
yb::MetricUnit::kRequests,
"The number of SplitTablet operations failed because the cluster cannot host any more "
"tablets.");
DEFINE_test_flag(bool, duplicate_addtabletotablet_request, false,
"Send a duplicate AddTableToTablet request to the tserver to simulate a retry.");
DEFINE_test_flag(bool, create_table_in_running_state, false,
"In master-only tests, create tables in the running state without waiting for tablet creation, "
"as we will not have any tablet servers.");
DEFINE_test_flag(bool, pause_before_upsert_ysql_sys_table, false,
"Pause before upserting a table in CreateYsqlSysTable.");
DEFINE_test_flag(bool, create_table_with_empty_pgschema_name, false,
"Create YSQL tables with an empty pgschema_name field in their schema.");
DEFINE_test_flag(bool, create_table_with_empty_namespace_name, false,
"Create YSQL tables with an empty namespace_name field in their schema.");
DEFINE_test_flag(int32, delay_split_registration_secs, 0,
"Delay creating child tablets and upserting them to sys catalog");
DECLARE_bool(ysql_enable_colocated_tables_with_tablespaces);
DEFINE_NON_RUNTIME_bool(enable_heartbeat_pg_catalog_versions_cache, false,
"Whether to enable the use of heartbeat catalog versions cache for the "
"pg_yb_catalog_version table which can help to reduce the number of reads "
"from the table. This is more useful when there are many databases and/or "
"many tservers in the cluster.");
DEFINE_test_flag(string, block_alter_table, "",
"If non-empty, the specified alter table step is blocked. Possible values are "
"\"alter_schema\" (blocks the schema from being altered) and \"completion\","
"(blocks the service completion of the alter table request)");
DECLARE_bool(master_enable_universe_uuid_heartbeat_check);
DECLARE_int32(heartbeat_interval_ms);
DEFINE_RUNTIME_bool(master_join_existing_universe, false,
"This flag helps prevent the accidental creation of a new universe. If the master_addresses "
"flag is misconfigured or the on disk state of a master is wiped out the master could create a "
"fresh universe, causing inconsistency with other masters in the universe and potential data "
"loss. Setting this flag will prevent a master from creating a fresh universe regardless of "
"other factors. To create a new universe with a new group of masters, unset this flag. Set "
"this flag on all new and existing master processes once the universe creation completes.");
DEFINE_RUNTIME_bool(master_enable_deletion_check_for_orphaned_tablets, true,
"When set, this flag adds stricter protection around the deletion of orphaned tablets. When "
"master leader is processing a tablet report and doesn't know about a tablet, explicitly "
"check that the tablet has been deleted in the past. If it has, then issue a DeleteTablet "
"to the tservers. Otherwise, it means that tserver has heartbeated to the wrong cluster, "
"or there has been sys catalog corruption. In this case, log an error but don't actually "
"delete any data.");
DEFINE_test_flag(bool, simulate_sys_catalog_data_loss, false,
"On the heartbeat processing path, simulate a scenario where tablet metadata is missing due to "
"a corruption. ");
DEFINE_RUNTIME_uint32(maximum_tablet_leader_lease_expired_secs, 2 * 60,
"If the leader lease in master's view has expired for this amount of seconds, "
"treat the lease as expired for too long time.");
DEFINE_test_flag(bool, disable_set_catalog_version_table_in_perdb_mode, false,
"Whether to disable setting the catalog version table in perdb mode.");
DEFINE_RUNTIME_uint32(initial_tserver_registration_duration_secs,
yb::master::kDelayAfterFailoverSecs,
"Amount of time to wait between becoming master leader and relying on all live TServers having "
"registered.");
TAG_FLAG(initial_tserver_registration_duration_secs, advanced);
DECLARE_bool(ysql_yb_enable_replica_identity);
namespace yb {
namespace master {
using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using std::set;
using std::min;
using std::map;
using std::pair;
using namespace std::placeholders;
using consensus::kMinimumTerm;
using consensus::CONSENSUS_CONFIG_COMMITTED;
using consensus::CONSENSUS_CONFIG_ACTIVE;
using consensus::Consensus;
using consensus::ConsensusStatePB;
using consensus::GetConsensusRole;
using consensus::PeerMemberType;
using consensus::RaftPeerPB;
using consensus::StartRemoteBootstrapRequestPB;
using dockv::Partition;
using dockv::PartitionSchema;
using rpc::RpcContext;
using server::MonitoredTask;
using strings::Substitute;
using tablet::TABLET_DATA_DELETED;
using tablet::TABLET_DATA_TOMBSTONED;
using tablet::TabletDataState;
using tablet::RaftGroupMetadataPtr;
using tablet::TabletPeer;
using tablet::RaftGroupStatePB;
using yb::pgwrapper::PgWrapper;
using yb::server::MasterAddressesToString;
using yb::client::YBSchema;
using yb::client::YBSchemaBuilder;
// Macros to access index information in CATALOG.
//
// NOTES from file master.proto for SysTablesEntryPB.
// - For index table: [to be deprecated and replaced by "index_info"]
// optional bytes indexed_table_id = 13; // Indexed table id of this index.
// optional bool is_local_index = 14 [ default = false ]; // Whether this is a local index.
// optional bool is_unique_index = 15 [ default = false ]; // Whether this is a unique index.
// - During transition period, we have to consider both fields and the following macros help
// avoiding duplicate protobuf version check thru out our code.
// Returns the id of the table that an index table indexes, preferring the
// newer index_info sub-message over the deprecated top-level field.
const std::string& GetIndexedTableId(const SysTablesEntryPB& pb) {
  if (pb.has_index_info()) {
    return pb.index_info().indexed_table_id();
  }
  return pb.indexed_table_id();
}
namespace {
// True if the index is a local index; reads the deprecated field when
// index_info is absent (see the transition-period note above).
#define PROTO_GET_IS_LOCAL(tabpb) \
  (tabpb.has_index_info() ? tabpb.index_info().is_local() \
                          : tabpb.is_local_index())

// True if the index enforces uniqueness; same transition-period fallback.
#define PROTO_GET_IS_UNIQUE(tabpb) \
  (tabpb.has_index_info() ? tabpb.index_info().is_unique() \
                          : tabpb.is_unique_index())

// True if the entry (accessed through a pointer) describes an index table.
#define PROTO_PTR_IS_INDEX(tabpb) \
  (tabpb->has_index_info() || !tabpb->indexed_table_id().empty())

// True if the entry (accessed through a pointer) describes a plain table.
#define PROTO_PTR_IS_TABLE(tabpb) \
  (!tabpb->has_index_info() && tabpb->indexed_table_id().empty())

#if (0)
// Once the deprecated fields are obsolete, the above macros should be defined as the following.
#define GetIndexedTableId(tabpb) (tabpb.index_info().indexed_table_id())

#define PROTO_GET_IS_LOCAL(tabpb) (tabpb.index_info().is_local())

#define PROTO_GET_IS_UNIQUE(tabpb) (tabpb.index_info().is_unique())

#define PROTO_IS_INDEX(tabpb) (tabpb.has_index_info())

#define PROTO_IS_TABLE(tabpb) (!tabpb.has_index_info())

#define PROTO_PTR_IS_INDEX(tabpb) (tabpb->has_index_info())

#define PROTO_PTR_IS_TABLE(tabpb) (!tabpb->has_index_info())
#endif
// Incrementally fills in an IndexInfoPB during index creation: first the
// basic index properties, then the column mapping derived from the indexed
// table's schema. Holds a reference to the target proto; does not own it.
class IndexInfoBuilder {
 public:
  explicit IndexInfoBuilder(IndexInfoPB* index_info) : index_info_(*index_info) {
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
  }

  // Records the indexed table id plus locality/uniqueness. A freshly built
  // index always starts at version 0.
  void ApplyProperties(const TableId& indexed_table_id, bool is_local, bool is_unique) {
    index_info_.set_indexed_table_id(indexed_table_id);
    index_info_.set_version(0);
    index_info_.set_is_local(is_local);
    index_info_.set_is_unique(is_unique);
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
  }

  // Resolves every index column to a column of the indexed table by name and
  // records the key-column layout of both schemas. Fails with NotFound when
  // an index column has no counterpart in the indexed table.
  Status ApplyColumnMapping(const Schema& indexed_schema, const Schema& index_schema) {
    for (size_t idx = 0; idx < index_schema.num_columns(); idx++) {
      const auto& name = index_schema.column(idx).name();
      const auto mapped_idx = indexed_schema.find_column(name);
      if (PREDICT_FALSE(mapped_idx == Schema::kColumnNotFound)) {
        return STATUS(NotFound, "The indexed table column does not exist", name);
      }
      auto* column_pb = index_info_.add_columns();
      column_pb->set_column_id(index_schema.column_id(idx));
      column_pb->set_indexed_column_id(indexed_schema.column_id(mapped_idx));
    }
    index_info_.set_hash_column_count(narrow_cast<uint32_t>(index_schema.num_hash_key_columns()));
    index_info_.set_range_column_count(
        narrow_cast<uint32_t>(index_schema.num_range_key_columns()));
    // Key columns of the indexed table: hash columns come first, then range columns.
    for (size_t idx = 0; idx < indexed_schema.num_hash_key_columns(); idx++) {
      index_info_.add_indexed_hash_column_ids(indexed_schema.column_id(idx));
    }
    for (size_t idx = indexed_schema.num_hash_key_columns();
         idx < indexed_schema.num_key_columns(); idx++) {
      index_info_.add_indexed_range_column_ids(indexed_schema.column_id(idx));
    }
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
    return Status::OK();
  }

 private:
  IndexInfoPB& index_info_;
};
// Maps a namespace lifecycle state to the master error code returned to a
// client whose request cannot proceed in that state.
MasterErrorPB_Code NamespaceMasterError(SysNamespaceEntryPB_State state) {
  switch (state) {
    // Transient states: the operation may succeed once the transition ends.
    case SysNamespaceEntryPB::PREPARING: [[fallthrough]];
    case SysNamespaceEntryPB::DELETING:
      return MasterErrorPB::IN_TRANSITION_CAN_RETRY;
    // Steady/terminal states: hitting this path is an internal error.
    case SysNamespaceEntryPB::DELETED: [[fallthrough]];
    case SysNamespaceEntryPB::FAILED: [[fallthrough]];
    case SysNamespaceEntryPB::RUNNING:
      return MasterErrorPB::INTERNAL_ERROR;
    default:
      FATAL_INVALID_ENUM_VALUE(SysNamespaceEntryPB_State, state);
  }
}
// Maps a database type to its slot in NamespaceNameMapper::typed_maps_.
// Crashes on an unknown database type.
size_t GetNameMapperIndex(YQLDatabase db_type) {
  switch (db_type) {
    case YQL_DATABASE_CQL: return 1;
    case YQL_DATABASE_PGSQL: return 2;
    case YQL_DATABASE_REDIS: return 3;
    case YQL_DATABASE_UNKNOWN: break;
  }
  CHECK(false) << "Unexpected db type " << db_type;
  return 0;
}
// Decides whether online index backfill may run for a table of the given
// type. Flags are read atomically so a concurrent flag update cannot be
// observed twice with different values within one decision.
bool IsIndexBackfillEnabled(TableType table_type, bool is_transactional) {
  if (table_type == PGSQL_TABLE_TYPE) {
    return !GetAtomicFlag(&FLAGS_ysql_disable_index_backfill);
  }
  if (GetAtomicFlag(&FLAGS_disable_index_backfill)) {
    return false;
  }
  // Non-transactional YCQL tables can additionally be opted out via a flag.
  if (!is_transactional && GetAtomicFlag(&FLAGS_disable_index_backfill_for_non_txn_tables)) {
    return false;
  }
  return true;
}
// Default interval between runs of the background task that refreshes the
// cached YQL partitions metadata.
constexpr auto kDefaultYQLPartitionsRefreshBgTaskSleep = 10s;

// Chooses how many transaction-status table shards to create per tserver
// when the corresponding flag is in auto-detect mode: fewer shards under
// TSAN or on small machines, 8 otherwise.
int GetTransactionTableNumShardsPerTServer() {
  if (IsTsan()) {
    return 2;
  }
  if (base::NumCPUs() <= 2) {
    return 4;
  }
  return 8;
}
// Initializes master-specific flag values on process startup. Currently only
// resolves the auto-detect sentinel for transaction table sharding.
void InitMasterFlags() {
  yb::InitCommonFlags();
  const auto num_tablets = GetAtomicFlag(&FLAGS_transaction_table_num_tablets_per_tserver);
  if (num_tablets != kAutoDetectNumShardsPerTServer) {
    return;
  }
  const auto value = GetTransactionTableNumShardsPerTServer();
  VLOG(1) << "Auto setting FLAGS_transaction_table_num_tablets_per_tserver to " << value;
  CHECK_OK(SET_FLAG_DEFAULT_AND_CURRENT(transaction_table_num_tablets_per_tserver, value));
}
// Interprets a table lookup result: true when found, false when the table is
// known not to exist, otherwise propagates the lookup error unchanged.
Result<bool> DoesTableExist(const Result<TableInfoPtr>& result) {
  if (result.ok()) {
    return true;
  }
  const auto& status = result.status();
  if (status.IsNotFound() && MasterError(status) == MasterErrorPB::OBJECT_NOT_FOUND) {
    return false;
  }
  return status;
}
// Selects either the leader blacklist or the regular server blacklist from
// the cluster config, depending on the caller's interest.
const BlacklistPB& GetBlacklist(const SysClusterConfigEntryPB& pb, bool blacklist_leader) {
  if (blacklist_leader) {
    return pb.leader_blacklist();
  }
  return pb.server_blacklist();
}
// Orders all servers in the masters argument by their score in ascending order. Scores are inverse
// priorities, i.e. masters with lower scores have a greater priority to act as the sys catalog
// tablet leader. Scores are computed from the affinitized_zones and the blacklist. All masters with
// a score greater than or equal to the current leader are dropped from the list. Also returns a
// bool which is true if the current leader is a valid choice for the sys catalog tablet leader.
//
// If a master is blacklisted, it has the highest possible score.
// Otherwise if a master is not in any affinitized zone, it has the second highest score.
// Otherwise the score of a master is the index of its placement zone in the affinitized_zones
// input.
const std::pair<std::vector<std::pair<ServerEntryPB, size_t>>, bool>
GetMoreEligibleSysCatalogLeaders(
const vector<AffinitizedZonesSet>& affinitized_zones, const BlacklistSet& blacklist,
const std::vector<ServerEntryPB>& masters, const ServerRegistrationPB& current_leader) {
std::unordered_map<CloudInfoPB, size_t, cloud_hash, cloud_equal_to> cloud_info_scores;
for (size_t i = 0; i < affinitized_zones.size(); ++i) {
for (const auto& cloud_info : affinitized_zones[i]) {
cloud_info_scores.insert({cloud_info, i});
}
}
auto get_score = [&](const ServerRegistrationPB& registration) {
if (IsBlacklisted(registration, blacklist)) {
return affinitized_zones.size() + 1;
} else {
auto cloud_score = cloud_info_scores.find(registration.cloud_info());
if (cloud_score == cloud_info_scores.end()) {
return affinitized_zones.size();
} else {
return cloud_score->second;
}
}
};
auto my_score = get_score(current_leader);
std::vector<std::pair<ServerEntryPB, size_t>> scored_masters;
for (const auto& master : masters) {
auto master_score = get_score(master.registration());
if (master_score < my_score) {
scored_masters.push_back({master, get_score(master.registration())});
}
}
std::sort(scored_masters.begin(), scored_masters.end(), [](const auto& lhs, const auto& rhs) {
return lhs.second < rhs.second;
});
return {scored_masters, my_score == 0 || my_score < affinitized_zones.size()};
}
// Sets basic fields in the TabletLocationsPB proto that are always filled regardless of the
// PartitionsOnly parameter.
// Fills the basic, always-present fields of a TabletLocationsPB from the
// persisted sys-catalog tablet entry.
void InitializeTabletLocationsPB(
    const TabletId& tablet_id, const SysTabletsEntryPB& pb, TabletLocationsPB* locs_pb) {
  locs_pb->set_tablet_id(tablet_id);
  locs_pb->set_table_id(pb.table_id());
  *locs_pb->mutable_partition() = pb.partition();
  locs_pb->set_split_depth(pb.split_depth());
  locs_pb->set_split_parent_tablet_id(pb.split_parent_tablet_id());
}
// Translates index permissions into a backfill status. The switch is kept
// exhaustive (no default) so adding a permission forces an update here.
IndexStatusPB::BackfillStatus GetBackfillStatus(IndexPermissions permissions) {
  switch (permissions) {
    // Backfill counts as successful only once the index serves reads.
    case INDEX_PERM_READ_WRITE_AND_DELETE:
      return IndexStatusPB::BACKFILL_SUCCESS;
    // Any other permission state means backfill has not (yet) succeeded.
    case INDEX_PERM_NOT_USED: [[fallthrough]];
    case INDEX_PERM_DELETE_ONLY: [[fallthrough]];
    case INDEX_PERM_WRITE_AND_DELETE: [[fallthrough]];
    case INDEX_PERM_DO_BACKFILL: [[fallthrough]];
    case INDEX_PERM_WRITE_AND_DELETE_WHILE_REMOVING: [[fallthrough]];
    case INDEX_PERM_DELETE_ONLY_WHILE_REMOVING: [[fallthrough]];
    case INDEX_PERM_INDEX_UNUSED:
      return IndexStatusPB::BACKFILL_UNKNOWN;
  }
  FATAL_INVALID_ENUM_VALUE(IndexPermissions, permissions);
}
// Backfill status of an index entry. Permissions are expected to always be
// present; report UNKNOWN when they are not.
IndexStatusPB::BackfillStatus GetBackfillStatus(const IndexInfoPB& index) {
  if (!index.has_index_permissions()) {
    return IndexStatusPB::BACKFILL_UNKNOWN;
  }
  return GetBackfillStatus(index.index_permissions());
}
} // anonymous namespace
////////////////////////////////////////////////////////////
// Snapshot Loader
////////////////////////////////////////////////////////////
// Sys-catalog visitor that restores legacy (non transaction aware) snapshot
// metadata into the catalog manager's in-memory map on master startup.
class SnapshotLoader : public Visitor<PersistentSnapshotInfo> {
 public:
  explicit SnapshotLoader(CatalogManager* catalog_manager) : catalog_manager_(catalog_manager) {}

  // Called once per persisted snapshot entry; only legacy snapshots are
  // handled here since transaction-aware ones are loaded elsewhere.
  Status Visit(const SnapshotId& snapshot_id, const SysSnapshotEntryPB& metadata) override {
    if (TryFullyDecodeTxnSnapshotId(snapshot_id)) {
      // Transaction aware snapshots should be already loaded.
      return Status::OK();
    }
    return VisitNonTransactionAwareSnapshot(snapshot_id, metadata);
  }

  // Rebuilds the in-memory SnapshotInfo for a legacy snapshot and registers
  // it in the id map; crashes if the id was already registered.
  Status VisitNonTransactionAwareSnapshot(
      const SnapshotId& snapshot_id, const SysSnapshotEntryPB& metadata) {
    auto snapshot = make_scoped_refptr<SnapshotInfo>(snapshot_id);
    auto lock = snapshot->LockForWrite();
    lock.mutable_data()->pb.CopyFrom(metadata);
    auto [map_iter, inserted] =
        catalog_manager_->non_txn_snapshot_ids_map_.emplace(snapshot_id, std::move(snapshot));
    CHECK(inserted) << "Snapshot already exists: " << snapshot_id;
    LOG(INFO) << "Loaded metadata for snapshot (id=" << snapshot_id
              << "): " << map_iter->second->ToString() << ": "
              << metadata.ShortDebugString();
    lock.Commit();
    return Status::OK();
  }

 private:
  // Not owned; must outlive the loader.
  CatalogManager* catalog_manager_;

  DISALLOW_COPY_AND_ASSIGN(SnapshotLoader);
};
////////////////////////////////////////////////////////////
// CatalogManager
////////////////////////////////////////////////////////////
// Mutable access to the per-database-type namespace name map.
CatalogManager::NamespaceInfoMap& CatalogManager::NamespaceNameMapper::operator[](
    YQLDatabase db_type) {
  const auto map_index = GetNameMapperIndex(db_type);
  return typed_maps_[map_index];
}
// Read-only access to the per-database-type namespace name map.
const CatalogManager::NamespaceInfoMap& CatalogManager::NamespaceNameMapper::operator[](
    YQLDatabase db_type) const {
  const auto map_index = GetNameMapperIndex(db_type);
  return typed_maps_[map_index];
}
void CatalogManager::NamespaceNameMapper::clear() {
for (auto& m : typed_maps_) {