Skip to content

Commit

Permalink
Replace master with citus in logs and comments (#5210)
Browse files Browse the repository at this point in the history
I replaced 

- master_add_node
- master_add_inactive_node
- master_activate_node

with

- citus_add_node
- citus_add_inactive_node
- citus_activate_node

respectively.
  • Loading branch information
hanefi committed Aug 26, 2021
1 parent 51fa7a2 commit 7e39c7e
Show file tree
Hide file tree
Showing 12 changed files with 23 additions and 23 deletions.
4 changes: 2 additions & 2 deletions src/backend/distributed/commands/dependencies.c
Expand Up @@ -81,9 +81,9 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target)
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will have the object, because they will
* either get it now, or get it in master_add_node after this transaction finishes and
* either get it now, or get it in citus_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock);
Expand Down
14 changes: 7 additions & 7 deletions src/backend/distributed/commands/extension.c
Expand Up @@ -152,9 +152,9 @@ PostprocessCreateExtensionStmt(Node *node, const char *queryString)
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will have the extension, because they will
* either get it now, or get it in master_add_node after this transaction finishes and
* either get it now, or get it in citus_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);
Expand Down Expand Up @@ -265,9 +265,9 @@ PreprocessDropExtensionStmt(Node *node, const char *queryString,
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will drop the extension, because they will
* either get it now, or get it in master_add_node after this transaction finishes and
* either get it now, or get it in citus_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);
Expand Down Expand Up @@ -401,7 +401,7 @@ PreprocessAlterExtensionSchemaStmt(Node *node, const char *queryString,
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will update the extension schema after
* this transaction finishes and the pg_dist_object record becomes visible.
*/
Expand Down Expand Up @@ -469,9 +469,9 @@ PreprocessAlterExtensionUpdateStmt(Node *node, const char *queryString,
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will update the extension version, because
* they will either get it now, or get it in master_add_node after this transaction
* they will either get it now, or get it in citus_add_node after this transaction
* finishes and the pg_dist_object record becomes visible.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);
Expand Down
2 changes: 1 addition & 1 deletion src/backend/distributed/commands/role.c
Expand Up @@ -143,7 +143,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString)
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);

Expand Down
4 changes: 2 additions & 2 deletions src/backend/distributed/commands/type.c
Expand Up @@ -133,9 +133,9 @@ PreprocessCompositeTypeStmt(Node *node, const char *queryString,
/*
* Make sure that no new nodes are added after this point until the end of the
* transaction by taking a RowShareLock on pg_dist_node, which conflicts with the
* ExclusiveLock taken by master_add_node.
* ExclusiveLock taken by citus_add_node.
* This guarantees that all active nodes will have the object, because they will
* either get it now, or get it in master_add_node after this transaction finishes and
* either get it now, or get it in citus_add_node after this transaction finishes and
* the pg_dist_object record becomes visible.
*/
LockRelationOid(DistNodeRelationId(), RowShareLock);
Expand Down
4 changes: 2 additions & 2 deletions src/backend/distributed/metadata/metadata_sync.c
Expand Up @@ -182,7 +182,7 @@ StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort)
{
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("you cannot sync metadata to a non-existent node"),
errhint("First, add the node with SELECT master_add_node"
errhint("First, add the node with SELECT citus_add_node"
"(%s,%d)", escapedNodeName, nodePort)));
}

Expand All @@ -191,7 +191,7 @@ StartMetadataSyncToNode(const char *nodeNameString, int32 nodePort)
ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("you cannot sync metadata to an inactive node"),
errhint("First, activate the node with "
"SELECT master_activate_node(%s,%d)",
"SELECT citus_activate_node(%s,%d)",
escapedNodeName, nodePort)));
}

Expand Down
4 changes: 2 additions & 2 deletions src/backend/distributed/metadata/node_metadata.c
Expand Up @@ -380,7 +380,7 @@ master_add_secondary_node(PG_FUNCTION_ARGS)
* node should not have any active placements.
* This function also deletes all reference table placements that belong to the given node from
* pg_dist_placement, but it does not drop the actual placements at the node. In the case of
* re-adding the node, master_add_node first drops and re-creates the reference tables.
* re-adding the node, citus_add_node first drops and re-creates the reference tables.
*/
Datum
citus_remove_node(PG_FUNCTION_ARGS)
Expand Down Expand Up @@ -446,7 +446,7 @@ citus_disable_node(PG_FUNCTION_ARGS)
ereport(NOTICE, (errmsg(
"Node %s:%d has active shard placements. Some queries "
"may fail after this operation. Use "
"SELECT master_activate_node('%s', %d) to activate this "
"SELECT citus_activate_node('%s', %d) to activate this "
"node back.",
workerNode->workerName, nodePort,
workerNode->workerName,
Expand Down
2 changes: 1 addition & 1 deletion src/backend/distributed/utils/reference_table_utils.c
Expand Up @@ -130,7 +130,7 @@ EnsureReferenceTablesExistOnAllNodesExtended(char transferMode)
uint64 shardId = shardInterval->shardId;

/*
* We only take an access share lock, otherwise we'll hold up master_add_node.
* We only take an access share lock, otherwise we'll hold up citus_add_node.
* In case of create_reference_table() where we don't want concurrent writes
* to pg_dist_node, we have already acquired ShareLock on pg_dist_node.
*/
Expand Down
2 changes: 1 addition & 1 deletion src/include/distributed/pg_dist_node.h
Expand Up @@ -15,7 +15,7 @@
* compiler constants for pg_dist_node
* ----------------
*
* n.b. master_add_node, master_add_inactive_node, and master_activate_node all
* n.b. citus_add_node, citus_add_inactive_node, and citus_activate_node all
* directly return pg_dist_node tuples. This means their definitions (and
* in particular their OUT parameters) must be changed whenever the definition of
* pg_dist_node changes.
Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/expected/failure_add_disable_node.out
Expand Up @@ -54,7 +54,7 @@ ORDER BY placementid;
(2 rows)

SELECT master_disable_node('localhost', :worker_2_proxy_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 9060) to activate this node back.
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 9060) to activate this node back.
master_disable_node
---------------------------------------------------------------------

Expand Down
4 changes: 2 additions & 2 deletions src/test/regress/expected/multi_cluster_management.out
Expand Up @@ -143,7 +143,7 @@ HINT: To proceed, either drop the distributed tables or use undistribute_table(
-- try to disable a node with active placements see that node is removed
-- observe that a notification is displayed
SELECT master_disable_node('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
master_disable_node
---------------------------------------------------------------------

Expand All @@ -170,7 +170,7 @@ NOTICE: Replicating reference table "test_reference_table" to the node localhos

DROP TABLE test_reference_table;
SELECT master_disable_node('localhost', :worker_2_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back.
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57638) to activate this node back.
master_disable_node
---------------------------------------------------------------------

Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/expected/start_stop_metadata_sync.out
Expand Up @@ -369,7 +369,7 @@ SELECT hasmetadata, metadatasynced, shouldhaveshards FROM pg_dist_node WHERE nod
\c - - - :master_port
-- verify that mx workers are updated when disabling/activating nodes
SELECT citus_disable_node('localhost', :worker_1_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57637) to activate this node back.
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
citus_disable_node
---------------------------------------------------------------------

Expand Down
2 changes: 1 addition & 1 deletion src/test/regress/output/multi_copy.source
Expand Up @@ -650,7 +650,7 @@ SELECT shardid, nodename, nodeport

-- disable the first node
SELECT master_disable_node('localhost', :worker_1_port);
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57637) to activate this node back.
NOTICE: Node localhost:xxxxx has active shard placements. Some queries may fail after this operation. Use SELECT citus_activate_node('localhost', 57637) to activate this node back.
master_disable_node
---------------------------------------------------------------------

Expand Down

0 comments on commit 7e39c7e

Please sign in to comment.