Skip to content

Commit

Permalink
[#20301] yugabyted: yugabyted-ui support for k8s deployments
Browse files Browse the repository at this point in the history
Summary:
These changes to yugabyted-ui are needed to support single-zone insecure/secure deployments using the yugabyte helm chart.
The changes mostly involve the implementation of API endpoints that previously relied on the assumption that every node would have a tserver.
In k8s deployments, masters and tservers are in separate pods with different addresses. The changes make it so that yugabyted-ui will work
properly for nodes that only have masters as well. The API interface of yugabyted-ui has not changed.

A new flag, `bind_address`, was added to the yugabyted-ui API server. This flag determines the bind address for the API server.
This change was added to allow the yugabyted-ui API server to bind to 0.0.0.0 as is standard for k8s deployments,
but at the same time allow a different value to be set for `database_host`, which should be the address associated with the node.
Jira: DB-9267

Test Plan: no test plan

Reviewers: nikhil

Reviewed By: nikhil

Subscribers: yugabyted-dev, djiang

Differential Revision: https://phorge.dev.yugabyte.com/D31042
  • Loading branch information
Daniel Jiang committed Jan 23, 2024
1 parent 73167e5 commit b5ed6bb
Show file tree
Hide file tree
Showing 38 changed files with 491 additions and 107 deletions.
1 change: 1 addition & 0 deletions .arclint
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@
"(^yugabyted-ui/.*/go[.]mod)",
"(^yugabyted-ui/.*/go[.]sum)",
"(^yugabyted-ui/ui/src/api/)",
"(^yugabyted-ui/ui/src/translations/)",
"(^thirdparty/thirdparty_src_checksums[.]txt$)",
"(^java/yb-client/src/test/java/org/yb/client/TestYBClient[.]java$)",
"(^cmake_modules/cotire[.]cmake$)",
Expand Down
4 changes: 3 additions & 1 deletion bin/yugabyted
Original file line number Diff line number Diff line change
Expand Up @@ -1830,7 +1830,9 @@ class ControlScript(object):
]
if self.configs.saved_data.get("secure"):
yugabyted_ui_cmd.extend(["-secure=true",
"-database_password={}".format(
"-ysql_password={}".format(
self.configs.saved_data.get("database_password")),
"-ycql_password={}".format(
self.configs.saved_data.get("database_password"))])

if self.configs.saved_data.get("ysql_port"):
Expand Down
11 changes: 10 additions & 1 deletion yugabyted-ui/apiserver/cmd/server/.docs/api/openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,16 @@ paths:
get:
description: Get nodes for a Yugabyte cluster
operationId: getClusterNodes
parameters:
- allowEmptyValue: true
description: Whether to get master-only nodes that have no tserver
explode: true
in: query
name: get_all_masters
required: false
schema:
type: boolean
style: form
responses:
"200":
content:
Expand Down Expand Up @@ -2513,7 +2523,6 @@ components:
type: object
ConnectionStatsItem:
properties:
# GH #19722 : Structure of Ysql Connection Manager stats have changed.
pool:
type: string
active_logical_connections:
Expand Down
57 changes: 52 additions & 5 deletions yugabyted-ui/apiserver/cmd/server/handlers/api_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"net/http"
"runtime"
"sort"
"strconv"
"time"

"github.com/labstack/echo/v4"
Expand Down Expand Up @@ -53,6 +54,7 @@ func (c *Container) GetCluster(ctx echo.Context) error {
gFlagsTserverFutures := map[string]chan helpers.GFlagsFuture{}
gFlagsMasterFutures := map[string]chan helpers.GFlagsFuture{}
versionInfoFutures := map[string]chan helpers.VersionInfoFuture{}
versionInfoMasterFutures := map[string]chan helpers.VersionInfoFuture{}
masterMemTrackersFutures := map[string]chan helpers.MemTrackersFuture{}
tserverMemTrackersFutures := map[string]chan helpers.MemTrackersFuture{}
for _, nodeHost := range nodeList {
Expand All @@ -61,12 +63,15 @@ func (c *Container) GetCluster(ctx echo.Context) error {
go c.helper.GetGFlagsFuture(nodeHost, false, gFlagsTserverFuture)
versionInfoFuture := make(chan helpers.VersionInfoFuture)
versionInfoFutures[nodeHost] = versionInfoFuture
go c.helper.GetVersionFuture(nodeHost, versionInfoFuture)
go c.helper.GetVersionFuture(nodeHost, false, versionInfoFuture)
}
for _, nodeHost := range masterAddressesResponse.HostList {
gFlagsMasterFuture := make(chan helpers.GFlagsFuture)
gFlagsMasterFutures[nodeHost] = gFlagsMasterFuture
go c.helper.GetGFlagsFuture(nodeHost, true, gFlagsMasterFuture)
versionInfoFuture := make(chan helpers.VersionInfoFuture)
versionInfoMasterFutures[nodeHost] = versionInfoFuture
go c.helper.GetVersionFuture(nodeHost, true, versionInfoFuture)
}
for _, nodeHost := range reducedNodeList {
tserverMemTrackersFuture := make(chan helpers.MemTrackersFuture)
Expand All @@ -82,11 +87,11 @@ func (c *Container) GetCluster(ctx echo.Context) error {
// Getting relevant data from tabletServersResponse
regionsMap := map[string]int32{}
zonesMap := map[string]int32{}
numNodes := int32(0)
tserverMap := map[string]bool{}
ramUsageBytes := float64(0)
for _, cluster := range tabletServersResponse.Tablets {
for _, tablet := range cluster {
numNodes++;
for host, tablet := range cluster {
tserverMap[host] = true
region := tablet.Region
regionsMap[region]++
zone := tablet.Zone
Expand Down Expand Up @@ -124,18 +129,25 @@ func (c *Container) GetCluster(ctx echo.Context) error {

// Getting relevant data from mastersResponse
timestamp := time.Now().UnixMicro()
numMasters := int32(0)
for _, master := range mastersResponse.Masters {
startTime := master.InstanceId.StartTimeUs
if startTime < timestamp && startTime != 0 {
timestamp = startTime
}
if len(master.Registration.PrivateRpcAddresses) > 0 {
if _, ok := tserverMap[master.Registration.PrivateRpcAddresses[0].Host]; !ok {
numMasters++
}
}
}
createdOn := time.UnixMicro(timestamp).Format(time.RFC3339)
// Less than 3 replicas -> None
// In at least 3 different regions -> Region
// In at least 3 different zones but fewer than 3 regions -> Zone
// At least 3 replicas but in fewer than 3 zones -> Node
faultTolerance := models.CLUSTERFAULTTOLERANCE_NONE
numNodes := int32(len(tserverMap)) // + numMasters
if numNodes >= 3 {
if len(regionsMap) >= 3 {
// regionsMap comes from parsing /tablet-servers endpoint
Expand Down Expand Up @@ -191,11 +203,17 @@ func (c *Container) GetCluster(ctx echo.Context) error {
break
}
}
// Use this to track gflag results that have been read, to avoid reading a future twice
// which will hang
gFlagsMasterResults := map[string]helpers.GFlagsFuture{}
// Only need to keep checking masters if it is still possible that in-transit encryption is
// enabled.
if isEncryptionInTransitEnabled {
for host, gFlagsMasterFuture := range gFlagsMasterFutures {
masterFlags := <-gFlagsMasterFuture
if _, ok := gFlagsMasterResults[host]; !ok {
gFlagsMasterResults[host] = <-gFlagsMasterFuture
}
masterFlags := gFlagsMasterResults[host]
if masterFlags.Error != nil ||
masterFlags.GFlags["use_node_to_node_encryption"] != "true" ||
masterFlags.GFlags["allow_insecure_connections"] != "false" {
Expand All @@ -215,6 +233,30 @@ func (c *Container) GetCluster(ctx echo.Context) error {
}
}

// We can get clusterReplicationFactor == 0 if /cluster-config doesn't return the
// livereplicas structure. In this case, use the master gflag to get replication factor
if clusterReplicationFactor == 0 {
for host, gFlagsMasterFuture := range gFlagsMasterFutures {
if _, ok := gFlagsMasterResults[host]; !ok {
gFlagsMasterResults[host] = <-gFlagsMasterFuture
}
masterFlags := gFlagsMasterResults[host]
if masterFlags.Error == nil {
gflagReplicationFactor, err :=
strconv.ParseInt(masterFlags.GFlags["replication_factor"], 10, 32)
if err == nil && gflagReplicationFactor > 0 {
clusterReplicationFactor = int32(gflagReplicationFactor)
break
}
if err != nil {
c.logger.Warnf("error parsing replication_factor gflag: %s", err.Error())
} else {
c.logger.Warnf("replication_factor gflag is 0")
}
}
}
}

// Use the session from the context.
session, err := c.GetSession()
if err != nil {
Expand Down Expand Up @@ -302,6 +344,11 @@ func (c *Container) GetCluster(ctx echo.Context) error {
}
// Get software version
smallestVersion := c.helper.GetSmallestVersion(versionInfoFutures)
smallestVersionMaster := c.helper.GetSmallestVersion(versionInfoMasterFutures)
if smallestVersion == "" ||
c.helper.CompareVersions(smallestVersion, smallestVersionMaster) > 0 {
smallestVersion = smallestVersionMaster
}
numCores := int32(len(reducedNodeList)) * int32(runtime.NumCPU())

// Get ram limits
Expand Down

0 comments on commit b5ed6bb

Please sign in to comment.