diff --git a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/navigation/create-href.js b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/navigation/create-href.js
index ad194893126..f4ea9a1ca54 100644
--- a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/navigation/create-href.js
+++ b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/navigation/create-href.js
@@ -19,10 +19,6 @@ export function newCreateHref(searchConfig) {
 			return sections;
 		},
 		hrefSection: function (key) {
-			if (key == 'community') {
-				// We don't have any list page for the community section.
-				return '';
-			}
 			let parts = key.split(' > ');
 			if (parts.length > 1 && parts[0] === 'taxonomies') {
diff --git a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/search/search-store.js b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/search/search-store.js
index 48f9cb383a4..08fda809a53 100644
--- a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/search/search-store.js
+++ b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/search/search-store.js
@@ -285,7 +285,9 @@ export function newSearchStore(searchConfig, params, Alpine) {
 		let hitsPerPage = 0;
 		let q = '';
-		let filters = sectionConfig.filters || '';
+		// TODO(bep) we have removed the QA section from explorer/search, but the
+		// data is still there. The docType filter below can be removed when we have completed the migration.
+		let filters = sectionConfig.filters || 'NOT docType:community';
 		let facetFilters = [];
 		let attributesToHighlight = [];
 		let analyticsTags = [];
@@ -424,6 +426,11 @@ const normalizeResult = function (self, result) {
 	if (k === 'docType' || k.startsWith('section.')) {
 		let obj = {};
 		Object.entries(v).forEach(([kk, vv]) => {
+			// TODO(bep) we have removed the QA section from explorer/search, but the
+			// data is still there. The docType check below can be removed when we have completed the migration.
+			if (k == 'docType' && kk == 'community') {
+				return;
+			}
 			let m = self.metaProvider.getSectionMeta(kk.toLocaleLowerCase());
 			obj[kk] = { count: vv, meta: m };
 		});
diff --git a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/sections/home/home.js b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/sections/home/home.js
index 59d6ff7e979..1a4f84c259c 100644
--- a/_vendor/github.com/linode/linode-docs-theme/assets/js/main/sections/home/home.js
+++ b/_vendor/github.com/linode/linode-docs-theme/assets/js/main/sections/home/home.js
@@ -15,7 +15,7 @@ export function newHomeController(searchConfig, staticData) {
 	// The section we paginate on the home page.
 	// This maps to section.lvl0 in linode-merged.
-	const sectionLevel0s = ['guides', 'blog', 'resources', 'marketplace', 'community'];
+	const sectionLevel0s = ['guides', 'blog', 'resources', 'marketplace'];
 	// Avoid loading too much data when on mobile.
 	const tilesAlgoliaPreloadItems = isMobile() ? 12 : 30;
@@ -206,13 +206,13 @@
 		this.data.sectionTiles['products'] = newPager(
 			productsStripPageSize,
 			this.$refs[`carousel-products`],
-			staticData.productItems
+			staticData.productItems,
 		);
 		// Make the developers pager the same size as the products pager.
this.data.sectionTiles['developers'] = newPager( productsStripPageSize, this.$refs[`carousel-developers`], - staticData.developerItems + staticData.developerItems, ); this.loaded = true; diff --git a/_vendor/github.com/linode/linode-docs-theme/config.toml b/_vendor/github.com/linode/linode-docs-theme/config.toml index c8afa4cc46a..600906976a9 100644 --- a/_vendor/github.com/linode/linode-docs-theme/config.toml +++ b/_vendor/github.com/linode/linode-docs-theme/config.toml @@ -116,13 +116,6 @@ seo_title_template = "Cloud Computing Resources | Linode" seo_title_template_category = "Cloud Computing {category} | Linode" explorer_icon = "#icon-explorer--resources" -[params.search_config2.sections.community] -name = "community" -filters = "objectType:question" -weight = 70 -title = "Q&A" -explorer_icon = "#icon-explorer--qa" - [module] [[module.mounts]] source = "content" diff --git a/_vendor/github.com/linode/linode-docs-theme/layouts/index.html b/_vendor/github.com/linode/linode-docs-theme/layouts/index.html index 35571559641..a45e7411559 100644 --- a/_vendor/github.com/linode/linode-docs-theme/layouts/index.html +++ b/_vendor/github.com/linode/linode-docs-theme/layouts/index.html @@ -146,10 +146,6 @@

{{ end }} - {{/* Q&A carousel. */}} -
- {{ template "home/section/carousel" (dict "section" "community" "title" "Q&A") }} -
{{ end }} @@ -158,7 +154,6 @@

{{ define "home/hero" }}
- {{ partial "sections/navigation/breadcrumbs.html" (dict "page" . "wrap" true) }}

{{ .Params.h1_title | default .Title }} diff --git a/_vendor/modules.txt b/_vendor/modules.txt index 7605116f126..c843fab6c36 100644 --- a/_vendor/modules.txt +++ b/_vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/linode/linode-docs-theme v0.0.0-20240220192940-74515c307486 +# github.com/linode/linode-docs-theme v0.0.0-20240319173150-b92334abe6d7 # github.com/linode/linode-website-partials v0.0.0-20240130163753-4a933fe77633 # github.com/gohugoio/hugo-mod-jslibs-dist/alpinejs/v3 v3.401.201 # github.com/gohugoio/hugo-mod-jslibs/turbo/v7 v7.20300.20000 diff --git a/archetypes/products/_index.md b/archetypes/products/_index.md new file mode 100644 index 00000000000..cf58e128bca --- /dev/null +++ b/archetypes/products/_index.md @@ -0,0 +1,13 @@ +--- +title: "{{ replace (path.Base .File.Dir) "-" " " | title }}" +title_meta: +description: +tab_group_main: + is_root: true + title: Overview + weight: 10 +cascade: + date: {{ .Date }} + product_description: +modified: {{ now.Format "2006-01-02" }} +--- \ No newline at end of file diff --git a/archetypes/products/_shortguides/.gitkeep b/archetypes/products/_shortguides/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/archetypes/products/developers/index.md b/archetypes/products/developers/index.md new file mode 100644 index 00000000000..e61c7c4e9df --- /dev/null +++ b/archetypes/products/developers/index.md @@ -0,0 +1,8 @@ +--- +title: Developers +title_meta: "Developer Resources for PRODUCT NAME" +description: "Easily manage PRODUCT NAME with developer tools like the Linode API or CLI as well as third party tools and integrations." +tab_group_main: + weight: 50 +aliases: [] +--- \ No newline at end of file diff --git a/archetypes/products/faqs/_index.md b/archetypes/products/faqs/_index.md new file mode 100644 index 00000000000..ecafa487e95 --- /dev/null +++ b/archetypes/products/faqs/_index.md @@ -0,0 +1,10 @@ +--- +title: "FAQs" +title_meta: "FAQs for PRODUCT NAME" +description: "Find quick answers to some of the most commonly asked questions about PRODUCT NAME." +tab_group_main: + weight: 60 +published: {{ now.Format "2006-01-02" }} +modified: {{ now.Format "2006-01-02" }} +aliases: [] +--- \ No newline at end of file diff --git a/archetypes/products/faqs/tab.svg b/archetypes/products/faqs/tab.svg new file mode 100644 index 00000000000..24b2df1f82a --- /dev/null +++ b/archetypes/products/faqs/tab.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/archetypes/products/get-started/index.md b/archetypes/products/get-started/index.md new file mode 100644 index 00000000000..69b3d79bd8a --- /dev/null +++ b/archetypes/products/get-started/index.md @@ -0,0 +1,8 @@ +--- +title: Get Started +title_meta: "Getting Started with PRODUCT NAME" +description: "Learn how to quickly start using PRODUCT NAME on the Linode Platform." 
+tab_group_main: + weight: 20 +aliases: [] +--- \ No newline at end of file diff --git a/archetypes/products/guides/_index.md b/archetypes/products/guides/_index.md new file mode 100644 index 00000000000..bb355efea39 --- /dev/null +++ b/archetypes/products/guides/_index.md @@ -0,0 +1,9 @@ +--- +title: Guides +title_meta: "Guides and Tutorials for PRODUCT NAME" +description: "A collection of guides to help you start deploying PRODUCT NAME and using them to host your web applications and Cloud workloads" +tab_group_main: + weight: 30 +aliases: [] +modified: {{ now.Format "2006-01-02" }} +--- diff --git a/archetypes/products/resources/index.md b/archetypes/products/resources/index.md new file mode 100644 index 00000000000..7473498b861 --- /dev/null +++ b/archetypes/products/resources/index.md @@ -0,0 +1,8 @@ +--- +title: Resources +title_meta: "Resources for PRODUCT NAME" +description: "Resources and other information related to PRODUCT NAME, including videos, blog posts, community posts, customer stories, and press releases." +tab_group_main: + weight: 40 +aliases: [] +--- \ No newline at end of file diff --git a/ci/vale/dictionary.txt b/ci/vale/dictionary.txt index 3af422f5d06..74d5a5e56d7 100644 --- a/ci/vale/dictionary.txt +++ b/ci/vale/dictionary.txt @@ -1707,6 +1707,7 @@ permalink permalinks Petazzoni pflogsumm +pg_auto_failover pg_dump pg_dumpall pg_restore @@ -1933,6 +1934,7 @@ replicants replicaset replicationcontrollers replset +repmgr repo repos reputational diff --git a/docs/guides/databases/postgresql/comparison-of-high-availability-postgresql-solutions/index.md b/docs/guides/databases/postgresql/comparison-of-high-availability-postgresql-solutions/index.md new file mode 100644 index 00000000000..143656c8b4d --- /dev/null +++ b/docs/guides/databases/postgresql/comparison-of-high-availability-postgresql-solutions/index.md @@ -0,0 +1,236 @@ +--- +slug: comparison-of-high-availability-postgresql-solutions +title: "A Comparison of High Availability PostgreSQL Solutions" +description: 'This guide describes the high availability and resiliency options for PostgreSQL, with a survey of the three most common replication managers.' +keywords: ['PostgreSQL high availability','high availability comparison PostgreSQL','PostgreSQL patroni','PostgreSQL repmgr','PostgreSQL paf'] +license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' +authors: ["Jeff Novotny"] +published: 2024-03-19 +modified_by: + name: Linode +external_resources: +- '[PostgreSQL](https://www.postgresql.org/)' +- '[PostgreSQL High Availability Documentation](https://www.postgresql.org/docs/current/high-availability.html)' +- '[Google Cloud Architecture Center PostgreSQL high availability survey](https://cloud.google.com/architecture/architectures-high-availability-postgresql-clusters-compute-engine#comparison_between_the_ha_options)' +- '[repmgr](https://www.repmgr.org/)' +- '[repmgr documentation](https://www.repmgr.org/docs/current/getting-started.html)' +- '[Patroni](https://patroni.readthedocs.io/en/latest/)' +- '[Patroni GitHub](https://github.com/zalando/patroni)' +- '[pg_auto_failover](https://pg-auto-failover.readthedocs.io/en/main/)' +- '[pg_auto_failover GitHub](https://github.com/hapostgres/pg_auto_failover/blob/main/docs/index.rst)' +--- + +[PostgreSQL](https://www.postgresql.org/) database instances can be configured into a high-availability cluster to increase reliability and resiliency. This technique permits the member nodes to act together as one virtual database. 
In the event of a failure, one of the backups can take over as the leader (primary node). This guide introduces the high availability options available for PostgreSQL and includes an overview of the main alternatives.
+
+## Why is High Availability Critical for Production Databases?
+
+Customer and employee expectations for websites, databases, and other resources have evolved over the last two decades. In the past, a certain amount of downtime was tolerated, but current standards are far more stringent. A database is a key IT component of a corporate website, so if it becomes unavailable, the entire company can be affected. In particular, customer-facing resources, such as e-commerce websites, are likely to be degraded.
+
+Frequent outages can have negative effects on a company. Visitors often assume an organization hosting an unreliable service is itself unreliable. Customers quickly move to a competitor, resulting in a loss of business. There can also be a considerable cost when internal databases are down. Employees might not be able to do their jobs, and business-critical operations might stall. For example, production lines and orders can be halted. In addition to reducing efficiency, database outages can introduce legal liabilities and reduce employee morale.
+
+Adding redundancy to a database in the form of a *high availability* (HA) architecture helps address these issues. This typically involves provisioning additional database instances to maintain duplicate copies of the data. All databases in the set function as a single logical instance of the database and all instances store the same data. Data changes can only be made on a single system designated as the primary node, but in most cases, any instance can handle a read request. This capability enhances throughput and enables load balancing and data redundancy.
+
+{{< note >}}
+The term *cluster* has different meanings in different products. It does not always mean a redundant set of databases. Sometimes it only refers to a group of databases in the same network or at the same location. "Cluster" also has a variety of meanings in PostgreSQL, but it does not refer to redundant databases. This is discussed more thoroughly in the following section.
+{{< /note >}}
+
+Organizations typically define a *Maximum Tolerable Downtime* (MTD) for each resource, including websites and databases. MTD indicates the amount of unavailability the business can tolerate. For websites and key databases, an uptime metric of about 99.9 to 99.95% satisfies prevailing industry standards. This amounts to roughly four to nine hours of downtime a year (0.05% to 0.1% of the 8,760 hours in a year). The tolerable downtime for a database can be higher or lower depending on its importance. Critical databases might have an extremely low downtime tolerance. An internal human resources database likely has less stringent requirements. However, a high-availability solution has a role to play in both cases.
+
+Most high-availability architectures, including the solutions for PostgreSQL redundancy, incur additional costs for an organization. These costs include deploying more servers and configuring and managing a more complex network. For some smaller organizations, the additional cost might not make sense. Even within larger organizations, not all systems, such as test or staging environments, need high availability infrastructure.
+
+## High Availability Concepts for PostgreSQL
+
+PostgreSQL is a powerful, flexible, and reliable relational database system.
However, it does not automatically implement high availability. This means it remains vulnerable to network outages or server failures. Users must take additional steps to enable resiliency and ensure their databases are consistently operational. + +This task is typically accomplished through the use of specialized replication manager applications. A replication manager works in conjunction with the existing PostgreSQL infrastructure, handling system monitoring and automatically initiating switchover events when necessary. If the primary copy of the database goes offline, the replication manager redirects users to a backup copy. Some applications can even take steps to restore a failed system or database process. + +Users must understand some basic concepts to properly evaluate the different PostgreSQL high availability alternatives. Potential architectures can be divided into categories based on how updates are handled across the set of member databases. + +Unfortunately, the term "cluster" has a variety of meanings in PostgreSQL, which can confuse the discussion. + +- It most typically refers to a collection of databases sharing the same data storage area. All databases in the cluster have the same buffer and connection pool sizes. +- Cluster can be used as a verb to describe the process of reordering the data around a specified index. A PostgreSQL table can be reorganized in this manner using the `CLUSTER` command. +- Clustering also refers to splitting a data set into many groups based on the characteristics of the data. +- Finally, "cluster" is used informally to refer to a "computing cluster", although this is technically an inaccurate use of the word. This definition maps to the typical industry usage of a group of machines working together for a common purpose such as redundancy or increased throughput. A cluster in this sense of the word is known as a *high-availability* database in PostgreSQL. The set of all nodes participating in the HA solution is known as a *node group*. + +{{< note >}} +In PostgreSQL terms, high availability is considered an attribute of a single cluster of databases, making the entire system a *high-availability cluster*. To avoid confusion, this guide uses the term "high availability/HA cluster" to refer to the collection of distinct database instances containing the same data. +{{< /note >}} + +Irrespective of implementation details and terminology, all database high-availability solutions must perform the following tasks. + +- Elect a primary node as the leader. +- Direct all write operations to the primary node. +- Replicate all changes on the primary to all active replica/secondary nodes. +- Monitor the status of the primary node and identify any failures. +- In the event of a primary failure, promote one of the replicas to become the new primary. + +Most solutions implement some additional optional features. They are usually able to distribute read requests across the HA cluster for load-balancing purposes. Many replication applications also perform repairs to the individual nodes to bring them back to an active state. + +The PostgreSQL site contains a general discussion of high availability along with a list of configurable settings. Consult the [PostgreSQL high availability documentation](https://www.postgresql.org/docs/current/high-availability.html) for more information. + +### PostgreSQL High Availability Components + +A complete high-availability architecture involves a number of components and processes working together to replicate data. 
Any organization implementing a high-availability solution should define target metrics for database uptime, switchover recovery time, and acceptable data loss.
+
+Some of the most important concepts involving database high availability are as follows:
+
+- **Data Replication**: Data replication generates multiple copies of the original database data. It logs any database additions and updates and transmits them to all nodes in the HA Cluster. These changes can be database data transactions or alterations to the database schema or table structure. Replication can be either synchronous or asynchronous.
+
+- **High Availability Cluster (HA Cluster)**: An HA Cluster is a collection of nodes that each have a copy of the same underlying data. Having multiple copies of the dataset is essential for data redundancy. Any one of the database servers can respond to queries, and any node can potentially become the master node. From the user's point of view, the HA Cluster appears as a single database. In most cases, users do not know which node responded to their query.
+
+- **Primary Node**: This is the master node for the HA cluster. It is the recipient of all database changes, including writes and schema updates. Therefore, it always has the most current data set. It replicates these changes to the other instances in the HA cluster, sending them the transactions in either list or stream format. Primary nodes can also handle read requests, but these are typically distributed between the different nodes for load-balancing purposes. The primary node is elected through a *primary election*.
+
+- **Replica Node**: Also known as a *secondary node*, a replica receives updates from the primary node. During regular operation, these nodes can handle read requests. However, depending on the HA architecture, the data in the replica data set might not be completely up to date. Each HA cluster can contain multiple replica nodes for added redundancy and load balancing.
+
+- **Failover**: In the event of a primary node failure, a failover event occurs. One of the secondary nodes becomes the primary node and supervises database updates. Administrators can initiate a manual failover for database maintenance purposes. This scheduled activity is sometimes known as a *manual switchover*. A switch back to the original master is known as a *fallback*.
+
+- **Write-ahead log (WAL)**: This log stores a record of all changes to the database. A unique sequence number identifies each WAL record. In PostgreSQL, the WAL is stored in a *segment file*. A segment file typically contains a large number of records.
+
+### Methods for Implementing Database Replication
+
+There are two main forms of data replication and two methods of implementing it. The two main approaches are as follows:
+
+- **Synchronous replication**: In this approach, the primary node waits for confirmation from at least one replica before confirming the transaction. This guarantees the database is consistent across the HA cluster in the event of a failure. Consistency eliminates potential data loss and is vital for organizations that demand transactional data integrity. However, it introduces latency and can reduce throughput.
+- **Asynchronous replication**: In asynchronous replication, the primary node sends updates to the replicas without waiting for a response. It immediately confirms a successful commit after updating its own database, reducing latency. However, this approach increases the chances of data loss in the event of an unexpected failover. This is the default PostgreSQL replication method. A short configuration sketch follows this list.
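+
+The commands below are a minimal sketch of how synchronous replication can be inspected and enabled on a primary node. The standby name `standby1` is an illustrative placeholder; it must match the `application_name` the replica uses when it connects.
+
+```command
+# Show the current commit setting; replication remains asynchronous until
+# synchronous standbys are configured.
+psql -c "SHOW synchronous_commit;"
+# Require confirmation from the standby named "standby1" before a commit returns.
+psql -c "ALTER SYSTEM SET synchronous_standby_names = 'standby1';"
+psql -c "SELECT pg_reload_conf();"
+```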
+
+The following algorithms are used to implement replication:
+
+- **File-based log shipping**: In this replication method, the primary node asynchronously transmits segment files containing the WAL logs to the replicas. This method cannot be used synchronously because the WAL files build up over a large number of transactions. The primary node continually records all transactions, but the replicas only process the changes after they receive a copy of the file. This is a good approach for latency-sensitive, loss-tolerant applications.
+
+- **Streaming replication**: A streaming-based replication algorithm immediately transmits each update to the replicas. The primary node does not have to wait for transactions to build up in the WAL before transmitting the updates. This results in more timely updates on the replicas. Streaming can be either asynchronous, which is the default setting, or synchronous. In both cases, the updates are immediately sent over to the replicas. However, in synchronous streaming, the primary waits for a response from the replicas before confirming the commit. Users can enable synchronous streaming on PostgreSQL through the `synchronous_commit` configuration option.
+
+Another relevant set of concepts relates to how the HA cluster handles a split-brain condition. This occurs when multiple segments of the HA cluster are active but are not able to communicate with each other. In some circumstances, more than one node might attempt to become the primary. To handle this situation, the replication manager structures the rules for a primary election or adds a *quorum*. This problem can also be eliminated through the use of an external monitor.
+
+## Deploying a PostgreSQL HA Cluster on Akamai Cloud Computing
+
+There are two main methods of deploying a PostgreSQL high-availability cluster on Akamai: the traditional manual configuration method and the [Akamai Marketplace](/docs/products/tools/marketplace/guides/postgresql-cluster/) solution.
+
+For a concise discussion and comparison of the three main alternatives, see the Akamai blog about PostgreSQL's high availability.
+
+### The Marketplace PostgreSQL HA Cluster
+
+Akamai allows users to configure a PostgreSQL HA cluster as a [Marketplace application](/docs/products/tools/marketplace/guides/postgresql-cluster/). Using this technique, database administrators can set up an HA cluster from the Linode Dashboard. This solution is supported on the Ubuntu 22.04 LTS distribution on any plan type.
+
+The Akamai Marketplace solution uses the [*repmgr*](https://www.repmgr.org/) replication manager to control the PostgreSQL high availability cluster. The Marketplace application automatically configures a three-node HA cluster. Users only have to create users, roles, schemas, and tables before putting the database into service.
+
+This solution has some limitations. It is not possible to choose the size of the HA cluster or manually edit any application variables. It is a viable option for a smaller organization with less technical expertise. However, it might not meet the specific requirements of a more complicated network.
+
+It is also possible to configure redundancy using the [IP failover](/docs/products/compute/compute-instances/guides/failover/) option. This feature allows multiple computing instances to share an IP address. If the primary system becomes inaccessible, the secondary server can take over.
This enables some level of redundancy, although it is more limited than a full high-availability solution. Adding this enhancement involves configuring the [Lelastic](https://github.com/linode/lelastic) utility on your instances.
+
+### Manual Deployment Using a Replication Manager
+
+PostgreSQL can be manually installed using a package manager from the command line. The user then has the option of configuring one of the three replication manager solutions mentioned below. Administrators are responsible for configuring users, databases, tables, and other database elements on the primary node.
+
+This method is more complicated and requires additional user configuration. However, it allows administrators to have full control over the PostgreSQL HA Cluster configuration, including the choice of replication manager. The most common choices are [*Patroni*](https://patroni.readthedocs.io/en/latest/), [repmgr](https://www.repmgr.org/), also known as *Replication Manager*, and [*pg_auto_failover*](https://pg-auto-failover.readthedocs.io/en/main/) (PAF).
+
+## Specific High Availability Solutions
+
+A specialized replication manager application is almost always used to configure PostgreSQL HA Clusters. These applications automatically handle data replication and node monitoring, which are otherwise very difficult to implement. There are a number of different choices. Each alternative has its own series of strengths and drawbacks. This section explains each of the three most common solutions and compares them.
+
+### Patroni
+
+[Patroni](https://patroni.readthedocs.io/en/latest/) is a Python-based software template for enabling high availability in PostgreSQL databases. This framework requires some template customization to work most effectively. It also requires a *distributed configuration store* (DCS) but supports a number of different storage solutions. Patroni works well on a two-node HA cluster consisting of a primary node and a single replica.
+
+Patroni organizes a set of nodes into an HA cluster and configures streaming replication to share updates. It runs an agent on each node in the HA cluster to share node health updates between the members. The primary node is responsible for regularly updating the *leader key*, which is stored in the DCS. If it fails to do so, it is evicted as the primary and another node is elected to take over. After a switchover, the replicas coordinate their position with respect to the database updates. The most up-to-date node typically takes over. In the event of a tie, the first node to create a new leader key wins. Only one node can hold the leader key at any time. This reduces any ambiguity about the identity of the primary node and avoids a split-brain scenario.
+
+Patroni can be installed on Linux nodes using `pip`. Mandatory configuration settings can be supplied globally, locally using a YAML file, or through environment variables. The global settings are dynamic and are applied asynchronously to all nodes in the HA cluster. However, local configuration always takes precedence over any global settings. Patroni supports a REST API, which is useful for monitoring and automation purposes. This API is used to determine the status and role of each node in the HA cluster.
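+
+As a brief, hedged illustration, a node's state can be checked through the REST API or with the `patronictl` command line tool. The address and configuration file path below are placeholders for an actual deployment.
+
+```command
+# Query a node's status and role over Patroni's REST API (default port 8008).
+curl -s http://192.0.2.11:8008/patroni
+# List every member of the HA cluster along with its role and replication lag.
+patronictl -c /etc/patroni/patroni.yml list
+```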
+
+**Advantages:**
+
+- It is a mature open-source product.
+- It performs very well in standard high-availability test scenarios. It is able to handle more failure scenarios than the alternatives.
+- In some circumstances, it is able to restore a failed PostgreSQL process. It also includes a fallback function to restore the HA cluster to a healthy state after failures. This involves initializing the affected node as a replica.
+- It enables a standard end-to-end solution on all nodes in the HA cluster based on global configuration settings.
+- It has a wide set of features and is highly configurable.
+- It includes monitoring functionality.
+- The associated REST API permits script access to all attributes.
+- It includes watchdog support and callbacks for event notifications.
+- It can be integrated with HAProxy, a popular high-performance load balancer.
+- Patroni works well with Kubernetes as part of an automated pipeline.
+- Storing the leader key in the DCS enforces consensus about the primary node and avoids multiple masters.
+
+**Drawbacks:**
+
+- It is unable to detect a misconfigured replica node.
+- It requires manual intervention in a few cases, such as when the Patroni process itself fails.
+- It requires a separate DCS application, which must be configured by the user. DCS requires two open communications ports in addition to the main Patroni port.
+- Configuration is more complex than the other solutions.
+- It uses more memory and CPU than the alternatives.
+
+For more information on Patroni, see the [Patroni website and documentation](https://patroni.readthedocs.io/en/latest/) or [Patroni GitHub](https://github.com/zalando/patroni).
+
+### Repmgr
+
+[Repmgr](https://www.repmgr.org/) (stylized as repmgr) is a suite of open-source tools for managing PostgreSQL HA clusters. It works in conjunction with existing PostgreSQL functionality to configure replica nodes, monitor the HA cluster, and perform a failover when required. Like other solutions, repmgr supports one primary server for reads and writes and one or more read-only secondary nodes. In repmgr, the replicas are called *standby nodes*. Repmgr supports a cascading configuration, allowing one or more replicas to receive updates from another replica. The node providing the updates is known as an *upstream node*, regardless of its role.
+
+Repmgr can be installed using the `apt` package manager. It includes a command line tool to configure the HA cluster, manually administer the nodes, and determine the status of each server. Configuration is provided using the repmgr configuration file. Each node in the HA cluster must be registered as a primary or standby server. The primary should be registered first. This allows for the cloning of standby nodes. Repmgr creates its own schema within the PostgreSQL database to store all information about the nodes and HA cluster. It requires SSH connections between the nodes to manage these activities.
+
+The other repmgr component is a daemon that actively monitors the servers and performs a switchover when necessary. The daemon is also in charge of sending notifications and alerts. It is able to detect failures of a primary or standby node. If the primary fails, repmgr attempts to reconnect to it. If this fails, it performs a failover and promotes one of the standby servers. It fences off a failed primary in case it unexpectedly comes online again. Repmgr uses a *witness server* to cast a deciding vote for the primary server election in certain situations after a switchover.
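+
+The commands below sketch the typical registration workflow under assumed values; the host address, database user, and configuration file path are placeholders.
+
+```command
+# On the primary node: register the primary with repmgr.
+repmgr -f /etc/repmgr.conf primary register
+# On a standby node: preview the clone with a dry run, then clone the primary.
+repmgr -h 192.0.2.11 -U repmgr -d repmgr -f /etc/repmgr.conf standby clone --dry-run
+repmgr -h 192.0.2.11 -U repmgr -d repmgr -f /etc/repmgr.conf standby clone
+# Start PostgreSQL on the standby, then register it and inspect the HA cluster.
+repmgr -f /etc/repmgr.conf standby register
+repmgr -f /etc/repmgr.conf cluster show
+```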
+
+**Advantages:**
+
+- It is a free open-source suite.
+- It provides full administrative control over the HA cluster. Users can promote a standby to become the primary node, perform a manual switchover, and use the dry run option.
+- The repmgr daemon can perform an automatic switchover to one of the standbys.
+- It supports automatic notification for a series of predefined events.
+- It uses a `location` parameter to handle split-brain events. In the event of a failure, repmgr attempts to promote a standby in the same location as the primary.
+- It uses an independent witness server to optimize the primary election process and avoid contention.
+- It does not require additional ports for communication.
+- It is robust and features good performance.
+
+**Drawbacks:**
+
+- It cannot fully manage all resources and might require manual intervention to restart a failed node after some failures.
+- It cannot detect misconfigured nodes and can sometimes mistake a misconfigured node for an available standby node.
+- It cannot automatically restore a node to a healthy state.
+- If the primary server is isolated from the other nodes in the same location, two nodes can both be designated the master. This can result in a split-brain situation, requiring manual intervention.
+
+For more information on repmgr, see the [repmgr documentation](https://www.repmgr.org/docs/current/getting-started.html).
+
+### pg_auto_failover (PAF)
+
+The [PAF](https://pg-auto-failover.readthedocs.io/en/main/) project is a PostgreSQL extension for monitoring and managing high availability. Unlike the other HA cluster managers, it requires at least three nodes to work properly. The network requires a primary node, at least one secondary node, and a monitor node. The monitoring node verifies the condition of each node using regular health checks. It manages any switchover events using a finite state machine. PAF refers to the combination of the three nodes as a *formation*. The primary and secondary nodes are responsible for advertising any status changes to the monitor.
+
+PAF leverages PostgreSQL functionality, implementing a *keeper agent* process on each node that monitors local system resources and database health and reports back to the monitor. However, users must initialize and configure all nodes in the formation before using PAF. They must also provide a recovery template file for each node. The multi-standby solution supports a more granular configuration, including a replication quorum and a candidate priority for each node. PAF mandates synchronous replication to eliminate the possibility of data loss in the event of a switchover. If the monitor server becomes inactive, replication can still occur, but the replication process changes to asynchronous mode.
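+
+The commands below are a minimal sketch of bootstrapping a formation with the `pg_autoctl` tool; the data directories and monitor hostname are illustrative placeholders.
+
+```command
+# On the monitor node: initialize the monitor.
+pg_autoctl create monitor --pgdata /var/lib/postgresql/monitor --hostname monitor.example.com
+# On each database node: create a PostgreSQL instance that registers with the monitor.
+pg_autoctl create postgres --pgdata /var/lib/postgresql/node1 --monitor postgres://autoctl_node@monitor.example.com/pg_auto_failover
+# On any node: display the current state of the formation.
+pg_autoctl show state --pgdata /var/lib/postgresql/node1
+```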
+
+**Advantages:**
+
+- It accounts for latency in determining node status. Secondary nodes with significant lag cannot be considered as potential primaries. This helps prevent data loss.
+- It uses synchronous streaming to guarantee a lossless switchover.
+- The use of a monitor allows PAF to avoid split-brain scenarios.
+- It allows administrators to assign each node a candidate priority value, allowing some control over the primary election.
+- It can enforce a quorum before enabling high availability.
+- It allows users to initiate a manual switchover.
+- It automatically restores the HA cluster to a stable state after a failure is resolved.
+- It does not depend on any external components other than PostgreSQL.
+- It uses IP address failover to manage a manual switchover, which does not involve rebooting the current primary.
+- It is a fully-distributed solution, allowing administrative actions from any node.
+- It does not require manual intervention in most failure scenarios. PAF can automatically restart a failed or stopped PostgreSQL process.
+- It allows users to manage service dependencies.
+
+**Drawbacks:**
+
+- The monitor node is a single point of failure. If it fails, an automatic failover can no longer occur.
+- It requires a monitor node. This increases the cost and complexity of the solution.
+- It requires extra UDP ports to be opened.
+- It does not automate the initial PostgreSQL configuration.
+- It does not include a REST API.
+- It cannot detect a misconfigured standby node.
+- It does not support *Network Address Translation* (NAT).
+
+Additional information can be found on the [pg_auto_failover site](https://pg-auto-failover.readthedocs.io/en/main/) or [pg_auto_failover GitHub](https://github.com/hapostgres/pg_auto_failover/blob/main/docs/index.rst).
+
+## Comparing the Replication Managers
+
+Despite their differences, all three alternatives are credible PostgreSQL replication managers. They enable high availability, demonstrating good performance in various failure scenarios. Many businesses could successfully deploy any of these applications. For larger organizations, the correct choice depends on network and business requirements. These include configurability, usability, performance, and ease of integration into automated pipelines.
+
+Patroni has the most features and is the most powerful replication manager. It is highly configurable and customizable, does not require much manual intervention, and has good performance in most failure scenarios. Due to its powerful REST API, it is the best choice for large organizations and for integration into an automated infrastructure pipeline. However, some users might find it difficult to configure and use. It also requires an additional distributed configuration store, which increases complexity.
+
+PAF has fewer features, but it is easier to use and performs very well in a wide range of failover and manual switchover scenarios. It automatically recovers from most failures and rarely requires manual intervention. PAF does not automate PostgreSQL installation and configuration, so it is not as useful in an automated setting. In addition, the requirement for a monitor node increases infrastructure costs and configuration demands. Overall, it is a well-balanced solution and a solid choice for a smaller organization in search of a reliable option.
+
+The repmgr suite is a robust solution that has been around for a longer period of time. As a result, it is well-hardened and its capabilities are well-known. It does not require extra servers or additional network components. This makes it a good choice for administrators who want to deploy a streamlined solution. Unfortunately, it can be difficult to use and there are some scenarios where it requires manual intervention. Repmgr is the application used by the Akamai Marketplace PostgreSQL HA cluster solution. This is a good choice for users who want an easy, hands-off, GUI-driven deployment.
+
+## Conclusion
+
+PostgreSQL databases often benefit from a high availability architecture, which adds resiliency and redundancy to the primary database host. Several architectures are available, but synchronous streaming replication minimizes data loss and is the most reliable. A PostgreSQL high availability solution using the repmgr tool suite can be easily configured through the Akamai Marketplace. For a manual deployment, three replication management solutions are available. Patroni is the most powerful and fully-featured of the options.
However, the flexible and intuitive pg_auto_failover framework and repmgr are also good choices. \ No newline at end of file diff --git a/docs/products/compute/compute-instances/guides/monitor-and-maintain/index.md b/docs/products/compute/compute-instances/guides/monitor-and-maintain/index.md index 05739a5aa03..9c29cfaaac4 100644 --- a/docs/products/compute/compute-instances/guides/monitor-and-maintain/index.md +++ b/docs/products/compute/compute-instances/guides/monitor-and-maintain/index.md @@ -4,7 +4,7 @@ description: "This guide introduces concepts and tools for monitoring and mainta keywords: ["lassie", "monitor", "monitoring", "maintaining", "maintenance"] tags: ["cloud manager","monitoring","linode platform"] published: 2012-08-22 -modified: 2023-03-14 +modified: 2024-03-11 modified_by: name: Linode image: monitor-and-maintain-your-server.png @@ -58,7 +58,7 @@ To turn Lassie on and off, see the [Recover from Unexpected Shutdowns with Lassi ## Linode Managed -[Linode Managed](https://www.linode.com/managed) is our monitoring service that offers 24x7 incident response, dashboard metrics for your Linodes, free cPanel, and an automatic backup service. With a three-month Linode Managed commitment, you also get two complimentary standard site migrations performed by our [Professional Services Team](https://www.linode.com/professional-services). If you are running more than one Compute Instance, not all are required to be managed. You can establish separate accounts (e.g., production and development) and monitor only the most critical services running on designated instance(s). Existing customers can sign up for Linode Managed by [contacting support](https://cloud.linode.com/support/tickets). +[Linode Managed](https://www.linode.com/managed) is our monitoring service that offers 24x7 incident response, dashboard metrics for your Linodes, free cPanel, and an automatic backup service. If you are running more than one Compute Instance, not all are required to be managed. You can establish separate accounts (e.g., production and development) and monitor only the most critical services running on designated instance(s). Existing customers can sign up for Linode Managed by [contacting support](https://cloud.linode.com/support/tickets). ## Manage Logs diff --git a/docs/products/compute/kubernetes/guides/_index.md b/docs/products/compute/kubernetes/guides/_index.md index da173ca5b03..a823c1deeae 100644 --- a/docs/products/compute/kubernetes/guides/_index.md +++ b/docs/products/compute/kubernetes/guides/_index.md @@ -42,6 +42,8 @@ modified: 2023-02-09 - [Drain Node Pools with kubectl](/docs/products/compute/kubernetes/guides/drain-node-pools/) +- [Create a Custom CoreDNS Configuration](/docs/products/compute/kubernetes/guides/coredns-custom-config/) + ## Going Further ### Kubernetes Basics diff --git a/docs/products/compute/kubernetes/guides/coredns-custom-config/index.md b/docs/products/compute/kubernetes/guides/coredns-custom-config/index.md new file mode 100644 index 00000000000..f372ea5f0cd --- /dev/null +++ b/docs/products/compute/kubernetes/guides/coredns-custom-config/index.md @@ -0,0 +1,189 @@ +--- +slug: coredns-custom-config +aliases: ['/guides/create-a-custom-coredns-configuration-in-linode-kubernetes-engine/'] +title: "Create A Custom CoreDNS Configuration" +description: "Learn how to create a custom CoreDNS configuration for your cluster hosted through LKE." 
+keywords: ['CoreDNS','Corefile','DNS']
+authors: ["Linode"]
+published: 2024-03-12
+modified_by:
+  name: Linode
+external_resources:
+- '[CoreDNS](https://coredns.io/)'
+- '[Corefile Explained](https://coredns.io/2017/07/23/corefile-explained/)'
+- '[Server Block](https://coredns.io/manual/configuration/#server-blocks)'
+---
+
+## CoreDNS In LKE
+
+Linode Kubernetes Engine (LKE) provides out-of-the-box intra-cluster domain name resolution via [CoreDNS](https://coredns.io/), its *DNS server*. Every new cluster is provided with a minimal, default CoreDNS configuration, which can be customized to suit your workload's needs.
+
+## Before You Begin
+
+This guide assumes you have a working [Linode Kubernetes Engine (LKE)](https://www.linode.com/products/kubernetes/) cluster running on Linode and you are familiar with Corefile, the *CoreDNS configuration file*.
+
+1. [Install the Kubernetes CLI](/docs/products/compute/kubernetes/guides/kubectl/) (`kubectl`) on the local computer.
+
+1. Follow the instructions in [Deploying and Managing a Cluster with Linode Kubernetes Engine Tutorial](/docs/products/compute/kubernetes/) to connect to an LKE cluster.
+
+    {{< note >}}
+    Ensure that the `KUBECONFIG` context is [persistent](/docs/products/compute/kubernetes/guides/kubectl/#persist-the-kubeconfig-context).
+    {{< /note >}}
+
+1. Ensure that the Kubernetes CLI is using the right cluster context. Run the `get-contexts` subcommand to check:
+
+    ```command
+    kubectl config get-contexts
+    ```
+
+## Default CoreDNS Configuration
+
+You can view your cluster's default CoreDNS configuration by using the following command:
+
+```command
+kubectl get configmap -n kube-system coredns-base -o yaml
+```
+
+The output will resemble the following:
+
+```output
+apiVersion: v1
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health {
+            lameduck 5s
+        }
+        ready
+        kubernetes cluster.local in-addr.arpa ip6.arpa {
+            pods insecure
+            fallthrough in-addr.arpa ip6.arpa
+            ttl 30
+        }
+        prometheus :9153
+        forward . /etc/resolv.conf {
+            max_concurrent 1000
+        }
+        cache 30
+        loop
+        reload
+        loadbalance
+        import custom/*.include
+    }
+    import custom/*.server
+kind: ConfigMap
+metadata:
+  name: coredns-base
+  namespace: kube-system
+  [...]
+```
+
+The default CoreDNS configuration is located under the `Corefile` field in the above ConfigMap.
+
+{{< note type="warning" >}}
+Do not modify the `kube-system/coredns-base` ConfigMap that comes with your LKE cluster. It may be restored to its original state at any time and without notice.
+{{< /note >}}
+
+## Custom CoreDNS Configuration
+
+The default CoreDNS configuration leverages the CoreDNS [`import`](https://coredns.io/plugins/import/) plugin to enable customization. Configuration extensions are added through fields in the `kube-system/coredns-custom` ConfigMap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns-custom
+  namespace: kube-system
+data:
+  sample.include: |
+    # Added to the .:53 default Server Block.
+  sample_a.server: |
+    # Additional Server Block.
+  sample_b.server: |
+    # Another Server Block.
+```
+
+- Fields suffixed with `.include` are added to the default [*Server Block*](https://coredns.io/manual/configuration/#server-blocks).
+- Fields suffixed with `.server` are added as new Server Blocks.
+
+### Create A Custom Configuration
+
+1. Create a manifest for a ConfigMap named `coredns-custom` in the `kube-system` namespace, with the desired configuration. For the purpose of this guide, an example custom configuration is used.
Save it as the `coredns-custom.yaml` file. + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: coredns-custom + namespace: kube-system + data: + # Log all incoming DNS queries. + log.include: | + log + # Private DNS resolution example. Handles FQDN resolutions for *.mydomain.com + # Replace with the target IP address. + mydomain.server: | + mydomain.com.:53 { + forward . + } + ``` + +2. Apply the above ConfigMap manifest: + + ```command + kubectl apply -f coredns-custom.yaml + ``` + + {{< note >}} + CoreDNS will attempt to reload the configuration within 45 seconds after the last modification. + {{< /note >}} + +3. Ensure the custom configuration has been loaded: + + ```command + kubectl logs -n kube-system -l k8s-app=kube-dns + ``` + + For the custom configuration shown above, the output will resemble the following, after the reload is complete: + + ```output + [INFO] Reloading + [INFO] plugin/health: Going into lameduck mode for 5s + [INFO] 127.0.0.1:60399 - 40866 "HINFO IN 349145763287755047.2816822520842364744. udp 56 false 512" NXDOMAIN qr,rd,ra 131 0.000980597s + [INFO] plugin/reload: Running configuration SHA512 = 868c96ccca274c442fefc8db8e98b1f4a5cd05c655db1d990803d4019e5d28af101b24a78f85bae7ab3a3f8894f2791fda9d2b4d9c6ae1aa942080e1a88ce3e6 + [INFO] Reloading complete + ``` + + The custom configuration is now in effect. + +{{< note >}} +The `kube-system/coredns-custom` ConfigMap is persistent and will not be affected by LKE maintenance operations. +{{< /note >}} + +### Restore The Defaults + +1. To restore the default CoreDNS configuration, simply delete the `coredns-custom` ConfigMap: + + ```command + kubectl delete -n kube-system coredns-custom + ``` + +1. Check the logs to make sure the reload was successful: + + ```command + kubectl logs -n kube-system -l k8s-app=kube-dns + ``` + + The output looks similar to the one emitted after applying the custom configuration. + + ```output + [INFO] Reloading + [INFO] plugin/health: Going into lameduck mode for 5s + [WARNING] No files matching import glob pattern: custom/*.include + [WARNING] No files matching import glob pattern: custom/*.server + [INFO] plugin/reload: Running configuration SHA512 = 591cf328cccc12bc490481273e738df59329c62c0b729d94e8b61db9961c2fa5f046dd37f1cf888b953814040d180f52594972691cd6ff41be96639138a43908 + [INFO] Reloading complete + ``` + + The emitted warning messages are now to be expected, and should not be a concern. 
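+
+### Verify Cluster DNS
+
+Whether you are running the default or a custom configuration, you can confirm that name resolution works from inside the cluster with a one-off lookup pod. This is a minimal sketch; the pod name and `busybox` image tag are illustrative choices, not part of LKE's tooling.
+
+```command
+kubectl run -it --rm dns-test --image=busybox:1.36 --restart=Never -- nslookup kubernetes.default.svc.cluster.local
+```
+
+A successful reply from the cluster DNS service indicates that CoreDNS is serving queries with the currently loaded configuration.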
diff --git a/docs/products/networking/vpc/feature.svg b/docs/products/networking/vpc/feature.svg new file mode 100644 index 00000000000..09dc2d87c58 --- /dev/null +++ b/docs/products/networking/vpc/feature.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/docs/products/platform/get-started/guides/developer-access/index.md b/docs/products/platform/get-started/guides/developer-access/index.md index 69439378aa0..069037cdc1a 100644 --- a/docs/products/platform/get-started/guides/developer-access/index.md +++ b/docs/products/platform/get-started/guides/developer-access/index.md @@ -4,7 +4,7 @@ description: Shows how to create an account with access restrictions for develop keywords: ["accounts", "passwords", "linode manager", "manager", "security"] tags: ["ssh","linode platform","drupal","security","mysql","wordpress"] published: 2018-07-26 -modified: 2023-03-14 +modified: 2024-03-11 modified_by: name: Linode aliases: ['/platform/create-limited-developer-account/','/guides/create-limited-developer-account/'] @@ -19,7 +19,7 @@ When you hire someone to work on your Linode, there are a variety of ways to gra This guide explains and answers some of the most frequently asked questions about account access. The sections are separated in order of granularity, starting with service-level access at the top, and working towards application-specific access. -For security and privacy, [Linode Support](/docs/products/platform/get-started/guides/support/) is not able to troubleshoot issues related to users and application access. Instead, Linode offers an in-house [Professional Services](https://www.linode.com/products/pro-services/) team that can be hired to help with projects. +For security and privacy, [Linode Support](/docs/products/platform/get-started/guides/support/) is not able to troubleshoot issues related to users and application access. Instead, Linode offers an in-house Professional Services team that can be hired to help with projects. You can reach out to that team through the [Contact Sales](https://www.linode.com/company/contact/) form. {{< note >}} The following sections include commands that show how to manipulate credentials on your Compute Instances, and these commands use `exampleUser` in place of your users' names. Replace `exampleUser` with whatever you would like to name your users. diff --git a/docs/products/platform/get-started/guides/support/index.md b/docs/products/platform/get-started/guides/support/index.md index 0d8c45bb462..13743b7fe3e 100644 --- a/docs/products/platform/get-started/guides/support/index.md +++ b/docs/products/platform/get-started/guides/support/index.md @@ -2,6 +2,7 @@ title: "Help & Support" description: "Information about Linode support, including the scope of support and how to get help." published: 2023-03-14 +modified: 2024-03-11 modified_by: name: Linode keywords: ["support", "tech support", "tickets", "help desk"] @@ -46,7 +47,7 @@ Whenever you need assistance with software that's installed on your Compute Inst ### Infrastructure Deployment and Configuration. -For guidance on your architecture, migrations, and software deployments, consider working with our Professional Services team. To get started, [contact us](https://www.linode.com/products/pro-services/#speakwithus) or [request a free quote](https://proservices.typeform.com/to/xJiIEy). +For guidance on your architecture, migrations, and software deployments, consider working with our Professional Services team. 
You can reach out to that team through the [Contact Sales](https://www.linode.com/company/contact/) form. If you're looking for a partner to help guide your transition to the Cloud or help build your platform, our [Partner Directory](https://partner-directory.linode.com/s/) offers a list of trusted partners. diff --git a/docs/products/services/managed/_index.md b/docs/products/services/managed/_index.md index 95657fadf2b..0caa842c9a0 100644 --- a/docs/products/services/managed/_index.md +++ b/docs/products/services/managed/_index.md @@ -1,7 +1,7 @@ --- title: Managed title_meta: "Linode Managed Service Product Documentation" -description: "Linode Managed is a service that offers incident response as well as free migrations and discounted professional services." +description: "Linode Managed is a service that offers incident response, backups, and monitoring." tab_group_main: is_root: true title: Overview @@ -10,6 +10,7 @@ cascade: product_description: "A suite of services that includes a robust monitoring system, 24/7 incident response, backups, and cPanel licenses." aliases: ['/guides/platform/managed/'] published: 2023-04-11 +modified: 2024-03-11 --- Downtime is expensive and puts your company’s reputation at risk. [Linode Managed](https://www.linode.com/products/managed/) helps minimize this risk through a suite of services and products aimed at monitoring your Compute Instances, minimizing downtime, protecting your data, and migrating to the Linode Platform. @@ -20,7 +21,7 @@ Linode Managed applies to all Compute Instances on an account **except** for nod ## Benefits -- **24/7 Monitoring and Incident Response:** The core benefit of Linode Managed is 24/7 monitoring and incident response. You can configure monitors for URLs, IP addresses, or TCP ports. This monitor periodically makes a TCP or HTTP request to that property. If a check fails, our experts take immediate steps to get your systems back online as quickly as possible. If they are not able to fix the issue, our experts will share their findings and the steps they've taken so far. Managed services does not include assistance with maintenance, updates, or the configuration of software on your Compute Instances. For that, contact our [Professional Services](https://www.linode.com/products/pro-services/) team. +- **24/7 Monitoring and Incident Response:** The core benefit of Linode Managed is 24/7 monitoring and incident response. You can configure monitors for URLs, IP addresses, or TCP ports. This monitor periodically makes a TCP or HTTP request to that property. If a check fails, our experts take immediate steps to get your systems back online as quickly as possible. If they are not able to fix the issue, our experts will share their findings and the steps they've taken so far. Managed services does not include assistance with maintenance, updates, or the configuration of software on your Compute Instances. - **Included Services and Software:** The following services and software applications are included at no additional charge to Linode Managed customers @@ -32,11 +33,6 @@ Linode Managed applies to all Compute Instances on an account **except** for nod - 2 hour resolution for the past month - 1 day resolution for the past year -- **Professional Services:** - - - **Discounted services:** Linode Managed customers receive a 20% discount for any projects completed by our [Professional Services](https://www.linode.com/products/pro-services/) team. 
This team of experts can handle server installations, configurations, architectures, deployments, one-off sysadmin jobs, site migrations, and more. - - **Free migrations:** Customers who sign up with Linode Managed for a minimum of 3 months receive 2 free site migrations, performed by our Professional Services team. Use the [Professional Services contact form](https://www.linode.com/products/pro-services/#contactus) to learn more and to schedule your site migrations. - ## Pricing The cost for Linode Managed is $100 per Linode Compute Instance per month. For example, if there are 10 Compute Instances on your account, your total monthly cost will be $1,000. diff --git a/docs/products/services/professional-services/_index.md b/docs/products/services/professional-services/_index.md deleted file mode 100644 index 9bc60e52cf1..00000000000 --- a/docs/products/services/professional-services/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Professional Services -title_meta: "Professional Services Documentation" -description: "Linode Professional Services offers site migrations, tuning, and deployments." -tab_group_main: - is_root: true - title: Overview - weight: 10 -cascade: - date: 2020-06-02 - product_description: "Linode's experienced cloud consultants are ready to help you architect your services, carry out site migrations, and deploy software. Achieve your short- and long-term goals in the cloud with us." ---- - -When you hire Linode Professional Services to orchestrate and execute any system administration task – whether for one site or an entire fleet of servers – we’ll make sure it’s done safely, efficiently, and stress-free. In a site migration, for example, we would configure the new Linode servers, sync content, manage the DNS transition, and handle every other aspect of the move. - -Our Professional Services team are experts in architecting the most complex implementations. We can design and deploy a high availability environment using technologies such as HAProxy, GlusterFS, and Galera for database synchronization. If you want to scale seamlessly, we can utilize distributed deployment systems such as SaltStack, Chef, or Puppet. - -## Site Migrations, Tuning, and Deployments - -Let us migrate your site to Linode with minimal stress and downtime. We'll handle everything from content transfer and DNS records to the cutover of your traffic. We can help you transition to a highly available architecture for your services. - -Whether your site's traffic is growing exponentially or struggling to handle its current workload, our team is experienced in identifying performance bottlenecks and tuning your configuration to get the most out of your Linode. - -Lean on our team's diverse experience and skill sets to execute pain-free software deployments that follow industry standards and best practices. - -## Pricing - -To get a quote on a project, [complete the contact form](https://www.linode.com/products/pro-services/#contactus) on the Professional Services page. 
diff --git a/docs/products/services/professional-services/feature.png b/docs/products/services/professional-services/feature.png deleted file mode 100644 index 94c3a6b25d1..00000000000 Binary files a/docs/products/services/professional-services/feature.png and /dev/null differ diff --git a/docs/products/services/professional-services/feature.svg b/docs/products/services/professional-services/feature.svg deleted file mode 100644 index 75169f62129..00000000000 --- a/docs/products/services/professional-services/feature.svg +++ /dev/null @@ -1,7 +0,0 @@ - - Professional Services - - - - - \ No newline at end of file diff --git a/docs/products/services/professional-services/resources/index.md b/docs/products/services/professional-services/resources/index.md deleted file mode 100644 index 0ea0e9e8414..00000000000 --- a/docs/products/services/professional-services/resources/index.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Resources -title_meta: "Resources for Professional Services" -description: "Resources and other information related to the Linode Professional Services including videos, blog posts, community posts, customer stories, ebooks, and press releases." -tab_group_main: - weight: 40 ---- - -## Videos - -- [Linode India Overview](https://www.linode.com/content/linode-mumbai-data-center-overview-video/) - -## Blog Posts - -- [Introducing Professional Services](https://www.linode.com/blog/cloud-consulting-services/introducing-professional-services/) - -- [How Cloud Infrastructure Service and Support Helped Lead Sherpa Through Accelerated Growth](https://www.linode.com/blog/cloud-consulting-services/how-cloud-infrastructure-service-and-support-helped-lead-sherpa-through-accelerated-growth/) - -- [Linode Managed is even better than before!](https://www.linode.com/blog/linode/linode-managed-is-even-better-than-before/) - -## Community Posts - -- [Linode Professional Services (trials)](https://www.linode.com/community/questions/7030/linode-professional-services-trials) - -- [Can I have Linode configure and update my system for me?](https://www.linode.com/community/questions/342/can-i-have-linode-configure-and-update-my-system-for-me) - -- [Can you help me migrate to Linode?](https://www.linode.com/community/questions/50/can-you-help-me-migrate-to-linode) - -- [Can Linode help me perform the server related changes like mod_rewrite enable, etc?](https://www.linode.com/community/questions/429/can-linode-help-me-perform-the-server-related-changes-like-mod_rewrite-enableetc) - -Browse our [Linode Community Questions & Answers page](https://www.linode.com/community/questions/) for more [related questions](https://www.linode.com/community/questions/search?query=professional+services). 
- -## Customer Stories - -- [Craft of Code Jason Nickel](https://www.linode.com/spotlight/jason-nickel/) - -- [Craft of Code Retargetly](https://www.linode.com/spotlight/retargetly/) - -## eBooks - -- [2020 Trends in Managed Services & Hosting](https://www.linode.com/content/2020-trends-in-managed-services-and-hosting/) - -## Press Releases - -- [Linode Launches Frankfurt Data Center](https://www.linode.com/press-release/linode-launches-frankfurt-datacenter/) - -- [Linode Celebrates 12th Birthday by Adopting KVM & Boosting Cloud Server Performance by 300%](https://www.linode.com/press-release/linode-celebrates-12th-birthday-by-adopting-kvm-and-boosting-cloud-server-performance-by-300/) - -- [Linode Opens New Data Center in Singapore](https://www.linode.com/press-release/linode-opens-new-datacenter-in-singapore/) diff --git a/docs/products/tools/marketplace/_shortguides/marketplace-required-limited-user-fields-shortguide/index.md b/docs/products/tools/marketplace/_shortguides/marketplace-required-limited-user-fields-shortguide/index.md index faf4ff430ca..a4c9272b79a 100644 --- a/docs/products/tools/marketplace/_shortguides/marketplace-required-limited-user-fields-shortguide/index.md +++ b/docs/products/tools/marketplace/_shortguides/marketplace-required-limited-user-fields-shortguide/index.md @@ -1,5 +1,5 @@ --- -# Shortguide: Details the optional fields related to creating a limited user account on a Marketplace App. +# Shortguide: Details the fields related to creating a required limited user account on a Marketplace App. headless: true show_on_rss_feed: false @@ -7,12 +7,18 @@ show_on_rss_feed: false #### Limited Sudo User -You can fill out the following fields to automatically create a limited sudo user for your new Compute Instance. This account will be assigned to the *sudo* group, which provides elevated permissions when running commands with the `sudo` prefix. +You need to fill out the following fields to automatically create a limited sudo user with a strong, generated password for your new Compute Instance. This account will be assigned to the *sudo* group, which provides elevated permissions when running commands with the `sudo` prefix. -- **Limited sudo user:** Enter your preferred username for the limited user. *No Capital Letters, Spaces, or Special Characters* -- **SSH public key for the limited user:** If you wish to login as the limited user through public key authentication (without entering a password), enter your public key here. See [Creating an SSH Key Pair and Configuring Public Key Authentication on a Server](/docs/guides/use-public-key-authentication-with-ssh/) for instructions on generating a key pair. -- **Disable root access over SSH:** To block the root user from logging in over SSH, select *Yes* (recommended). You can still switch to the root user once logged in, and you can also log in as root through [Lish](/docs/products/compute/compute-instances/guides/lish/). +- **Limited sudo user:** Enter your preferred username for the limited user. *No Capital Letters, Spaces, or Special Characters* -{{< note type="warning" title="Locating Your Sudo Password As Root">}} -If you disable root access for your deployment and do not enter a valid SSH public key, you will need to login as the root user via the [Lish console](/docs/products/compute/compute-instances/guides/lish/) and locate the credentials file found at `/root/.credentials` to obtain the limited sudo user password.
-{{< /note >}} \ No newline at end of file +{{< note type="warning" title="Locating The Generated Sudo Password">}} +A password is generated for the limited user and stored in a `.credentials` file in their home directory, along with application-specific passwords. This can be viewed by running: `cat /home/$USERNAME/.credentials` + +For best results, add an [account SSH key](/docs/products/platform/accounts/guides/manage-ssh-keys/) for the Cloud Manager user that is deploying the instance, and designate that user as an `authorized_user`, either in the API or by selecting that option in the Cloud Manager. Their SSH public key will be assigned to _both_ root and the limited user. +{{< /note >}} + +- **Disable root access over SSH:** To block the root user from logging in over SSH, select *Yes*. You can still switch to the root user once logged in, and you can also log in as root through [Lish](/docs/products/compute/compute-instances/guides/lish/). + +{{< note type="warning" title="Accessing The Instance Without SSH">}} +If you disable root access for your deployment and do not provide a valid Account SSH Key assigned to the `authorized_user`, you will need to log in as the root user via the [Lish console](/docs/products/compute/compute-instances/guides/lish/) and run `cat /home/$USERNAME/.credentials` to view the generated password for the limited user. +{{< /note >}} diff --git a/docs/products/tools/marketplace/guides/_index.md b/docs/products/tools/marketplace/guides/_index.md index 70f50084aa5..82080cee425 100644 --- a/docs/products/tools/marketplace/guides/_index.md +++ b/docs/products/tools/marketplace/guides/_index.md @@ -65,6 +65,7 @@ See the [Marketplace](/docs/marketplace/) listing page for a full list of all Ma - [Kepler](/docs/products/tools/marketplace/guides/kepler/) - [LAMP Stack](/docs/products/tools/marketplace/guides/lamp-stack/) - [LEMP Stack](/docs/products/tools/marketplace/guides/lemp-stack/) +- [LinuxGSM](/docs/products/tools/marketplace/guides/linuxgsm/) - [LiteSpeed cPanel](/docs/products/tools/marketplace/guides/litespeed-cpanel/) - [LiveSwitch](/docs/products/tools/marketplace/guides/liveswitch/) - [Mastodon](/docs/products/tools/marketplace/guides/mastodon/) @@ -88,6 +89,7 @@ See the [Marketplace](/docs/marketplace/) listing page for a full list of all Ma - [Owncast](/docs/products/tools/marketplace/guides/owncast/) - [Owncloud Server](/docs/products/tools/marketplace/guides/owncloud/) +- [Passbolt](/docs/products/tools/marketplace/guides/passbolt/) - [Passky](/docs/products/tools/marketplace/guides/passky/) - [Peppermint](/docs/products/tools/marketplace/guides/peppermint/) - [phpMyAdmin](/docs/products/tools/marketplace/guides/phpmyadmin/) - [Pi-hole](/docs/products/tools/marketplace/guides/pihole/) diff --git a/docs/products/tools/marketplace/guides/harbor/index.md b/docs/products/tools/marketplace/guides/harbor/index.md index 8ed72f6680e..31c01475eda 100644 --- a/docs/products/tools/marketplace/guides/harbor/index.md +++ b/docs/products/tools/marketplace/guides/harbor/index.md @@ -54,7 +54,7 @@ Harbor is an excellent compliment to the [Linode Kubernetes Engine (LKE)](/docs/ - **SSH:** Log in to your Compute Instance over SSH using the `root` user and run the following command. See [Connecting to a Remote Server Over SSH](/docs/guides/connect-to-server-over-ssh/) for assistance. ```command - cat /root/.credentials + cat /home/$USERNAME/.credentials ``` 1.
In the Harbor login screen that appears, enter `admin` as the username and use the *Harbor admin password* found in your `.credentials` file. diff --git a/docs/products/tools/marketplace/guides/jitsi/index.md b/docs/products/tools/marketplace/guides/jitsi/index.md index e58400239b7..64dee2cdc5b 100644 --- a/docs/products/tools/marketplace/guides/jitsi/index.md +++ b/docs/products/tools/marketplace/guides/jitsi/index.md @@ -3,7 +3,7 @@ description: "Jitsi is an open source suite that helps you host your own virtual keywords: ['jitsi','marketplace', 'server'] tags: ["ubuntu","marketplace", "web applications","linode platform", "cloud manager", "ssl", "education"] published: 2020-09-28 -modified: 2022-03-08 +modified: 2024-03-13 image: Deploy_Jitsi_oneclickapps.png modified_by: name: Linode @@ -31,14 +31,14 @@ authors: ["Linode"] ## Configuration Options - **Supported distributions:** Ubuntu 22.04 LTS, Ubuntu 20.04 LTS -- **Recommended minimum plan:** All plan types and sizes can be used, though we recommend a 8GB Dedicated CPU or Shared Compute Instance for hosting large meetings or multiple simultaneous meetings. +- **Suggested minimum plan:** All plan types and sizes can be used. For best results, use an 8GB Dedicated CPU or Shared Compute Instance when hosting large meetings or multiple simultaneous meetings. ### Jitsi Options {{< content "marketplace-custom-domain-fields-shortguide">}} - **Admin Email for the Jitsi server** -{{< content "marketplace-limited-user-fields-shortguide">}} +{{< content "marketplace-required-limited-user-fields-shortguide">}} {{< content "marketplace-special-character-limitations-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/jupyterlab/index.md b/docs/products/tools/marketplace/guides/jupyterlab/index.md index 0c814804a18..9a56060b9c9 100644 --- a/docs/products/tools/marketplace/guides/jupyterlab/index.md +++ b/docs/products/tools/marketplace/guides/jupyterlab/index.md @@ -47,10 +47,10 @@ Launch your web browser and navigate to the custom domain you set during deploym ## Obtaining the Jupyter Token -By default, Jupyter issues a token for authentication. The Jupyter access token was automatically generated during the initial install process and is stored in the `/root/.credentials` file. To view the default token, log in to your Compute Instance either through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish) or via SSH, and check the contents of the file: +By default, Jupyter issues a token for authentication. The Jupyter access token was automatically generated during the initial install process and is stored in the `/home/$USERNAME/.credentials` file. To view the default token, log in to your Compute Instance either through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish) or via SSH, and check the contents of the file: ```command -cat /root/.credentials +cat /home/$USERNAME/.credentials ``` Copy and paste this token into the **Password or token:** field on the Jupyter login page. If you prefer password authentication, use the token to set a password in the **Setup a Password** section on the login page. Once this is done, you can employ both the token and the password for accessing JupyterLab.
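+If you work from the command line, you can pull the token out of the credentials file directly. The following is a minimal sketch, not part of the deployment itself: it assumes the token appears on a line of `/home/$USERNAME/.credentials` containing the word "token", and it relies on Jupyter's standard support for passing the token as a URL query parameter.
+
+```command
+# Print only the credential line(s) that mention the token:
+grep -i token /home/$USERNAME/.credentials
+```
+
+Jupyter also accepts the token in the login URL itself, for example `https://example.com/?token=<paste-token-here>`, where `example.com` stands in for the custom domain you set during deployment.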
diff --git a/docs/products/tools/marketplace/guides/kepler/index.md b/docs/products/tools/marketplace/guides/kepler/index.md index 98a57243caf..1863de56132 100644 --- a/docs/products/tools/marketplace/guides/kepler/index.md +++ b/docs/products/tools/marketplace/guides/kepler/index.md @@ -45,7 +45,7 @@ Kepler requires a valid license to use the software beyond the initial 14 day fr - **Website Title:** Enter a title for your WordPress site. {{< note >}} - The passwords for the WordPress Admin User, WordPress Database User and MySQL root user are automatically generated and provided in the file `/root/.credentials` when the WordPress deployment completes. + The passwords for the WordPress Admin User, WordPress Database User and MySQL root user are automatically generated and provided in the file `/home/$USERNAME/.credentials` when the WordPress deployment completes. {{< /note >}} {{< content "marketplace-required-limited-user-fields-shortguide">}} @@ -73,10 +73,10 @@ Once the app has been *fully* deployed, you need to obtain the credentials from - **Lish Console:** Within the Cloud Manager, navigate to **Linodes** from the left menu, select the Compute Instance you just deployed, and click the **Launch LISH Console** button. Log in as the `root` user. See [Using the Lish Console](/docs/products/compute/compute-instances/guides/lish/). - **SSH:** Log in to your Compute Instance over SSH using the `root` user. See [Connecting to a Remote Server Over SSH](/docs/guides/connect-to-server-over-ssh/) for assistance. -1. Once logged in, access the credentials file by runing the following command: +1. Once logged in, access the credentials file by running the following command: ```command - cat /root/.credentials + cat /home/$USERNAME/.credentials ``` 1. This displays the passwords that were automatically generated when the instance was deployed. Once you save these passwords, you can safely delete this file. diff --git a/docs/products/tools/marketplace/guides/lamp-stack/index.md b/docs/products/tools/marketplace/guides/lamp-stack/index.md index a54fa2d841b..64c451bfedd 100644 --- a/docs/products/tools/marketplace/guides/lamp-stack/index.md +++ b/docs/products/tools/marketplace/guides/lamp-stack/index.md @@ -35,7 +35,7 @@ A LAMP (Linux, [Apache](https://www.apache.org), [MySQL](https://www.mysql.com), - **Email address** *(required)*: Enter the email address to use for generating the SSL certificates. {{< note >}} - The password for the MySQL root user is automatically generated and provided in the file `/root/.credentials` when the LAMP deployment completes. + The password for the MySQL root user is automatically generated and provided in the file `/home/$USERNAME/.credentials` when the LAMP deployment completes. {{< /note >}} {{< content "marketplace-required-limited-user-fields-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/lemp-stack/index.md b/docs/products/tools/marketplace/guides/lemp-stack/index.md index 6fea8f685e2..a1a05812819 100644 --- a/docs/products/tools/marketplace/guides/lemp-stack/index.md +++ b/docs/products/tools/marketplace/guides/lemp-stack/index.md @@ -36,7 +36,7 @@ The LEMP stack (Linux, [NGINX](https://www.nginx.com/), [MySQL](https://www.mysq - **Email address** *(required)*: Enter the email address to use for generating the SSL certificates. {{< note >}} - The password for the MySQL root user is automatically generated and provided in the file `/root/.credentials` when the LEMP deployment completes. 
+ The password for the MySQL root user is automatically generated and provided in the file `/home/$USERNAME/.credentials` when the LEMP deployment completes. {{< /note >}} {{< content "marketplace-required-limited-user-fields-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/linuxgsm/index.md b/docs/products/tools/marketplace/guides/linuxgsm/index.md new file mode 100644 index 00000000000..5d0c926cd03 --- /dev/null +++ b/docs/products/tools/marketplace/guides/linuxgsm/index.md @@ -0,0 +1,65 @@ +--- +description: "Deploy LinuxGSM on a Linode Compute Instance. LinuxGSM is a command line utility for managing multiplayer game servers." +keywords: ['game servers','multiplayer','game'] +tags: ["marketplace", "linode platform", "cloud manager"] +published: 2024-01-12 +modified_by: + name: Linode +title: "Deploy LinuxGSM through the Linode Marketplace" +external_resources: +- '[LinuxGSM](https://linuxgsm.com/)' +authors: ["Linode"] +--- + +[LinuxGSM](https://linuxgsm.com/) is a command line utility for deploying and managing dedicated multiplayer game servers. + +## Deploying a Marketplace App + +{{< content "deploy-marketplace-apps-shortguide">}} + +{{< content "marketplace-verify-standard-shortguide">}} + +{{< note >}} +**Estimated deployment time:** LinuxGSM should be fully installed within 10-15 minutes after the Compute Instance has finished provisioning. +{{< /note >}} + +## Configuration Options + +- **Supported distributions:** Ubuntu 22.04 LTS +- **Suggested plan:** All plan types and sizes can be used. + +### LinuxGSM Options + +- **Email address** *(required)*: Enter the email address to use for generating the SSL certificates. +- **Game Server Name**: Enter the code for the server you want to install. See the [LinuxGSM Server List](https://github.com/GameServerManagers/LinuxGSM/blob/master/lgsm/data/serverlist.csv). + +{{< content "marketplace-required-limited-user-fields-shortguide">}} + +{{< content "marketplace-custom-domain-fields-shortguide">}} + +{{< content "marketplace-special-character-limitations-shortguide">}} + +## Getting Started after Deployment + +### Obtaining the Admin Password + +The password for the sudo user account was automatically generated during the initial install process. To find this password, log in to your Compute Instance through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish), or with SSH if you provided an [Account Key](/docs/products/platform/accounts/guides/manage-ssh-keys/). The credentials are available in the file `/home/$USERNAME/.credentials`: +``` +cat /home/$USERNAME/.credentials +Sudo Username: $USERNAME +Sudo Password: 0oVSsWmkbGesmtuTlOEgFl7t +LinuxGSM User: linuxgsm +LinuxGSM User Password: nc023n30cal-3kd +``` +`linuxgsm` is a member of the sudo group. + +To complete the server installation process, switch to the game server user with `su - linuxgsm` (the login shell starts in `/home/linuxgsm`), then run `./$GAMESERVERNAME install`, replacing `$GAMESERVERNAME` with the server code you chose during deployment. + +## More Information + +Additional information is available from LinuxGSM.
+ +- [LinuxGSM](https://linuxgsm.com/) +- [LinuxGSM Documentation](https://docs.linuxgsm.com/) + +{{< content "marketplace-update-note-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/nats-single-node/index.md b/docs/products/tools/marketplace/guides/nats-single-node/index.md index 57e80d58933..4160fae3bd6 100644 --- a/docs/products/tools/marketplace/guides/nats-single-node/index.md +++ b/docs/products/tools/marketplace/guides/nats-single-node/index.md @@ -53,10 +53,10 @@ Open your web browser and go to the custom domain you specified during deploymen ## Obtaining the NATS users passwords. -By default, this NATS deployment creates 2 users, one named "example" and another system user named "system". The passwords for these users are generated during the initial install process. To obtain these password, log in to your Compute Instance either through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish) or via SSH, then just read the /root/.credentials file, ie: +By default, this NATS deployment creates 2 users, one named "example" and another system user named "system". The passwords for these users are generated during the initial install process. To obtain these passwords, log in to your Compute Instance either through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish) or via SSH, then read the `/home/$USERNAME/.credentials` file: ```command -cat /root/.credentials +cat /home/$USERNAME/.credentials ``` This file will contain the two credentials needed for those users. diff --git a/docs/products/tools/marketplace/guides/passbolt/index.md b/docs/products/tools/marketplace/guides/passbolt/index.md new file mode 100644 index 00000000000..0bd4b0c328e --- /dev/null +++ b/docs/products/tools/marketplace/guides/passbolt/index.md @@ -0,0 +1,86 @@ +--- +slug: deploy-passbolt-through-the-linode-marketplace +title: "Deploy Passbolt Through The Linode Marketplace" +description: 'Deploy Passbolt password manager through the Linode Marketplace.' +og_description: 'Deploy Passbolt password manager through the Linode Marketplace.' +keywords: ['passbolt','password manager','security','authentication'] +license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)' +authors: ["Linode"] +published: 2024-02-05 +modified_by: + name: Linode +--- + +[Passbolt Password Manager](https://github.com/passbolt/passbolt_api) is an open-source password manager designed for teams and businesses. It allows users to securely store, share, and manage passwords. + +## Deploying a Marketplace App + +{{< content "deploy-marketplace-apps-shortguide">}} + +{{< content "marketplace-verify-standard-shortguide">}} + +{{< note >}} +**Estimated deployment time:** Passbolt should be fully installed within 5-10 minutes after the Compute Instance has finished provisioning. +{{< /note >}} + +## Configuration Options + +- **Supported distributions:** Ubuntu 22.04 LTS +- **Suggested plan:** For best results, use a 4GB Dedicated CPU or Shared Compute Instance for Passbolt. + +### Passbolt Options + +{{< content "marketplace-required-limited-user-fields-shortguide">}} + +## Getting Started after Deployment + +To start registration, follow the link provided in `/etc/motd` to visit the domain you chose during deployment.
+``` +cat /etc/motd +********************************************************* +Akamai Connected Cloud passbolt Marketplace App + +Registering admin user: https://$DNS_NAME/setup/start/$UUID + +App URL: https://$DNS_NAME +Credentials File: /home/$SUDO_USER/.credentials +Documentation: https://www.linode.com/marketplace/apps/linode/passbolt/ +********************************************************* +To delete this message of the day: rm /etc/motd +``` + + +Be sure to download the `passbolt-recovery-kit.txt` file and store it in a safe place. This PGP Private Key Block (and the passphrase you set during setup) will be required if you ever need to go through account recovery. + +You will need to create a passphrase to access Passbolt: + + ![Screenshot of Passbolt passphrase](passphrase_screen.jpg) + +Once the passphrase is set, you will be able to view the Passbolt main screen. + + ![Screenshot of Passbolt main screen](passbolt_mainscreen.jpg) + +For details on how to configure Passbolt, see the [Passbolt Installation Documentation](https://help.passbolt.com/hosting/install/ce/ubuntu/ubuntu.html). + +### Email Configuration + +Postfix is installed as part of the Marketplace App, allowing you to send a test email. Unless you've manually configured your own SMTP provider, use the following settings to send a test email through the SMTP screen (`https://example.com/app/administration/smtp-settings`), replacing `example.com` with your FQDN: +* **Email provider**: Other +* **Authentication method**: None +* **SMTP host**: localhost +* **Use TLS?**: No +* **Port**: 25 +* **SMTP client**: Leave blank +* **Sender name**: root +* **Sender email**: root@example.com
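+Before relying on these settings, you can optionally confirm that the local Postfix instance accepts and delivers mail from the command line. This is a minimal sketch under the assumption that the app ships Ubuntu's default Postfix setup; `user@example.com` is a placeholder you should replace with a mailbox you control:
+
+```command
+# Hand a short test message to the local Postfix daemon:
+printf "Subject: Passbolt SMTP test\n\nHello from Passbolt.\n" | sendmail user@example.com
+
+# Check the mail log (default location on Ubuntu) to confirm the message was queued and sent:
+tail -n 20 /var/log/mail.log
+```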
+ +{{< content "marketplace-update-note-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/passbolt/passbolt_mainscreen.jpg b/docs/products/tools/marketplace/guides/passbolt/passbolt_mainscreen.jpg new file mode 100644 index 00000000000..44b67018d99 Binary files /dev/null and b/docs/products/tools/marketplace/guides/passbolt/passbolt_mainscreen.jpg differ diff --git a/docs/products/tools/marketplace/guides/passbolt/passphrase_screen.jpg b/docs/products/tools/marketplace/guides/passbolt/passphrase_screen.jpg new file mode 100644 index 00000000000..73a9d254715 Binary files /dev/null and b/docs/products/tools/marketplace/guides/passbolt/passphrase_screen.jpg differ diff --git a/docs/products/tools/marketplace/guides/secure-your-server/index.md b/docs/products/tools/marketplace/guides/secure-your-server/index.md index 8f51f72413c..f4722455ee8 100644 --- a/docs/products/tools/marketplace/guides/secure-your-server/index.md +++ b/docs/products/tools/marketplace/guides/secure-your-server/index.md @@ -26,17 +26,7 @@ This Marketplace App automatically configures a new Compute Instance with a limi ### Secure Your Server Options -#### Limited User (Required) - -You can fill out the following fields to automatically create a limited sudo user for your new Compute Instance. This account will be assigned to the *sudo* group, which provides elevated permission when running commands with the `sudo` prefix. - -- **Limited sudo user:** Enter your preferred username for the limited user. *No Capital Letters, Spaces, or Special Characters* -- **SSH public key for the limited user:** If you wish to login as the limited user through public key authentication (without entering a password), enter your public key here. See [Creating an SSH Key Pair and Configuring Public Key Authentication on a Server](/docs/guides/use-public-key-authentication-with-ssh/) for instructions on generating a key pair. -- **Disable root access over SSH:** To block the root user from logging in over SSH, select *Yes* (recommended). You can still switch to the root user once logged in and you can also log in as root through [Lish](/docs/products/compute/compute-instances/guides/lish/). - -{{< note type="warning" title="Locating Your Sudo Password As Root">}} -If you disable root access for your deployment and do not enter a valid SSH public key, you will need to login as the root user via the [Lish console](/docs/products/compute/compute-instances/guides/lish/) and locate the credentials file found at `/root/.credentials` to obtain the limited sudo user password. -{{< /note >}} +{{< content "marketplace-required-limited-user-shortguide">}} {{< content "marketplace-custom-domain-fields-shortguide">}} diff --git a/docs/products/tools/marketplace/guides/splunk/index.md b/docs/products/tools/marketplace/guides/splunk/index.md index 1eeec85025a..59bd7b02ad9 100644 --- a/docs/products/tools/marketplace/guides/splunk/index.md +++ b/docs/products/tools/marketplace/guides/splunk/index.md @@ -52,10 +52,10 @@ The Akamai Connected Cloud Splunk Marketplace App includes support for the [Akam ### Obtaining the Admin Password -The password for the sudo user account was automatically generated during the initial install process. To find this password, log in to your Compute Instance through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish). 
The credentials are available in the file `/root/.credentials` +The password for the sudo user account was automatically generated during the initial install process. To find this password, log in to your Compute Instance through the [LISH Console](/docs/products/compute/compute-instances/guides/lish/#through-the-cloud-manager-weblish). The credentials are available in the file `/home/$USERNAME/.credentials`: ``` -cat /root/.credentials -sudo username: $SUDO_USER +cat /home/$USERNAME/.credentials +sudo username: $USERNAME sudo password: 0oVSsWmkbGesmtuTlOEgFl7t splunk user: $SPLUNK_USER splunk admin password: fRLdHksJoMPrjLtRCogEPVLYOML1zQtQ0kIsL7IWvo49 @@ -65,7 +65,7 @@ splunk admin password: fRLdHksJoMPrjLtRCogEPVLYOML1zQtQ0kIsL7IWvo49 Open a browser and navigate to `https://192-0-2-1.ip.linodeusercontent.com:8000`, where `192-0-2-1` represents the IPv4 address of your new Compute Instance. See the [Managing IP Addresses](/docs/products/compute/compute-instances/guides/manage-ip-addresses/#configuring-rdns) guide for information on viewing the rDNS value. -You will be presented a login field where you can enter the credentials you previously specified in the *Splunk Username* and the generated *Splunk Password* in `/root/.credentials` +You will be presented with a login form where you can enter the *Splunk Username* you previously specified and the generated *Splunk Password* found in `/home/$USERNAME/.credentials`. Now that you’ve accessed your dashboard, checkout [the official Splunk documentation](https://docs.splunk.com/Documentation/Splunk) to learn how to further configure your instance. diff --git a/docs/products/tools/marketplace/guides/woocommerce/index.md b/docs/products/tools/marketplace/guides/woocommerce/index.md index b5d62d4d9ae..eca3344438a 100644 --- a/docs/products/tools/marketplace/guides/woocommerce/index.md +++ b/docs/products/tools/marketplace/guides/woocommerce/index.md @@ -42,7 +42,7 @@ external_resources: - **Website Title:** Enter a title for your WordPress site. {{< note >}} - The passwords for the WordPress Admin User, WordPress Database User and MySQL root user are automatically generated and provided in the file `/root/.credentials` when the WordPress deployment completes. + The passwords for the WordPress Admin User, WordPress Database User and MySQL root user are automatically generated and provided in the file `/home/$USERNAME/.credentials` when the WordPress deployment completes. {{< /note >}} {{< content "marketplace-required-limited-user-fields-shortguide">}} @@ -64,10 +64,10 @@ Once the app has been *fully* deployed, you need to obtain the credentials from - **Lish Console:** Within the Cloud Manager, navigate to **Linodes** from the left menu, select the Compute Instance you just deployed, and click the **Launch LISH Console** button. Log in as the `root` user. See [Using the Lish Console](/docs/products/compute/compute-instances/guides/lish/). - **SSH:** Log in to your Compute Instance over SSH using the `root` user. See [Connecting to a Remote Server Over SSH](/docs/guides/connect-to-server-over-ssh/) for assistance. -1. Once logged in, access the credentials file by runing the following command: +1. Once logged in, access the credentials file by running the following command: ```command - cat /root/.credentials + cat /home/$USERNAME/.credentials ``` 1. This displays the passwords that were automatically generated when the instance was deployed. Once you save these passwords, you can safely delete this file.
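+If you want to go one step further and remove the file once the passwords are stored somewhere safe, the following is a minimal sketch (assuming the standard credentials path used throughout this guide):
+
+```command
+# Overwrite the credentials file before unlinking it; fall back to plain rm if shred is unavailable:
+shred -u /home/$USERNAME/.credentials 2>/dev/null || rm -f /home/$USERNAME/.credentials
+```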
diff --git a/docs/products/tools/marketplace/guides/wordpress/index.md b/docs/products/tools/marketplace/guides/wordpress/index.md index 87a7824798d..0702f8c8d8e 100644 --- a/docs/products/tools/marketplace/guides/wordpress/index.md +++ b/docs/products/tools/marketplace/guides/wordpress/index.md @@ -63,7 +63,7 @@ Once the app has been *fully* deployed, you need to obtain the credentials from 1. Once logged in, access the credentials file by running the following command: ```command - cat /root/.credentials + cat /home/$USERNAME/.credentials ``` 1. This displays the passwords that were automatically generated when the instance was deployed. Once you save these passwords, you can safely delete this file. diff --git a/docs/release-notes/lke/v1.65.0.md b/docs/release-notes/lke/v1.65.0.md new file mode 100644 index 00000000000..53d96f794fd --- /dev/null +++ b/docs/release-notes/lke/v1.65.0.md @@ -0,0 +1,19 @@ +--- +title: Linode Kubernetes Engine v1.65.0 +date: 2024-03-10 +version: 1.65.0 +--- + +### Changed + +- Upgraded clusters using Kubernetes: + - 1.27 to patch version [1.27.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#v12711). + - 1.28 to patch version [1.28.7](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#v1287). +- Adjusted `terminated-pod-gc-threshold`: + - Details: + - **Change**: The `--terminated-pod-gc-threshold` setting in the `kube-controller-manager` has been reduced from its default value to 500 pods. + - **Context**: Previously, Kubernetes kept a large number of evicted and terminated pods. This could consume unnecessary resources and limit space for new pods. + - **Impact**: When the count of evicted and terminated pods exceeds 500, the oldest pods (first by eviction timestamp, then by creation timestamp) are deleted to maintain the threshold. This helps reclaim resources and improve cluster performance. + - **Resources**: + - [Kubernetes: Garbage collection of Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-garbage-collection) + - [Kubernetes: Command line reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/#:~:text=%2D%2Dterminated%2Dpod%2Dgc%2Dthreshold) \ No newline at end of file diff --git a/docs/release-notes/lke/v1.66.0.md b/docs/release-notes/lke/v1.66.0.md new file mode 100644 index 00000000000..856c6241e33 --- /dev/null +++ b/docs/release-notes/lke/v1.66.0.md @@ -0,0 +1,13 @@ +--- +title: Linode Kubernetes Engine v1.66.0 +date: 2024-03-11 +version: 1.66.0 +--- + +### Changed + +- Renamed the ConfigMap `kube-system/coredns` to `kube-system/coredns-base`. + +### Added + +- [CoreDNS configuration customization](/docs/products/compute/kubernetes/guides/coredns-custom-config/) capabilities via the `kube-system/coredns-custom` ConfigMap.
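+To see which pods currently count against the garbage-collection threshold described in v1.65.0, you can list terminated pods across all namespaces. This is a generic `kubectl` query (evicted pods surface in the `Failed` phase), not an LKE-specific command:
+
+```command
+kubectl get pods --all-namespaces --field-selector=status.phase=Failed
+```
+
+The `coredns-custom` ConfigMap can be inspected and managed with standard `kubectl` commands. The manifest below is a hypothetical sketch only: the `example.override` key and its contents are placeholders, so consult the CoreDNS customization guide linked above for the exact schema LKE expects before applying anything to a real cluster:
+
+```command
+# Inspect the custom ConfigMap if it already exists:
+kubectl -n kube-system get configmap coredns-custom -o yaml
+
+# Hypothetical example of supplying a custom configuration snippet:
+kubectl -n kube-system apply -f - <<'EOF'
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns-custom
+  namespace: kube-system
+data:
+  example.override: |
+    log
+EOF
+```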
\ No newline at end of file diff --git a/go.mod b/go.mod index ec2015010b0..4a0ac845af8 100644 --- a/go.mod +++ b/go.mod @@ -5,5 +5,5 @@ go 1.15 require ( github.com/hotwired/turbo v7.0.1+incompatible // indirect github.com/linode/linode-api-docs/v4 v4.173.0 // indirect - github.com/linode/linode-docs-theme v0.0.0-20240220192940-74515c307486 // indirect + github.com/linode/linode-docs-theme v0.0.0-20240319173150-b92334abe6d7 // indirect ) diff --git a/go.sum b/go.sum index 9b0c7205f33..83df9c4b68b 100644 --- a/go.sum +++ b/go.sum @@ -243,6 +243,10 @@ github.com/linode/linode-docs-theme v0.0.0-20240205205531-624134aa9cbe h1:kdDUp6 github.com/linode/linode-docs-theme v0.0.0-20240205205531-624134aa9cbe/go.mod h1:pLFUnAD7hJW1C2wheL3HqtWIN6Xy0kywHHf33YyfUTI= github.com/linode/linode-docs-theme v0.0.0-20240220192940-74515c307486 h1:F/AAEvsAzdEbRF+BqKxSVnls7Euco+F3TfiEPV40MM0= github.com/linode/linode-docs-theme v0.0.0-20240220192940-74515c307486/go.mod h1:pLFUnAD7hJW1C2wheL3HqtWIN6Xy0kywHHf33YyfUTI= +github.com/linode/linode-docs-theme v0.0.0-20240319122112-769eb2aadb29 h1:Uno6PIWA+qAP+yrFBLMthQVlD9um+4Pr3FmCIH0BGLE= +github.com/linode/linode-docs-theme v0.0.0-20240319122112-769eb2aadb29/go.mod h1:pLFUnAD7hJW1C2wheL3HqtWIN6Xy0kywHHf33YyfUTI= +github.com/linode/linode-docs-theme v0.0.0-20240319173150-b92334abe6d7 h1:foS1J4mEGMXhuIkzoc616xtENQpyeuYxKdeCq+OQjH0= +github.com/linode/linode-docs-theme v0.0.0-20240319173150-b92334abe6d7/go.mod h1:pLFUnAD7hJW1C2wheL3HqtWIN6Xy0kywHHf33YyfUTI= github.com/linode/linode-website-partials v0.0.0-20221205205120-b6ea1aaa59fb/go.mod h1:K1Em3lwb16JiCwNVftAFwWGhyB9Zkl/nXhxjBBUC1Ao= github.com/linode/linode-website-partials v0.0.0-20221222200538-99862e429110/go.mod h1:K1Em3lwb16JiCwNVftAFwWGhyB9Zkl/nXhxjBBUC1Ao= github.com/linode/linode-website-partials v0.0.0-20230201145731-a8703d0a954a/go.mod h1:K1Em3lwb16JiCwNVftAFwWGhyB9Zkl/nXhxjBBUC1Ao=