Commit
Merge branch 'master' of github.com:Altinity/clickhouse-backup
Slach committed Mar 2, 2024
2 parents 160861a + 25e80e4 commit 0e16d24
Showing 13 changed files with 166 additions and 113 deletions.
144 changes: 78 additions & 66 deletions ChangeLog.md

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions ReadMe.md
@@ -529,6 +529,8 @@ gcs:
credentials_file: "" # GCS_CREDENTIALS_FILE
credentials_json: "" # GCS_CREDENTIALS_JSON
credentials_json_encoded: "" # GCS_CREDENTIALS_JSON_ENCODED
skip_credentials: false # GCS_SKIP_CREDENTIALS, skip adding credentials to requests, allowing anonymous access to the bucket
endpoint: "" # GCS_ENDPOINT, use it for a custom GCS endpoint or GCS-compatible storage, for example a custom endpoint reached via Private Service Connect
bucket: "" # GCS_BUCKET
path: "" # GCS_PATH, `system.macros` values can be applied as {macro_name}
object_disk_path: "" # GCS_OBJECT_DISK_PATH, path for backups of parts from an `s3` object disk (ClickHouse supports GCS only via the S3 protocol); if such a disk is present, the value shall be non-empty and shall not be prefixed by `path`
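The two new options are designed to combine: a custom endpoint plus skip_credentials gives anonymous access to a GCS-compatible emulator or a public bucket. A minimal Go sketch of the equivalent client setup (the endpoint and bucket values are illustrative, not from this commit):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Equivalent of endpoint: "http://gcs:8080/storage/v1/" plus
	// skip_credentials: true — no credentials are attached to requests.
	client, err := storage.NewClient(ctx,
		option.WithEndpoint("http://localhost:8080/storage/v1/"), // illustrative endpoint
		option.WithoutAuthentication(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// List objects anonymously; works against fake-gcs-server or a public bucket.
	it := client.Bucket("my-public-bucket").Objects(ctx, nil) // illustrative bucket
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}
}
```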
7 changes: 4 additions & 3 deletions pkg/backup/restore.go
@@ -862,10 +862,8 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
dbAndTableDir := path.Join(common.TablePathEncode(backupTable.Database), common.TablePathEncode(backupTable.Table))
ctx, cancel := context.WithCancel(ctx)
defer cancel()

var err error
if err = config.ValidateObjectDiskConfig(b.cfg); err != nil {
return err
}
for diskName, parts := range backupTable.Parts {
diskType, exists := diskTypes[diskName]
if !exists {
@@ -889,6 +887,9 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
}
isObjectDisk := b.isDiskTypeObject(diskType)
if isObjectDisk || isObjectDiskEncrypted {
if err = config.ValidateObjectDiskConfig(b.cfg); err != nil {
return err
}
if _, exists := diskMap[diskName]; !exists {
for _, part := range parts {
if part.RebalancedDisk != "" {
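The net effect of this hunk: ValidateObjectDiskConfig moves from the top of downloadObjectDiskParts into the branch that actually handles object disks, so restores touching only local disks no longer fail when object-disk settings are absent. Schematically (a condensed sketch of the control flow, not the full function):

```go
for diskName, parts := range backupTable.Parts {
	diskType := diskTypes[diskName]
	if b.isDiskTypeObject(diskType) || isObjectDiskEncrypted {
		// Validate lazily: the object-disk config is only required
		// once an object (or encrypted object) disk is encountered.
		if err := config.ValidateObjectDiskConfig(b.cfg); err != nil {
			return err
		}
		// ... download parts from the object disk ...
	}
	_ = parts // local-disk parts are handled elsewhere in the full function
}
```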
1 change: 1 addition & 0 deletions pkg/config/config.go
@@ -69,6 +69,7 @@ type GCSConfig struct {
CredentialsFile string `yaml:"credentials_file" envconfig:"GCS_CREDENTIALS_FILE"`
CredentialsJSON string `yaml:"credentials_json" envconfig:"GCS_CREDENTIALS_JSON"`
CredentialsJSONEncoded string `yaml:"credentials_json_encoded" envconfig:"GCS_CREDENTIALS_JSON_ENCODED"`
SkipCredentials bool `yaml:"skip_credentials" envconfig:"GCS_SKIP_CREDENTIALS"`
Bucket string `yaml:"bucket" envconfig:"GCS_BUCKET"`
Path string `yaml:"path" envconfig:"GCS_PATH"`
ObjectDiskPath string `yaml:"object_disk_path" envconfig:"GCS_OBJECT_DISK_PATH"`
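The struct tags above match the kelseyhightower/envconfig convention, so the new flag is settable from the environment as well as from YAML. A minimal sketch, assuming an empty prefix because the tags carry the full variable names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/kelseyhightower/envconfig"
)

// Trimmed copy of GCSConfig, for illustration only.
type GCSConfig struct {
	SkipCredentials bool   `envconfig:"GCS_SKIP_CREDENTIALS"`
	Bucket          string `envconfig:"GCS_BUCKET"`
}

func main() {
	// Try: GCS_SKIP_CREDENTIALS=true GCS_BUCKET=altinity-qa-test go run .
	var cfg GCSConfig
	if err := envconfig.Process("", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {SkipCredentials:true Bucket:altinity-qa-test}
}
```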
7 changes: 5 additions & 2 deletions pkg/storage/gcs.go
@@ -90,15 +90,18 @@ func (gcs *GCS) Connect(ctx context.Context) error {

if gcs.Config.Endpoint != "" {
endpoint = gcs.Config.Endpoint
clientOptions = append([]option.ClientOption{option.WithoutAuthentication()}, clientOptions...)
clientOptions = append(clientOptions, option.WithEndpoint(endpoint))
} else if gcs.Config.CredentialsJSON != "" {
}

if gcs.Config.CredentialsJSON != "" {
clientOptions = append(clientOptions, option.WithCredentialsJSON([]byte(gcs.Config.CredentialsJSON)))
} else if gcs.Config.CredentialsJSONEncoded != "" {
d, _ := base64.StdEncoding.DecodeString(gcs.Config.CredentialsJSONEncoded)
clientOptions = append(clientOptions, option.WithCredentialsJSON(d))
} else if gcs.Config.CredentialsFile != "" {
clientOptions = append(clientOptions, option.WithCredentialsFile(gcs.Config.CredentialsFile))
} else if gcs.Config.SkipCredentials {
clientOptions = append(clientOptions, option.WithoutAuthentication())
}

if gcs.Config.ForceHttp {
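Two behavioral changes land here: a custom endpoint no longer implies WithoutAuthentication (previously the endpoint branch prepended it unconditionally), and anonymous access is now an explicit opt-in at the end of the credential chain. A condensed sketch of the resulting precedence (the helper name is hypothetical; the real code appends to clientOptions inline, and GCSConfig is the struct from pkg/config above):

```go
import (
	"encoding/base64"

	"google.golang.org/api/option"
)

// credentialOptions mirrors the new precedence: explicit JSON wins, then
// base64-encoded JSON, then a credentials file; anonymous access only when
// skip_credentials is set. With nothing set, the client falls back to
// Application Default Credentials.
func credentialOptions(cfg GCSConfig) []option.ClientOption {
	switch {
	case cfg.CredentialsJSON != "":
		return []option.ClientOption{option.WithCredentialsJSON([]byte(cfg.CredentialsJSON))}
	case cfg.CredentialsJSONEncoded != "":
		d, _ := base64.StdEncoding.DecodeString(cfg.CredentialsJSONEncoded) // error ignored, as in the original
		return []option.ClientOption{option.WithCredentialsJSON(d)}
	case cfg.CredentialsFile != "":
		return []option.ClientOption{option.WithCredentialsFile(cfg.CredentialsFile)}
	case cfg.SkipCredentials:
		return []option.ClientOption{option.WithoutAuthentication()}
	default:
		return nil
	}
}
```

Decoupling endpoint and authentication means a private GCS-compatible endpoint (e.g. via Private Service Connect) can now be used with real credentials, which the old prepend-WithoutAuthentication behavior made impossible.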
30 changes: 30 additions & 0 deletions test/integration/config-gcs-custom-endpoint.yml
@@ -0,0 +1,30 @@
general:
disable_progress_bar: true
remote_storage: gcs
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
- "_temporary_and_external_tables.*"
restore_schema_on_cluster: "{cluster}"
clickhouse:
host: clickhouse
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 5s
restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; exec:ls -la /var/lib/clickhouse/access; sql:SYSTEM SHUTDOWN"
# restart_command: bash -c 'echo "FAKE RESTART"'
backup_mutations: true
gcs:
bucket: altinity-qa-test
path: backup/{cluster}/{shard}
object_disk_path: object_disks/{cluster}/{shard}
compression_format: tar
endpoint: http://gcs:8080/storage/v1/
skip_credentials: true
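As an aside, the Go storage client can also reach fake-gcs-server without any endpoint setting via the STORAGE_EMULATOR_HOST environment variable — the mechanism the commented-out entries dropped from the compose files below referred to. A minimal sketch (the host matches the gcs compose service; bucket name matches this test config):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	// With STORAGE_EMULATOR_HOST set, the client talks plain HTTP to the
	// emulator and skips real credential lookup.
	os.Setenv("STORAGE_EMULATOR_HOST", "gcs:8080")

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.Bucket("altinity-qa-test").Objects(ctx, nil)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}
}
```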
31 changes: 14 additions & 17 deletions test/integration/docker-compose.yml
@@ -43,17 +43,19 @@ services:
- clickhouse-backup

# todo need to reproduce download after upload
# gcs:
# image: fsouza/fake-gcs-server:latest
# hostname: gcs
# entrypoint:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

gcs:
image: fsouza/fake-gcs-server:latest
hostname: gcs
container_name: gcs
entrypoint:
- /bin/sh
command:
- -c
- "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080"
networks:
- clickhouse-backup
environment:
QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}"

azure:
image: mcr.microsoft.com/azure-storage/azurite:latest
@@ -126,9 +128,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
@@ -166,9 +165,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
@@ -196,6 +192,7 @@ services:
- ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml
- ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml
- ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml
- ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml
- ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml
- ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml
- ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template
31 changes: 14 additions & 17 deletions test/integration/docker-compose_advanced.yml
@@ -57,17 +57,19 @@ services:
- clickhouse-backup

# todo need to reproduce download after upload
# gcs:
# image: fsouza/fake-gcs-server:latest
# hostname: gcs
# entrypoint:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

gcs:
image: fsouza/fake-gcs-server:latest
hostname: gcs
container_name: gcs
entrypoint:
- /bin/sh
command:
- -c
- "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080"
networks:
- clickhouse-backup
environment:
QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}"

azure:
image: mcr.microsoft.com/azure-storage/azurite:latest
@@ -177,9 +179,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
@@ -217,9 +216,6 @@ services:
AZBLOB_DEBUG: "${AZBLOB_DEBUG:-false}"
CLICKHOUSE_DEBUG: "${CLICKHOUSE_DEBUG:-false}"
GOCOVERDIR: "/tmp/_coverage_/"
# fake-gcs-server
# STORAGE_EMULATOR_HOST: "http://gsc:8080"
# GOOGLE_API_USE_CLIENT_CERTIFICATE: "false"
# FIPS
QA_AWS_ACCESS_KEY: ${QA_AWS_ACCESS_KEY}
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
@@ -254,6 +250,7 @@ services:
- ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml
- ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml
- ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml
- ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml
- ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml
- ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml
- ./config-s3-fips.yml:/etc/clickhouse-backup/config-s3-fips.yml.template
3 changes: 3 additions & 0 deletions test/integration/dynamic_settings.sh 100644 → 100755
@@ -396,6 +396,9 @@ fi
# zookeeper RBAC available from 21.9
if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.9 || "${CLICKHOUSE_VERSION}" =~ ^21\.1[0-9] || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[1-9] ]]; then

mkdir -p /var/lib/clickhouse/access
chown clickhouse:clickhouse /var/lib/clickhouse/access

cat <<EOT > /etc/clickhouse-server/config.d/replicated_user_directories.xml
<yandex>
<user_directories replace="replace">
19 changes: 13 additions & 6 deletions test/integration/integration_test.go
@@ -1600,7 +1600,7 @@ func TestFIPS(t *testing.T) {
installDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git")
r.NoError(dockerExec("clickhouse", "update-ca-certificates"))
r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template"))
r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl"))
r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "--branch", "v3.2rc3", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl"))
r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh"))

generateCerts := func(certType, keyLength, curveType string) {
@@ -1659,7 +1659,7 @@ func TestFIPS(t *testing.T) {

r.NoError(dockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172"))
r.NoError(dockerExec("clickhouse", "cat", "/tmp/testssl.csv"))
out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -o -E '%s' /tmp/testssl.csv | wc -l", strings.Join(cipherList, "|")))
out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -o -E '%s' /tmp/testssl.csv | uniq | wc -l", strings.Join(cipherList, "|")))
r.NoError(err)
r.Equal(strconv.Itoa(len(cipherList)), strings.Trim(out, " \t\r\n"))

@@ -1720,6 +1720,15 @@ func TestIntegrationGCS(t *testing.T) {
runMainIntegrationScenario(t, "GCS", "config-gcs.yml")
}

func TestIntegrationGCSWithCustomEndpoint(t *testing.T) {
if isTestShouldSkip("GCS_TESTS") {
t.Skip("Skipping GCS_EMULATOR integration tests...")
return
}
//t.Parallel()
runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml")
}

func TestIntegrationSFTPAuthPassword(t *testing.T) {
//t.Parallel()
runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml")
@@ -2110,7 +2119,6 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag
}
if remoteStorageType == "SFTP" {
checkRemoteDir("total 0", "sshd", "bash", "-c", "ls -lh /root/")

}
if remoteStorageType == "FTP" {
if strings.Contains(os.Getenv("COMPOSE_FILE"), "advanced") {
@@ -2119,9 +2127,8 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag
checkRemoteDir("total 0", "ftp", "bash", "-c", "ls -lh /home/vsftpd/test_backup/backup/")
}
}
//todo check gcs backup is empty
if remoteStorageType == "GCS" {

if remoteStorageType == "GCS_EMULATOR" {
checkRemoteDir("total 0", "gcs", "sh", "-c", "ls -lh /data/altinity-qa-test/")
}
}

Empty file.
