Closed 1Const1 closed 10 months ago
could you share
kubectl exec -n <your-namespace> <chi-pod-name-0-0-0> -c clickhouse-backup -- clickhouse-backup print-config
?
config
```yaml
general:
remote_storage: s3
max_file_size: 0
disable_progress_bar: true
backups_to_keep_local: 0
backups_to_keep_remote: 10
log_level: debug
allow_empty_backups: true
download_concurrency: 4
upload_concurrency: 2
use_resumable_state: true
restore_schema_on_cluster: ""
upload_by_part: true
download_by_part: true
restore_database_mapping: {}
retries_on_failure: 3
retries_pause: 30s
watch_interval: 1h
full_interval: 24h
watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}
sharded_operation_mode: ""
cpu_nice_priority: 15
io_nice_priority: idle
retriesduration: 30s
watchduration: 1h0m0s
fullduration: 24h0m0s
clickhouse:
username: default
password: ""
host: localhost
port: 9000
disk_mapping: {}
skip_tables:
- system.*
- INFORMATION_SCHEMA.*
- information_schema.*
- _temporary_and_external_tables.*
skip_table_engines: []
timeout: 5m
freeze_by_part: false
freeze_by_part_where: ""
use_embedded_backup_restore: false
embedded_backup_disk: ""
backup_mutations: true
restore_as_attach: false
check_parts_columns: true
secure: false
skip_verify: false
sync_replicated_tables: false
log_sql_queries: true
config_dir: /etc/clickhouse-server/
restart_command: exec:systemctl restart clickhouse-server
ignore_not_exists_error_during_freeze: true
check_replicas_before_attach: true
tls_key: ""
tls_cert: ""
tls_ca: ""
debug: false
s3:
access_key: ""
secret_key: secret
bucket: clickhouse-backup
endpoint: http://minio:9000
region: ""
acl: ""
assume_role_arn: ""
force_path_style: true
path: backup/shard-{shard}
object_disk_path: ""
disable_ssl: true
compression_level: 1
compression_format: tar
sse: ""
sse_kms_key_id: ""
sse_customer_algorithm: ""
sse_customer_key: ""
sse_customer_key_md5: ""
sse_kms_encryption_context: ""
disable_cert_verification: false
use_custom_storage_class: false
storage_class: STANDARD
custom_storage_class_map: {}
concurrency: 5
part_size: 0
max_parts_count: 5000
allow_multipart_download: false
object_labels: {}
request_payer: ""
debug: true
gcs:
credentials_file: ""
credentials_json: ""
credentials_json_encoded: ""
bucket: ""
path: ""
object_disk_path: ""
compression_level: 1
compression_format: tar
debug: false
force_http: false
endpoint: ""
storage_class: STANDARD
object_labels: {}
custom_storage_class_map: {}
client_pool_size: 12
cos:
url: ""
timeout: 2m
secret_id: ""
secret_key: ""
path: ""
compression_format: tar
compression_level: 1
debug: false
api:
listen: 0.0.0.0:7171
enable_metrics: true
enable_pprof: false
username: ""
password: ""
secure: false
certificate_file: ""
private_key_file: ""
ca_cert_file: ""
ca_key_file: ""
create_integration_tables: true
integration_tables_host: ""
allow_parallel: false
complete_resumable_after_restart: true
ftp:
address: ""
timeout: 2m
username: ""
password: ""
tls: false
skip_tls_verify: false
path: ""
object_disk_path: ""
compression_format: tar
compression_level: 1
concurrency: 5
debug: false
sftp:
address: ""
port: 22
username: ""
password: ""
key: ""
path: ""
object_disk_path: ""
compression_format: tar
compression_level: 1
concurrency: 5
debug: false
azblob:
endpoint_schema: https
endpoint_suffix: core.windows.net
account_name: ""
account_key: ""
sas: ""
use_managed_identity: false
container: ""
path: ""
object_disk_path: ""
compression_level: 1
compression_format: tar
sse_key: ""
buffer_size: 0
buffer_count: 3
max_parts_count: 5000
timeout: 15m
custom:
upload_command: ""
download_command: ""
list_command: ""
delete_command: ""
command_timeout: 4h
commandtimeoutduration: 4h0m0s
```
Thank you for the fast reply.
I see that the env variable S3_ACCESS_KEY
is not working correctly, but I don't know why.
The original issue was resolved with the env fix — thank you.
But now another error has come up.
config
```yaml
general:
remote_storage: s3
max_file_size: 0
disable_progress_bar: true
backups_to_keep_local: 0
backups_to_keep_remote: 10
log_level: debug
allow_empty_backups: true
download_concurrency: 4
upload_concurrency: 2
use_resumable_state: true
restore_schema_on_cluster: ""
upload_by_part: true
download_by_part: true
restore_database_mapping: {}
retries_on_failure: 3
retries_pause: 30s
watch_interval: 1h
full_interval: 24h
watch_backup_name_template: shard{shard}-{type}-{time:20060102150405}
sharded_operation_mode: ""
cpu_nice_priority: 15
io_nice_priority: idle
retriesduration: 30s
watchduration: 1h0m0s
fullduration: 24h0m0s
clickhouse:
username: default
password: ""
host: localhost
port: 9000
disk_mapping: {}
skip_tables:
- system.*
- INFORMATION_SCHEMA.*
- information_schema.*
- _temporary_and_external_tables.*
skip_table_engines: []
timeout: 5m
freeze_by_part: false
freeze_by_part_where: ""
use_embedded_backup_restore: false
embedded_backup_disk: ""
backup_mutations: true
restore_as_attach: false
check_parts_columns: true
secure: false
skip_verify: false
sync_replicated_tables: false
log_sql_queries: true
config_dir: /etc/clickhouse-server/
restart_command: exec:systemctl restart clickhouse-server
ignore_not_exists_error_during_freeze: true
check_replicas_before_attach: true
tls_key: ""
tls_cert: ""
tls_ca: ""
debug: false
s3:
access_key: access
secret_key: secret
bucket: clickhouse-backup
endpoint: https://minio:9443
region: ""
acl: ""
assume_role_arn: ""
force_path_style: true
path: backup/shard-{shard}
object_disk_path: ""
disable_ssl: false
compression_level: 1
compression_format: tar
sse: ""
sse_kms_key_id: ""
sse_customer_algorithm: ""
sse_customer_key: ""
sse_customer_key_md5: ""
sse_kms_encryption_context: ""
disable_cert_verification: false
use_custom_storage_class: false
storage_class: STANDARD
custom_storage_class_map: {}
concurrency: 5
part_size: 0
max_parts_count: 5000
allow_multipart_download: false
object_labels: {}
request_payer: ""
debug: true
gcs:
credentials_file: ""
credentials_json: ""
credentials_json_encoded: ""
bucket: ""
path: ""
object_disk_path: ""
compression_level: 1
compression_format: tar
debug: false
force_http: false
endpoint: ""
storage_class: STANDARD
object_labels: {}
custom_storage_class_map: {}
client_pool_size: 12
cos:
url: ""
timeout: 2m
secret_id: ""
secret_key: ""
path: ""
compression_format: tar
compression_level: 1
debug: false
api:
listen: 0.0.0.0:7171
enable_metrics: true
enable_pprof: false
username: ""
password: ""
secure: false
certificate_file: ""
private_key_file: ""
ca_cert_file: ""
ca_key_file: ""
create_integration_tables: true
integration_tables_host: ""
allow_parallel: false
complete_resumable_after_restart: true
ftp:
address: ""
timeout: 2m
username: ""
password: ""
tls: false
skip_tls_verify: false
path: ""
object_disk_path: ""
compression_format: tar
compression_level: 1
concurrency: 5
debug: false
sftp:
address: ""
port: 22
username: ""
password: ""
key: ""
path: ""
object_disk_path: ""
compression_format: tar
compression_level: 1
concurrency: 5
debug: false
azblob:
endpoint_schema: https
endpoint_suffix: core.windows.net
account_name: ""
account_key: ""
sas: ""
use_managed_identity: false
container: ""
path: ""
object_disk_path: ""
compression_level: 1
compression_format: tar
sse_key: ""
buffer_size: 0
buffer_count: 3
max_parts_count: 5000
timeout: 15m
custom:
upload_command: ""
download_command: ""
list_command: ""
delete_command: ""
command_timeout: 4h
commandtimeoutduration: 4h0m0s
```
The backup pod log shows it trying HTTPS and HTTP, without any luck.
Log with TLS:
[2023/12/06 13:58:18.555165 info [s3:DEBUG] Request
HEAD /clickhouse-backup/backup/shard-0/clickhouse.demo.svc-full-2023-12-06-13-56-48/shadow/az_stocks/Empty_table/default_all_42_42_0.tar HTTP/1.1
Host: minio:9443
User-Agent: aws-sdk-go-v2/1.23.5 os/linux lang/go#1.21.4 md/GOOS#linux md/GOARCH#amd64 api/s3#1.47.2
Accept-Encoding: identity
Amz-Sdk-Invocation-Id: 140eebfa-862c-4c7d-b036-083ea8a82cf7
Amz-Sdk-Request: attempt=1; max=3
Authorization: AWS4-HMAC-SHA256 Credential=0cae5557-77e0-47ce-a31b-022095b1ad6c/20231206//s3/aws4_request, SignedHeaders=accept-encoding;amz-sdk-invocation-id;host;x-amz-content-sha256;x-amz-date, Signature=8368690fb3e9ac4883450080a71b2d1caefc17e225a8733c301aa5e0103d7620
X-Amz-Content-Sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
X-Amz-Date: 20231206T135818Z
2023/12/06 13:58:18.560776 info [s3:DEBUG] Response
HTTP/1.0 400 Bad Request
Connection: close
tls: failed to verify certificate: x509: certificate is valid for minio
How can I add a trusted TLS certificate to the client, or disable TLS verification? Or how can I add a trusted certificate via an ENV variable in that case?
The MinIO API doesn't work without TLS on port 9000 in newer MinIO versions.
Without TLS I'm getting a 400 error.
Log without TLS:
2023/12/06 14:14:10.391687 info [s3:DEBUG] request failed with unretryable error https response error StatusCode: 400, RequestID: , HostID: , api error BadRequest: Bad Request
OK, I found the ENV variable S3_DISABLE_CERT_VERIFICATION in the source code,
and it worked for me.
OK. Please confirm that S3_DISABLE_CERT_VERIFICATION
works for you.
Hello.
I'm trying to back up to S3 (MinIO) in a Kubernetes cluster.
I'm getting this error:
This is S3 MinIO in a Kubernetes cluster — it should not use 169.254.169.254 to request any data, nor an EC2 role.
Please give some recommendations on how to solve this with MinIO S3.