-
Notifications
You must be signed in to change notification settings - Fork 462
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
clickhouse installation: StatefulSet pod stuck in CrashLoopBackOff
with log `filesystem error: in weakly_canonical`
#1509
Comments
Could you share your ClickHouseInstallation manifest (`kubectl get chi -o yaml`)?
@Slach sure thing. kubectl get chi -n clickhouse clickhouse-installation -o yaml
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"clickhouse.altinity.com/v1","kind":"ClickHouseInstallation","metadata":{"annotations":{},"name":"clickhouse-installation","namespace":"clickhouse"},"spec":{"configuration":{"clusters":[{"layout":{"replicasCount":3,"shardsCount":3},"name":"simple","templates":{"podTemplate":"clickhouse:24.3.7","serviceTemplate":"clickhouse:24.3.7"}}],"files":{"config.d/disks.xml":"\u003cclickhouse\u003e\n \u003cstorage_configuration\u003e\n \u003cdisks\u003e\n \u003cs3_disk\u003e\n \u003ctype\u003es3\u003c/type\u003e\n \u003cendpoint\u003ehttps://company-clickhouse-env.s3.amazonaws.com/tables/\u003c/endpoint\u003e\n \u003cuse_environment_credentials\u003etrue\u003c/use_environment_credentials\u003e\n \u003cmetadata_path\u003e/var/lib/clickhouse/disks/s3_disk/\u003c/metadata_path\u003e\n \u003c/s3_disk\u003e\n \u003cs3_cache\u003e\n \u003ctype\u003ecache\u003c/type\u003e\n \u003cdisk\u003es3_disk\u003c/disk\u003e\n \u003cpath\u003e/var/lib/clickhouse/disks/s3_cache/\u003c/path\u003e\n \u003cmax_size\u003e10Gi\u003c/max_size\u003e\n \u003c/s3_cache\u003e\n \u003c/disks\u003e\n \u003cpolicies\u003e\n \u003cs3_main\u003e\n \u003cvolumes\u003e\n \u003cmain\u003e\n \u003cdisk\u003es3_disk\u003c/disk\u003e\n \u003c/main\u003e\n \u003c/volumes\u003e\n \u003c/s3_main\u003e\n \u003c/policies\u003e\n \u003c/storage_configuration\u003e\n\u003c/clickhouse\u003e\n","config.d/s3.xml":"\u003cclickhouse\u003e\n \u003cs3\u003e\n \u003cuse_environment_credentials\u003etrue\u003c/use_environment_credentials\u003e\n 
\u003c/s3\u003e\n\u003c/clickhouse\u003e\n"},"users":{"username/networks/ip":["0.0.0.0/0"],"username/password_sha256_hex":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}},"defaults":{"templates":{"dataVolumeClaimTemplate":"data-volume-template","logVolumeClaimTemplate":"log-volume-template","serviceTemplate":"clickhouse:24.3.7"}},"templates":{"podTemplates":[{"name":"clickhouse:24.3.7","spec":{"containers":[{"env":[{"name":"CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS","value":"true"}],"image":"clickhouse/clickhouse-server:24.3.7","name":"clickhouse","volumeMounts":[{"mountPath":"/var/lib/clickhouse","name":"data-volume-template"},{"mountPath":"/var/log/clickhouse-server","name":"log-volume-template"},{"mountPath":"/docker-entrypoint-initdb.d","name":"bootstrap-configmap-volume"}]}],"nodeSelector":{"clickhouse-installation":"true"},"tolerations":[{"effect":"NoSchedule","key":"installation","operator":"Equal","value":"clickhouse-installation"}],"volumes":[{"configMap":{"name":"bootstrap-configmap"},"name":"bootstrap-configmap-volume"}]}}],"serviceTemplates":[{"metadata":{"annotations":{"external-dns.alpha.kubernetes.io/internal-hostname":"clickhouse.company.us-west-2.env.company.cloud","external-dns.alpha.kubernetes.io/ttl":"60"}},"name":"clickhouse:24.3.7","spec":{"ports":[{"name":"http","port":8123},{"name":"client","port":9000}]}}],"volumeClaimTemplates":[{"name":"data-volume-template","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}}},{"name":"log-volume-template","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"4Gi"}}}}]}}}
creationTimestamp: "2024-08-21T13:49:55Z"
finalizers:
- finalizer.clickhouseinstallation.altinity.com
generation: 1
name: clickhouse-installation
namespace: clickhouse
resourceVersion: "415622675"
uid: 75211649-036d-4cb3-a535-d9a4bab082c0
spec:
configuration:
clusters:
- layout:
replicasCount: 3
shardsCount: 3
name: simple
templates:
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
files:
config.d/disks.xml: |
<clickhouse>
<storage_configuration>
<disks>
<s3_disk>
<type>s3</type>
<endpoint>https://company-clickhouse-env.s3.amazonaws.com/tables/</endpoint>
<use_environment_credentials>true</use_environment_credentials>
<metadata_path>/var/lib/clickhouse/disks/s3_disk/</metadata_path>
</s3_disk>
<s3_cache>
<type>cache</type>
<disk>s3_disk</disk>
<path>/var/lib/clickhouse/disks/s3_cache/</path>
<max_size>10Gi</max_size>
</s3_cache>
</disks>
<policies>
<s3_main>
<volumes>
<main>
<disk>s3_disk</disk>
</main>
</volumes>
</s3_main>
</policies>
</storage_configuration>
</clickhouse>
config.d/s3.xml: |
<clickhouse>
<s3>
<use_environment_credentials>true</use_environment_credentials>
</s3>
</clickhouse>
users:
username/networks/ip:
- 0.0.0.0/0
username/password_sha256_hex: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
defaults:
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
serviceTemplate: clickhouse:24.3.7
templates:
podTemplates:
- name: clickhouse:24.3.7
spec:
containers:
- env:
- name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
value: "true"
image: clickhouse/clickhouse-server:24.3.7
name: clickhouse
volumeMounts:
- mountPath: /var/lib/clickhouse
name: data-volume-template
- mountPath: /var/log/clickhouse-server
name: log-volume-template
- mountPath: /docker-entrypoint-initdb.d
name: bootstrap-configmap-volume
nodeSelector:
clickhouse-installation: "true"
tolerations:
- effect: NoSchedule
key: installation
operator: Equal
value: clickhouse-installation
volumes:
- configMap:
name: bootstrap-configmap
name: bootstrap-configmap-volume
serviceTemplates:
- metadata:
annotations:
external-dns.alpha.kubernetes.io/internal-hostname: clickhouse.company.us-west-2.env.company.cloud
external-dns.alpha.kubernetes.io/ttl: "60"
name: clickhouse:24.3.7
spec:
ports:
- name: http
port: 8123
- name: client
port: 9000
volumeClaimTemplates:
- name: data-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
- name: log-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
status:
action: Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
actions:
- '2024-09-13T12:35:44.391764656Z reconcile completed successfully, task id: 520089d3-fa7e-4d8d-932c-2f5dafc1b21b'
- '2024-09-13T12:07:33.838481812Z reconcile started, task id: 520089d3-fa7e-4d8d-932c-2f5dafc1b21b'
- '2024-09-13T03:45:34.579600835Z reconcile completed successfully, task id: f5a11dd9-268b-4c9a-ab70-9c9a92de40b8'
- '2024-09-13T03:31:05.141469604Z reconcile started, task id: f5a11dd9-268b-4c9a-ab70-9c9a92de40b8'
- '2024-09-12T12:37:54.81024906Z reconcile completed successfully, task id: 49d76b1f-d2ff-43f3-b016-355f021bc573'
- '2024-09-12T12:21:41.717022529Z reconcile started, task id: 49d76b1f-d2ff-43f3-b016-355f021bc573'
- '2024-09-12T12:07:27.264047468Z reconcile started, task id: 2acada9a-e3ba-4234-ac65-94d16d402ba3'
- '2024-09-12T10:47:40.524694896Z reconcile completed successfully, task id: 379fea7c-8cb8-4757-ab0c-ba130b30ea04'
- '2024-09-12T09:44:30.147849804Z reconcile started, task id: 379fea7c-8cb8-4757-ab0c-ba130b30ea04'
- '2024-09-12T02:09:56.90045236Z reconcile completed successfully, task id: 28e6faf5-a626-4822-878f-ebda6950bf31'
chop-commit: 2dd9eca
chop-date: 2024-08-12T14:15:16
chop-ip: 10.132.130.39
chop-version: 0.23.7
clusters: 1
endpoint: clickhouse-clickhouse-installation.clickhouse.svc.cluster.local
errors:
- '2024-09-13T13:04:45.448738646Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-13T13:04:43.452622218Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
- '2024-09-13T12:44:43.6014188Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-13T12:44:41.60058812Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
- '2024-09-13T12:18:01.035018186Z Update Service: clickhouse/clickhouse-clickhouse-installation
failed with error: just recreate the service in case of service type change ''ClusterIP''=>'''''
- '2024-09-13T09:34:58.265686457Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-13T09:34:56.26912994Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
- '2024-09-13T09:34:52.468149832Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-13T09:34:50.470358556Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
- '2024-09-13T04:36:35.263627446Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-13T04:36:33.263382059Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
- '2024-09-13T03:32:27.328779347Z Update Service: clickhouse/clickhouse-clickhouse-installation
failed with error: just recreate the service in case of service type change ''ClusterIP''=>'''''
- '2024-09-12T14:03:21.848868176Z FAILED to reconcile ConfigMap: chi-clickhouse-installation-common-usersd
CHI: clickhouse-installation '
- '2024-09-12T14:03:19.848689143Z Update ConfigMap clickhouse/chi-clickhouse-installation-common-usersd
failed with error client rate limiter Wait returned an error: context canceled'
fqdns:
- chi-clickhouse-installation-simple-0-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-0-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-0-2.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-2.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-2.clickhouse.svc.cluster.local
hosts: 9
hostsWithTablesCreated:
- chi-clickhouse-installation-simple-0-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-0-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-0-2.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-0.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-1.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-2-2.clickhouse.svc.cluster.local
- chi-clickhouse-installation-simple-1-2.clickhouse.svc.cluster.local
normalized:
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
creationTimestamp: "2024-08-21T13:49:55Z"
finalizers:
- finalizer.clickhouseinstallation.altinity.com
generation: 1
name: clickhouse-installation
namespace: clickhouse
resourceVersion: "415622632"
uid: 75211649-036d-4cb3-a535-d9a4bab082c0
spec:
configuration:
clusters:
- layout:
replicas:
- name: "0"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- name: "1"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- name: "2"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
shards:
- internalReplication: "True"
name: "0"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- internalReplication: "True"
name: "1"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- internalReplication: "True"
name: "2"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
name: simple
schemaPolicy:
replica: All
shard: All
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
files:
config.d/disks.xml: |
<clickhouse>
<storage_configuration>
<disks>
<s3_disk>
<type>s3</type>
<endpoint>https://company-clickhouse-env.s3.amazonaws.com/tables/</endpoint>
<use_environment_credentials>true</use_environment_credentials>
<metadata_path>/var/lib/clickhouse/disks/s3_disk/</metadata_path>
</s3_disk>
<s3_cache>
<type>cache</type>
<disk>s3_disk</disk>
<path>/var/lib/clickhouse/disks/s3_cache/</path>
<max_size>10Gi</max_size>
</s3_cache>
</disks>
<policies>
<s3_main>
<volumes>
<main>
<disk>s3_disk</disk>
</main>
</volumes>
</s3_main>
</policies>
</storage_configuration>
</clickhouse>
config.d/s3.xml: |
<clickhouse>
<s3>
<use_environment_credentials>true</use_environment_credentials>
</s3>
</clickhouse>
users:
clickhouse_operator/networks/ip:
- 10.132.130.39
clickhouse_operator/password_sha256_hex: 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448
clickhouse_operator/profile: clickhouse_operator
default/networks/host_regexp: (chi-clickhouse-installation-[^.]+\d+-\d+|clickhouse\-clickhouse-installation)\.clickhouse\.svc\.cluster\.local$
default/networks/ip:
- ::1
- 127.0.0.1
- 10.132.130.206
- 10.132.130.78
- 10.132.130.47
- 10.132.130.108
- 10.132.130.241
- 10.132.130.26
- 10.132.130.249
- 10.132.130.231
- 10.132.130.150
default/profile: default
default/quota: default
username/networks/host_regexp: (chi-clickhouse-installation-[^.]+\d+-\d+|clickhouse\-clickhouse-installation)\.clickhouse\.svc\.cluster\.local$
username/networks/ip:
- ::1
- 127.0.0.1
- 0.0.0.0/0
username/password_sha256_hex: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username/profile: default
username/quota: default
defaults:
replicasUseFQDN: "False"
storageManagement: {}
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
serviceTemplate: clickhouse:24.3.7
reconciling:
cleanup:
reconcileFailedObjects:
configMap: Retain
pvc: Retain
service: Retain
statefulSet: Retain
unknownObjects:
configMap: Delete
pvc: Delete
service: Delete
statefulSet: Delete
configMapPropagationTimeout: 10
policy: unspecified
stop: "False"
taskID: de831f6d-5d63-4370-be33-77dd5fac449e
templates:
PodTemplatesIndex: {}
ServiceTemplatesIndex: {}
VolumeClaimTemplatesIndex: {}
podTemplates:
- metadata:
creationTimestamp: null
name: clickhouse:24.3.7
spec:
containers:
- env:
- name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
value: "true"
image: clickhouse/clickhouse-server:24.3.7
name: clickhouse
resources: {}
volumeMounts:
- mountPath: /var/lib/clickhouse
name: data-volume-template
- mountPath: /var/log/clickhouse-server
name: log-volume-template
- mountPath: /docker-entrypoint-initdb.d
name: bootstrap-configmap-volume
nodeSelector:
clickhouse-installation: "true"
tolerations:
- effect: NoSchedule
key: installation
operator: Equal
value: clickhouse-installation
volumes:
- configMap:
name: bootstrap-configmap
name: bootstrap-configmap-volume
zone: {}
serviceTemplates:
- metadata:
annotations:
external-dns.alpha.kubernetes.io/internal-hostname: clickhouse.company.us-west-2.env.company.cloud
external-dns.alpha.kubernetes.io/ttl: "60"
creationTimestamp: null
name: clickhouse:24.3.7
spec:
ports:
- name: http
port: 8123
targetPort: 0
- name: client
port: 9000
targetPort: 0
volumeClaimTemplates:
- metadata:
creationTimestamp: null
name: data-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
- metadata:
creationTimestamp: null
name: log-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
templating:
policy: manual
troubleshoot: "False"
normalizedCompleted:
apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseInstallation
metadata:
creationTimestamp: "2024-08-21T13:49:55Z"
finalizers:
- finalizer.clickhouseinstallation.altinity.com
generation: 1
name: clickhouse-installation
namespace: clickhouse
resourceVersion: "415591205"
uid: 75211649-036d-4cb3-a535-d9a4bab082c0
spec:
configuration:
clusters:
- layout:
replicas:
- name: "0"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- name: "1"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- name: "2"
shards:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
shards:
- internalReplication: "True"
name: "0"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 0-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- internalReplication: "True"
name: "1"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 1-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- internalReplication: "True"
name: "2"
replicas:
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-0
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-1
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
- httpPort: 8123
interserverHTTPPort: 9009
name: 2-2
tcpPort: 9000
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
replicasCount: 3
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
shardsCount: 3
name: simple
schemaPolicy:
replica: All
shard: All
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
podTemplate: clickhouse:24.3.7
serviceTemplate: clickhouse:24.3.7
files:
config.d/disks.xml: |
<clickhouse>
<storage_configuration>
<disks>
<s3_disk>
<type>s3</type>
<endpoint>https://company-clickhouse-env.s3.amazonaws.com/tables/</endpoint>
<use_environment_credentials>true</use_environment_credentials>
<metadata_path>/var/lib/clickhouse/disks/s3_disk/</metadata_path>
</s3_disk>
<s3_cache>
<type>cache</type>
<disk>s3_disk</disk>
<path>/var/lib/clickhouse/disks/s3_cache/</path>
<max_size>10Gi</max_size>
</s3_cache>
</disks>
<policies>
<s3_main>
<volumes>
<main>
<disk>s3_disk</disk>
</main>
</volumes>
</s3_main>
</policies>
</storage_configuration>
</clickhouse>
config.d/s3.xml: |
<clickhouse>
<s3>
<use_environment_credentials>true</use_environment_credentials>
</s3>
</clickhouse>
users:
clickhouse_operator/networks/ip:
- 10.132.130.39
clickhouse_operator/password_sha256_hex: 716b36073a90c6fe1d445ac1af85f4777c5b7a155cea359961826a030513e448
clickhouse_operator/profile: clickhouse_operator
default/networks/host_regexp: (chi-clickhouse-installation-[^.]+\d+-\d+|clickhouse\-clickhouse-installation)\.clickhouse\.svc\.cluster\.local$
default/networks/ip:
- ::1
- 127.0.0.1
- 10.132.130.22
- 10.132.130.78
- 10.132.130.230
- 10.132.130.128
- 10.132.130.139
- 10.132.130.231
- 10.132.130.150
- 10.132.130.108
- 10.132.130.47
default/profile: default
default/quota: default
username/networks/host_regexp: (chi-clickhouse-installation-[^.]+\d+-\d+|clickhouse\-clickhouse-installation)\.clickhouse\.svc\.cluster\.local$
username/networks/ip:
- ::1
- 127.0.0.1
- 0.0.0.0/0
username/password_sha256_hex: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
username/profile: default
username/quota: default
defaults:
replicasUseFQDN: "False"
storageManagement: {}
templates:
dataVolumeClaimTemplate: data-volume-template
logVolumeClaimTemplate: log-volume-template
serviceTemplate: clickhouse:24.3.7
reconciling:
cleanup:
reconcileFailedObjects:
configMap: Retain
pvc: Retain
service: Retain
statefulSet: Retain
unknownObjects:
configMap: Delete
pvc: Delete
service: Delete
statefulSet: Delete
configMapPropagationTimeout: 10
policy: unspecified
stop: "False"
taskID: 5e9a0378-fa89-4029-8dfe-fcffc4c37bae
templates:
PodTemplatesIndex: {}
ServiceTemplatesIndex: {}
VolumeClaimTemplatesIndex: {}
podTemplates:
- metadata:
creationTimestamp: null
name: clickhouse:24.3.7
spec:
containers:
- env:
- name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
value: "true"
image: clickhouse/clickhouse-server:24.3.7
name: clickhouse
resources: {}
volumeMounts:
- mountPath: /var/lib/clickhouse
name: data-volume-template
- mountPath: /var/log/clickhouse-server
name: log-volume-template
- mountPath: /docker-entrypoint-initdb.d
name: bootstrap-configmap-volume
nodeSelector:
clickhouse-installation: "true"
tolerations:
- effect: NoSchedule
key: installation
operator: Equal
value: clickhouse-installation
volumes:
- configMap:
name: bootstrap-configmap
name: bootstrap-configmap-volume
zone: {}
serviceTemplates:
- metadata:
annotations:
external-dns.alpha.kubernetes.io/internal-hostname: clickhouse.company.us-west-2.env.company.cloud
external-dns.alpha.kubernetes.io/ttl: "60"
creationTimestamp: null
name: clickhouse:24.3.7
spec:
ports:
- name: http
port: 8123
targetPort: 0
- name: client
port: 9000
targetPort: 0
volumeClaimTemplates:
- metadata:
creationTimestamp: null
name: data-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
- metadata:
creationTimestamp: null
name: log-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 4Gi
templating:
policy: manual
troubleshoot: "False"
pod-ips:
- 10.132.130.206
- 10.132.130.78
- 10.132.130.47
- 10.132.130.108
- 10.132.130.241
- 10.132.130.26
- 10.132.130.249
- 10.132.130.231
- 10.132.130.150
pods:
- chi-clickhouse-installation-simple-0-0-0
- chi-clickhouse-installation-simple-0-1-0
- chi-clickhouse-installation-simple-0-2-0
- chi-clickhouse-installation-simple-1-0-0
- chi-clickhouse-installation-simple-1-1-0
- chi-clickhouse-installation-simple-1-2-0
- chi-clickhouse-installation-simple-2-0-0
- chi-clickhouse-installation-simple-2-1-0
- chi-clickhouse-installation-simple-2-2-0
shards: 3
status: Completed
taskID: 5e9a0378-fa89-4029-8dfe-fcffc4c37bae
taskIDsCompleted:
- 5e9a0378-fa89-4029-8dfe-fcffc4c37bae
- c4535835-bbd7-4705-843e-445d04cd2ee4
- e6521aa3-c6f3-4bd8-9eb3-42ace89e0c04
- f3cb30fc-35fd-4723-840c-5d280efa0576
- ac33babd-5975-4059-b872-01cbd4c7c184
- 8697fb2d-1dba-44f8-b2c3-e8a7dcda26c7
- 7351d1a4-f5c2-4568-b3fd-0a49d6e7898d
- 66d1a4b8-c473-4a95-87e7-44e7747c82ce
- 8388aa98-62d8-401d-840b-1b604d7ae6cd
- 18f504a6-187f-4784-aeaa-5f967d40439a
taskIDsStarted:
- 520089d3-fa7e-4d8d-932c-2f5dafc1b21b
- f5a11dd9-268b-4c9a-ab70-9c9a92de40b8
- 49d76b1f-d2ff-43f3-b016-355f021bc573
- 2acada9a-e3ba-4234-ac65-94d16d402ba3
- 379fea7c-8cb8-4757-ab0c-ba130b30ea04
- 28e6faf5-a626-4822-878f-ebda6950bf31
- 5851304b-ae19-4b5c-89ca-bf50675b73ee
- 83c46bbc-c173-412d-8fcd-8537683f916d
- 0c4282bd-cf56-429f-b980-fdc5db20a3ee
- 38cef402-0dea-4d74-a056-cb617225f57a
Maybe this is out of disk space? Could you share your storage class configuration?
I see. Easy fix, in that case.
NAME            PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
gp2 (default)   kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   false                  2y135d
Could you share?
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Description
I have a clickhouse-installation, version 24.3.7, running on an EKS cluster.
One of my StatefulSets is stuck in CrashLoopBackOff.
Restarting the pod has no effect.
I am hesitant to delete it manually in case I lose the underlying volume, causing data loss.
What is the recommended course of action?
Error Logs
State
The text was updated successfully, but these errors were encountered: