diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go
index 089e782d9..816d5abf3 100644
--- a/cmd/operator/app/clickhouse_operator.go
+++ b/cmd/operator/app/clickhouse_operator.go
@@ -47,7 +47,7 @@ const (
 const (
     // TODO probably this should be added as a CLI/Config param
     // Default number of controller threads running concurrently (used in case no other specified in config)
-    defaultControllerThreadsNum = 1
+    defaultControllerThreadsNum = 10
 )
 
 // CLI parameter variables
diff --git a/deploy/dev/clickhouse-operator-install-dev.yaml b/deploy/dev/clickhouse-operator-install-dev.yaml
index 7fcd31ee1..0b1b784d7 100644
--- a/deploy/dev/clickhouse-operator-install-dev.yaml
+++ b/deploy/dev/clickhouse-operator-install-dev.yaml
@@ -1367,7 +1367,7 @@ subjects:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -1494,7 +1494,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -1509,7 +1509,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -1555,7 +1555,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -1654,7 +1654,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -1703,8 +1703,8 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.6
-# altinity/metrics-exporter:0.9.6
+# altinity/clickhouse-operator:0.9.7
+# altinity/metrics-exporter:0.9.7
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -1747,7 +1747,7 @@ spec:
             name: etc-clickhouse-operator-usersd-files
       containers:
         - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.6
+          image: altinity/clickhouse-operator:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
@@ -1812,7 +1812,7 @@ spec:
               resource: limits.memory
         - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.6
+          image: altinity/metrics-exporter:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
diff --git a/deploy/grafana/grafana-with-grafana-operator/install-grafana-with-operator.sh b/deploy/grafana/grafana-with-grafana-operator/install-grafana-with-operator.sh
index 9f52536c0..33055cbe7 100755
--- a/deploy/grafana/grafana-with-grafana-operator/install-grafana-with-operator.sh
+++ b/deploy/grafana/grafana-with-grafana-operator/install-grafana-with-operator.sh
@@ -117,8 +117,7 @@ OPERATOR_CH_USER=$(grep chUsername ${CUR_DIR}/../../../config/config.yaml | cut
 OPERATOR_CH_PASS=$(grep chPassword ${CUR_DIR}/../../../config/config.yaml | cut -d " " -f 2-)
 
 IFS=$'\n'
-for LINE in $(kubectl get --all-namespaces chi -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,ENDPOINT:.status.endpoint | tail -n +2);
-do
+for LINE in $(kubectl get --all-namespaces chi -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,ENDPOINT:.status.endpoint | tail -n +2); do
     ITEMS=( $(grep -Eo '([^[:space:]]+)' <<<"$LINE") )
     NAMESPACE=${ITEMS[0]}
     CHI=${ITEMS[1]}
@@ -129,8 +128,7 @@ do
     CLICKHOUSE_URL="http://${ENDPOINT}:${PORT}"
 
     # create system.query_log in each pod on CHI
-    for POD in $(kubectl get --namespace="${NAMESPACE}" pods -l "clickhouse.altinity.com/app=chop,clickhouse.altinity.com/chi=${CHI}" -o='custom-columns=NAME:.metadata.name' | tail -n +2)
-    do
+    for POD in $(kubectl get --namespace="${NAMESPACE}" pods -l "clickhouse.altinity.com/app=chop,clickhouse.altinity.com/chi=${CHI}" -o='custom-columns=NAME:.metadata.name' | tail -n +2); do
         kubectl exec --namespace="${NAMESPACE}" ${POD} -- clickhouse-client --echo -mn -q 'SELECT hostName(), dummy FROM system.one SETTINGS log_queries=1; SYSTEM FLUSH LOGS'
     done
@@ -141,7 +139,7 @@ do
         ENDPOINT="$ENDPOINT" \
         OPERATOR_CH_USER="$OPERATOR_CH_USER" \
         OPERATOR_CH_PASS="$OPERATOR_CH_PASS" \
-        envsubst
+        envsubst \
     )
 done
@@ -152,6 +150,5 @@ sleep 10
 kubectl apply --namespace="${GRAFANA_NAMESPACE}" -f <(
     cat ${CUR_DIR}/grafana-dashboard-queries-cr-template.yaml | \
         GRAFANA_DASHBOARD_NAME="$GRAFANA_QUERIES_DASHBOARD_NAME" \
-        envsubst
+        envsubst \
 )
-
diff --git a/deploy/operator/clickhouse-operator-install-deployment.yaml b/deploy/operator/clickhouse-operator-install-deployment.yaml
index 9f6261b37..dbc98110a 100644
--- a/deploy/operator/clickhouse-operator-install-deployment.yaml
+++ b/deploy/operator/clickhouse-operator-install-deployment.yaml
@@ -1,7 +1,7 @@
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -127,7 +127,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -141,7 +141,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -186,7 +186,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -284,7 +284,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -332,8 +332,8 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.6
-# altinity/metrics-exporter:0.9.6
+# altinity/clickhouse-operator:0.9.7
+# altinity/metrics-exporter:0.9.7
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -375,7 +375,7 @@ spec:
             name: etc-clickhouse-operator-usersd-files
       containers:
         - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.6
+          image: altinity/clickhouse-operator:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
@@ -440,7 +440,7 @@ spec:
               resource: limits.memory
         - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.6
+          image: altinity/metrics-exporter:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
diff --git a/deploy/operator/clickhouse-operator-install.yaml b/deploy/operator/clickhouse-operator-install.yaml
index eddb539c1..f1e58e923 100644
--- a/deploy/operator/clickhouse-operator-install.yaml
+++ b/deploy/operator/clickhouse-operator-install.yaml
@@ -1367,7 +1367,7 @@ subjects:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -1494,7 +1494,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -1509,7 +1509,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -1555,7 +1555,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -1654,7 +1654,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
+# altinity/clickhouse-operator:0.9.7
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -1703,8 +1703,8 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.6
-# altinity/metrics-exporter:0.9.6
+# altinity/clickhouse-operator:0.9.7
+# altinity/metrics-exporter:0.9.7
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -1747,7 +1747,7 @@ spec:
             name: etc-clickhouse-operator-usersd-files
       containers:
         - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.6
+          image: altinity/clickhouse-operator:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
@@ -1812,7 +1812,7 @@ spec:
               resource: limits.memory
         - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.6
+          image: altinity/metrics-exporter:0.9.7
           imagePullPolicy: Always
           volumeMounts:
             - name: etc-clickhouse-operator-folder
diff --git a/dev/image_build_metrics_exporter_universal.sh b/dev/image_build_metrics_exporter_universal.sh
index babca376c..7fb41825f 100755
--- a/dev/image_build_metrics_exporter_universal.sh
+++ b/dev/image_build_metrics_exporter_universal.sh
@@ -24,13 +24,17 @@ if [[ "${MINIKUBE}" == "yes" ]]; then
     # We'd like to build for minikube
     eval $(minikube docker-env)
 fi
-docker build -t "${TAG}" -f "${DOCKERFILE}" "${SRC_ROOT}"
-# Publish image
-if [[ "${DOCKERHUB_PUBLISH}" == "yes" ]]; then
-    if [[ ! -z "${DOCKERHUB_LOGIN}" ]]; then
-        echo "Dockerhub login specified: '${DOCKERHUB_LOGIN}', perform login"
-        docker login -u "${DOCKERHUB_LOGIN}"
+if docker build -t "${TAG}" -f "${DOCKERFILE}" "${SRC_ROOT}"; then
+    # Image ready, time to publish it
+    if [[ "${DOCKERHUB_PUBLISH}" == "yes" ]]; then
+        if [[ ! -z "${DOCKERHUB_LOGIN}" ]]; then
+            echo "Dockerhub login specified: '${DOCKERHUB_LOGIN}', perform login"
+            docker login -u "${DOCKERHUB_LOGIN}"
+        fi
+        docker push "${TAG}"
     fi
-    docker push "${TAG}"
+else
+    echo "FAILED docker build! Abort."
+    exit 1
 fi
diff --git a/dev/image_build_operator_universal.sh b/dev/image_build_operator_universal.sh
index 793927792..ca2cf13e0 100755
--- a/dev/image_build_operator_universal.sh
+++ b/dev/image_build_operator_universal.sh
@@ -24,13 +24,17 @@ if [[ "${MINIKUBE}" == "yes" ]]; then
     # We'd like to build for minikube
     eval $(minikube docker-env)
 fi
-docker build -t "${TAG}" -f "${DOCKERFILE}" "${SRC_ROOT}"
-# Publish image
-if [[ "${DOCKERHUB_PUBLISH}" == "yes" ]]; then
-    if [[ ! -z "${DOCKERHUB_LOGIN}" ]]; then
-        echo "Dockerhub login specified: '${DOCKERHUB_LOGIN}', perform login"
-        docker login -u "${DOCKERHUB_LOGIN}"
+if docker build -t "${TAG}" -f "${DOCKERFILE}" "${SRC_ROOT}"; then
+    # Image ready, time to publish it
+    if [[ "${DOCKERHUB_PUBLISH}" == "yes" ]]; then
+        if [[ ! -z "${DOCKERHUB_LOGIN}" ]]; then
+            echo "Dockerhub login specified: '${DOCKERHUB_LOGIN}', perform login"
+            docker login -u "${DOCKERHUB_LOGIN}"
+        fi
+        docker push "${TAG}"
     fi
-    docker push "${TAG}"
+else
+    echo "FAILED docker build! Abort."
+    exit 1
 fi
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index 1a9bd20a1..35d4ab5cf 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -218,8 +218,8 @@ type ChiHost struct {
     Templates ChiTemplateNames `json:"templates,omitempty"`
 
     // Internal data
-    Address ChiHostAddress `json:"address"`
-    Config  ChiHostConfig  `json:"config"`
+    Address ChiHostAddress `json:"-"`
+    Config  ChiHostConfig  `json:"-"`
     CHI     *ClickHouseInstallation `json:"-" testdiff:"ignore"`
 }
@@ -261,6 +261,7 @@ type ChiHostAddress struct {
 type ChiHostConfig struct {
     ZookeeperFingerprint string `json:"zookeeperfingerprint"`
     SettingsFingerprint  string `json:"settingsfingerprint"`
+    FilesFingerprint     string `json:"filesfingerprint"`
 }
 
 // CHITemplates defines templates section of .spec
diff --git a/pkg/apis/metrics/exporter.go b/pkg/apis/metrics/exporter.go
index 57b540baf..63b112909 100644
--- a/pkg/apis/metrics/exporter.go
+++ b/pkg/apis/metrics/exporter.go
@@ -67,7 +67,7 @@ func (e *Exporter) getWatchedCHIs() []*WatchedCHI {
 // Collect implements prometheus.Collector Collect method
 func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
     if ch == nil {
-        log.Info("Prometheus channel is closed. Skipping")
+        log.V(2).Info("Prometheus channel is closed. Skipping")
Skipping") return } @@ -79,13 +79,13 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { case *WatchedCHI: e.toRemoveFromWatched.Delete(key) e.removeFromWatched(key.(*WatchedCHI)) - log.Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*WatchedCHI).Name, key.(*WatchedCHI).Namespace) + log.V(1).Infof("Removed ClickHouseInstallation (%s/%s) from Exporter", key.(*WatchedCHI).Name, key.(*WatchedCHI).Namespace) } return true }) }() - log.Info("Starting Collect") + log.V(2).Info("Starting Collect") var wg = sync.WaitGroup{} e.WalkWatchedChi(func(chi *WatchedCHI, hostname string) { wg.Add(1) @@ -95,7 +95,7 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) { }(chi, hostname, ch) }) wg.Wait() - log.Info("Finished Collect") + log.V(2).Info("Finished Collect") } func (e *Exporter) enqueueToRemoveFromWatched(chi *WatchedCHI) { @@ -143,7 +143,7 @@ func (e *Exporter) updateWatched(chi *WatchedCHI) { } // CHI is not watched - log.Infof("Added ClickHouseInstallation (%s/%s): including hostnames into Exporter", chi.Namespace, chi.Name) + log.V(1).Infof("Added ClickHouseInstallation (%s/%s): including hostnames into Exporter", chi.Namespace, chi.Name) e.chInstallations[chi.indexKey()] = chi } @@ -168,53 +168,53 @@ func (e *Exporter) collectFromHost(chi *WatchedCHI, hostname string, c chan<- pr fetcher := e.newFetcher(hostname) writer := NewPrometheusWriter(c, chi, hostname) - log.Infof("Querying metrics for %s\n", hostname) + log.V(2).Infof("Querying metrics for %s\n", hostname) if metrics, err := fetcher.getClickHouseQueryMetrics(); err == nil { - log.Infof("Extracted %d metrics for %s\n", len(metrics), hostname) + log.V(2).Infof("Extracted %d metrics for %s\n", len(metrics), hostname) writer.WriteMetrics(metrics) writer.WriteOKFetch("system.metrics") } else { // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Infof("Error querying metrics for %s: %s\n", hostname, err) + log.V(2).Infof("Error querying metrics for %s: %s\n", hostname, err) writer.WriteErrorFetch("system.metrics") //e.enqueueToRemoveFromWatched(chi) return } - log.Infof("Querying table sizes for %s\n", hostname) + log.V(2).Infof("Querying table sizes for %s\n", hostname) if tableSizes, err := fetcher.getClickHouseQueryTableSizes(); err == nil { - log.Infof("Extracted %d table sizes for %s\n", len(tableSizes), hostname) + log.V(2).Infof("Extracted %d table sizes for %s\n", len(tableSizes), hostname) writer.WriteTableSizes(tableSizes) writer.WriteOKFetch("table sizes") } else { // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Infof("Error querying table sizes for %s: %s\n", hostname, err) + log.V(2).Infof("Error querying table sizes for %s: %s\n", hostname, err) writer.WriteErrorFetch("table sizes") // e.enqueueToRemoveFromWatched(chi) return } - log.Infof("Querying system replicas for %s\n", hostname) + log.V(2).Infof("Querying system replicas for %s\n", hostname) if systemReplicas, err := fetcher.getClickHouseQuerySystemReplicas(); err == nil { - log.Infof("Extracted %d system replicas for %s\n", len(systemReplicas), hostname) + log.V(2).Infof("Extracted %d system replicas for %s\n", len(systemReplicas), hostname) writer.WriteSystemReplicas(systemReplicas) writer.WriteOKFetch("system.replicas") } else { // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Infof("Error querying system replicas for %s: %s\n", hostname, err) + log.V(2).Infof("Error querying system replicas for %s: %s\n", hostname, err) 
writer.WriteErrorFetch("system.replicas") // e.enqueueToRemoveFromWatched(chi) return } - log.Infof("Querying mutations for %s\n", hostname) + log.V(2).Infof("Querying mutations for %s\n", hostname) if mutations, err := fetcher.getClickHouseQueryMutations(); err == nil { - log.Infof("Extracted %d mutations for %s\n", len(mutations), hostname) + log.V(2).Infof("Extracted %d mutations for %s\n", len(mutations), hostname) writer.WriteMutations(mutations) writer.WriteOKFetch("system.mutations") } else { // In case of an error fetching data from clickhouse store CHI name in e.cleanup - log.Infof("Error querying mutations for %s: %s\n", hostname, err) + log.V(2).Infof("Error querying mutations for %s: %s\n", hostname, err) writer.WriteErrorFetch("system.mutations") //e.enqueueToRemoveFromWatched(chi) return @@ -275,7 +275,7 @@ func (e *Exporter) DiscoveryWatchedCHIs(chop *chop.CHOp, chopClient *chopclients // Walk over the list of ClickHouseInstallation objects and add them as watched for i := range list.Items { chi := &list.Items[i] - log.Infof("Adding explicitly found CHI %s/%s with %d hosts\n", chi.Namespace, chi.Name, len(chi.Status.FQDNs)) + log.V(1).Infof("Adding explicitly found CHI %s/%s with %d hosts\n", chi.Namespace, chi.Name, len(chi.Status.FQDNs)) watchedCHI := &WatchedCHI{ Namespace: chi.Namespace, Name: chi.Name, diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 7c34cdf9c..0c6e73594 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -50,31 +50,31 @@ func (c *CHOp) Config() *v1.OperatorConfig { func (c *CHOp) SetupLog() { updated := false if c.Config().Logtostderr != "" { - log.Infof("Log option cur value %s=%s\n", "logtostderr", flag.Lookup("logtostderr").Value) - log.Infof("Log option new value %s=%s\n", "logtostderr", c.Config().Logtostderr) + log.V(1).Infof("Log option cur value %s=%s\n", "logtostderr", flag.Lookup("logtostderr").Value) + log.V(1).Infof("Log option new value %s=%s\n", "logtostderr", c.Config().Logtostderr) updated = true _ = flag.Set("logtostderr", c.Config().Logtostderr) } if c.Config().Alsologtostderr != "" { - log.Infof("Log option cur value %s=%s\n", "alsologtostderr", flag.Lookup("alsologtostderr").Value) - log.Infof("Log option new value %s=%s\n", "alsologtostderr", c.Config().Alsologtostderr) + log.V(1).Infof("Log option cur value %s=%s\n", "alsologtostderr", flag.Lookup("alsologtostderr").Value) + log.V(1).Infof("Log option new value %s=%s\n", "alsologtostderr", c.Config().Alsologtostderr) updated = true _ = flag.Set("alsologtostderr", c.Config().Alsologtostderr) } if c.Config().Stderrthreshold != "" { - log.Infof("Log option cur value %s=%s\n", "stderrthreshold", flag.Lookup("stderrthreshold").Value) - log.Infof("Log option new value %s=%s\n", "stderrthreshold", c.Config().Stderrthreshold) + log.V(1).Infof("Log option cur value %s=%s\n", "stderrthreshold", flag.Lookup("stderrthreshold").Value) + log.V(1).Infof("Log option new value %s=%s\n", "stderrthreshold", c.Config().Stderrthreshold) updated = true _ = flag.Set("stderrthreshold", c.Config().Stderrthreshold) } if c.Config().V != "" { - log.Infof("Log option cur value %s=%s\n", "v", flag.Lookup("v").Value) - log.Infof("Log option new value %s=%s\n", "v", c.Config().V) + log.V(1).Infof("Log option cur value %s=%s\n", "v", flag.Lookup("v").Value) + log.V(1).Infof("Log option new value %s=%s\n", "v", c.Config().V) updated = true _ = flag.Set("v", c.Config().V) } if updated { - log.Infof("Additional log options applied\n") + log.V(1).Infof("Additional log options applied\n") } } diff --git 
index ed135912e..70674b4f8 100644
--- a/pkg/controller/chi/controller.go
+++ b/pkg/controller/chi/controller.go
@@ -105,7 +105,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chi.Namespace) {
                 return
             }
-            //log.V(1).Infof("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name)
+            log.V(2).Infof("chiInformer.AddFunc - %s/%s added", chi.Namespace, chi.Name)
             c.enqueueObject(NewReconcileChi(reconcileAdd, nil, chi))
         },
         UpdateFunc: func(old, new interface{}) {
@@ -114,7 +114,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(newChi.Namespace) {
                 return
             }
-            //log.V(1).Info("chiInformer.UpdateFunc")
+            log.V(2).Info("chiInformer.UpdateFunc")
             c.enqueueObject(NewReconcileChi(reconcileUpdate, oldChi, newChi))
         },
         DeleteFunc: func(obj interface{}) {
@@ -122,7 +122,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chi.Namespace) {
                 return
             }
-            //log.V(1).Infof("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name)
+            log.V(2).Infof("chiInformer.DeleteFunc - CHI %s/%s deleted", chi.Namespace, chi.Name)
             c.enqueueObject(NewReconcileChi(reconcileDelete, chi, nil))
         },
     })
@@ -133,7 +133,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chit.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name)
+            log.V(2).Infof("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name)
             c.enqueueObject(NewReconcileChit(reconcileAdd, nil, chit))
         },
         UpdateFunc: func(old, new interface{}) {
@@ -142,7 +142,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(newChit.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name)
+            log.V(2).Infof("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name)
             c.enqueueObject(NewReconcileChit(reconcileUpdate, oldChit, newChit))
         },
         DeleteFunc: func(obj interface{}) {
@@ -150,7 +150,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chit.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name)
+            log.V(2).Infof("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name)
             c.enqueueObject(NewReconcileChit(reconcileDelete, chit, nil))
         },
     })
@@ -161,7 +161,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.AddFunc - %s/%s added", chit.Namespace, chit.Name)
+            log.V(2).Infof("chopInformer.AddFunc - %s/%s added", chopConfig.Namespace, chopConfig.Name)
             c.enqueueObject(NewReconcileChopConfig(reconcileAdd, nil, chopConfig))
         },
         UpdateFunc: func(old, new interface{}) {
@@ -170,7 +170,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(newChopConfig.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.UpdateFunc - %s/%s", newChit.Namespace, newChit.Name)
+            log.V(2).Infof("chopInformer.UpdateFunc - %s/%s", newChopConfig.Namespace, newChopConfig.Name)
             c.enqueueObject(NewReconcileChopConfig(reconcileUpdate, oldChopConfig, newChopConfig))
         },
         DeleteFunc: func(obj interface{}) {
@@ -178,7 +178,7 @@ func (c *Controller) addEventHandlers(
             if !c.chop.Config().IsWatchedNamespace(chopConfig.Namespace) {
                 return
             }
-            //log.V(1).Infof("chitInformer.DeleteFunc - %s/%s deleted", chit.Namespace, chit.Name)
+            log.V(2).Infof("chopInformer.DeleteFunc - %s/%s deleted", chopConfig.Namespace, chopConfig.Name)
             c.enqueueObject(NewReconcileChopConfig(reconcileDelete, chopConfig, nil))
         },
     })
@@ -189,7 +189,7 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&service.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("serviceInformer AddFunc %s/%s", service.Namespace, service.Name)
+            log.V(2).Infof("serviceInformer AddFunc %s/%s", service.Namespace, service.Name)
         },
         UpdateFunc: func(old, new interface{}) {
             oldService := old.(*core.Service)
@@ -202,7 +202,7 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&service.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name)
+            log.V(2).Infof("serviceInformer DeleteFunc %s/%s", service.Namespace, service.Name)
         },
     })
@@ -212,7 +212,7 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&endpoints.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name)
+            log.V(2).Infof("endpointsInformer AddFunc %s/%s", endpoints.Namespace, endpoints.Name)
         },
         UpdateFunc: func(old, new interface{}) {
             oldEndpoints := old.(*core.Endpoints)
@@ -223,7 +223,7 @@ func (c *Controller) addEventHandlers(
             diff, equal := messagediff.DeepDiff(oldEndpoints, newEndpoints)
             if equal {
-                //log.V(1).Infof("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name)
+                log.V(2).Infof("onUpdateEndpoints(%s/%s): no changes found", oldEndpoints.Namespace, oldEndpoints.Name)
                 // No need to react
                 return
             }
@@ -255,7 +255,7 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&endpoints.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("endpointsInformer DeleteFunc %s/%s", endpoints.Namespace, endpoints.Name)
+            log.V(2).Infof("endpointsInformer DeleteFunc %s/%s", endpoints.Namespace, endpoints.Name)
         },
     })
@@ -265,21 +265,21 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&configMap.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name)
+            log.V(2).Infof("configMapInformer AddFunc %s/%s", configMap.Namespace, configMap.Name)
         },
         UpdateFunc: func(old, new interface{}) {
             configMap := old.(*core.ConfigMap)
             if !c.isTrackedObject(&configMap.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name)
+            log.V(2).Infof("configMapInformer UpdateFunc %s/%s", configMap.Namespace, configMap.Name)
         },
         DeleteFunc: func(obj interface{}) {
             configMap := obj.(*core.ConfigMap)
             if !c.isTrackedObject(&configMap.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name)
+            log.V(2).Infof("configMapInformer DeleteFunc %s/%s", configMap.Namespace, configMap.Name)
         },
     })
@@ -289,7 +289,7 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&statefulSet.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("statefulSetInformer AddFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+            log.V(2).Infof("statefulSetInformer AddFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
             //controller.handleObject(obj)
         },
         UpdateFunc: func(old, new interface{}) {
@@ -297,14 +297,14 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&statefulSet.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+            log.V(2).Infof("statefulSetInformer UpdateFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
         },
         DeleteFunc: func(obj interface{}) {
             statefulSet := obj.(*apps.StatefulSet)
             if !c.isTrackedObject(&statefulSet.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
+            log.V(2).Infof("statefulSetInformer DeleteFunc %s/%s", statefulSet.Namespace, statefulSet.Name)
             //controller.handleObject(obj)
         },
     })
@@ -315,28 +315,28 @@ func (c *Controller) addEventHandlers(
             if !c.isTrackedObject(&pod.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("podInformer AddFunc %s/%s", pod.Namespace, pod.Name)
+            log.V(2).Infof("podInformer AddFunc %s/%s", pod.Namespace, pod.Name)
         },
         UpdateFunc: func(old, new interface{}) {
             pod := old.(*core.Pod)
             if !c.isTrackedObject(&pod.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name)
+            log.V(2).Infof("podInformer UpdateFunc %s/%s", pod.Namespace, pod.Name)
         },
         DeleteFunc: func(obj interface{}) {
             pod := obj.(*core.Pod)
             if !c.isTrackedObject(&pod.ObjectMeta) {
                 return
             }
-            //log.V(1).Infof("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name)
+            log.V(2).Infof("podInformer DeleteFunc %s/%s", pod.Namespace, pod.Name)
         },
     })
 }
 
 // isTrackedObject checks whether operator is interested in changes of this object
 func (c *Controller) isTrackedObject(objectMeta *meta.ObjectMeta) bool {
-    return c.chop.Config().IsWatchedNamespace(objectMeta.Namespace) && chopmodels.IsChopGeneratedObject(objectMeta)
+    return c.chop.Config().IsWatchedNamespace(objectMeta.Namespace) && chopmodels.IsCHOPGeneratedObject(objectMeta)
 }
 
 // Run syncs caches, starts workers
diff --git a/pkg/controller/chi/deleter.go b/pkg/controller/chi/deleter.go
index a69a607aa..d4cf3bd86 100644
--- a/pkg/controller/chi/deleter.go
+++ b/pkg/controller/chi/deleter.go
@@ -18,22 +18,13 @@ import (
     chop "github.com/altinity/clickhouse-operator/pkg/apis/clickhouse.altinity.com/v1"
     chopmodel "github.com/altinity/clickhouse-operator/pkg/model"
     log "github.com/golang/glog"
+    "time"
+
     // log "k8s.io/klog"
 
     apps "k8s.io/api/apps/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// newDeleteOptions returns filled *metav1.DeleteOptions
-func newDeleteOptions() *metav1.DeleteOptions {
-    gracePeriodSeconds := int64(0)
-    propagationPolicy := metav1.DeletePropagationForeground
-    return &metav1.DeleteOptions{
-        GracePeriodSeconds: &gracePeriodSeconds,
-        PropagationPolicy:  &propagationPolicy,
-    }
-}
-
 // deleteHost deletes all kubernetes resources related to replica *chop.ChiHost
 func (c *Controller) deleteHost(host *chop.ChiHost) error {
     // Each host consists of
@@ -45,9 +36,9 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error {
     // Need to delete all these item
     log.V(1).Infof("Controller delete host %s/%s", host.Address.ClusterName, host.Name)
 
-    _ = c.statefulSetDelete(host)
-    _ = c.persistentVolumeClaimDelete(host)
-    _ = c.configMapDelete(host)
+    _ = c.deleteStatefulSet(host)
+    _ = c.deletePVC(host)
+    _ = c.deleteConfigMap(host)
     _ = c.deleteServiceHost(host)
 
     // When deleting the whole CHI (not particular host), CHI may already be unavailable, so update CHI tolerantly
@@ -59,10 +50,9 @@ func (c *Controller) deleteHost(host *chop.ChiHost) error {
     return nil
 }
 
-// deleteConfigMapsChi
-func (c *Controller) deleteConfigMapsChi(chi *chop.ClickHouseInstallation) error {
+// deleteConfigMapsCHI
+func (c *Controller) deleteConfigMapsCHI(chi *chop.ClickHouseInstallation) error {
     // Delete common ConfigMap's
-    // Delete CHI service
     //
     // chi-b3d29f-common-configd   2      61s
     // chi-b3d29f-common-usersd    0      61s
@@ -98,8 +88,16 @@ func (c *Controller) statefulSetDeletePod(statefulSet *apps.StatefulSet) error {
     return c.kubeClient.CoreV1().Pods(statefulSet.Namespace).Delete(name, newDeleteOptions())
 }
 
-// statefulSetDelete gracefully deletes StatefulSet through zeroing Pod's count
-func (c *Controller) statefulSetDelete(host *chop.ChiHost) error {
+func (c *Controller) FindStatefulSet(host *chop.ChiHost) (*apps.StatefulSet, error) {
+    // Namespaced name
+    name := chopmodel.CreateStatefulSetName(host)
+    namespace := host.Address.Namespace
+
+    return c.statefulSetLister.StatefulSets(namespace).Get(name)
+}
+
+// deleteStatefulSet gracefully deletes StatefulSet through zeroing Pod's count
+func (c *Controller) deleteStatefulSet(host *chop.ChiHost) error {
     // IMPORTANT
     // StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted.
     // To achieve ordered and graceful termination of the pods in the StatefulSet,
@@ -109,15 +107,16 @@ func (c *Controller) statefulSetDelete(host *chop.ChiHost) error {
     name := chopmodel.CreateStatefulSetName(host)
     namespace := host.Address.Namespace
 
-    log.V(1).Infof("statefulSetDelete(%s/%s)", namespace, name)
+    log.V(1).Infof("deleteStatefulSet(%s/%s)", namespace, name)
 
-    statefulSet, err := c.statefulSetLister.StatefulSets(namespace).Get(name)
+    statefulSet, err := c.FindStatefulSet(host)
     if err != nil {
         log.V(1).Infof("error get StatefulSet %s/%s", namespace, name)
         return nil
     }
 
-    // Zero pods count. This is the proper and graceful way to delete StatefulSet
+    // Scale StatefulSet down to 0 pods count.
+    // This is the proper and graceful way to delete StatefulSet
     var zero int32 = 0
     statefulSet.Spec.Replicas = &zero
     statefulSet, _ = c.kubeClient.AppsV1().StatefulSets(namespace).Update(statefulSet)
@@ -128,19 +127,31 @@ func (c *Controller) statefulSetDelete(host *chop.ChiHost) error {
         log.V(1).Infof("StatefulSet %s/%s deleted", namespace, name)
     } else {
         log.V(1).Infof("StatefulSet %s/%s FAILED TO DELETE %v", namespace, name, err)
+        return nil
+    }
+
+    for {
+        // TODO
+        // There should be better way to sync cache
+        if _, err := c.FindStatefulSet(host); err == nil {
+            log.V(1).Infof("StatefulSet %s/%s deleted, cache NOT yet synced", namespace, name)
+            time.Sleep(5 * time.Second)
+        } else {
+            log.V(1).Infof("StatefulSet %s/%s deleted, cache synced. Desc: %v", namespace, name, err)
+            break
+        }
     }
 
     return nil
 }
 
-// persistentVolumeClaimDelete deletes PersistentVolumeClaim
-func (c *Controller) persistentVolumeClaimDelete(host *chop.ChiHost) error {
+// deletePVC deletes PersistentVolumeClaim
+func (c *Controller) deletePVC(host *chop.ChiHost) error {
     namespace := host.Address.Namespace
     labeler := chopmodel.NewLabeler(c.chop, host.CHI)
 
-    listOptions := newListOptions(labeler.GetSelectorHostScope(host))
-    list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(listOptions)
+    list, err := c.kubeClient.CoreV1().PersistentVolumeClaims(namespace).List(newListOptions(labeler.GetSelectorHostScope(host)))
     if err != nil {
         log.V(1).Infof("FAIL get list of PVC for host %s/%s %v", namespace, host.Name, err)
         return err
@@ -168,12 +179,12 @@ func (c *Controller) persistentVolumeClaimDelete(host *chop.ChiHost) error {
     return nil
 }
 
-// configMapDelete deletes ConfigMap
-func (c *Controller) configMapDelete(host *chop.ChiHost) error {
+// deleteConfigMap deletes ConfigMap
+func (c *Controller) deleteConfigMap(host *chop.ChiHost) error {
     name := chopmodel.CreateConfigMapPodName(host)
     namespace := host.Address.Namespace
 
-    log.V(1).Infof("configMapDelete(%s/%s)", namespace, name)
+    log.V(1).Infof("deleteConfigMap(%s/%s)", namespace, name)
 
     if err := c.kubeClient.CoreV1().ConfigMaps(namespace).Delete(name, newDeleteOptions()); err == nil {
         log.V(1).Infof("ConfigMap %s/%s deleted", namespace, name)
@@ -220,8 +231,8 @@ func (c *Controller) deleteServiceCHI(chi *chop.ClickHouseInstallation) error {
 func (c *Controller) deleteServiceIfExists(namespace, name string) error {
     // Delete Service in case it does not exist
 
-    // Check service exists
-    _, err := c.kubeClient.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
+    // Check specified service exists
+    _, err := c.kubeClient.CoreV1().Services(namespace).Get(name, newGetOptions())
 
     if err != nil {
         // No such a service, nothing to delete
diff --git a/pkg/controller/chi/events.go b/pkg/controller/chi/event.go
similarity index 100%
rename from pkg/controller/chi/events.go
rename to pkg/controller/chi/event.go
diff --git a/pkg/controller/chi/getters.go b/pkg/controller/chi/getter.go
similarity index 96%
rename from pkg/controller/chi/getters.go
rename to pkg/controller/chi/getter.go
index a7b46efc7..ffd54af01 100644
--- a/pkg/controller/chi/getters.go
+++ b/pkg/controller/chi/getter.go
@@ -170,9 +170,9 @@ func (c *Controller) getStatefulSet(objMeta *meta.ObjectMeta, byNameOnly bool) (
     return nil, fmt.Errorf("too much objects found %d expecting 1", len(objects))
 }
 
-// GetChiByObjectMeta gets CHI by namespaced name
-func (c *Controller) GetChiByObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) {
-    chiName, err := chopmodel.GetChiNameFromObjectMeta(objectMeta)
+// GetCHIByObjectMeta gets CHI by namespaced name
+func (c *Controller) GetCHIByObjectMeta(objectMeta *meta.ObjectMeta) (*chiv1.ClickHouseInstallation, error) {
+    chiName, err := chopmodel.GetCHINameFromObjectMeta(objectMeta)
     if err != nil {
         return nil, fmt.Errorf("unable to find CHI by name: '%s'. More info: %v", objectMeta.Name, err)
     }
 }
diff --git a/pkg/controller/chi/labeler.go b/pkg/controller/chi/labeler.go
index eaf5124d1..26a1c699d 100644
--- a/pkg/controller/chi/labeler.go
+++ b/pkg/controller/chi/labeler.go
@@ -136,5 +136,5 @@ func (c *Controller) labelMyObjectsTree() {
 func (c *Controller) addLabels(meta *v1.ObjectMeta) {
     meta.Labels[model.LabelAppName] = model.LabelAppValue
-    meta.Labels[model.LabelChop] = c.chop.Version
+    meta.Labels[model.LabelCHOP] = c.chop.Version
 }
diff --git a/pkg/controller/chi/lister.go b/pkg/controller/chi/options.go
similarity index 65%
rename from pkg/controller/chi/lister.go
rename to pkg/controller/chi/options.go
index 556575b47..c59c00219 100644
--- a/pkg/controller/chi/lister.go
+++ b/pkg/controller/chi/options.go
@@ -20,9 +20,25 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// newListOptions returns filled metav1.ListOptions
 func newListOptions(labelsMap map[string]string) metav1.ListOptions {
     labelSelector := labels.SelectorFromSet(labelsMap)
     return metav1.ListOptions{
         LabelSelector: labelSelector.String(),
     }
 }
+
+// newGetOptions returns filled metav1.GetOptions
+func newGetOptions() metav1.GetOptions {
+    return metav1.GetOptions{}
+}
+
+// newDeleteOptions returns filled *metav1.DeleteOptions
+func newDeleteOptions() *metav1.DeleteOptions {
+    gracePeriodSeconds := int64(0)
+    propagationPolicy := metav1.DeletePropagationForeground
+    return &metav1.DeleteOptions{
+        GracePeriodSeconds: &gracePeriodSeconds,
+        PropagationPolicy:  &propagationPolicy,
+    }
+}
diff --git a/pkg/controller/chi/worker.go b/pkg/controller/chi/worker.go
index 2afffde32..7aeedab56 100644
--- a/pkg/controller/chi/worker.go
+++ b/pkg/controller/chi/worker.go
@@ -189,7 +189,8 @@ func (w *worker) updateCHI(old, new *chop.ClickHouseInstallation) error {
             log.V(1).Info(str)
             w.c.eventCHI(new, eventTypeNormal, eventActionUpdate, eventReasonUpdateInProgress, str)
 
-            _ = w.schemer.ShardCreateTables(shard)
+            //This is not needed, since tables will be added by HostCreateTables call below
+            //_ = w.schemer.ShardCreateTables(shard)
         },
         func(host *chop.ChiHost) {
             str := fmt.Sprintf("Added replica %d to shard %d in cluster %s", host.Address.ReplicaIndex, host.Address.ShardIndex, host.Address.ClusterName)
@@ -329,7 +330,7 @@ func (w *worker) deleteCHI(chi *chop.ClickHouseInstallation) error {
     })
 
     // Delete ConfigMap(s)
-    err = w.c.deleteConfigMapsChi(chi)
+    err = w.c.deleteConfigMapsCHI(chi)
 
     // Delete Service
     err = w.c.deleteServiceCHI(chi)
@@ -345,6 +346,15 @@
 // deleteHost deletes all kubernetes resources related to replica *chop.ChiHost
 func (w *worker) deleteHost(host *chop.ChiHost) error {
+    log.V(1).Infof("Worker delete host %s/%s", host.Address.ClusterName, host.Name)
+
+    if _, err := w.c.FindStatefulSet(host); err != nil {
+        log.V(1).Infof("Worker delete host %s/%s - StatefulSet not found - already deleted?", host.Address.ClusterName, host.Name)
+        return nil
+    }
+
+    log.V(1).Infof("Worker delete host %s/%s - StatefulSet found - start delete process", host.Address.ClusterName, host.Name)
+
     // Each host consists of
     // 1. Tables on host - we need to delete tables on the host in order to clean Zookeeper data
     // 2. StatefulSet
     // 3. PersistentVolumeClaim
     // 4. ConfigMap
     // 5. Service
     // Need to delete all these item
-    log.V(1).Infof("Worker delete host %s/%s", host.Address.ClusterName, host.Name)
 
     if host.CanDeleteAllPVCs() {
         _ = w.schemer.HostDeleteTables(host)
@@ -392,7 +401,7 @@ func (w *worker) deleteCluster(cluster *chop.ChiCluster) error {
 }
 
 func (w *worker) createCHIFromObjectMeta(objectMeta *meta.ObjectMeta) (*chop.ClickHouseInstallation, error) {
-    chi, err := w.c.GetChiByObjectMeta(objectMeta)
+    chi, err := w.c.GetCHIByObjectMeta(objectMeta)
     if err != nil {
         return nil, err
     }
diff --git a/pkg/model/clickhouse/connection.go b/pkg/model/clickhouse/connection.go
index 3cff02582..6ece5826c 100644
--- a/pkg/model/clickhouse/connection.go
+++ b/pkg/model/clickhouse/connection.go
@@ -96,7 +96,7 @@ func (c *CHConnection) QueryContext(ctx context.Context, sql string) (*sqlmodule
         return nil, err
     }
 
-    // log.V(1).Infof("clickhouse.Query(%s):'%s'", c.Hostname, sql)
+    log.V(2).Infof("clickhouse.QueryContext():'%s'", sql)
 
     return rows, nil
 }
@@ -127,7 +127,7 @@ func (c *CHConnection) ExecContext(ctx context.Context, sql string) error {
         return err
     }
 
-    // log.V(1).Infof("clickhouse.Exec(%s):'%s'", c.Hostname, sql)
+    log.V(2).Infof("clickhouse.ExecContext():'%s'", sql)
 
     return nil
 }
diff --git a/pkg/model/creator.go b/pkg/model/creator.go
index b05ca30a1..f04dfc6e7 100644
--- a/pkg/model/creator.go
+++ b/pkg/model/creator.go
@@ -589,14 +589,17 @@ func getClickHouseContainer(statefulSet *apps.StatefulSet) (*corev1.Container, b
 func ensureNamedPortsSpecified(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
     // Ensure ClickHouse container has all named ports specified
-    if chContainer, ok := getClickHouseContainer(statefulSet); ok {
-        ensurePortByName(chContainer, chDefaultTCPPortName, host.TCPPort)
-        ensurePortByName(chContainer, chDefaultHTTPPortName, host.HTTPPort)
-        ensurePortByName(chContainer, chDefaultInterserverHTTPPortName, host.InterserverHTTPPort)
+    chContainer, ok := getClickHouseContainer(statefulSet)
+    if !ok {
+        return
     }
+    ensurePortByName(chContainer, chDefaultTCPPortName, host.TCPPort)
+    ensurePortByName(chContainer, chDefaultHTTPPortName, host.HTTPPort)
+    ensurePortByName(chContainer, chDefaultInterserverHTTPPortName, host.InterserverHTTPPort)
 }
 
 func ensurePortByName(container *corev1.Container, name string, port int32) {
+    // Find port with specified name
     for i := range container.Ports {
         containerPort := &container.Ports[i]
         if containerPort.Name == name {
@@ -606,7 +609,7 @@ func ensurePortByName(container *corev1.Container, name string, port int32) {
         }
     }
 
-    // port with specified name not found. Need to append
+    // Port with specified name not found. Need to append
     container.Ports = append(container.Ports, corev1.ContainerPort{
         Name:          name,
         ContainerPort: port,
diff --git a/pkg/model/serializer.go b/pkg/model/fingerprint.go
similarity index 100%
rename from pkg/model/serializer.go
rename to pkg/model/fingerprint.go
diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go
index 2a686ef60..bf31d6090 100644
--- a/pkg/model/labeler.go
+++ b/pkg/model/labeler.go
@@ -29,7 +29,7 @@ const (
     // Kubernetes labels
     LabelAppName     = clickhousealtinitycom.GroupName + "/app"
     LabelAppValue    = "chop"
-    LabelChop        = clickhousealtinitycom.GroupName + "/chop"
+    LabelCHOP        = clickhousealtinitycom.GroupName + "/chop"
     LabelNamespace   = clickhousealtinitycom.GroupName + "/namespace"
     LabelCHIName     = clickhousealtinitycom.GroupName + "/chi"
     LabelClusterName = clickhousealtinitycom.GroupName + "/cluster"
@@ -124,7 +124,7 @@ func (l *Labeler) getLabelsCHIScope() map[string]string {
     return l.appendCHILabels(map[string]string{
         LabelNamespace: l.namer.getNamePartNamespace(l.chi),
         LabelAppName:   LabelAppValue,
-        LabelChop:      l.chop.Version,
+        LabelCHOP:      l.chop.Version,
         LabelCHIName:   l.namer.getNamePartCHIName(l.chi),
     })
 }
@@ -145,7 +145,7 @@ func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]stri
     return l.appendCHILabels(map[string]string{
         LabelNamespace:   l.namer.getNamePartNamespace(cluster),
         LabelAppName:     LabelAppValue,
-        LabelChop:        l.chop.Version,
+        LabelCHOP:        l.chop.Version,
         LabelCHIName:     l.namer.getNamePartCHIName(cluster),
         LabelClusterName: l.namer.getNamePartClusterName(cluster),
     })
@@ -168,7 +168,7 @@ func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string {
     return l.appendCHILabels(map[string]string{
         LabelNamespace:   l.namer.getNamePartNamespace(shard),
         LabelAppName:     LabelAppValue,
-        LabelChop:        l.chop.Version,
+        LabelCHOP:        l.chop.Version,
         LabelCHIName:     l.namer.getNamePartCHIName(shard),
         LabelClusterName: l.namer.getNamePartClusterName(shard),
         LabelShardName:   l.namer.getNamePartShardName(shard),
@@ -193,7 +193,7 @@ func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServic
     labels := map[string]string{
         LabelNamespace:   l.namer.getNamePartNamespace(host),
         LabelAppName:     LabelAppValue,
-        LabelChop:        l.chop.Version,
+        LabelCHOP:        l.chop.Version,
         LabelCHIName:     l.namer.getNamePartCHIName(host),
         LabelClusterName: l.namer.getNamePartClusterName(host),
         LabelShardName:   l.namer.getNamePartShardName(host),
@@ -210,8 +210,10 @@ func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServic
         LabelClusterScopeCycleOffset: l.namer.getNamePartClusterScopeCycleOffset(host),
     }
     if applySupplementaryServiceLabels {
+        // TODO
+        // When we'll have Cluster Discovery functionality we can refactor this properly
         labels[LabelZookeeperConfigVersion] = host.Config.ZookeeperFingerprint
-        labels[LabelSettingsConfigVersion] = host.Config.SettingsFingerprint
+        labels[LabelSettingsConfigVersion] = fingerprint(host.Config.SettingsFingerprint + host.Config.FilesFingerprint)
     }
     return l.appendCHILabels(labels)
 }
@@ -395,22 +397,22 @@ func GetSelectorFromObjectMeta(objMeta *meta.ObjectMeta) (kublabels.Selector, er
     }
 }
 
-// IsChopGeneratedObject check whether object is generated by an operator. Check is label-based
-func IsChopGeneratedObject(objectMeta *meta.ObjectMeta) bool {
+// IsCHOPGeneratedObject check whether object is generated by an operator. Check is label-based
+func IsCHOPGeneratedObject(objectMeta *meta.ObjectMeta) bool {
     // ObjectMeta must have some labels
     if len(objectMeta.Labels) == 0 {
         return false
     }
 
-    // ObjectMeta must have LabelChop
-    _, ok := objectMeta.Labels[LabelChop]
+    // ObjectMeta must have LabelCHOP
+    _, ok := objectMeta.Labels[LabelCHOP]
 
     return ok
 }
 
-// GetChiNameFromObjectMeta extracts CHI name from ObjectMeta by labels
-func GetChiNameFromObjectMeta(meta *meta.ObjectMeta) (string, error) {
+// GetCHINameFromObjectMeta extracts CHI name from ObjectMeta by labels
+func GetCHINameFromObjectMeta(meta *meta.ObjectMeta) (string, error) {
     // ObjectMeta must have LabelCHIName: chi.Name label
     name, ok := meta.Labels[LabelCHIName]
     if ok {
diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go
index a11998890..2f513c76e 100644
--- a/pkg/model/normalizer.go
+++ b/pkg/model/normalizer.go
@@ -1078,6 +1078,7 @@ func (n *Normalizer) ensureCluster() {
 func (n *Normalizer) calcFingerprints(host *chiv1.ChiHost) error {
     host.Config.ZookeeperFingerprint = fingerprint(*host.GetZookeeper())
     host.Config.SettingsFingerprint = fingerprint(castToSliceOfStrings(n.chi.Spec.Configuration.Settings))
+    host.Config.FilesFingerprint = fingerprint(castToSliceOfStrings(n.chi.Spec.Configuration.Files))
 
     return nil
 }
diff --git a/pkg/model/schemer.go b/pkg/model/schemer.go
index f60c4222d..fd81a5803 100644
--- a/pkg/model/schemer.go
+++ b/pkg/model/schemer.go
@@ -54,20 +54,26 @@ func (s *Schemer) getCHConnection(hostname string) *clickhouse.CHConnection {
     return clickhouse.GetPooledDBConnection(clickhouse.NewCHConnectionParams(hostname, s.Username, s.Password, s.Port))
 }
 
-func (s *Schemer) getObjectListFromClickHouse(serviceUrl string, sql string) ([]string, []string, error) {
+func (s *Schemer) getObjectListFromClickHouse(services []string, sql string) ([]string, []string, error) {
     // Results
     names := make([]string, 0)
     sqlStatements := make([]string, 0)
 
-    log.V(1).Info(serviceUrl)
-    conn := s.getCHConnection(serviceUrl)
     var rows *sqlmodule.Rows = nil
     var err error
-    rows, err = conn.Query(sql)
+    for _, service := range services {
+        log.V(1).Infof("Trying %s", service)
+        conn := s.getCHConnection(service)
+
+        rows, err = conn.Query(sql)
+        if err == nil {
+            defer rows.Close()
+            break
+        }
+    }
     if err != nil {
         return nil, nil, err
     }
-    defer rows.Close()
 
     // Some data fetched
     for rows.Next() {
@@ -85,56 +91,52 @@ func (s *Schemer) getObjectListFromClickHouse(
 // clusterGetCreateDistributedObjects returns a list of objects that needs to be created on a shard in a cluster
 // That includes all Distributed tables, corresponding local tables, and databases, if necessary
 func (s *Schemer) clusterGetCreateDistributedObjects(cluster *chop.ChiCluster) ([]string, []string, error) {
-    // system_tables := fmt.Sprintf("cluster('%s', system, tables)", cluster.Name)
+    // cluster_tables := fmt.Sprintf("cluster('%s', system, tables)", cluster.Name)
     hosts := CreatePodFQDNsOfCluster(cluster)
-    system_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(hosts, ","))
+    cluster_tables := fmt.Sprintf("remote('%s', system, tables)", strings.Join(hosts, ","))
 
     sql := heredoc.Doc(strings.ReplaceAll(`
     SELECT DISTINCT
         database AS name,
-        concat('CREATE DATABASE IF NOT EXISTS ', name) AS create_db_query
+        concat('CREATE DATABASE IF NOT EXISTS ', name) AS create_query
     FROM (
-        SELECT DISTINCT database
-        FROM system.tables
-        WHERE engine = 'Distributed'
-        SETTINGS skip_unavailable_shards = 1
-        UNION ALL
-        SELECT DISTINCT extract(engine_full, 'Distributed\\([^,]+, *\'?([^,\']+)\'?, *[^,]+') AS shard_database
-        FROM system.tables
+        SELECT DISTINCT arrayJoin([database, extract(engine_full, 'Distributed\\([^,]+, *\'?([^,\']+)\'?, *[^,]+')]) database
+        FROM cluster('all-sharded', system.tables) tables
         WHERE engine = 'Distributed'
         SETTINGS skip_unavailable_shards = 1
-    )
+    ) databases
     UNION ALL
     SELECT DISTINCT
-        name,
+        concat(database,'.', name) as name,
         replaceRegexpOne(create_table_query, 'CREATE (TABLE|VIEW|MATERIALIZED VIEW)', 'CREATE \\1 IF NOT EXISTS')
     FROM (
         SELECT
-            database,
-            name,
+            database, name,
+            create_table_query,
             2 AS order
-        FROM system.tables
+        FROM cluster('all-sharded', system.tables) tables
         WHERE engine = 'Distributed'
         SETTINGS skip_unavailable_shards = 1
         UNION ALL
         SELECT
-            extract(engine_full, 'Distributed\\([^,]+, *\'?([^,\']+)\'?, *[^,]+') AS shard_database,
-            extract(engine_full, 'Distributed\\([^,]+, [^,]+, *\'?([^,\\\')]+)') AS shard_table,
+            extract(engine_full, 'Distributed\\([^,]+, *\'?([^,\']+)\'?, *[^,]+') AS database,
+            extract(engine_full, 'Distributed\\([^,]+, [^,]+, *\'?([^,\\\')]+)') AS name,
+            t.create_table_query,
             1 AS order
-        FROM system.tables
-        WHERE engine = 'Distributed'
+        FROM cluster('all-sharded', system.tables) tables
+        LEFT JOIN system.tables t USING (database, name)
+        WHERE engine = 'Distributed' AND t.create_table_query != ''
         SETTINGS skip_unavailable_shards = 1
-    )
-    LEFT JOIN (select distinct database, name, create_table_query from system.tables SETTINGS skip_unavailable_shards = 1) USING (database, name)
+    ) tables
     ORDER BY order
     `,
-        "system.tables",
-        system_tables,
+        "cluster('all-sharded', system.tables)",
+        cluster_tables,
     ))
 
-    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreateChiServiceFQDN(cluster.GetCHI()), sql)
+    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfChi(cluster.GetCHI()), sql)
     return names, sqlStatements, nil
 }
@@ -176,7 +178,7 @@ func (s *Schemer) getCreateReplicatedObjects(host *chop.ChiHost) ([]string, []st
         system_tables,
     ))
 
-    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreateChiServiceFQDN(host.GetCHI()), sql)
+    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfChi(host.GetCHI()), sql)
     return names, sqlStatements, nil
 }
@@ -195,7 +197,7 @@ func (s *Schemer) clusterGetCreateDatabases(cluster *chop.ChiCluster) ([]string,
         ignoredDBs,
     )
 
-    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreateChiServiceFQDN(cluster.GetCHI()), sql)
+    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfChi(cluster.GetCHI()), sql)
     return names, sqlStatements, nil
 }
@@ -215,7 +217,7 @@ func (s *Schemer) clusterGetCreateTables(cluster *chop.ChiCluster) ([]string, []
         ignoredDBs,
     )
 
-    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreateChiServiceFQDN(cluster.GetCHI()), sql)
+    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreatePodFQDNsOfChi(cluster.GetCHI()), sql)
     return names, sqlStatements, nil
 }
@@ -234,7 +236,7 @@ func (s *Schemer) hostGetDropTables(host *chop.ChiHost) ([]string, []string, err
         ignoredDBs,
     )
 
-    names, sqlStatements, _ := s.getObjectListFromClickHouse(CreatePodFQDN(host), sql)
+    names, sqlStatements, _ := s.getObjectListFromClickHouse([]string{CreatePodFQDN(host)}, sql)
     return names, sqlStatements, nil
 }
@@ -249,11 +251,11 @@ func (s *Schemer) HostDeleteTables(host *chop.ChiHost) error {
 func (s *Schemer) HostCreateTables(host *chop.ChiHost) error {
     names, createSQLs, _ := s.getCreateReplicatedObjects(host)
-    log.V(1).Infof("Creating replicated objects: %v", names)
+    log.V(1).Infof("Creating replicated objects %v at %s", names, host.Address.HostName)
     _ = s.hostApplySQLs(host, createSQLs, true)
 
     names, createSQLs, _ = s.clusterGetCreateDistributedObjects(host.GetCluster())
-    log.V(1).Infof("Creating distributed objects: %v", names)
+    log.V(1).Infof("Creating distributed objects %v at %s", names, host.Address.HostName)
     return s.hostApplySQLs(host, createSQLs, true)
 }
@@ -261,7 +263,7 @@ func (s *Schemer) HostCreateTables(host *chop.ChiHost) error {
 func (s *Schemer) ShardCreateTables(shard *chop.ChiShard) error {
     names, createSQLs, _ := s.clusterGetCreateDistributedObjects(shard.GetCluster())
 
-    log.V(1).Infof("Creating distributed objects: %v", names)
+    log.V(1).Infof("Creating distributed objects %v at %s", names, shard.Name)
     return s.shardApplySQLs(shard, createSQLs, true)
 }
diff --git a/pkg/util/fs.go b/pkg/util/fs.go
index c5a274c48..89a4e7026 100644
--- a/pkg/util/fs.go
+++ b/pkg/util/fs.go
@@ -55,7 +55,7 @@ func ReadFilesIntoMap(path string, isOurFile func(string) bool) map[string]strin
         file := matches[i]
         if isOurFile(file) {
             // Pick our files only
-            log.Infof("Reading file %s\n", file)
+            log.V(2).Infof("Reading file %s\n", file)
             if content, err := ioutil.ReadFile(file); (err == nil) && (len(content) > 0) {
                 // File content read successfully and file has some content
                 if files == nil {
diff --git a/pkg/util/retry.go b/pkg/util/retry.go
index 143b3114c..438605977 100644
--- a/pkg/util/retry.go
+++ b/pkg/util/retry.go
@@ -30,7 +30,7 @@ func Retry(tries int, desc string, f func() error) error {
             // All ok, no need to retry more
             if try > 1 {
                 // Done, but after some retries, this is not 'clean'
-                log.V(1).Infof("DONE attempt %d of %d: %s", try, tries, desc)
+                log.V(2).Infof("DONE attempt %d of %d: %s", try, tries, desc)
             }
             return nil
         }
diff --git a/release b/release
index 85b7c695b..c81aa44af 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.9.6
+0.9.7
diff --git a/tests/templates/tpl-clickhouse-20.1.yaml b/tests/templates/tpl-clickhouse-20.1.yaml
index a9a03bbdb..86bc4b143 100644
--- a/tests/templates/tpl-clickhouse-20.1.yaml
+++ b/tests/templates/tpl-clickhouse-20.1.yaml
@@ -13,4 +13,4 @@ spec:
         spec:
           containers:
             - name: clickhouse
-              image: yandex/clickhouse-server:20.1.5.26
+              image: yandex/clickhouse-server:20.1
diff --git a/tests/templates/tpl-clickhouse-20.3.yaml b/tests/templates/tpl-clickhouse-20.3.yaml
new file mode 100644
index 000000000..d151a9619
--- /dev/null
+++ b/tests/templates/tpl-clickhouse-20.3.yaml
@@ -0,0 +1,16 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallationTemplate"
+
+metadata:
+  name: clickhouse-version
+spec:
+  defaults:
+    templates:
+      podTemplate: default
+  templates:
+    podTemplates:
+      - name: default
+        spec:
+          containers:
+            - name: clickhouse
+              image: yandex/clickhouse-server:20.3
diff --git a/tests/test_operator.py b/tests/test_operator.py
index afc1fff28..2086d5d0f 100644
--- a/tests/test_operator.py
+++ b/tests/test_operator.py
@@ -277,14 +277,18 @@ def test_013():
                          "CREATE TABLE test_local Engine = Log as select * from system.one")
         clickhouse_query("test-013-add-shards",
                          "CREATE TABLE test_distr as test_local Engine = Distributed('default', default, test_local)")
+        clickhouse_query("test-013-add-shards",
+                         "CREATE TABLE events_distr as system.events ENGINE = Distributed('all-sharded', system, events)")
 
     with Then("Add one more shard"):
         create_and_check("configs/test-013-add-shards-2.yaml", {"object_counts": [2, 2, 3], "do_not_delete": 1})
 
     with And("Table should be created on a second shard"):
-        out = clickhouse_query("test-013-add-shards", "select count() from default.test_distr",
+        clickhouse_query("test-013-add-shards", "select count() from default.test_distr",
                                host="chi-test-013-add-shards-default-1-0")
-        assert out == "1"
-
+
+        clickhouse_query("test-013-add-shards", "select count() from default.events_distr",
+                         host="chi-test-013-add-shards-default-1-0")
+
     with Then("Remove shard"):
         create_and_check("configs/test-013-add-shards-1.yaml", {"object_counts": [1,1,2]})
@@ -324,6 +328,13 @@ def test_014():
                               host="chi-test-014-replication-default-0-2")
         assert out == "1"
 
+    with When("Remove replica"):
+        create_and_check("configs/test-014-replication.yaml", {"pod_count": 1, "do_not_delete": 1})
+
+    with Then("Replica needs to be removed from the Zookeeper as well"):
+        out = clickhouse_query("test-014-replication", "select count() from system.replicas where table='t'")
+        assert out == "1"
+
     kube_delete_chi("test-014-replication")
 
 @TestScenario