Commit a08d167

align sync and update logs (#2738)
1 parent cc9074c commit a08d167

11 files changed (+58 -55 lines)
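The change is mechanical but consistent across the operator: logrus calls like Debugf and Debugln that carry no format arguments become plain Debug, and messages that record actual state changes (marking pods for rolling update, creating secrets, performing a rolling update) are promoted from debug to info level. Below is a minimal sketch of the resulting convention; the logger field and values are hypothetical, not taken from the commit:

package main

import (
    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetLevel(log.DebugLevel)
    // Hypothetical cluster-scoped logger, mirroring the operator's c.logger.
    logger := log.WithField("cluster", "acid-minimal-cluster")

    // Static message with no arguments: Debug, not Debugf or Debugln.
    logger.Debug("syncing secrets")

    // Message with arguments: the f-variant stays.
    logger.Debugf("deleting %s service", "master")

    // Real state changes are logged at info level.
    logger.Infof("%d / %d pod(s) still need to be rotated", 2, 3)
    logger.Info("performing rolling update")
}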

pkg/cluster/cluster.go

Lines changed: 5 additions & 5 deletions

@@ -1014,7 +1014,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
     initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser
     if initUsers {
-        c.logger.Debugf("initialize users")
+        c.logger.Debug("initialize users")
         if err := c.initUsers(); err != nil {
             c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)
             userInitFailed = true
@@ -1023,7 +1023,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
         }
     }
     if initUsers || annotationsChanged {
-        c.logger.Debugf("syncing secrets")
+        c.logger.Debug("syncing secrets")
         //TODO: mind the secrets of the deleted/new users
         if err := c.syncSecrets(); err != nil {
             c.logger.Errorf("could not sync secrets: %v", err)
@@ -1065,7 +1065,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
     // create if it did not exist
     if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup {
-        c.logger.Debugf("creating backup cron job")
+        c.logger.Debug("creating backup cron job")
         if err := c.createLogicalBackupJob(); err != nil {
             c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err)
             updateFailed = true
@@ -1075,7 +1075,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
     // delete if no longer needed
     if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup {
-        c.logger.Debugf("deleting backup cron job")
+        c.logger.Debug("deleting backup cron job")
         if err := c.deleteLogicalBackupJob(); err != nil {
             c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err)
             updateFailed = true
@@ -1095,7 +1095,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
     // Roles and Databases
     if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
-        c.logger.Debugf("syncing roles")
+        c.logger.Debug("syncing roles")
         if err := c.syncRoles(); err != nil {
             c.logger.Errorf("could not sync roles: %v", err)
             updateFailed = true

pkg/cluster/connection_pooler.go

Lines changed: 2 additions & 2 deletions

@@ -591,7 +591,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
     // Lack of connection pooler objects is not a fatal error, just log it if
     // it was present before in the manifest
     if c.ConnectionPooler[role] == nil || role == "" {
-        c.logger.Debugf("no connection pooler to delete")
+        c.logger.Debug("no connection pooler to delete")
         return nil
     }
 
@@ -622,7 +622,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
     // Repeat the same for the service object
     service := c.ConnectionPooler[role].Service
     if service == nil {
-        c.logger.Debugf("no connection pooler service object to delete")
+        c.logger.Debug("no connection pooler service object to delete")
     } else {
 
         err = c.KubeClient.

pkg/cluster/database.go

Lines changed: 1 addition & 1 deletion

@@ -111,7 +111,7 @@ func (c *Cluster) pgConnectionString(dbname string) string {
 
 func (c *Cluster) databaseAccessDisabled() bool {
     if !c.OpConfig.EnableDBAccess {
-        c.logger.Debugf("database access is disabled")
+        c.logger.Debug("database access is disabled")
     }
 
     return !c.OpConfig.EnableDBAccess

pkg/cluster/majorversionupgrade.go

Lines changed: 1 addition & 1 deletion

@@ -116,7 +116,7 @@ func (c *Cluster) majorVersionUpgrade() error {
     c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
     upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)
 
-    c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
+    c.logger.Debug("checking if the spilo image runs with root or non-root (check for user id=0)")
     resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
     if errIdCheck != nil {
         c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)

pkg/cluster/pod.go

Lines changed: 5 additions & 5 deletions

@@ -59,7 +59,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
         return nil
     }
 
-    c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg)
+    c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg)
     flag := make(map[string]string)
     flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)
 
@@ -110,7 +110,7 @@ func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
 }
 
 func (c *Cluster) deletePods() error {
-    c.logger.Debugln("deleting pods")
+    c.logger.Debug("deleting pods")
     pods, err := c.listPods()
     if err != nil {
         return err
@@ -127,9 +127,9 @@ func (c *Cluster) deletePods() error {
         }
     }
     if len(pods) > 0 {
-        c.logger.Debugln("pods have been deleted")
+        c.logger.Debug("pods have been deleted")
     } else {
-        c.logger.Debugln("no pods to delete")
+        c.logger.Debug("no pods to delete")
     }
 
     return nil
@@ -230,7 +230,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
         return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
     }
     if !eol {
-        c.logger.Debugf("no action needed: master pod is already on a live node")
+        c.logger.Debug("no action needed: master pod is already on a live node")
         return nil
     }
 

pkg/cluster/resources.go

Lines changed: 9 additions & 8 deletions

@@ -187,7 +187,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
             c.logger.Warningf("could not scale down: %v", err)
         }
     }
-    c.logger.Debugf("updating statefulset")
+    c.logger.Debug("updating statefulset")
 
     patchData, err := specPatch(newStatefulSet.Spec)
     if err != nil {
@@ -218,7 +218,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
     }
 
     statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta)
-    c.logger.Debugf("replacing statefulset")
+    c.logger.Debug("replacing statefulset")
 
     // Delete the current statefulset without deleting the pods
     deletePropagationPolicy := metav1.DeletePropagationOrphan
@@ -232,7 +232,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
     // make sure we clear the stored statefulset status if the subsequent create fails.
     c.Statefulset = nil
     // wait until the statefulset is truly deleted
-    c.logger.Debugf("waiting for the statefulset to be deleted")
+    c.logger.Debug("waiting for the statefulset to be deleted")
 
     err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
         func() (bool, error) {
@@ -266,7 +266,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
 
 func (c *Cluster) deleteStatefulSet() error {
     c.setProcessName("deleting statefulset")
-    c.logger.Debugln("deleting statefulset")
+    c.logger.Debug("deleting statefulset")
     if c.Statefulset == nil {
         c.logger.Debug("there is no statefulset in the cluster")
         return nil
@@ -349,7 +349,8 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
 }
 
 func (c *Cluster) deleteService(role PostgresRole) error {
-    c.logger.Debugf("deleting service %s", role)
+    c.setProcessName("deleting service")
+    c.logger.Debugf("deleting %s service", role)
 
     if c.Services[role] == nil {
         c.logger.Debugf("No service for %s role was found, nothing to delete", role)
@@ -495,7 +496,7 @@ func (c *Cluster) deletePodDisruptionBudget() error {
 
 func (c *Cluster) deleteEndpoint(role PostgresRole) error {
     c.setProcessName("deleting endpoint")
-    c.logger.Debugln("deleting endpoint")
+    c.logger.Debugf("deleting %s endpoint", role)
     if c.Endpoints[role] == nil {
         c.logger.Debugf("there is no %s endpoint in the cluster", role)
         return nil
@@ -543,7 +544,7 @@ func (c *Cluster) deletePatroniResources() error {
 
 func (c *Cluster) deletePatroniConfigMap(suffix string) error {
     c.setProcessName("deleting Patroni config map")
-    c.logger.Debugln("deleting Patroni config map")
+    c.logger.Debugf("deleting %s Patroni config map", suffix)
     cm := c.PatroniConfigMaps[suffix]
     if cm == nil {
         c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix)
@@ -565,7 +566,7 @@ func (c *Cluster) deletePatroniConfigMap(suffix string) error {
 
 func (c *Cluster) deletePatroniEndpoint(suffix string) error {
     c.setProcessName("deleting Patroni endpoint")
-    c.logger.Debugln("deleting Patroni endpoint")
+    c.logger.Debugf("deleting %s Patroni endpoint", suffix)
     ep := c.PatroniEndpoints[suffix]
     if ep == nil {
         c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix)

pkg/cluster/streams.go

Lines changed: 4 additions & 3 deletions

@@ -46,11 +46,13 @@ func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (p
 
 func (c *Cluster) deleteStream(appId string) error {
     c.setProcessName("deleting event stream")
+    c.logger.Debugf("deleting event stream with applicationId %s", appId)
 
     err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{})
     if err != nil {
         return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err)
     }
+    c.logger.Infof("event stream %q with applicationId %s has been successfully deleted", c.Streams[appId].Name, appId)
     delete(c.Streams, appId)
 
     return nil
@@ -308,7 +310,7 @@ func (c *Cluster) syncStreams() error {
 
     _, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
     if k8sutil.ResourceNotFound(err) {
-        c.logger.Debugf("event stream CRD not installed, skipping")
+        c.logger.Debug("event stream CRD not installed, skipping")
         return nil
     }
 
@@ -473,7 +475,7 @@ func (c *Cluster) syncStream(appId string) error {
         c.Streams[appId] = stream
     }
     if match, reason := c.compareStreams(&stream, desiredStreams); !match {
-        c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
+        c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
         desiredStreams.ObjectMeta = stream.ObjectMeta
         updatedStream, err := c.updateStreams(desiredStreams)
         if err != nil {
@@ -550,7 +552,6 @@ func (c *Cluster) cleanupRemovedStreams(appIds []string) error {
             if err != nil {
                 errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err))
             }
-            c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId)
         }
     }
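One behavioral fix hides in this file: previously cleanupRemovedStreams logged the success message even when deleteStream had just returned an error. With the message moved into deleteStream behind the error check, it can only fire once the delete has actually succeeded. A minimal sketch of that pattern, using a hypothetical deleteResource helper rather than the operator's types:

package main

import (
    "errors"
    "fmt"

    log "github.com/sirupsen/logrus"
)

// deleteResource mirrors the shape of the deleteStream fix: the success
// log sits behind the error check, so it never fires for a failed delete.
func deleteResource(name string, doDelete func(string) error) error {
    if err := doDelete(name); err != nil {
        return fmt.Errorf("could not delete %q: %v", name, err)
    }
    log.Infof("%q has been successfully deleted", name)
    return nil
}

func main() {
    failing := func(string) error { return errors.New("connection refused") }
    if err := deleteResource("stream-1", failing); err != nil {
        // Only the error is reported; no misleading success message.
        log.Errorf("%v", err)
    }
}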

pkg/cluster/sync.go

Lines changed: 9 additions & 8 deletions

@@ -300,6 +300,7 @@ func (c *Cluster) syncPatroniService() error {
         err error
     )
     serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni)
+    c.logger.Debugf("syncing %s service", serviceName)
     c.setProcessName("syncing %s service", serviceName)
 
     if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil {
@@ -311,7 +312,7 @@ func (c *Cluster) syncPatroniService() error {
         c.setProcessName("updating %v service", serviceName)
         svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
         if err != nil {
-            return fmt.Errorf("could not update %s endpoint: %v", serviceName, err)
+            return fmt.Errorf("could not update %s service: %v", serviceName, err)
         }
         c.Services[Patroni] = svc
     }
@@ -537,7 +538,7 @@ func (c *Cluster) syncStatefulSet() error {
     if err != nil {
         return fmt.Errorf("could not generate statefulset: %v", err)
     }
-    c.logger.Debugf("syncing statefulsets")
+    c.logger.Debug("syncing statefulsets")
     // check if there are still pods with a rolling update flag
     for _, pod := range pods {
         if c.getRollingUpdateFlagFromPod(&pod) {
@@ -552,7 +553,7 @@ func (c *Cluster) syncStatefulSet() error {
     }
 
     if len(podsToRecreate) > 0 {
-        c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
+        c.logger.Infof("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
     }
 
     // statefulset is already there, make sure we use its definition in order to compare with the spec.
@@ -658,7 +659,7 @@ func (c *Cluster) syncStatefulSet() error {
     // statefulset or those that got their configuration from the outdated statefulset)
     if len(podsToRecreate) > 0 {
         if isSafeToRecreatePods {
-            c.logger.Debugln("performing rolling update")
+            c.logger.Info("performing rolling update")
             c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
             if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil {
                 return fmt.Errorf("could not recreate pods: %v", err)
@@ -971,7 +972,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
     // carries the request to change configuration through
     for _, pod := range pods {
         podName := util.NameFromMeta(pod.ObjectMeta)
-        c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
+        c.logger.Infof("patching Postgres config via Patroni API on pod %s with following options: %s",
             podName, standbyOptionsToSet)
         if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil {
             return nil
@@ -983,7 +984,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
 }
 
 func (c *Cluster) syncSecrets() error {
-    c.logger.Info("syncing secrets")
+    c.logger.Debug("syncing secrets")
     c.setProcessName("syncing secrets")
     generatedSecrets := c.generateUserSecrets()
     retentionUsers := make([]string, 0)
@@ -993,7 +994,7 @@ func (c *Cluster) syncSecrets() error {
         secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
         if err == nil {
             c.Secrets[secret.UID] = secret
-            c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
+            c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
             continue
         }
         if k8sutil.ResourceAlreadyExists(err) {
@@ -1134,7 +1135,7 @@ func (c *Cluster) updateSecret(
     }
 
     if updateSecret {
-        c.logger.Debugln(updateSecretMsg)
+        c.logger.Infof(updateSecretMsg)
         if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
             return fmt.Errorf("could not update secret %s: %v", secretName, err)
         }

pkg/cluster/util.go

Lines changed: 4 additions & 4 deletions

@@ -193,7 +193,7 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) {
     nice := nicediff.Diff(string(o), string(n), true)
     for _, s := range strings.Split(nice, "\n") {
         // " is not needed in the value to understand
-        log.Debugf(strings.ReplaceAll(s, "\"", ""))
+        log.Debug(strings.ReplaceAll(s, "\"", ""))
     }
 }
 
@@ -209,7 +209,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate b
     logNiceDiff(c.logger, old.Spec, new.Spec)
 
     if !reflect.DeepEqual(old.Annotations, new.Annotations) {
-        c.logger.Debugf("metadata.annotation are different")
+        c.logger.Debug("metadata.annotation are different")
         logNiceDiff(c.logger, old.Annotations, new.Annotations)
     }
 
@@ -280,7 +280,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
     }
 
     if !c.OpConfig.EnableTeamsAPI {
-        c.logger.Debugf("team API is disabled")
+        c.logger.Debug("team API is disabled")
         return members, nil
     }
 
@@ -416,7 +416,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
         podsNumber = len(pods.Items)
         c.logger.Debugf("Waiting for %d pods to become ready", podsNumber)
     } else {
-        c.logger.Debugf("Waiting for any replica pod to become ready")
+        c.logger.Debug("Waiting for any replica pod to become ready")
     }
 
     err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
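The logNiceDiff change is more than cosmetic: Debugf runs its first argument through fmt.Sprintf, so a diff line containing a literal percent sign gets mangled by fmt's missing-verb handling, while Debug prints the string verbatim. A short illustration (the string is made up):

package main

import (
    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetLevel(log.DebugLevel)

    s := "resources.requests.cpu: 100m -> 90%" // diff line with a literal %

    log.Debugf(s) // interpreted as a format string: "... 90%!(NOVERB)"
    log.Debug(s)  // printed verbatim: "... 90%"
}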
