Velero backup sync: Disable downloading nodes content from objectstore (
sancyx authored May 25, 2021
1 parent 79f59a2 commit bbce90d
Showing 3 changed files with 11 additions and 52 deletions.
29 changes: 9 additions & 20 deletions internal/ark/backups_model.go
@@ -31,17 +31,20 @@ import (
 type ClusterBackupsModel struct {
     ID uint `gorm:"primary_key"`

-    UID            string
-    Name           string
-    Cloud          string
-    Distribution   string
-    NodeCount      uint
+    UID          string
+    Name         string
+    Cloud        string
+    Distribution string
+    //deprecated: nodes are no longer synced from object storage
+    NodeCount uint
+    //deprecated: nodes are no longer synced from object storage
     ContentChecked bool
     StartedAt      *time.Time
     CompletedAt    *time.Time
     ExpireAt       *time.Time

     State []byte `sql:"type:json"`
+    //deprecated: nodes are no longer synced from object storage
     Nodes []byte `sql:"type:json"`

     Status string
@@ -120,20 +123,13 @@ func (backup *ClusterBackupsModel) GetStateObject() *arkAPI.Backup {
 // SetValuesFromRequest sets values from PersistBackupRequest to the model
 func (backup *ClusterBackupsModel) SetValuesFromRequest(db *gorm.DB, req *api.PersistBackupRequest) error {
     var err error
-    var stateJSON, nodesJSON []byte
+    var stateJSON []byte

     stateJSON, err = json.Marshal(req.Backup)
     if err != nil {
         return errors.WrapIf(err, "error converting backup to json")
     }

-    if req.Nodes != nil {
-        nodesJSON, err = json.Marshal(req.Nodes)
-        if err != nil {
-            return errors.WrapIf(err, "error converting nodes to json")
-        }
-    }
-
     backup.State = stateJSON
     // do not overwrite "Deleting" status with phase
     if backup.Status != "Deleting" {
@@ -151,13 +147,6 @@ func (backup *ClusterBackupsModel) SetValuesFromRequest(db *gorm.DB, req *api.Pe
         backup.ClusterID = req.ClusterID
     }

-    // only update available node information once
-    if backup.ContentChecked != true && req.Nodes != nil {
-        backup.NodeCount = req.NodeCount
-        backup.Nodes = nodesJSON
-        backup.ContentChecked = req.ContentChecked
-    }
-
     if !req.Backup.Status.StartTimestamp.IsZero() {
         backup.StartedAt = &req.Backup.Status.StartTimestamp.Time
     }
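To see what the persist path keeps after this commit, here is a minimal, self-contained Go sketch. The names veleroBackupState, clusterBackupRecord and setValuesFromRequest are simplified stand-ins for illustration only, not the repository's actual types; the real model keeps its Nodes, NodeCount and ContentChecked columns, but the sync no longer fills them.

package main

import (
    "encoding/json"
    "fmt"
)

// veleroBackupState stands in for the Velero Backup object carried in the persist request.
type veleroBackupState struct {
    Name  string `json:"name"`
    Phase string `json:"phase"`
}

// clusterBackupRecord mirrors the relevant columns of ClusterBackupsModel.
// Nodes, NodeCount and ContentChecked stay in the schema but are no longer written by sync.
type clusterBackupRecord struct {
    State          []byte
    Status         string
    Nodes          []byte
    NodeCount      uint
    ContentChecked bool
}

// setValuesFromRequest keeps only the backup-state handling: marshal the backup,
// store it as JSON, and update the status unless a deletion is in progress.
func (r *clusterBackupRecord) setValuesFromRequest(b veleroBackupState) error {
    stateJSON, err := json.Marshal(b)
    if err != nil {
        return fmt.Errorf("error converting backup to json: %w", err)
    }
    r.State = stateJSON
    // do not overwrite "Deleting" status with the phase reported by Velero
    if r.Status != "Deleting" {
        r.Status = b.Phase
    }
    return nil
}

func main() {
    var rec clusterBackupRecord
    if err := rec.setValuesFromRequest(veleroBackupState{Name: "nightly", Phase: "Completed"}); err != nil {
        panic(err)
    }
    fmt.Printf("status=%s state=%s nodes=%v\n", rec.Status, rec.State, rec.Nodes)
}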
17 changes: 1 addition & 16 deletions internal/ark/sync/backups_sync_svc.go
@@ -20,7 +20,6 @@ import (
"emperror.dev/errors"
"github.com/jinzhu/gorm"
"github.com/sirupsen/logrus"
arkAPI "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

"github.com/banzaicloud/pipeline/internal/ark"
"github.com/banzaicloud/pipeline/internal/ark/api"
@@ -130,7 +129,7 @@ func (s *BackupsSyncService) SyncBackupsForCluster(cluster api.Cluster) error {
             Cloud: bucket.ClusterCloud,
         }

-        persitedBackup, err := s.backupsSvc.FindByPersistRequest(req)
+        _, err := s.backupsSvc.FindByPersistRequest(req)
         if err == gorm.ErrRecordNotFound {
             err = nil
         }
@@ -140,20 +139,6 @@ func (s *BackupsSyncService) SyncBackupsForCluster(cluster api.Cluster) error {
             continue
         }

-        if persitedBackup != nil && persitedBackup.ContentChecked != true &&
-            (backup.Status.Phase == arkAPI.BackupPhaseCompleted || backup.Status.Phase == arkAPI.BackupPhasePartiallyFailed) {
-            nodes, err := s.bucketsSvc.GetNodesFromBackupContents(bucket, backup.Name)
-            if err != nil {
-                log.Warning(err.Error())
-                err = nil
-                continue
-            }
-            req.ContentChecked = true
-            req.Nodes = &nodes
-            req.NodeCount = uint(len(nodes.Items))
-            log.WithField("count", req.NodeCount).Debug("node count found")
-        }
-
         syncedBackup, err := s.backupsSvc.Persist(req)
         if err != nil {
             return errors.WrapIf(err, "could not persist backup")
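The loop that remains after the removal can be sketched as follows. This is a simplified mirror under stated assumptions, not the repository's code: backupsService, persistRequest and errRecordNotFound are hypothetical stand-ins (the real code compares against gorm.ErrRecordNotFound and passes an api.PersistBackupRequest), and the in-memory service exists only to make the sketch runnable.

package main

import (
    "errors"
    "fmt"
)

// errRecordNotFound stands in for gorm.ErrRecordNotFound so the sketch has no external dependencies.
var errRecordNotFound = errors.New("record not found")

// persistRequest carries only the fields this sketch needs.
type persistRequest struct {
    ClusterID uint
    Name      string
}

// backupsService is a hypothetical stand-in for the repository's backups service.
type backupsService interface {
    FindByPersistRequest(persistRequest) (interface{}, error)
    Persist(persistRequest) error
}

// syncBackups mirrors the shape of the loop after the change: look up the stored
// record only to tolerate "not found", then persist the backup state as-is,
// without fetching node content from the object store first.
func syncBackups(svc backupsService, clusterID uint, names []string) error {
    for _, name := range names {
        req := persistRequest{ClusterID: clusterID, Name: name}

        _, err := svc.FindByPersistRequest(req)
        if errors.Is(err, errRecordNotFound) {
            err = nil
        }
        if err != nil {
            fmt.Println("warning:", err) // skip this backup, keep syncing the rest
            continue
        }

        if err := svc.Persist(req); err != nil {
            return fmt.Errorf("could not persist backup: %w", err)
        }
    }
    return nil
}

// inMemoryBackups is a toy implementation used only to make the sketch runnable.
type inMemoryBackups struct{ store map[string]persistRequest }

func (s *inMemoryBackups) FindByPersistRequest(req persistRequest) (interface{}, error) {
    r, ok := s.store[req.Name]
    if !ok {
        return nil, errRecordNotFound
    }
    return r, nil
}

func (s *inMemoryBackups) Persist(req persistRequest) error {
    s.store[req.Name] = req
    return nil
}

func main() {
    svc := &inMemoryBackups{store: map[string]persistRequest{}}
    if err := syncBackups(svc, 1, []string{"nightly-1", "nightly-2"}); err != nil {
        panic(err)
    }
    fmt.Println("synced", len(svc.store), "backups")
}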
17 changes: 1 addition & 16 deletions internal/ark/sync/buckets_sync_svc.go
@@ -17,7 +17,6 @@ package sync
 import (
     "github.com/jinzhu/gorm"
     "github.com/sirupsen/logrus"
-    arkAPI "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"

     "github.com/banzaicloud/pipeline/internal/ark"
     "github.com/banzaicloud/pipeline/internal/ark/api"
@@ -101,7 +100,7 @@ func (s *BucketsSyncService) syncBackupsFromBucket(bucket *api.Bucket) (backupID
             Cloud: bucket.ClusterCloud,
         }

-        persitedBackup, err := s.backupsSvc.FindByPersistRequest(req)
+        _, err := s.backupsSvc.FindByPersistRequest(req)
         if err == gorm.ErrRecordNotFound {
             err = nil
         }
@@ -111,20 +110,6 @@ func (s *BucketsSyncService) syncBackupsFromBucket(bucket *api.Bucket) (backupID
             continue
         }

-        if persitedBackup != nil && persitedBackup.ContentChecked != true &&
-            (backup.Status.Phase == arkAPI.BackupPhaseCompleted || backup.Status.Phase == arkAPI.BackupPhasePartiallyFailed) {
-            nodes, err := s.bucketsSvc.GetNodesFromBackupContents(bucket, backup.Name)
-            if err != nil {
-                log.Warning(err.Error())
-                err = nil
-                continue
-            }
-            req.ContentChecked = true
-            req.Nodes = &nodes
-            req.NodeCount = uint(len(nodes.Items))
-            log.WithField("count", req.NodeCount).Debug("node count found")
-        }
-
         syncedBackup, err := s.backupsSvc.Persist(req)
         if err != nil {
             return backupIDS, err
