Skip to content

Commit

Permalink
fix suffix stuff
Browse files Browse the repository at this point in the history
  • Loading branch information
psy0rz committed Oct 6, 2024
1 parent d283da8 commit 0ec493d
Show file tree
Hide file tree
Showing 2 changed files with 33 additions and 27 deletions.
8 changes: 4 additions & 4 deletions zfs_autobackup/ZfsAutobackup.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,9 +357,9 @@ def check_target_names(self, source_node, source_datasets, target_node):
target_datasets[target_name] = source_dataset

# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node, bookmark_name):
def sync_datasets(self, source_node, source_datasets, target_node, bookmark_tag):
"""Sync datasets, or thin-only on both sides
:type bookmark_name: str
:type bookmark_tag: str
:type target_node: ZfsNode
:type source_datasets: list of ZfsDataset
:type source_node: ZfsNode
Expand Down Expand Up @@ -421,7 +421,7 @@ def sync_datasets(self, source_node, source_datasets, target_node, bookmark_name
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed, force=self.args.force,
guid_check=not self.args.no_guid_check, use_bookmarks=use_bookmarks,
bookmark_name=bookmark_name)
bookmark_tag=bookmark_tag)
except Exception as e:

fail_count = fail_count + 1
Expand Down Expand Up @@ -559,7 +559,7 @@ def run(self):
source_node=source_node,
source_datasets=source_datasets,
target_node=target_node,
bookmark_name=self.args.backup_name + self.tag_seperator + target_dataset.properties['guid'])
bookmark_tag=target_dataset.properties['guid'])

# no target specified, run in snapshot-only mode
else:
Expand Down
52 changes: 29 additions & 23 deletions zfs_autobackup/ZfsDataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -539,7 +539,7 @@ def find_snapshot_in_list(self, snapshots):
"""

for snapshot in snapshots:
if snapshot.suffix == self.suffix:
if snapshot.tagless_suffix == self.tagless_suffix:
return snapshot

return None
Expand All @@ -557,23 +557,24 @@ def find_snapshot(self, snapshot):
return None

if not isinstance(snapshot, ZfsDataset):
suffix = snapshot
tagless_suffix = snapshot
else:
suffix = snapshot.suffix
tagless_suffix = snapshot.tagless_suffix

for snapshot in self.snapshots:
if snapshot.suffix == suffix:
if snapshot.tagless_suffix == tagless_suffix:
return snapshot

return None

def find_bookmark(self, bookmark):
def find_bookmark(self, bookmark, ignore_tag):
"""find bookmark by bookmark name (can be a suffix or a different
ZfsDataset) Returns None if it cant find it.
Args:
:rtype: ZfsDataset|None
:type bookmark: str|ZfsDataset|None
:type ignore_tag: bool
"""

if bookmark is None:
Expand All @@ -582,10 +583,13 @@ def find_bookmark(self, bookmark):
if not isinstance(bookmark, ZfsDataset):
suffix = bookmark
else:
suffix = bookmark.suffix
if ignore_tag:
suffix = bookmark.tagless_suffix
else:
suffix = bookmark.suffix

for bookmark in self.bookmarks:
if bookmark.suffix == suffix:
if (ignore_tag and bookmark.tagless_suffix == suffix) or (not ignore_tag and bookmark.suffix == suffix):
return bookmark

return None
Expand All @@ -601,11 +605,11 @@ def find_snapshot_index(self, snapshot):
if not isinstance(snapshot, ZfsDataset):
snapshot_name = snapshot
else:
snapshot_name = snapshot.suffix
snapshot_name = snapshot.tagless_suffix

index = 0
for snapshot in self.snapshots:
if snapshot.suffix == snapshot_name:
if snapshot.tagless_suffix == snapshot_name:
return index
index = index + 1

Expand Down Expand Up @@ -655,13 +659,14 @@ def bookmark(self, tag):

self.debug("Bookmarking")

bookmark_name = "#" + self.tagless_suffix + self.zfs_node.tag_seperator + tag
cmd = [
"zfs", "bookmark", self.name, "#" + self.tagless_suffix + self.zfs_node.tag_seperator + tag
"zfs", "bookmark", self.name, bookmark_name
]

self.zfs_node.run(cmd=cmd)

bookmark = self.zfs_node.get_dataset(self.name + '#' + self.suffix, force_exists=True)
bookmark = self.zfs_node.get_dataset(bookmark_name, force_exists=True)
self.cache_snapshot_bookmark(bookmark)
return bookmark

Expand Down Expand Up @@ -1008,7 +1013,7 @@ def thin(self, skip_holds=False):
obsolete.destroy()
self.snapshots.remove(obsolete)

def find_common_snapshot(self, target_dataset, guid_check, bookmark_name):
def find_common_snapshot(self, target_dataset, guid_check, bookmark_tag):
"""find latest common snapshot/bookmark between us and target returns None if its
an initial transfer.
Expand All @@ -1019,10 +1024,10 @@ def find_common_snapshot(self, target_dataset, guid_check, bookmark_name):
:rtype: ZfsDataset|None
:type guid_check: bool
:type target_dataset: ZfsDataset
:type preferred_bookmark: str
:type bookmark_tag: str
"""

bookmark = self.zfs_node.get_dataset(bookmark_name)
bookmark = self.zfs_node.get_dataset(bookmark_tag)

if not target_dataset.exists or not target_dataset.snapshots:
# target has nothing yet
Expand All @@ -1038,7 +1043,8 @@ def find_common_snapshot(self, target_dataset, guid_check, bookmark_name):
# else:
# source_bookmark.debug("Common bookmark")
# return source_bookmark
if bookmark.exists and bookmark.properties['guid'] == target_snapshot.properties['guid']:XXX wil eigenlijk guid check opineel houden .dus bookmark name word snapshotname_targetdatasetguid
if bookmark.exists and bookmark.properties['guid'] == target_snapshot.properties['guid']:
# FIXME: actually want to keep the guid check optional, so the bookmark name becomes snapshotname_targetdatasetguid
return bookmark

# Source snapshot with same suffix?
Expand Down Expand Up @@ -1120,7 +1126,7 @@ def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, t
for target_snapshot in target_dataset.snapshots:
if (target_snapshot in target_obsoletes) \
and (not source_common_snapshot or (
target_snapshot.suffix != source_common_snapshot.suffix)):
target_snapshot.tagless_suffix != source_common_snapshot.tagless_suffix)):
if target_snapshot.exists:
target_snapshot.destroy()

Expand All @@ -1146,7 +1152,7 @@ def _validate_resume_token(self, target_dataset, start_snapshot):
else:
return resume_token

def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, bookmark_name):
def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, bookmark_tag):
"""Determine at what snapshot to start syncing to target_dataset and what to sync and what to keep.
Args:
Expand All @@ -1155,7 +1161,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, book
:type also_other_snapshots: bool
:type guid_check: bool
:type raw: bool
:type bookmark_name: str
:type bookmark_tag: str
Returns:
tuple: A tuple containing:
Expand All @@ -1172,7 +1178,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw, book

target_dataset.debug("Determining start snapshot")
source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check,
bookmark_name=bookmark_name)
bookmark_tag=bookmark_tag)
incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw)

# let thinner decide what's obsolete on source after the transfer is done
Expand Down Expand Up @@ -1251,7 +1257,7 @@ def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_i
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check,
use_bookmarks, bookmark_name):
use_bookmarks, bookmark_tag):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
Expand All @@ -1271,7 +1277,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
:type no_send: bool
:type guid_check: bool
:type use_bookmarks: bool
:type bookmark_name: str
:type bookmark_tag: str
"""

# self.verbose("-> {}".format(target_dataset))
Expand All @@ -1294,7 +1300,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
(source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw, bookmark_name=bookmark_name)
guid_check=guid_check, raw=raw, bookmark_tag=bookmark_tag)

# NOTE: we do a pre-clean because we don't want filesystems to fill up when backups keep failing.
# Also useful with no_send to still clean up stuff.
Expand Down Expand Up @@ -1360,7 +1366,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert

# bookmark common snapshot on source, or use holds if bookmarks are not enabled.
if use_bookmarks:
source_bookmark = source_snapshot.bookmark(bookmark_name)
source_bookmark = source_snapshot.bookmark(bookmark_tag)
# note: destroy source_snapshot when obsolete at this point?
else:
source_bookmark = None
Expand Down

0 comments on commit 0ec493d

Please sign in to comment.