
Commit

Improve error handling (#6)
* Handle errors happening in pipelines
kubrickfr authored May 28, 2024
1 parent 604d2c4 commit 61c455d
Showing 1 changed file with 10 additions and 2 deletions.
12 changes: 10 additions & 2 deletions stream_backup.sh
@@ -131,7 +131,7 @@ else
 fi
 
 mkdir ${SUBV}/.stream_backup_${EPOCH}/ || true
-btrfs subvolume snapshot -r ${SUBV} ${NEW_SNAPSHOT}
+btrfs subvolume snapshot -r ${SUBV} ${NEW_SNAPSHOT} || exit 1
 
 trap cleanup ERR
 trap cleanup INT
@@ -140,14 +140,22 @@ eval ${BTRFS_COMMAND} \
 | lz4 \
 | mbuffer -m ${CHUNK_SIZE} -q \
 | split -b ${CHUNK_SIZE} --suffix-length 4 --filter \
-"age -R ${RECIPIENTS_FILE} | aws s3 cp - s3://${BUCKET}/${PREFIX}/${EPOCH}/${SEQ_SALTED}/\$FILE --storage-class ${SCLASS}"
+"age -R ${RECIPIENTS_FILE} | aws s3 cp - s3://${BUCKET}/${PREFIX}/${EPOCH}/${SEQ_SALTED}/\$FILE --storage-class ${SCLASS}; exit \${PIPESTATUS}"
 
+if [ "${PIPESTATUS}" != "0" ]; then
+cleanup
+fi
+
 # We only write the subvolume information to S3 at the end, as a marker of completion of the backup
 # having the subvolume information might help debugging tricky situations
 btrfs subvolume show ${NEW_SNAPSHOT} \
 | age -R ${RECIPIENTS_FILE} \
 | aws s3 cp - s3://${BUCKET}/${PREFIX}/${EPOCH}/${SEQ_SALTED}/snapshot_info.dat
 
+if [ "${PIPESTATUS}" != "0" ]; then
+cleanup
+fi
+
 # We delete the snapshot from which we made an incremental backup:
 # * If the user asked for it
 # * If there is a previous snapshot in the first place
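For readers less familiar with bash's PIPESTATUS, here is a minimal, self-contained sketch of the check this commit adds after the main backup pipeline (btrfs send piped through lz4, mbuffer, and split). The cleanup stub and the false | gzip | cat pipeline are placeholders chosen for illustration; they are not code from the repository.

#!/usr/bin/env bash
# Sketch only: demonstrates the PIPESTATUS check used in the commit, with
# placeholder commands; "false" stands in for a failing first stage such as
# the btrfs send command.

cleanup() {
    # Placeholder for the script's real cleanup routine.
    echo "pipeline failed, cleaning up" >&2
}

false | gzip -c | cat > /dev/null

# $? only holds the exit status of the last command in a pipeline.
# PIPESTATUS is an array with one entry per stage, and ${PIPESTATUS}
# without an index expands to its first element, i.e. the status of the
# first command (here: false) -- the same test the script performs.
if [ "${PIPESTATUS}" != "0" ]; then
    cleanup
fi

The exit \${PIPESTATUS} appended to the split --filter command applies the same idea inside each filter invocation: the filter exits with the status of the first command of its own pipeline (the age encryption step) rather than the status of the final aws s3 cp stage, so a failure while encrypting is no longer hidden by a successful final stage.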
