Shred (#175)
* shred implementation

* update readme

---------

Co-authored-by: syrmel <[email protected]>
mike-ward and syrmel authored Nov 28, 2024
1 parent ea02194 commit a8151ad
Showing 4 changed files with 234 additions and 2 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -46,7 +46,7 @@ count below and mark it as done in this README.md. Thanks!
GNU coreutils. They are not 100% compatible. If you encounter different behaviors,
compare against the true GNU coreutils version on the Linux-based tests first.

- ## Completed (72/109) - 66% done!
+ ## Completed (73/109) - 67% done!

| Done | Cmd | Description |
| :-----: | --------- | ------------------------------------------------ |
@@ -127,7 +127,7 @@ compare against the true GNU coreutils version on the Linux-based tests first.
| &check; | sha256sum | Print or check SHA-2 256 bit digests |
| &check; | sha384sum | Print or check SHA-2 384 bit digests |
| &check; | sha512sum | Print or check SHA-2 512 bit digests |
- | | shred | Remove files more securely |
+ | &check; | shred | Remove files more securely |
| &check; | shuf | Shuffling text |
| &check; | sleep | Delay for a specified time |
| &check; | sort | Sort text files |
66 changes: 66 additions & 0 deletions src/shred/config.v
@@ -0,0 +1,66 @@
import common
import flag
import os

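// Config describes shred's command-line interface; the field attributes
// (@[short], @[long], @[only], @[xdoc]) drive both flag parsing and the generated help text.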
@[name: 'shred']
@[version: '0.1']
struct Config {
force bool @[short: f; xdoc: 'change permissions to allow writing if necessary']
iterations int = 3 @[short: n; xdoc: 'overwrite N times instead of the default (3)']
random_source string @[xdoc: 'get random bytes from <string>']
size string @[short: s; xdoc: 'shred this many bytes (suffixes like K, M, G accepted)\n']
rm bool @[only: u; xdoc: 'deallocate and remove file after overwriting']
remove_how string @[long: remove; xdoc: 'like -u but give control on <string> to delete; See below']
verbose bool @[short: v; xdoc: 'show progress']
exact bool @[short: x; xdoc: 'do not round file sizes up to the next full block; this is the default for non-regular files']
zero bool @[short: z; xdoc: 'add a final overwrite with zeros to hide shredding']
show_help bool @[long: help; short: h; xdoc: 'show this help']
show_version bool @[long: 'version'; xdoc: 'show version and exit']
}

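// get_args parses os.args into a Config and returns it together with the
// remaining non-flag arguments (the FILE operands). Unknown flags and a
// missing file operand are fatal errors.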
fn get_args() (Config, []string) {
config, files := flag.to_struct[Config](os.args, skip: 1) or { panic(err) }

if config.show_help {
doc := flag.to_doc[Config](
description: 'Usage: shred [OPTION]... FILE...\n' +
'Overwrite the specified FILE(s) repeatedly, in order to make it harder\n' +
'for even very expensive hardware probing to recover the data.'
footer:
'\nDelete FILE(s) if --remove (-u) is specified. The default is not to remove\n' +
'the files because it is common to operate on device files like /dev/hda,\n' +
'and those files usually should not be removed.\n\n' +
'The --remove <string> parameter indicates how to remove a directory entry:\n' +
" 'unlink' => use a standard unlink call.\n" +
" 'wipe' => also first obfuscate bytes in the name.\n" +
" 'wipesync' => also sync each obfuscated byte to the device.\n" +
"The default mode is 'wipesync', but note it can be expensive.\n\n" +
'CAUTION: shred assumes the file system and hardware overwrite data in place.\n' +
'Although this is common, many platforms operate otherwise. Also, backups\n' +
'and mirrors may contain unremovable copies that will let a shredded file\n' +
'be recovered later.\n' + common.coreutils_footer()
) or { panic(err) }
println(doc)
exit(0)
}

if files.len > 0 && files.any(it.starts_with('-')) {
eexit('The following flags could not be mapped to any fields: ${files.filter(it.starts_with('-'))}')
}

if files.filter(!it.starts_with('-')).len == 0 {
eexit('missing file operand')
}

if config.iterations <= 0 {
eexit('iterations must be greater than zero')
}

return config, files
}

@[noreturn]
fn eexit(msg string) {
eprintln(msg)
exit(1)
}
Empty file removed src/shred/delete.me
166 changes: 166 additions & 0 deletions src/shred/shred.v
@@ -0,0 +1,166 @@
import os
import rand
import math
import strconv

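// Fill_Pattern selects what an overwrite pass writes: pseudo-random bytes,
// zeros (the optional final -z pass), or bytes drawn from --random-source.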
enum Fill_Pattern {
random
zeros
source
}

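// files are overwritten in fixed 4 KiB chunks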
const block_size = 4096

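// Illustrative invocations (not exhaustive), using the flags declared in Config:
//   shred -v -n 5 secret.txt    overwrite 5 times, showing progress
//   shred -u -z secret.txt      add a final zero pass, then remove the file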
fn main() {
config, files := get_args()
shred(files, config)
}

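// shred runs the requested number of data passes over each file (random bytes,
// or bytes from --random-source when given), optionally follows with a zeroing
// pass (-z), and finally removes the file if -u or --remove was requested.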
fn shred(files []string, config Config) {
for file in files {
total_iterations := if config.zero { config.iterations + 1 } else { config.iterations }
fill_pattern := if config.random_source.len > 0 {
Fill_Pattern.source
} else {
Fill_Pattern.random
}
for iteration in 0 .. config.iterations {
shred_file(file, fill_pattern, iteration + 1, total_iterations, config)
}
if config.zero {
shred_file(file, .zeros, total_iterations, total_iterations, config)
}
if config.rm || config.remove_how.len > 0 {
remove(file, config)
}
}
}

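// shred_file performs a single overwrite pass, writing pattern data in
// block_size chunks until the smaller of the file size and the --size limit
// has been covered.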
fn shred_file(file string, fill_pattern Fill_Pattern, iteration int, iterations int, config Config) {
stat := os.lstat(file) or { eexit(err.msg()) }
size_arg := convert_to_number(config.size)
mut block := if stat.size >= u64(block_size) { block_size } else { int(stat.size) }
mut fp := open_file_for_write(file, config)
mut written := u64(0)
for {
pattern := match fill_pattern {
.random { rand.bytes(block) or { panic(err) } }
.source { random_from_source(config.random_source) }
.zeros { []u8{len: block, init: 0} }
}
wrote := fp.write(pattern) or { panic(err) }
written += u64(wrote)
sz := math.min(size_arg, stat.size)
if written >= sz {
break
}
remaining := stat.size - written
block = if remaining > block_size { block_size } else { int(remaining) }
}
fp.close()
if config.verbose {
show_shred_progress(file, iteration, iterations, fill_pattern)
}
}

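// open_file_for_write opens (and truncates) the file for writing; with --force
// it retries after chmod-ing the file to 0o600 when the first attempt fails.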
fn open_file_for_write(file string, config Config) os.File {
return os.create(file) or {
if !config.force {
eexit(err.msg())
}
os.chmod(file, 0o600) or { eexit(err.msg()) }
return os.create(file) or { eexit(err.msg()) }
}
}

fn show_shred_progress(file string, iteration int, iterations int, fill_pattern Fill_Pattern) {
pattern := match fill_pattern {
.random { 'random' }
.source { 'source' }
.zeros { '000000' }
}
println('${progress_prefix(file)}: pass ${iteration} of ${iterations} (${pattern})...')
}

fn progress_prefix(file string) string {
return 'shred ${file}'
}

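// remove deletes the file according to --remove: 'wipe' and 'wipesync'
// (the default) first rename the file to obscure its name, while 'unlink'
// removes it directly.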
fn remove(file string, config Config) {
match config.remove_how {
'', 'wipesync', 'wipe' {
name := rename(file)
if config.verbose {
println('${progress_prefix(file)}: renamed to ${name}')
}
os.rm(name) or { panic(err) }
if config.verbose {
println('${progress_prefix(file)}: removed')
}
}
'unlink' {
os.rm(file) or { panic(err) }
if config.verbose {
println('${progress_prefix(file)}: removed')
}
}
else {
eexit('unrecognized --remove option')
}
}
}

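// rename obfuscates the directory entry by renaming the file to a random
// numeric name, retrying up to 10 times before giving up.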
fn rename(file string) string {
for _ in 0 .. 10 {
name := file + '${rand.u32()}'
os.rename(file, name) or {}
if os.exists(name) {
return name
}
}
eexit('can not rename file ${file}')
}

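// random_from_source builds one block of pattern data by reading single bytes
// at random offsets of the --random-source file.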
fn random_from_source(file string) []u8 {
stat := os.lstat(file) or { eexit(err.msg()) }
if stat.size == 0 {
panic('zero length source file detected')
}
file_len := stat.size - 1
mut fp := os.open(file) or { panic(err) }
mut buf := []u8{}
for _ in 0 .. block_size {
pos := rand.u64n(file_len) or { panic(err) }
b := fp.read_bytes_at(1, pos)
buf << b
}
fp.close()
return buf
}

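// convert_to_number parses the --size argument, honoring the binary suffixes
// K, M, G and T; an empty value means no explicit limit (max_u64).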
fn convert_to_number(input string) u64 {
if input.len == 0 {
return max_u64
}
if input.ends_with('T') {
number := to_u64(input[..input.len - 1])
return number * 1024 * 1024 * 1024 * 1024
}
if input.ends_with('G') {
number := to_u64(input[..input.len - 1])
return number * 1024 * 1024 * 1024
}
if input.ends_with('M') {
number := to_u64(input[..input.len - 1])
return number * 1024 * 1024
}
if input.ends_with('K') {
number := to_u64(input[..input.len - 1])
return number * 1024
}
return to_u64(input)
}

fn to_u64(s string) u64 {
return strconv.common_parse_uint(s, 0, 64, true, true) or { eexit(err.msg()) }
}
