Skip to content

Commit

Permalink
no one knows what "special files" means
Browse files Browse the repository at this point in the history
including two-days-ago me
  • Loading branch information
graebm committed Oct 12, 2023
1 parent 09c49da commit d3d845a
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 18 deletions.
4 changes: 2 additions & 2 deletions include/aws/common/byte_buf.h
Original file line number Diff line number Diff line change
Expand Up @@ -137,11 +137,11 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat

/**
* Same as aws_byte_buf_init_from_file(), but for reading "special files" like /proc/cpuinfo.
* This files don't accurately report their size, so size_hint is used as initial buffer size,
* These files don't accurately report their size, so size_hint is used as initial buffer size,
* and the buffer grows until the whole file is read.
*/
AWS_COMMON_API
int aws_byte_buf_init_from_special_file(
int aws_byte_buf_init_from_file_with_size_hint(
struct aws_byte_buf *out_buf,
struct aws_allocator *alloc,
const char *filename,
Expand Down
15 changes: 7 additions & 8 deletions source/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,15 @@

#include <errno.h>

/* For "special files", there's no point querying file size before reading.
/* For "special files", the OS often lies about size.
* For example, on Amazon Linux 2:
* /proc/cpuinfo: size is 0, but contents are several KB of data.
* /sys/devices/virtual/dmi/id/product_name: size is 4096, but contents are "c5.2xlarge"
*
* Therefore, let users pass a hint for the buffer's initial size,
* and grow the buffer as necessary as we read until EOF.
* Therefore, we may need to grow the buffer as we read until EOF.
* This is the min/max step size for growth. */
#define MIN_BUFFER_GROWTH_READING_SPECIAL_FILES 32
#define MAX_BUFFER_GROWTH_READING_SPECIAL_FILES 4096
#define MIN_BUFFER_GROWTH_READING_FILES 32
#define MAX_BUFFER_GROWTH_READING_FILES 4096

FILE *aws_fopen(const char *file_path, const char *mode) {
if (!file_path || strlen(file_path) == 0) {
Expand Down Expand Up @@ -90,8 +89,8 @@ static int s_byte_buf_init_from_file(
/* Expand buffer if necessary (at a reasonable rate) */
if (out_buf->len == out_buf->capacity) {
size_t additional_capacity = out_buf->capacity;
additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_SPECIAL_FILES, additional_capacity);
additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_SPECIAL_FILES, additional_capacity);
additional_capacity = aws_max_size(MIN_BUFFER_GROWTH_READING_FILES, additional_capacity);
additional_capacity = aws_min_size(MAX_BUFFER_GROWTH_READING_FILES, additional_capacity);
if (aws_byte_buf_reserve_relative(out_buf, additional_capacity)) {
AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to grow buffer for file:'%s'", filename);
goto error;
Expand Down Expand Up @@ -146,7 +145,7 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
return s_byte_buf_init_from_file(out_buf, alloc, filename, true /*use_file_size_as_hint*/, 0 /*size_hint*/);
}

int aws_byte_buf_init_from_special_file(
int aws_byte_buf_init_from_file_with_size_hint(
struct aws_byte_buf *out_buf,
struct aws_allocator *alloc,
const char *filename,
Expand Down
18 changes: 10 additions & 8 deletions tests/file_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -462,29 +462,29 @@ static int s_create_file_then_read_it(struct aws_allocator *allocator, struct aw
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
aws_byte_buf_clean_up(&buf);

/* now we check aws_byte_buf_init_from_special_file() with different size_hints */
/* now check aws_byte_buf_init_from_file_with_size_hint() ... */

/* size_hint more than big enough */
size_t size_hint = contents.len * 2;
ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
aws_byte_buf_clean_up(&buf);

/* size_hint not big enough for null-terminator */
size_hint = contents.len;
ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
aws_byte_buf_clean_up(&buf);

/* size_hint 0 */
size_hint = 0;
ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
aws_byte_buf_clean_up(&buf);

/* size_hint 1 */
size_hint = 1;
ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(aws_byte_buf_init_from_file_with_size_hint(&buf, allocator, filename, size_hint));
ASSERT_SUCCESS(s_check_byte_buf_from_file(&buf, contents));
aws_byte_buf_clean_up(&buf);

Expand All @@ -502,11 +502,13 @@ static int s_read_special_file(struct aws_allocator *allocator, const char *file
}

struct aws_byte_buf buf;
ASSERT_SUCCESS(aws_byte_buf_init_from_special_file(&buf, allocator, filename, 128));
ASSERT_SUCCESS(aws_byte_buf_init_from_file(&buf, allocator, filename));
ASSERT_TRUE(buf.capacity > buf.len, "Buffer should end with null-terminator");
ASSERT_UINT_EQUALS(0, buf.buffer[buf.len], "Buffer should end with null-terminator");

if (strcmp("/dev/null", filename) != 0) {
if (strcmp("/dev/null", filename) == 0) {
ASSERT_UINT_EQUALS(0, buf.len, "expected /dev/null to be empty");
} else {
ASSERT_TRUE(buf.len > 0, "expected special file to have data");
}

Expand All @@ -530,7 +532,7 @@ static int s_test_byte_buf_init_from_file(struct aws_allocator *allocator, void
ASSERT_SUCCESS(s_create_file_then_read_it(allocator, aws_byte_cursor_from_buf(&big_rando)));
aws_byte_buf_clean_up(&big_rando);

/* test aws_byte_buf_init_from_special_file() on actual "special files" (if they exist) */
/* test some "special files" (if they exist) */
ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/cpuinfo"));
ASSERT_SUCCESS(s_read_special_file(allocator, "/proc/net/tcp"));
ASSERT_SUCCESS(s_read_special_file(allocator, "/sys/devices/virtual/dmi/id/sys_vendor"));
Expand Down

0 comments on commit d3d845a

Please sign in to comment.