Skip to content

Commit

Permalink
refactor: revert additions which break build with USE_TFLM_COMPRESSION
Browse files Browse the repository at this point in the history
For now, revert two commits that added conditionally included
code using USE_TFLM_COMPRESSION, which breaks the build when
USE_TFLM_COMPRESSION is enabled. This code will be restored in
chunks that do not break the build and tests.

This reverts commit f309046 (#2647).
This reverts commit 2b127fd (#2658).
  • Loading branch information
rkuester committed Nov 27, 2024
1 parent e2cc052 commit 16ad7bb
Show file tree
Hide file tree
Showing 10 changed files with 18 additions and 533 deletions.
70 changes: 0 additions & 70 deletions tensorflow/lite/micro/compression.h

This file was deleted.

79 changes: 5 additions & 74 deletions tensorflow/lite/micro/fake_micro_context.cc
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand All @@ -23,23 +23,10 @@ limitations under the License.

namespace tflite {

// Constructs a fake context over caller-owned tensors, allocator, and micro
// graph; no ownership is taken. When built with USE_TFLM_COMPRESSION, also
// stores the optional compressed-tensor list (may be nullptr) for the
// compression query methods below.
FakeMicroContext::FakeMicroContext(
TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator,
MicroGraph* micro_graph
#ifdef USE_TFLM_COMPRESSION
,
const CompressedTensorList* compressed_tensors
#endif // USE_TFLM_COMPRESSION
)
: graph_(*micro_graph),
tensors_(tensors),
allocator_(allocator)
#ifdef USE_TFLM_COMPRESSION
,
compressed_tensors_(compressed_tensors)
#endif // USE_TFLM_COMPRESSION
{
}
// Constructs a fake context over caller-owned tensors, allocator, and micro
// graph; no ownership is taken.
FakeMicroContext::FakeMicroContext(TfLiteTensor* tensors,
SingleArenaBufferAllocator* allocator,
MicroGraph* micro_graph)
: graph_(*micro_graph), tensors_(tensors), allocator_(allocator) {}

TfLiteTensor* FakeMicroContext::AllocateTempTfLiteTensor(int tensor_index) {
allocated_temp_count_++;
Expand Down Expand Up @@ -125,60 +112,4 @@ void* FakeMicroContext::external_context() { return nullptr; }

MicroGraph& FakeMicroContext::graph() { return graph_; }

#ifdef USE_TFLM_COMPRESSION

// Available during Prepare & Eval. Returns false if tensor is not
// compressed.
bool FakeMicroContext::IsTensorCompressed(const TfLiteNode* node,
                                          int tensor_idx) {
  // No compression metadata, or the requested input slot does not exist.
  if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) {
    return false;
  }
  const int tensor_index = node->inputs->data[tensor_idx];
  // A negative index marks an optional/absent input; otherwise the tensor is
  // compressed exactly when it has an entry in the compressed-tensor table.
  return tensor_index >= 0 &&
         compressed_tensors_->tensors[tensor_index] != nullptr;
}

// Only available during Prepare. The kernel is responsible for storing the
// scratch buffer handle.
int FakeMicroContext::AllocateDecompressionScratchBuffer(const TfLiteNode* node,
int tensor_idx) {
if (compressed_tensors_ == nullptr || tensor_idx >= node->inputs->size) {
return -1;
}
int index = node->inputs->data[tensor_idx];
if (index < 0 || compressed_tensors_->tensors[index] == nullptr) {
return -1;
}
TfLiteTensor* tensor = &tensors_[index];
int scratch_index = -1;
TfLiteStatus result =
RequestScratchBufferInArena(tensor->bytes, &scratch_index);
if (result != kTfLiteOk) {
return -1;
}

return scratch_index;
}

// Available during Prepare & Eval. Returns nullptr if tensor is not
// compressed.
const CompressionTensorData* FakeMicroContext::GetTensorCompressionData(
    const TfLiteNode* node, int tensor_idx) {
  if (compressed_tensors_ != nullptr && tensor_idx < node->inputs->size) {
    const int tensor_index = node->inputs->data[tensor_idx];
    // Negative indices denote optional inputs that are absent.
    if (tensor_index >= 0) {
      // Entry is nullptr when this particular tensor is not compressed.
      return compressed_tensors_->tensors[tensor_index];
    }
  }
  return nullptr;
}

#endif // USE_TFLM_COMPRESSION

} // namespace tflite
36 changes: 2 additions & 34 deletions tensorflow/lite/micro/fake_micro_context.h
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -30,12 +30,7 @@ class FakeMicroContext : public MicroContext {
~FakeMicroContext() = default;

FakeMicroContext(TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator,
MicroGraph* micro_graph
#ifdef USE_TFLM_COMPRESSION
,
const CompressedTensorList* compressed_tensors = nullptr
#endif // USE_TFLM_COMPRESSION
);
MicroGraph* micro_graph);

void* AllocatePersistentBuffer(size_t bytes) override;
TfLiteStatus RequestScratchBufferInArena(size_t bytes,
Expand All @@ -55,24 +50,6 @@ class FakeMicroContext : public MicroContext {
void* external_context() override;
MicroGraph& graph() override;

#ifdef USE_TFLM_COMPRESSION

// Available during Prepare & Eval. Returns false if tensor is not
// compressed.
bool IsTensorCompressed(const TfLiteNode* node, int tensor_idx) override;

// Only available during Prepare. The kernel is responsible for storing the
// scratch buffer handle.
int AllocateDecompressionScratchBuffer(const TfLiteNode* node,
int tensor_idx) override;

// Available during Prepare & Eval. Returns nullptr if tensor is not
// compressed.
const CompressionTensorData* GetTensorCompressionData(
const TfLiteNode* node, int tensor_idx) override;

#endif // USE_TFLM_COMPRESSION

private:
static constexpr int kNumScratchBuffers_ = 12;

Expand All @@ -85,15 +62,6 @@ class FakeMicroContext : public MicroContext {

SingleArenaBufferAllocator* allocator_;

#ifdef USE_TFLM_COMPRESSION

//
// Compression
//
const CompressedTensorList* compressed_tensors_;

#endif // USE_TFLM_COMPRESSION

TF_LITE_REMOVE_VIRTUAL_DELETE
};

Expand Down
17 changes: 4 additions & 13 deletions tensorflow/lite/micro/kernels/kernel_runner.cc
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand All @@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/lite/micro/arena_allocator/single_arena_buffer_allocator.h"
#include "tensorflow/lite/micro/micro_arena_constants.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/test_helpers.h"

namespace tflite {
namespace micro {
Expand All @@ -37,22 +38,12 @@ KernelRunner::KernelRunner(const TFLMRegistration& registration,
TfLiteTensor* tensors, int tensors_size,
TfLiteIntArray* inputs, TfLiteIntArray* outputs,
const void* builtin_data,
TfLiteIntArray* intermediates
#ifdef USE_TFLM_COMPRESSION
,
const CompressedTensorList* compressed_tensors
#endif // USE_TFLM_COMPRESSION
)
TfLiteIntArray* intermediates)
: registration_(registration),
allocator_(SingleArenaBufferAllocator::Create(kKernelRunnerBuffer_,
kKernelRunnerBufferSize_)),
mock_micro_graph_(allocator_),
fake_micro_context_(tensors, allocator_, &mock_micro_graph_
#ifdef USE_TFLM_COMPRESSION
,
compressed_tensors
#endif // USE_TFLM_COMPRESSION
) {
fake_micro_context_(tensors, allocator_, &mock_micro_graph_) {
// Prepare TfLiteContext:
context_.impl_ = static_cast<void*>(&fake_micro_context_);
context_.ReportError = MicroContextReportOpError;
Expand Down
9 changes: 2 additions & 7 deletions tensorflow/lite/micro/kernels/kernel_runner.h
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -36,12 +36,7 @@ class KernelRunner {
KernelRunner(const TFLMRegistration& registration, TfLiteTensor* tensors,
int tensors_size, TfLiteIntArray* inputs,
TfLiteIntArray* outputs, const void* builtin_data,
TfLiteIntArray* intermediates = nullptr
#ifdef USE_TFLM_COMPRESSION
,
const CompressedTensorList* compressed_tensors = nullptr
#endif // USE_TFLM_COMPRESSION
);
TfLiteIntArray* intermediates = nullptr);

// Calls init and prepare on the kernel (i.e. TFLMRegistration) struct.
// Any exceptions will be DebugLog'd and returned as a status code.
Expand Down
27 changes: 1 addition & 26 deletions tensorflow/lite/micro/kernels/kernel_util.h
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand Down Expand Up @@ -91,31 +91,6 @@ const T* GetOptionalTensorData(const TfLiteEvalTensor* tensor) {
: reinterpret_cast<const T*>(tensor->data.raw);
}

#ifdef USE_TFLM_COMPRESSION

// Overloads existing GetTensorData. If not compressed, this will return
// tensor->data.
//
// TODO(ddavis-2015): make micro_context a const pointer
template <typename T>
const T* GetTensorData(MicroContext* micro_context,
                       const TfLiteEvalTensor* tensor,
                       const CompressionTensorData* compression_data,
                       int scratch_buffer_handle) {
  if (tensor == nullptr) {
    return nullptr;
  }
  // Default to the tensor's own storage; swap in the decompressed copy when
  // compression metadata is present.
  const void* source = tensor->data.data;
  if (compression_data != nullptr) {
    source = micro_context->DecompressTensorToScratchBuffer(
        *tensor, *compression_data, scratch_buffer_handle);
  }
  return reinterpret_cast<const T*>(source);
}

#endif // USE_TFLM_COMPRESSION

// Returns the shape of a TfLiteEvalTensor struct.
const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor);

Expand Down
Loading

0 comments on commit 16ad7bb

Please sign in to comment.