From a76d1541b65c59f36fd0afff76c2443aaf261eb3 Mon Sep 17 00:00:00 2001 From: Advait Jain Date: Tue, 15 Aug 2023 14:57:42 -0700 Subject: [PATCH 1/4] Change symmetry to True to generate golden data with zp=0 --- .../kernels/testdata/lstm_test_data_generator.py | 12 ++++++------ .../micro/kernels/testdata/lstm_test_data_utils.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py index 97c8798ef44..ad3e023e3cc 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py @@ -17,15 +17,15 @@ 2. Print the intermediate step outputs inside the LSTM for a single step LSTM invocation (Get2X2GateOutputCheckData in .cc) 3. Print the outputs for multi-step LSTM invocation (Get2X2LstmEvalCheckData in .cc) -Every invocation gives three types information: -1. Quantized output: kernel output in integer +Every invocation gives three types information: +1. Quantized output: kernel output in integer 2. Dequantized output: Quantized output in floating point representation 3. Float output: output from the floating point computation (i.e., float kernel) -Note: +Note: 1. Change quantization settings in _KERNEL_CONFIG to see the outcomes from various quantization schema (e.g., 8x8 Vs. 16x8) 2. Only single batch inference is supporte here. Change _GATE_TEST_DATA or _MULTISTEP_TEST_DATA to see kernel outputs on different input data -3. The quantization computation here is not the exact as the c++ implementation. The integer calculation is mimiced here using floating point. +3. The quantization computation here is not the exact as the c++ implementation. The integer calculation is emulated here using floating point. No fixed point math is implemented here. 
The purpose is to illustrate the computation procedure and possible quantization error accumulation, not for bit exactness. """ from absl import app @@ -38,7 +38,7 @@ _KERNEL_CONFIG = { 'quantization_settings': { 'weight_bits': 8, - 'activation_bits': 8, + 'activation_bits': 16, 'bias_bits': 32, 'cell_bits': 16, }, @@ -88,7 +88,7 @@ _MULTISTEP_TEST_DATA = { 'init_hidden_state_vals': [0, 0], 'init_cell_state_vals': [0, 0], - 'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3], # three time steps + 'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3], # three time steps 'hidden_state_range': (-0.5, 0.7), 'cell_state_range': [-8, 8], 'input_data_range': [-1, 1] diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py index 345b143fad5..54fafbd9999 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py @@ -346,7 +346,7 @@ def __init__( np.array(init_hidden_state_vals).reshape((-1, 1)), hiddens_state_range[0], hiddens_state_range[1], - False, + True, self.quantization_settings['activation_bits'], ) self.cell_state_tensor = assemble_quantized_tensor( From 823cbb387d82ec9b8cda63573e68e7280662ddb6 Mon Sep 17 00:00:00 2001 From: Advait Jain Date: Tue, 15 Aug 2023 16:15:23 -0700 Subject: [PATCH 2/4] update test cases with zp=0 for int16 --- .../lite/micro/kernels/testdata/lstm_test_data.cc | 12 ++++++------ .../micro/kernels/testdata/lstm_test_data_utils.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc index 4d7d9d9edcb..557bffeb332 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc @@ -251,12 +251,12 @@ NodeQuantizationParameters Get2X2Int16LstmQuantizationSettings() { // state quantization parameters 
quantization_settings.input = {/*scale=*/3.0518044e-5, /*zp=*/0, - /*symmetry=*/false}; - quantization_settings.output = {/*scale=*/1.8310826e-5, /*zp=*/-5461, - /*symmetry=*/false}; - quantization_settings.hidden_state = {/*scale=*/1.8310826e-5, /*zp=*/-5461, - /*symmetry=*/false}; - quantization_settings.cell_state = {/*scale=*/0.00024414062, /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.output = {/*scale=*/2.1362956633198035e-05, /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.hidden_state = {/*scale=*/2.1362956633198035e-05, /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.cell_state = {/*scale=*/0.00024414807580797754, /*zp=*/0, /*symmetry=*/true}; // gate quantization parameters diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py index 54fafbd9999..345b143fad5 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_utils.py @@ -346,7 +346,7 @@ def __init__( np.array(init_hidden_state_vals).reshape((-1, 1)), hiddens_state_range[0], hiddens_state_range[1], - True, + False, self.quantization_settings['activation_bits'], ) self.cell_state_tensor = assemble_quantized_tensor( From 0229d2e19044a625426be6d982876ec182642b1a Mon Sep 17 00:00:00 2001 From: Advait Jain Date: Tue, 15 Aug 2023 16:17:18 -0700 Subject: [PATCH 3/4] change back to int8 activations. 
--- .../lite/micro/kernels/testdata/lstm_test_data_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py index ad3e023e3cc..c6553fe2e4f 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py @@ -38,7 +38,7 @@ _KERNEL_CONFIG = { 'quantization_settings': { 'weight_bits': 8, - 'activation_bits': 16, + 'activation_bits': 8, 'bias_bits': 32, 'cell_bits': 16, }, From 5a9424515a913354beab7c4216dc07c57fae1dc4 Mon Sep 17 00:00:00 2001 From: Advait Jain Date: Tue, 15 Aug 2023 16:31:30 -0700 Subject: [PATCH 4/4] fix formatting. --- tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc index 557bffeb332..8a89fe2b33d 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc @@ -254,9 +254,11 @@ NodeQuantizationParameters Get2X2Int16LstmQuantizationSettings() { /*symmetry=*/true}; quantization_settings.output = {/*scale=*/2.1362956633198035e-05, /*zp=*/0, /*symmetry=*/true}; - quantization_settings.hidden_state = {/*scale=*/2.1362956633198035e-05, /*zp=*/0, + quantization_settings.hidden_state = {/*scale=*/2.1362956633198035e-05, + /*zp=*/0, /*symmetry=*/true}; - quantization_settings.cell_state = {/*scale=*/0.00024414807580797754, /*zp=*/0, + quantization_settings.cell_state = {/*scale=*/0.00024414807580797754, + /*zp=*/0, /*symmetry=*/true}; // gate quantization parameters