diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc index 4d7d9d9edcb..8a89fe2b33d 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data.cc @@ -251,12 +251,14 @@ NodeQuantizationParameters Get2X2Int16LstmQuantizationSettings() { // state quantization parameters quantization_settings.input = {/*scale=*/3.0518044e-5, /*zp=*/0, - /*symmetry=*/false}; - quantization_settings.output = {/*scale=*/1.8310826e-5, /*zp=*/-5461, - /*symmetry=*/false}; - quantization_settings.hidden_state = {/*scale=*/1.8310826e-5, /*zp=*/-5461, - /*symmetry=*/false}; - quantization_settings.cell_state = {/*scale=*/0.00024414062, /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.output = {/*scale=*/2.1362956633198035e-05, /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.hidden_state = {/*scale=*/2.1362956633198035e-05, + /*zp=*/0, + /*symmetry=*/true}; + quantization_settings.cell_state = {/*scale=*/0.00024414807580797754, + /*zp=*/0, /*symmetry=*/true}; // gate quantization parameters diff --git a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py index 97c8798ef44..c6553fe2e4f 100644 --- a/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py +++ b/tensorflow/lite/micro/kernels/testdata/lstm_test_data_generator.py @@ -17,15 +17,15 @@ 2. Print the intermediate step outputs inside the LSTM for a single step LSTM invocation (Get2X2GateOutputCheckData in .cc) 3. Print the outputs for multi-step LSTM invocation (Get2X2LstmEvalCheckData in .cc) -Every invocation gives three types information: -1. Quantized output: kernel output in integer +Every invocation gives three types of information: +1. Quantized output: kernel output in integer 2. Dequantized output: Quantized output in floating point representation 3. 
Float output: output from the floating point computation (i.e., float kernel) -Note: +Note: 1. Change quantization settings in _KERNEL_CONFIG to see the outcomes from various quantization schema (e.g., 8x8 Vs. 16x8) 2. Only single batch inference is supporte here. Change _GATE_TEST_DATA or _MULTISTEP_TEST_DATA to see kernel outputs on different input data -3. The quantization computation here is not the exact as the c++ implementation. The integer calculation is mimiced here using floating point. +3. The quantization computation here is not exactly the same as the c++ implementation. The integer calculation is emulated here using floating point. No fixed point math is implemented here. The purpose is to illustrate the computation procedure and possible quantization error accumulation, not for bit exactness. """ from absl import app @@ -88,7 +88,7 @@ _MULTISTEP_TEST_DATA = { 'init_hidden_state_vals': [0, 0], 'init_cell_state_vals': [0, 0], - 'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3], # three time steps + 'input_data': [0.2, 0.3, 0.2, 0.3, 0.2, 0.3], # three time steps 'hidden_state_range': (-0.5, 0.7), 'cell_state_range': [-8, 8], 'input_data_range': [-1, 1]