Skip to content

Commit 0ad7093

Browse files
committed
Fixed all doubles
1 parent e1a6329 commit 0ad7093

10 files changed

Lines changed: 54 additions & 44 deletions

File tree

tensorflow/lite/micro/kernels/activations_common.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,8 @@ void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
3838
ReluOpData* data) {
3939
float act_min = 0.0;
4040
float act_max = std::numeric_limits<float>::infinity();
41-
double real_multiplier =
42-
static_cast<double>(input->params.scale / output->params.scale);
41+
double real_multiplier = static_cast<double>(input->params.scale) /
42+
static_cast<double>(output->params.scale);
4343

4444
const RuntimeShape input_shape = GetTensorShape(input);
4545
const RuntimeShape output_shape = GetTensorShape(output);

tensorflow/lite/micro/kernels/cmsis_nn/svdf.cc

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -148,20 +148,22 @@ TfLiteStatus CmsisNnPrepareSvdf(TfLiteContext* context, TfLiteNode* node) {
148148

149149
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
150150

151-
const double effective_scale_1 = static_cast<double>(
152-
input->params.scale * weights_feature->params.scale /
153-
activation_state->params.scale);
151+
const double effective_scale_1 =
152+
static_cast<double>(input->params.scale) *
153+
static_cast<double>(weights_feature->params.scale) /
154+
static_cast<double>(activation_state->params.scale);
154155
const double effective_scale_2 =
155-
static_cast<double>(activation_state->params.scale *
156-
weights_time->params.scale / output->params.scale);
156+
static_cast<double>(activation_state->params.scale) *
157+
static_cast<double>(weights_time->params.scale) /
158+
static_cast<double>(output->params.scale);
157159

158160
// TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready.
159161
// TODO(#1751): account for optional bias tensor
160162
TF_LITE_ENSURE(
161163
context,
162164
std::abs(static_cast<double>(bias->params.scale) -
163-
static_cast<double>(activation_state->params.scale *
164-
weights_time->params.scale)) < 1e-5);
165+
(static_cast<double>(activation_state->params.scale) *
166+
static_cast<double>(weights_time->params.scale))) < 1e-5);
165167

166168
QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a),
167169
&(data->effective_scale_1_b));

tensorflow/lite/micro/kernels/div.cc

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,8 +53,9 @@ TfLiteStatus CalculateOpDataDiv(TfLiteContext* context, TfLiteTensor* input1,
5353
TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
5454
context, params->activation, output, &data->output_activation_min,
5555
&data->output_activation_max));
56-
const double real_multiplier = static_cast<double>(
57-
input1->params.scale / (input2->params.scale * output->params.scale));
56+
const double real_multiplier = static_cast<double>(input1->params.scale) /
57+
(static_cast<double>(input2->params.scale) *
58+
static_cast<double>(output->params.scale));
5859
QuantizeMultiplier(real_multiplier, &data->output_multiplier,
5960
&data->output_shift);
6061
data->input1_zero_point = input1->params.zero_point;

tensorflow/lite/micro/kernels/elementwise.cc

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -62,15 +62,16 @@ bool IsRsqrtSupportedType(const TfLiteType type) {
6262
inline void SetAbsOutputMultiplier(const float input_scale,
6363
const float output_scale,
6464
int32_t* multiplier, int* shift) {
65-
QuantizeMultiplier(static_cast<double>(input_scale / output_scale),
66-
multiplier, shift);
65+
QuantizeMultiplier(
66+
static_cast<double>(input_scale) / static_cast<double>(output_scale),
67+
multiplier, shift);
6768
}
6869

6970
inline void SetRsqrtOutputMultiplier(const float input_scale,
7071
const float output_scale,
7172
int32_t* multiplier, int* shift) {
72-
const double scale =
73-
1. / static_cast<double>((std::sqrt(input_scale) * output_scale));
73+
const double scale = 1.0 / (std::sqrt(static_cast<double>(input_scale)) *
74+
static_cast<double>(output_scale));
7475
QuantizeMultiplier(scale, multiplier, shift);
7576
}
7677

tensorflow/lite/micro/kernels/hard_swish_common.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,8 @@ TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
5656
const float reluish_scale = 3.0f / 32768.0f;
5757
const float output_scale = output->params.scale;
5858

59-
const double output_multiplier =
60-
static_cast<double>(hires_input_scale / output_scale);
59+
const double output_multiplier = static_cast<double>(hires_input_scale) /
60+
static_cast<double>(output_scale);
6161
int32_t output_multiplier_fixedpoint_int32;
6262
QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
6363
&params->output_multiplier_exponent);
@@ -67,8 +67,8 @@ TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
6767

6868
TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
6969

70-
const double reluish_multiplier =
71-
static_cast<double>(hires_input_scale / reluish_scale);
70+
const double reluish_multiplier = static_cast<double>(hires_input_scale) /
71+
static_cast<double>(reluish_scale);
7272
int32_t reluish_multiplier_fixedpoint_int32;
7373
QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
7474
&params->reluish_multiplier_exponent);

tensorflow/lite/micro/kernels/leaky_relu_common.cc

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,15 +51,16 @@ TfLiteStatus CalculateOpDataLeakyRelu(TfLiteContext* context,
5151
data->output_zero_point = output->params.zero_point;
5252

5353
int output_shift_alpha;
54-
double alpha_multiplier = static_cast<double>(
55-
input->params.scale * params->alpha / output->params.scale);
54+
double alpha_multiplier = static_cast<double>(input->params.scale) *
55+
static_cast<double>(params->alpha) /
56+
static_cast<double>(output->params.scale);
5657
QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
5758
&output_shift_alpha);
5859
data->output_shift_alpha = static_cast<int32_t>(output_shift_alpha);
5960

6061
int output_shift_identity;
61-
double identity_multiplier =
62-
static_cast<double>(input->params.scale / output->params.scale);
62+
double identity_multiplier = static_cast<double>(input->params.scale) /
63+
static_cast<double>(output->params.scale);
6364
QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
6465
&output_shift_identity);
6566
data->output_shift_identity = static_cast<int32_t>(output_shift_identity);

tensorflow/lite/micro/kernels/squared_difference.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,8 @@ void PrepareQuantized(
6767
twice_max_input_scale;
6868
const double real_output_multiplier =
6969
(twice_max_input_scale * twice_max_input_scale) /
70-
static_cast<double>((1 << data->arithmetic_params.left_shift * 2) *
71-
output_quantization_params.scale);
70+
(static_cast<double>(1 << data->arithmetic_params.left_shift * 2) *
71+
static_cast<double>(output_quantization_params.scale));
7272
QuantizeMultiplierSmallerThanOneExp(
7373
real_input1_multiplier, &data->arithmetic_params.input1_multiplier,
7474
&data->arithmetic_params.input1_shift);

tensorflow/lite/micro/kernels/sub_common.cc

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -49,15 +49,16 @@ TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params,
4949
// accordingly. In case of 16-bit we have 65535 << 15 which is less than 1
5050
// << 31, therefore the addition will still fit in a 32 bit accumulator.
5151
data->left_shift = output->type == kTfLiteInt16 ? 15 : 20;
52-
const float twice_max_input_scale =
53-
2 * std::max(input1->params.scale, input2->params.scale);
52+
const double twice_max_input_scale =
53+
2 * static_cast<double>(
54+
std::max(input1->params.scale, input2->params.scale));
5455
const double real_input1_multiplier =
55-
static_cast<double>(input1->params.scale / twice_max_input_scale);
56+
static_cast<double>(input1->params.scale) / twice_max_input_scale;
5657
const double real_input2_multiplier =
57-
static_cast<double>(input2->params.scale / twice_max_input_scale);
58+
static_cast<double>(input2->params.scale) / twice_max_input_scale;
5859
const double real_output_multiplier =
59-
static_cast<double>(twice_max_input_scale /
60-
((1 << data->left_shift) * output->params.scale));
60+
twice_max_input_scale / (static_cast<double>(1 << data->left_shift) *
61+
static_cast<double>(output->params.scale));
6162

6263
QuantizeMultiplierSmallerThanOneExp(
6364
real_input1_multiplier, &data->input1_multiplier, &data->input1_shift);

tensorflow/lite/micro/kernels/svdf_common.cc

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -452,20 +452,22 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) {
452452

453453
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
454454

455-
const double effective_scale_1 = static_cast<double>(
456-
input->params.scale * weights_feature->params.scale /
457-
activation_state->params.scale);
455+
const double effective_scale_1 =
456+
static_cast<double>(input->params.scale) *
457+
static_cast<double>(weights_feature->params.scale) /
458+
static_cast<double>(activation_state->params.scale);
458459
const double effective_scale_2 =
459-
static_cast<double>(activation_state->params.scale *
460-
weights_time->params.scale / output->params.scale);
460+
static_cast<double>(activation_state->params.scale) *
461+
static_cast<double>(weights_time->params.scale) /
462+
static_cast<double>(output->params.scale);
461463

462464
// TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready.
463465
// TODO(#1751): account for optional bias tensor
464466
TF_LITE_ENSURE(
465467
context,
466468
std::abs(static_cast<double>(bias->params.scale) -
467-
static_cast<double>(activation_state->params.scale *
468-
weights_time->params.scale)) < 1e-5);
469+
(static_cast<double>(activation_state->params.scale) *
470+
static_cast<double>(weights_time->params.scale))) < 1e-5);
469471

470472
QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a),
471473
&(data->effective_scale_1_b));

tensorflow/lite/micro/kernels/xtensa/svdf.cc

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -200,16 +200,18 @@ TfLiteStatus PrepareInt8(TfLiteContext* context, TfLiteNode* node) {
200200
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
201201

202202
const double effective_scale_1 =
203-
static_cast<double>(input->params.scale * weights_feature->params.scale /
204-
activation_state->params.scale);
203+
static_cast<double>(input->params.scale) *
204+
static_cast<double>(weights_feature->params.scale) /
205+
static_cast<double>(activation_state->params.scale);
205206
const double effective_scale_2 =
206-
static_cast<double>(activation_state->params.scale *
207-
weights_time->params.scale / output->params.scale);
207+
static_cast<double>(activation_state->params.scale) *
208+
static_cast<double>(weights_time->params.scale) /
209+
static_cast<double>(output->params.scale);
208210

209211
// TODO(#1751): account for optional bias tensor
210212
TF_LITE_ENSURE_NEAR(context, static_cast<double>(bias->params.scale),
211-
static_cast<double>(activation_state->params.scale *
212-
weights_time->params.scale),
213+
(static_cast<double>(activation_state->params.scale) *
214+
static_cast<double>(weights_time->params.scale)),
213215
1e-5);
214216

215217
TFLITE_DCHECK(node->user_data != nullptr);

0 commit comments

Comments (0)