/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"

#include "HexagonModel.h"
#include "HexagonOperations.h"
#include "OperationsUtils.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
namespace hexagon {

using android::nn::Shape;

namespace {
namespace float32 {

bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for float32::add");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::add");

    // get parameters
    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
    const hexagon_nn_input& in2 = model->getTensor(ins[1]);

    const op_type act = model->getFloatActivation(ins[2]);

    // add node to graph
    return model->addFusedFloatOperation(OP_Add_f, NN_PAD_NA, {}, act, {in1, in2}, outs);
}

bool average_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for float32::average_pool_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::average_pool_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);
        act = model->getFloatActivation(ins[9]);

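        // NNAPI tensors are NHWC, so dimensions[1] is the height and
        // dimensions[2] is the width; getPadding maps the explicit padding
        // values onto one of Hexagon's implicit padding modes.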
        const Shape inputShape = model->getShape(ins[0]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filter_width, filter_height, padding_left, padding_right,
                         padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
        act = model->getFloatActivation(ins[6]);
    }

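    // hexagon_nn pooling ops take the window and stride as [1, h, w, 1] shape
    // tensors rather than as scalar parameters.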
    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFloatOperationWithActivation(OP_AvgPool_f, pad, act, {input, window, stride},
                                                  outs);
}

bool concatenation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                   HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_LE(3, ins.size(), "Need at least 3 inputs for float32::concatenation");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::concatenation");

    const size_t numInputTensors = ins.size() - 1;

    // get parameters
    std::vector<hexagon_nn_input> inputs(numInputTensors + 1);
    for (size_t i = 0; i < numInputTensors; ++i) {
        inputs[i + 1] = model->getTensor(ins[i]);
    }

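    // Hexagon represents all tensors as 4-D, so an axis specified against a
    // rank-`dims` NNAPI tensor is shifted by (4 - dims) to address the
    // equivalent Hexagon dimension.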
    // axis being concatenated
    const int32_t axis = model->getScalar<int32_t>(ins[numInputTensors]);
    const int32_t dims = model->getShape(ins[0]).dimensions.size();
    inputs[0] = model->createScalar<int32_t>(axis + (4 - dims));

    // add node to graph
    return model->addBasicOperation(OP_Concat_f, NN_PAD_NA, inputs, outs);
}

bool conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for float32::conv_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::conv_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input filter = model->createConvFilterTensor(ins[1]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);
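    // createConvFilterTensor reorders the NNAPI filter into the layout that
    // hexagon_nn expects. The bias is not an input of the conv node itself;
    // addFusedFloatOperation appends the bias-add (and any activation clamp)
    // after the convolution.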

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);
        act = model->getFloatActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        const Shape filterShape = model->getShape(ins[1]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                         padding_left, padding_right, padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);
        act = model->getFloatActivation(ins[6]);
    }

    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFusedFloatOperation(OP_Conv2d_f, pad, bias, act, {input, filter, stride},
                                         outs);
}

bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                       HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 11 || ins.size() == 8,
                        "Need 8 or 11 inputs for float32::depthwise_conv_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::depthwise_conv_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);
    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t depth_multiplier;
    op_type act;

    // get parameters
    if (ins.size() == 11) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);
        depth_multiplier = model->getScalar<int32_t>(ins[9]);
        act = model->getFloatActivation(ins[10]);

        const Shape inputShape = model->getShape(ins[0]);
        const Shape filterShape = model->getShape(ins[1]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                         padding_left, padding_right, padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);
        depth_multiplier = model->getScalar<int32_t>(ins[6]);
        act = model->getFloatActivation(ins[7]);
    }

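    // createDepthwiseFilterTensor repacks the NNAPI depthwise filter, whose
    // last dimension is in_channels * depth_multiplier, into the layout that
    // hexagon_nn's depthwise conv expects.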
    const hexagon_nn_input filter = model->createDepthwiseFilterTensor(ins[1], depth_multiplier);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFusedFloatOperation(OP_DepthwiseConv2d_f, pad, bias, act,
                                         {input, filter, stride}, outs);
}

bool fully_connected(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(4, ins.size(), "Need 4 inputs for float32::fully_connected");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::fully_connected");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& weights = model->createFullyConnectedWeightTensor(ins[1]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);
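    // createFullyConnectedWeightTensor rearranges the NNAPI weights (given as
    // [num_units, input_size]) into the orientation OP_MatMul_f expects.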

    const op_type act = model->getFloatActivation(ins[3]);

    // add node to graph
    return model->addFusedFloatOperation(OP_MatMul_f, NN_PAD_NA, bias, act, {input, weights}, outs);
}

bool l2_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for float32::l2_pool_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::l2_pool_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);
        act = model->getFloatActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filter_width, filter_height, padding_left, padding_right,
                         padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
        act = model->getFloatActivation(ins[6]);
    }

    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFloatOperationWithActivation(OP_L2Pool_f, pad, act, {input, window, stride},
                                                  outs);
}

bool local_response_normalization(const std::vector<uint32_t>& ins,
                                  const std::vector<uint32_t>& outs, HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(5, ins.size(),
                           "Need 5 inputs for float32::local_response_normalization");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(),
                           "Need 1 output for float32::local_response_normalization");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);
    const hexagon_nn_input& alpha = model->getTensor(ins[3]);
    const hexagon_nn_input& beta = model->getTensor(ins[4]);

    // create a window tensor of shape [1, 1, 1, radius * 2 + 1] with value 1.0f
    const int32_t radius = model->getScalar<int32_t>(ins[1]);
    const hexagon_nn_input window = model->createTensor<float>(1, 1, 1, radius * 2 + 1, {1.0f});

    // add node to graph
    return model->addBasicOperation(OP_LRN_f, NN_PAD_NA, {input, window, bias, alpha, beta}, outs);
}

bool logistic(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
              HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for float32::logistic");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::logistic");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_Sigmoid_f, NN_PAD_NA, {input}, outs);
}

bool max_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                 HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for float32::max_pool_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::max_pool_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);
        act = model->getFloatActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filter_width, filter_height, padding_left, padding_right,
                         padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
        act = model->getFloatActivation(ins[6]);
    }

    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFloatOperationWithActivation(OP_MaxPool_f, pad, act, {input, window, stride},
                                                  outs);
}

bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for float32::mul");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::mul");

    // get parameters
    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
    const hexagon_nn_input& in2 = model->getTensor(ins[1]);

    const op_type act = model->getFloatActivation(ins[2]);

    // add node to graph
    return model->addFusedFloatOperation(OP_Mul_f, NN_PAD_NA, {}, act, {in1, in2}, outs);
}

bool relu(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
          HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for float32::relu");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::relu");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_Relu_f, NN_PAD_NA, {input}, outs);
}

bool relu1(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for float32::relu1");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::relu1");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input min = model->createScalar(-1.0f);
    const hexagon_nn_input max = model->createScalar(1.0f);

    // add node to graph
    return model->addBasicOperation(OP_Clamp_f, NN_PAD_NA, {input, min, max}, outs);
}

bool relu6(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for float32::relu6");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::relu6");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input max = model->createScalar(6.0f);

    // add node to graph
    return model->addBasicOperation(OP_ReluX_f, NN_PAD_NA, {input, max}, outs);
}

bool reshape(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(2, ins.size(), "Need 2 inputs for float32::reshape");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::reshape");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& newdims = model->getTensor(ins[1]);

    // add node to graph
    return model->addBasicOperation(OP_Reshape, NN_PAD_NA, {input, newdims}, outs);
}

bool resize_bilinear(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for float32::resize_bilinear");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::resize_bilinear");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    const int32_t width = model->getScalar<int32_t>(ins[1]);
    const int32_t height = model->getScalar<int32_t>(ins[2]);

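    // Hexagon's resize op takes the new spatial size as an int32 tensor
    // ordered {height, width}.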
    const hexagon_nn_input newdim = model->createValues<int32_t>({height, width});

    // add node to graph
    return model->addBasicOperation(OP_ResizeBilinear_f, NN_PAD_NA, {input, newdim}, outs);
}

bool softmax(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(2, ins.size(), "Need 2 inputs for float32::softmax");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::softmax");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& beta = model->getTensor(ins[1]);

    // add node to graph
    return model->addBasicOperation(OP_Softmax_f, NN_PAD_NA, {input, beta}, outs);
}

bool tanh(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
          HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for float32::tanh");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for float32::tanh");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_Tanh_f, NN_PAD_NA, {input}, outs);
}

}  // namespace float32

namespace quant8_asym {

bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for quant8_asym::add");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::add");

    // get parameters
    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
    const hexagon_nn_input& in2 = model->getTensor(ins[1]);

    const op_type act = model->getQuantizedActivation(ins[2]);

    const hexagon_nn_input& in1_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& in2_min = model->getQuantizationMin(ins[1]);
    const hexagon_nn_input& in2_max = model->getQuantizationMax(ins[1]);
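    // OP_QuantizedAdd_8p8to32 emits a 32-bit result; addFusedQuant8Operation
    // requantizes it back to 8 bits and applies the fused activation.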

    // add node to graph
    return model->addFusedQuant8Operation(OP_QuantizedAdd_8p8to32, NN_PAD_NA, {}, act,
                                          {in1, in2, in1_min, in1_max, in2_min, in2_max}, outs);
}

bool average_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for quant8_asym::average_pool_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::average_pool_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);
        act = model->getQuantizedActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filter_width, filter_height, padding_left, padding_right,
                         padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
        act = model->getQuantizedActivation(ins[6]);
    }

    const hexagon_nn_input& in_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& in_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addQuant8OperationWithActivation(OP_QuantizedAvgPool_8, pad, act,
                                                   {input, in_min, in_max, window, stride}, outs);
}

bool concatenation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                   HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_LE(3, ins.size(), "Need at least 3 inputs for quant8_asym::concatenation");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::concatenation");

    const size_t numInputTensors = ins.size() - 1;

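    // OP_QuantizedConcat_8 expects its inputs packed as
    // [axis, tensor_0..tensor_{n-1}, min_0..min_{n-1}, max_0..max_{n-1}],
    // which is the layout built below.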
    // get parameters
    std::vector<hexagon_nn_input> inputs(numInputTensors * 3 + 1);
    for (size_t i = 0; i < numInputTensors; ++i) {
        inputs[i + 1 + numInputTensors * 0] = model->getTensor(ins[i]);
        inputs[i + 1 + numInputTensors * 1] = model->getQuantizationMin(ins[i]);
        inputs[i + 1 + numInputTensors * 2] = model->getQuantizationMax(ins[i]);
    }

    // axis being concatenated
    const int32_t axis = model->getScalar<int32_t>(ins[numInputTensors]);
    const int32_t dims = model->getShape(ins[0]).dimensions.size();
    inputs[0] = model->createScalar<int32_t>(axis + (4 - dims));

    // add node to graph
    return model->addBasicOperation(OP_QuantizedConcat_8, NN_PAD_NA, inputs, outs);
}

bool conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for quant8_asym::conv_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::conv_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input filter = model->createConvFilterTensor(ins[1]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);
        act = model->getQuantizedActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        const Shape filterShape = model->getShape(ins[1]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                         padding_left, padding_right, padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);
        act = model->getQuantizedActivation(ins[6]);
    }

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
    const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);

    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

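    // The {bias, bias_min, bias_max} triple is passed separately so the fused
    // helper can append the quantized bias-add and requantization after the
    // convolution.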
    // add node to graph
    return model->addFusedQuant8Operation(
        OP_QuantizedConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
        {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
}

bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                       HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 11 || ins.size() == 8,
                        "Need 8 or 11 inputs for quant8_asym::depthwise_conv_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::depthwise_conv_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t depth_multiplier;
    op_type act;

    // get parameters
    if (ins.size() == 11) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[4]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[5]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);
        depth_multiplier = model->getScalar<int32_t>(ins[9]);
        act = model->getQuantizedActivation(ins[10]);

        const Shape inputShape = model->getShape(ins[0]);
        const Shape filterShape = model->getShape(ins[1]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                         padding_left, padding_right, padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);
        depth_multiplier = model->getScalar<int32_t>(ins[6]);
        act = model->getQuantizedActivation(ins[7]);
    }

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& filter_min = model->getQuantizationMin(ins[1]);
    const hexagon_nn_input& filter_max = model->getQuantizationMax(ins[1]);
    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);

    const hexagon_nn_input filter = model->createDepthwiseFilterTensor(ins[1], depth_multiplier);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addFusedQuant8Operation(
        OP_QuantizedDepthwiseConv2d_8x8to32, pad, {bias, bias_min, bias_max}, act,
        {input, filter, input_min, input_max, filter_min, filter_max, stride}, outs);
}

bool dequantize(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for quant8_asym::dequantize");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::dequantize");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_Dequantize, NN_PAD_NA, {input, input_min, input_max}, outs);
}

bool fully_connected(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(4, ins.size(), "Need 4 inputs for quant8_asym::fully_connected");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::fully_connected");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& weights = model->createFullyConnectedWeightTensor(ins[1]);
    const hexagon_nn_input& bias = model->getTensor(ins[2]);

    const op_type act = model->getQuantizedActivation(ins[3]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& weights_min = model->getQuantizationMin(ins[1]);
    const hexagon_nn_input& weights_max = model->getQuantizationMax(ins[1]);
    const hexagon_nn_input& bias_min = model->getQuantizationMin(ins[2]);
    const hexagon_nn_input& bias_max = model->getQuantizationMax(ins[2]);

    // add node to graph
    return model->addFusedQuant8Operation(
        OP_QuantizedMatMul_8x8to32, NN_PAD_NA, {bias, bias_min, bias_max}, act,
        {input, weights, input_min, input_max, weights_min, weights_max}, outs);
}

bool logistic(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
              HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for quant8_asym::logistic");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::logistic");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);

    // TFLite's sigmoid uses a different max value, so derive it from the
    // quantization parameters with 256 steps instead of using the stored max
    const hexagon_nn_input input_max = model->createQuantizationValue(ins[0], 256);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedSigmoid_8, NN_PAD_NA, {input, input_min, input_max},
                                    outs);
}

bool max_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                 HexagonModel* model) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for quant8_asym::max_pool_2d");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::max_pool_2d");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    // setup parameters
    hexagon_nn_padding_type pad;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;
    op_type act;

    // get parameters
    if (ins.size() == 10) {
        const int32_t padding_left = model->getScalar<int32_t>(ins[1]);
        const int32_t padding_right = model->getScalar<int32_t>(ins[2]);
        const int32_t padding_top = model->getScalar<int32_t>(ins[3]);
        const int32_t padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);
        act = model->getQuantizedActivation(ins[9]);

        const Shape inputShape = model->getShape(ins[0]);
        pad = getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                         stride_height, filter_width, filter_height, padding_left, padding_right,
                         padding_top, padding_bottom);
        HEXAGON_SOFT_ASSERT_NE(pad, NN_PAD_NA, "Unknown padding");
    } else {
        pad = model->getPadding(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
        act = model->getQuantizedActivation(ins[6]);
    }

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input window = model->createShape(1, filter_height, filter_width, 1);
    const hexagon_nn_input stride = model->createShape(1, stride_height, stride_width, 1);

    // add node to graph
    return model->addQuant8OperationWithActivation(
        OP_QuantizedMaxPool_8, pad, act, {input, input_min, input_max, window, stride}, outs);
}

bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for quant8_asym::mul");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::mul");

    // get parameters
    const hexagon_nn_input& in1 = model->getTensor(ins[0]);
    const hexagon_nn_input& in2 = model->getTensor(ins[1]);

    const op_type act = model->getQuantizedActivation(ins[2]);

    const hexagon_nn_input& in1_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& in1_max = model->getQuantizationMax(ins[0]);
    const hexagon_nn_input& in2_min = model->getQuantizationMin(ins[1]);
    const hexagon_nn_input& in2_max = model->getQuantizationMax(ins[1]);

    // add node to graph
    return model->addFusedQuant8Operation(OP_QuantizedMul_8x8to32, NN_PAD_NA, {}, act,
                                          {in1, in2, in1_min, in1_max, in2_min, in2_max}, outs);
}

bool relu(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
          HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for quant8_asym::relu");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::relu");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedRelu_8, NN_PAD_NA, {input, input_min, input_max},
                                    outs);
}

bool relu1(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for quant8_asym::relu1");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::relu1");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input min = model->createScalar(-1.0f);
    const hexagon_nn_input max = model->createScalar(1.0f);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedClamp_8, NN_PAD_NA,
                                    {input, input_min, input_max, min, max}, outs);
}

bool relu6(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for quant8_asym::relu6");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::relu6");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input max = model->createScalar(6.0f);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedReluX_8, NN_PAD_NA,
                                    {input, input_min, input_max, max}, outs);
}

bool reshape(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(2, ins.size(), "Need 2 inputs for quant8_asym::reshape");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::reshape");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& newdims = model->getTensor(ins[1]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedReshape, NN_PAD_NA,
                                    {input, newdims, input_min, input_max}, outs);
}

bool softmax(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    HEXAGON_SOFT_ASSERT_EQ(2, ins.size(), "Need 2 inputs for quant8_asym::softmax");
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for quant8_asym::softmax");

    // get parameters
    const hexagon_nn_input& input = model->getTensor(ins[0]);
    const hexagon_nn_input& beta = model->getTensor(ins[1]);

    const hexagon_nn_input& input_min = model->getQuantizationMin(ins[0]);
    const hexagon_nn_input& input_max = model->getQuantizationMax(ins[0]);

    // add node to graph
    return model->addBasicOperation(OP_QuantizedSoftmax_8, NN_PAD_NA,
                                    {input, input_min, input_max, beta}, outs);
}

}  // namespace quant8_asym

}  // namespace

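// Table mapping an {OperationType, OperandType} pair to the prepare function
// defined above. Returned by reference so the static table is constructed
// only once.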
OperationTable& getOperationPrepareTable() {
    static OperationTable table = {
        // NOTE: the operations that are commented out via inline represent
        // operations that are valid for the Android O NNAPI release, but are
        // currently not implemented in HVX.

        // -------------------------- 32-BIT FLOAT ----------------------------
        // HVX is only performant when running on quantized values. Further, as
        // an optimization, the current HVX driver will convert some floating
        // point tensors into quantized values, perform the operation, and then
        // convert them back to floating point. This results in a loss in
        // precision causing some tests to fail. For these reasons, the FLOAT32
        // operations are being temporarily disabled.
        /*
        {{OperationType::ADD, OperandType::TENSOR_FLOAT32}, float32::add},
        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::average_pool_2d},
        {{OperationType::CONCATENATION, OperandType::TENSOR_FLOAT32}, float32::concatenation},
        {{OperationType::CONV_2D, OperandType::TENSOR_FLOAT32}, float32::conv_2d},
        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_FLOAT32},
         float32::depthwise_conv_2d},
        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_FLOAT32}, float32::depth_to_space},
        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_FLOAT32},
        // float32::embedding_lookup},
        //{{OperationType::FLOOR, OperandType::TENSOR_FLOAT32}, float32::floor},
        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_FLOAT32}, float32::fully_connected},
        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_FLOAT32},
        // float32::hashtable_lookup},
        //{{OperationType::L2_NORMALIZATION, OperandType::TENSOR_FLOAT32},
        // float32::l2_normalization},
        {{OperationType::L2_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::l2_pool_2d},
        {{OperationType::LOCAL_RESPONSE_NORMALIZATION, OperandType::TENSOR_FLOAT32},
         float32::local_response_normalization},
        {{OperationType::LOGISTIC, OperandType::TENSOR_FLOAT32}, float32::logistic},
        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_FLOAT32}, float32::lsh_projection},
        //{{OperationType::LSTM, OperandType::TENSOR_FLOAT32}, float32::lstm},
        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_FLOAT32}, float32::max_pool_2d},
        {{OperationType::MUL, OperandType::TENSOR_FLOAT32}, float32::mul},
        {{OperationType::RELU, OperandType::TENSOR_FLOAT32}, float32::relu},
        {{OperationType::RELU1, OperandType::TENSOR_FLOAT32}, float32::relu1},
        {{OperationType::RELU6, OperandType::TENSOR_FLOAT32}, float32::relu6},
        {{OperationType::RESHAPE, OperandType::TENSOR_FLOAT32}, float32::reshape},
        {{OperationType::RESIZE_BILINEAR, OperandType::TENSOR_FLOAT32}, float32::resize_bilinear},
        //{{OperationType::RNN, OperandType::TENSOR_FLOAT32}, float32::rnn},
        {{OperationType::SOFTMAX, OperandType::TENSOR_FLOAT32}, float32::softmax},
        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_FLOAT32}, float32::space_to_depth},
        //{{OperationType::SVDF, OperandType::TENSOR_FLOAT32}, float32::svdf},
        {{OperationType::TANH, OperandType::TENSOR_FLOAT32}, float32::tanh},
        */

        // -------------------- QUANTIZED 8-BIT ASYMMETRICAL ------------------
        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::add},
        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM},
         quant8_asym::average_pool_2d},
        {{OperationType::CONCATENATION, OperandType::TENSOR_QUANT8_ASYMM},
         quant8_asym::concatenation},
        {{OperationType::CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::conv_2d},
        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_QUANT8_ASYMM},
         quant8_asym::depthwise_conv_2d},
        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_QUANT8_ASYMM},
        // quant8_asym::depth_to_space},
        {{OperationType::DEQUANTIZE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::dequantize},
        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
        // quant8_asym::embedding_lookup},
        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_QUANT8_ASYMM},
         quant8_asym::fully_connected},
        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM},
        // quant8_asym::hashtable_lookup},
        {{OperationType::LOGISTIC, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::logistic},
        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_QUANT8_ASYMM},
        // quant8_asym::lsh_projection},
        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::max_pool_2d},
        {{OperationType::MUL, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::mul},
        {{OperationType::RELU, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu},
        {{OperationType::RELU1, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu1},
        {{OperationType::RELU6, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::relu6},
        {{OperationType::RESHAPE, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::reshape},
        {{OperationType::SOFTMAX, OperandType::TENSOR_QUANT8_ASYMM}, quant8_asym::softmax},
        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_QUANT8_ASYMM},
        // quant8_asym::space_to_depth},
    };

    // The following functions are normally used by float32, but those
    // operations have been temporarily disabled. Casting them to void marks
    // them as intentionally unused and keeps the compiler from emitting
    // unused-function errors.
    (void)float32::add;
    (void)float32::average_pool_2d;
    (void)float32::concatenation;
    (void)float32::conv_2d;
    (void)float32::depthwise_conv_2d;
    (void)float32::fully_connected;
    (void)float32::l2_pool_2d;
    (void)float32::local_response_normalization;
    (void)float32::logistic;
    (void)float32::max_pool_2d;
    (void)float32::mul;
    (void)float32::relu;
    (void)float32::relu1;
    (void)float32::relu6;
    (void)float32::reshape;
    (void)float32::resize_bilinear;
    (void)float32::softmax;
    (void)float32::tanh;

    return table;
}

}  // namespace hexagon
}  // namespace implementation
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android