/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "neuralnetworks_hidl_hal_test"

#include "VtsHalNeuralnetworks.h"

#include "1.0/Callbacks.h"

namespace android::hardware::neuralnetworks::V1_1::vts::functional {

using V1_0::DeviceStatus;
using V1_0::ErrorStatus;
using V1_0::Operand;
using V1_0::OperandLifeTime;
using V1_0::OperandType;
using V1_0::implementation::PreparedModelCallback;

// create device test
TEST_P(NeuralnetworksHidlTest, CreateDevice) {}

// status test
TEST_P(NeuralnetworksHidlTest, StatusTest) {
    Return<DeviceStatus> status = kDevice->getStatus();
    ASSERT_TRUE(status.isOk());
    EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}

// initialization
TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
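    // The V1_1 capabilities report performance information (execTime and
    // powerUsage) for float32, quantized8, and relaxed float32-to-float16
    // execution; each reported value is expected to be strictly positive.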
    Return<void> ret =
            kDevice->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
                EXPECT_EQ(ErrorStatus::NONE, status);
                EXPECT_LT(0.0f, capabilities.float32Performance.execTime);
                EXPECT_LT(0.0f, capabilities.float32Performance.powerUsage);
                EXPECT_LT(0.0f, capabilities.quantized8Performance.execTime);
                EXPECT_LT(0.0f, capabilities.quantized8Performance.powerUsage);
                EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.execTime);
                EXPECT_LT(0.0f, capabilities.relaxedFloat32toFloat16Performance.powerUsage);
            });
    EXPECT_TRUE(ret.isOk());
}

// detect cycle
TEST_P(NeuralnetworksHidlTest, CycleTest) {
    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // model input
    // opnd2 = INT32                     // model input
    // opnd3 = ADD(opnd0, opnd4, opnd2)
    // opnd4 = ADD(opnd1, opnd3, opnd2)
    // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
    //
    //            +-----+
    //            |     |
    //            v     |
    // 3 = ADD(0, 4, 2) |
    // |                |
    // +----------+     |
    //            |     |
    //            v     |
    // 4 = ADD(1, 3, 2) |
    // |                |
    // +----------------+
    // |
    // |
    // +-------+
    //         |
    //         v
    // 5 = ADD(4, 0, 2)

    const std::vector<Operand> operands = {
            {
                    // operands[0]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 2,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[1]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 1,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[2]
                    .type = OperandType::INT32,
                    .dimensions = {},
                    .numberOfConsumers = 3,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_INPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[3]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 1,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[4]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 2,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
            {
                    // operands[5]
                    .type = OperandType::TENSOR_FLOAT32,
                    .dimensions = {1},
                    .numberOfConsumers = 0,
                    .scale = 0.0f,
                    .zeroPoint = 0,
                    .lifetime = OperandLifeTime::MODEL_OUTPUT,
                    .location = {.poolIndex = 0, .offset = 0, .length = 0},
            },
    };
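    // The first ADD reads operand 4 before any operation has produced it, and
    // the ADD that produces operand 4 reads operand 3, so operands 3 and 4
    // depend on each other and the graph has no valid topological ordering.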
    const std::vector<Operation> operations = {
            {.type = OperationType::ADD, .inputs = {0, 4, 2}, .outputs = {3}},
            {.type = OperationType::ADD, .inputs = {1, 3, 2}, .outputs = {4}},
            {.type = OperationType::ADD, .inputs = {4, 0, 2}, .outputs = {5}},
    };

    const Model model = {
            .operands = operands,
            .operations = operations,
            .inputIndexes = {0, 1, 2},
            .outputIndexes = {5},
            .operandValues = {},
            .pools = {},
    };
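    // Aside from the cycle, the model is well-formed (its input and output
    // indexes match the operand lifetimes, and no constant values or pools are
    // needed), so any rejection below must come from the cycle check.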
    // ensure that getSupportedOperations_1_1() checks model validity
    ErrorStatus supportedOpsErrorStatus = ErrorStatus::GENERAL_FAILURE;
    Return<void> supportedOpsReturn = kDevice->getSupportedOperations_1_1(
            model, [&model, &supportedOpsErrorStatus](ErrorStatus status,
                                                      const hidl_vec<bool>& supported) {
                supportedOpsErrorStatus = status;
                if (status == ErrorStatus::NONE) {
                    ASSERT_EQ(supported.size(), model.operations.size());
                }
            });
    ASSERT_TRUE(supportedOpsReturn.isOk());
    ASSERT_EQ(supportedOpsErrorStatus, ErrorStatus::INVALID_ARGUMENT);

    // ensure that prepareModel_1_1() checks model validity
    sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback;
    Return<ErrorStatus> prepareLaunchReturn = kDevice->prepareModel_1_1(
            model, ExecutionPreference::FAST_SINGLE_ANSWER, preparedModelCallback);
    ASSERT_TRUE(prepareLaunchReturn.isOk());
    //     Note that preparation can fail for reasons other than an
    //     invalid model (invalid model should result in
    //     INVALID_ARGUMENT) -- for example, perhaps not all
    //     operations are supported, or perhaps the device hit some
    //     kind of capacity limit.
    EXPECT_NE(prepareLaunchReturn, ErrorStatus::NONE);
    EXPECT_NE(preparedModelCallback->getStatus(), ErrorStatus::NONE);
    EXPECT_EQ(preparedModelCallback->getPreparedModel(), nullptr);
}

}  // namespace android::hardware::neuralnetworks::V1_1::vts::functional