/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Nterp entry point and support functions.
 */
#include "mterp.h"

#include "base/quasi_atomic.h"
#include "dex/dex_instruction_utils.h"
#include "debugger.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_common.h"
#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"

namespace art {
namespace interpreter {

bool IsNterpSupported() {
  return !kPoisonHeapReferences && kUseReadBarrier;
}

bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
  // Nterp has the same restrictions as Mterp.
  return IsNterpSupported() && CanUseMterp();
}

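// A method can be executed with nterp only if it is verified (access checks can be
// skipped), is not native, comes from a standard dex file, and its frame fits within
// the nterp frame size limit.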
bool CanMethodUseNterp(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  return method->SkipAccessChecks() &&
      !method->IsNative() &&
      method->GetDexFile()->IsStandardDexFile() &&
      NterpGetFrameSize(method) < kMaxNterpFrame;
}

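// Returns the entry point used to execute a method with nterp.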
const void* GetNterpEntryPoint() {
  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}

/*
 * Verify some constants used by the nterp interpreter.
 */
void CheckNterpAsmConstants() {
  /*
   * If we're using computed goto instruction transitions, make sure
   * none of the handlers overflows the byte limit. This won't tell
   * which one did, but if any one is too big the total size will
   * overflow.
   */
  const int width = kMterpHandlerSize;
  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
    LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
               << " (did an instruction handler exceed " << width << " bytes?)";
  }
}

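// Store a resolution result in the thread-local interpreter cache, keyed by dex pc,
// so that nterp's fast path can reuse it on later executions of the same instruction.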
template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
  DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
  // For simplicity, only update the cache if weak ref accesses are enabled. If
  // they are disabled, this means the GC is processing the cache, and is
  // reading it concurrently.
  if (self->GetWeakRefAccessEnabled()) {
    self->GetInterpreterCache()->Set(dex_pc_ptr, value);
  }
}

template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}

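// The helpers below are called from the nterp assembly code to fetch method and dex
// file metadata; they must not suspend the thread (see ScopedAssertNoThreadSuspension).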
extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetCodeItem();
}

extern "C" const char* NterpGetShorty(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
}

extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  return caller->GetDexFile()->GetMethodShorty(method_index);
}

extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
      ? inst->VRegH_45cc()
      : inst->VRegH_4rcc());
  return caller->GetDexFile()->GetShorty(proto_idx);
}

extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
      ? inst->VRegB_35c()
      : inst->VRegB_3rc());
  const DexFile* dex_file = caller->GetDexFile();
  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
  return dex_file->GetShorty(proto_idx);
}

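// Resolve the method referenced by the invoke at `dex_pc_ptr` and encode the result for
// nterp: for invoke-interface on a java.lang.Object method, the vtable index with the
// top bit set; for other invoke-interface, the IMT index; for String.<init>, the
// StringFactory ArtMethod pointer with the low bit set; for invoke-virtual, the vtable
// index; otherwise the ArtMethod pointer. Returns 0 with a pending exception on failure.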
extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  InvokeType invoke_type = kStatic;
  uint16_t method_index = 0;
  switch (inst->Opcode()) {
    case Instruction::INVOKE_DIRECT: {
      method_index = inst->VRegB_35c();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE: {
      method_index = inst->VRegB_35c();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC: {
      method_index = inst->VRegB_35c();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER: {
      method_index = inst->VRegB_35c();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL: {
      method_index = inst->VRegB_35c();
      invoke_type = kVirtual;
      break;
    }

    case Instruction::INVOKE_DIRECT_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kDirect;
      break;
    }

    case Instruction::INVOKE_INTERFACE_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kInterface;
      break;
    }

    case Instruction::INVOKE_STATIC_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kStatic;
      break;
    }

    case Instruction::INVOKE_SUPER_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kSuper;
      break;
    }

    case Instruction::INVOKE_VIRTUAL_RANGE: {
      method_index = inst->VRegB_3rc();
      invoke_type = kVirtual;
      break;
    }

    default:
      LOG(FATAL) << "Unknown instruction " << inst->Opcode();
  }

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtMethod* resolved_method = caller->SkipAccessChecks()
      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
            self, method_index, caller, invoke_type)
      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            self, method_index, caller, invoke_type);
  if (resolved_method == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }

  // ResolveMethod returns the method based on the method_id. For super invokes
  // we must use the executing class's context to find the right method.
  if (invoke_type == kSuper) {
    ObjPtr<mirror::Class> executing_class = caller->GetDeclaringClass();
    ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
        executing_class->GetDexFile().GetMethodId(method_index).class_idx_,
        executing_class->GetDexCache(),
        executing_class->GetClassLoader());
    DCHECK(referenced_class != nullptr);  // We have already resolved a method from this class.
    if (!referenced_class->IsAssignableFrom(executing_class)) {
      // We cannot determine the target method.
      ThrowNoSuchMethodError(invoke_type,
                             resolved_method->GetDeclaringClass(),
                             resolved_method->GetName(),
                             resolved_method->GetSignature());
      return 0;
    }
    if (referenced_class->IsInterface()) {
      resolved_method = referenced_class->FindVirtualMethodForInterfaceSuper(
          resolved_method, class_linker->GetImagePointerSize());
    } else {
      uint16_t vtable_index = resolved_method->GetMethodIndex();
      ObjPtr<mirror::Class> super_class = executing_class->GetSuperClass();
      if (super_class == nullptr ||
          !super_class->HasVTable() ||
          vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
        // Behavior to agree with that of the verifier.
        ThrowNoSuchMethodError(invoke_type,
                               resolved_method->GetDeclaringClass(),
                               resolved_method->GetName(),
                               resolved_method->GetSignature());
        return 0;
      } else {
        resolved_method = executing_class->GetSuperClass()->GetVTableEntry(
            vtable_index, class_linker->GetImagePointerSize());
      }
    }
  }

  if (invoke_type == kInterface) {
    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
      // Don't update the cache. Return the method's vtable index with the high bit
      // set to notify the interpreter that it should do a vtable call instead.
      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
      return resolved_method->GetMethodIndex() | (1U << 31);
    } else {
      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
      UpdateCache(self, dex_pc_ptr, resolved_method->GetImtIndex());
      return resolved_method->GetImtIndex();
    }
  } else if (resolved_method->GetDeclaringClass()->IsStringClass()
             && !resolved_method->IsStatic()
             && resolved_method->IsConstructor()) {
    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
    // Or the result with 1 to notify nterp this is a string init method. We don't
    // cache the result, as we don't want nterp's fast path to always check for it,
    // and we expect far more regular calls than string init calls.
    return reinterpret_cast<size_t>(resolved_method) | 1;
  } else if (invoke_type == kVirtual) {
    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
    return resolved_method->GetMethodIndex();
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_method);
    return reinterpret_cast<size_t>(resolved_method);
  }
}

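// Resolve a field for nterp, enforcing access, static/instance and final-field rules
// when the caller has not already passed verification-time access checks. Returns null
// with a pending exception on failure.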
static ArtField* ResolveFieldWithAccessChecks(Thread* self,
                                              ClassLinker* class_linker,
                                              uint16_t field_index,
                                              ArtMethod* caller,
                                              bool is_static,
                                              bool is_put)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (caller->SkipAccessChecks()) {
    return class_linker->ResolveField(field_index, caller, is_static);
  }

  caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);

  StackHandleScope<2> hs(self);
  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));

  ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
                                                           h_dex_cache,
                                                           h_class_loader);
  if (resolved_field == nullptr) {
    return nullptr;
  }

  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
    ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
    return nullptr;
  }
  ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
  if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
                                                          resolved_field,
                                                          caller->GetDexCache(),
                                                          field_index))) {
    return nullptr;
  }
  if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
    ThrowIllegalAccessErrorFinalField(caller, resolved_field);
    return nullptr;
  }
  return resolved_field;
}

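// Resolve the field of an sget/sput, initializing its declaring class if necessary.
// Returns the ArtField pointer, with the low bit set if the field is volatile, or 0
// with a pending exception on failure.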
extern "C" size_t NterpGetStaticField(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegB_21c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static */ true,
      /* is_put */ IsInstructionSPut(inst->Opcode()));

  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
    if (UNLIKELY(!class_linker->EnsureInitialized(
            self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
      DCHECK(self->IsExceptionPending());
      return 0;
    }
    DCHECK(h_class->IsInitializing());
  }
  if (resolved_field->IsVolatile()) {
    // Or the result with 1 to notify nterp this is a volatile field. We don't cache
    // the result, as we don't want nterp's fast path to always check for it.
    return reinterpret_cast<size_t>(resolved_field) | 1;
  } else {
    UpdateCache(self, dex_pc_ptr, resolved_field);
    return reinterpret_cast<size_t>(resolved_field);
  }
}

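// Resolve the field of an iget/iput and return its offset, negated to mark it as
// volatile when needed. Returns 0 with a pending exception on failure.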
extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
                                                ArtMethod* caller,
                                                uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  uint16_t field_index = inst->VRegC_22c();
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  ArtField* resolved_field = ResolveFieldWithAccessChecks(
      self,
      class_linker,
      field_index,
      caller,
      /* is_static */ false,
      /* is_put */ IsInstructionIPut(inst->Opcode()));
  if (resolved_field == nullptr) {
    DCHECK(self->IsExceptionPending());
    return 0;
  }
  if (resolved_field->IsVolatile()) {
    // Don't cache volatile fields, and return the negated offset as a marker of
    // volatility.
    return -resolved_field->GetOffset().Uint32Value();
  }
  UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
  return resolved_field->GetOffset().Uint32Value();
}

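// Resolve the class referenced by the instruction at `dex_pc_ptr`. For new-instance,
// allocate and return a new object of that class; for check-cast, instance-of,
// const-class and new-array, return the resolved class itself.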
extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
                                                         ArtMethod* caller,
                                                         uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  dex::TypeIndex index;
  switch (inst->Opcode()) {
    case Instruction::NEW_INSTANCE:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::CHECK_CAST:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::INSTANCE_OF:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    case Instruction::CONST_CLASS:
      index = dex::TypeIndex(inst->VRegB_21c());
      break;
    case Instruction::NEW_ARRAY:
      index = dex::TypeIndex(inst->VRegC_22c());
      break;
    default:
      LOG(FATAL) << "Unreachable";
  }
  ObjPtr<mirror::Class> c =
      ResolveVerifyAndClinit(index,
                             caller,
                             self,
                             /* can_run_clinit= */ false,
                             /* verify_access= */ !caller->SkipAccessChecks());
  if (c == nullptr) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }

  if (inst->Opcode() == Instruction::NEW_INSTANCE) {
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    if (UNLIKELY(c->IsStringClass())) {
      // We don't cache the class for strings as we need to special case their
      // allocation.
      return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
    } else {
      if (!c->IsFinalizable() && c->IsInstantiable()) {
        // Cache non-finalizable classes for next calls.
        UpdateCache(self, dex_pc_ptr, c.Ptr());
      }
      return AllocObjectFromCode(c, self, allocator_type).Ptr();
    }
  } else {
    // For all other cases, cache the class.
    UpdateCache(self, dex_pc_ptr, c.Ptr());
  }
  return c.Ptr();
}

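// Load the object referenced by a const-string, const-string/jumbo, const-method-handle
// or const-method-type instruction.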
extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  switch (inst->Opcode()) {
    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO: {
      dex::StringIndex string_index(
          (inst->Opcode() == Instruction::CONST_STRING)
              ? inst->VRegB_21c()
              : inst->VRegB_31c());
      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
      if (str == nullptr) {
        DCHECK(self->IsExceptionPending());
        return nullptr;
      }
      UpdateCache(self, dex_pc_ptr, str.Ptr());
      return str.Ptr();
    }
    case Instruction::CONST_METHOD_HANDLE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
    }
    case Instruction::CONST_METHOD_TYPE: {
      // Don't cache: we don't expect this to be performance sensitive, and we
      // don't want the cache to conflict with a performance sensitive entry.
      return class_linker->ResolveMethodType(
          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
    }
    default:
      LOG(FATAL) << "Unreachable";
  }
  return nullptr;
}

extern "C" void NterpUnimplemented() {
  LOG(FATAL) << "Unimplemented";
}

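// Shared implementation of filled-new-array and filled-new-array/range: resolve the
// array class, allocate the array, and fill it from the caller's registers.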
static mirror::Object* DoFilledNewArray(Thread* self,
                                        ArtMethod* caller,
                                        uint16_t* dex_pc_ptr,
                                        int32_t* regs,
                                        bool is_range)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const Instruction* inst = Instruction::At(dex_pc_ptr);
  if (kIsDebugBuild) {
    if (is_range) {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
    } else {
      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
    }
  }
  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
  DCHECK_GE(length, 0);
  if (!is_range) {
    // Checks FILLED_NEW_ARRAY's length does not exceed 5 arguments.
    DCHECK_LE(length, 5);
  }
  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
  ObjPtr<mirror::Class> array_class = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
                                                             caller,
                                                             self,
                                                             /* can_run_clinit= */ true,
                                                             /* verify_access= */ false);
  if (UNLIKELY(array_class == nullptr)) {
    DCHECK(self->IsExceptionPending());
    return nullptr;
  }
  DCHECK(array_class->IsArrayClass());
  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
      ThrowRuntimeException("Bad filled array request for type %s",
                            component_class->PrettyDescriptor().c_str());
    } else {
      self->ThrowNewExceptionF(
          "Ljava/lang/InternalError;",
          "Found type %s; filled-new-array not implemented for anything but 'int'",
          component_class->PrettyDescriptor().c_str());
    }
    return nullptr;
  }
  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
      self,
      array_class,
      length,
      array_class->GetComponentSizeShift(),
      Runtime::Current()->GetHeap()->GetCurrentAllocator());
  if (UNLIKELY(new_array == nullptr)) {
    self->AssertPendingOOMException();
    return nullptr;
  }
  uint32_t arg[Instruction::kMaxVarArgRegs];  // only used in filled-new-array.
  uint32_t vregC = 0;  // only used in filled-new-array-range.
  if (is_range) {
    vregC = inst->VRegC_3rc();
  } else {
    inst->GetVarArgs(arg);
  }
  for (int32_t i = 0; i < length; ++i) {
    size_t src_reg = is_range ? vregC + i : arg[i];
    if (is_primitive_int_component) {
      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
    } else {
      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
    }
  }
  return new_array.Ptr();
}

extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
                                               ArtMethod* caller,
                                               int32_t* registers,
                                               uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
}

extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
                                                    ArtMethod* caller,
                                                    int32_t* registers,
                                                    uint16_t* dex_pc_ptr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
}

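// Called by nterp when a method becomes hot: requests JIT compilation of the method
// and, when called on a back edge, returns OSR data if on-stack replacement is possible.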
extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ScopedAssertNoThreadSuspension sants("In nterp");
  jit::Jit* jit = Runtime::Current()->GetJit();
  if (jit != nullptr) {
    // Nterp passes null on entry where we don't want to OSR.
    if (dex_pc_ptr != nullptr) {
      // This could be a loop back edge, check if we can OSR.
      CodeItemInstructionAccessor accessor(method->DexInstructions());
      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
      jit::OsrData* osr_data = jit->PrepareForOsr(
          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
      if (osr_data != nullptr) {
        return osr_data;
      }
    }
    jit->EnqueueCompilationFromNterp(method, Thread::Current());
  }
  return nullptr;
}

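// Packed and sparse switch handling is delegated to the existing Mterp helpers.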
extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal);
extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpDoPackedSwitch(switchData, testVal);
}

extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal);
extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return MterpDoSparseSwitch(switchData, testVal);
}

}  // namespace interpreter
}  // namespace art