/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"
#include "utils/assembler.h"

namespace art {
namespace x86 {

// Slow path entered when Thread::Current()->_exception is non-null.
class X86ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler* sp_asm) override;
 private:
  const size_t stack_adjust_;
};

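// Maps an x86 general-purpose register to its DWARF register number for CFI records.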
static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 4;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);

#define __ asm_.

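// Builds the managed frame: spills the callee-save core registers, allocates the
// remaining frame space, pushes the ArtMethod* to the bottom of the frame (unless
// this is a @CriticalNative tail-call frame with no method register), and writes
// the incoming arguments to their assigned stack offsets.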
void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
                                      ManagedRegister method_reg,
                                      ArrayRef<const ManagedRegister> spill_regs,
                                      const ManagedRegisterEntrySpills& entry_spills) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(4);  // Return address on stack.
  if (frame_size == kFramePointerSize) {
    // For @CriticalNative tail call.
    CHECK(method_reg.IsNoRegister());
    CHECK(spill_regs.empty());
  } else if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  int gpr_count = 0;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ pushl(spill);
    gpr_count++;
    cfi().AdjustCFAOffset(kFramePointerSize);
    cfi().RelOffset(DWARFReg(spill), 0);
  }

  // Return address, then method on stack.
  int32_t adjust = frame_size - gpr_count * kFramePointerSize -
                   kFramePointerSize /*return address*/ -
                   (method_reg.IsRegister() ? kFramePointerSize /*method*/ : 0u);
  if (adjust != 0) {
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
  if (method_reg.IsRegister()) {
    __ pushl(method_reg.AsX86().AsCpuRegister());
    cfi().AdjustCFAOffset(kFramePointerSize);
  }
  DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);

  for (const ManagedRegisterSpill& spill : entry_spills) {
    if (spill.AsX86().IsCpuRegister()) {
      int offset = frame_size + spill.getSpillOffset();
      __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
    } else {
      DCHECK(spill.AsX86().IsXmmRegister());
      if (spill.getSize() == 8) {
        __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
      }
    }
  }
}

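// Tears down the frame: releases the local frame space and the ArtMethod* slot,
// pops the callee-save registers, and returns to the caller.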
void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                       ArrayRef<const ManagedRegister> spill_regs,
                                       bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  // -kFramePointerSize for ArtMethod*.
  int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
  if (adjust != 0) {
    __ addl(ESP, Immediate(adjust));
    cfi().AdjustCFAOffset(-adjust);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    Register spill = spill_regs[i].AsX86().AsCpuRegister();
    __ popl(spill);
    cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
    cfi().Restore(DWARFReg(spill));
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

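// Grows the frame by `adjust` bytes; the addl with a negative immediate is
// equivalent to a subl of `adjust`.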
void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    __ addl(ESP, Immediate(-adjust));
    cfi().AdjustCFAOffset(adjust);
  }
}

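// Shared between DecreaseFrameSize() below and X86ExceptionSlowPath::Emit(),
// which must drop the frame adjustment before delivering the exception.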
static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addl(ESP, Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(&asm_, adjust);
}

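// Stores a register into a frame slot; the register kind and `size` pick the
// instruction: movl for core registers (a pair of movl for 64-bit register
// pairs), fstps/fstpl for x87, movss/movsd for XMM.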
void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86ManagedRegister src = msrc.AsX86();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(Address(ESP, offs), src.AsCpuRegister());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(Address(ESP, offs), src.AsRegisterPairLow());
    __ movl(Address(ESP, FrameOffset(offs.Int32Value() + 4)), src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(ESP, offs));
    } else {
      __ fstpl(Address(ESP, offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(ESP, offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(ESP, offs), src.AsXmmRegister());
    }
  }
}

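// References and raw pointers are both 32-bit on x86, so the two stores below
// are identical movl instructions.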
void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86ManagedRegister src = msrc.AsX86();
  CHECK(src.IsCpuRegister());
  __ movl(Address(ESP, dest), src.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) {
  __ movl(Address(ESP, dest), Immediate(imm));
}

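// On x86, ART addresses the current Thread through the %fs segment, so the
// fs()-prefixed accesses below read and write Thread-local slots.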
void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
                                                    FrameOffset fr_offs,
                                                    ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
  __ fs()->movl(Address::Absolute(thr_offs), ESP);
}

void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                         ManagedRegister /*src*/,
                                         FrameOffset /*in_off*/,
                                         ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // This case only currently exists for ARM.
}

void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    CHECK_EQ(4u, size);
    __ movl(dest.AsCpuRegister(), Address(ESP, src));
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
    __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(ESP, src));
    } else {
      __ fldl(Address(ESP, src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(ESP, src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(ESP, src));
    }
  }
}

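// Thread-local counterpart of Load(); additionally supports a one-byte
// zero-extending load for byte-sized Thread fields.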
void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src));
    } else {
      CHECK_EQ(4u, size);
      __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
    __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value() + 4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ fs()->flds(Address::Absolute(src));
    } else {
      __ fs()->fldl(Address::Absolute(src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
    } else {
      __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
    }
  }
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(ESP, src));
}

void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                                   bool unpoison_reference) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                      ManagedRegister base,
                                      Offset offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
}

void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
  X86ManagedRegister dest = mdest.AsX86();
  CHECK(dest.IsCpuRegister());
  __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
}

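// Sub-word extension happens in place. For byte operands the register must
// have a byte-addressable form (EAX, EBX, ECX, or EDX), hence AsByteRegister().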
void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86ManagedRegister reg = mreg.AsX86();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

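// There is no direct x87-to-XMM move instruction, so that case round-trips
// through a 16-byte stack slot (16 keeps kNativeStackAlignment intact).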
void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86ManagedRegister dest = mdest.AsX86();
  X86ManagedRegister src = msrc.AsX86();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register.
      IncreaseFrameSize(16);
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(ESP, 0));
        __ movss(dest.AsXmmRegister(), Address(ESP, 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(ESP, 0));
        __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
      }
      DecreaseFrameSize(16);
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ movl(scratch.AsCpuRegister(), Address(ESP, src));
  __ movl(Address(ESP, dest), scratch.AsCpuRegister());
}

void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                ThreadOffset32 thr_offs,
                                                ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
  Store(fr_offs, scratch, 4);
}

void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 4);
  __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
}

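// Frame-to-frame copies go through the scratch register; a 64-bit copy is
// split into two 32-bit halves.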
void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src,
                                ManagedRegister mscratch,
                                size_t size) {
  X86ManagedRegister scratch = mscratch.AsX86();
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                ManagedRegister /*src_base*/,
                                Offset /*src_offset*/,
                                ManagedRegister /*scratch*/,
                                size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

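// When no scratch register is available, the memory-to-memory copy is done
// with a pushl/popl pair instead.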
void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                Offset dest_offset,
                                FrameOffset src,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushl(Address(ESP, src));
  __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                FrameOffset src_base,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movl(scratch, Address(ESP, src_base));
  __ movl(scratch, Address(scratch, src_offset));
  __ movl(Address(ESP, dest), scratch);
}

void X86JNIMacroAssembler::Copy(ManagedRegister dest,
                                Offset dest_offset,
                                ManagedRegister src,
                                Offset src_offset,
                                ManagedRegister scratch,
                                size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
  __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
}

void X86JNIMacroAssembler::Copy(FrameOffset dest,
                                Offset dest_offset,
                                FrameOffset src,
                                Offset src_offset,
                                ManagedRegister mscratch,
                                size_t size) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movl(scratch, Address(ESP, src));
  __ pushl(Address(scratch, src_offset));
  __ popl(Address(scratch, dest_offset));
}

void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

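// Materializes a handle scope entry for a reference argument: the output is
// the address ESP + handle_scope_offset, or null if the reference itself is
// null and null_allowed is set.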
void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                  FrameOffset handle_scope_offset,
                                                  ManagedRegister min_reg,
                                                  bool null_allowed) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
  }
}

void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                  FrameOffset handle_scope_offset,
                                                  ManagedRegister mscratch,
                                                  bool null_allowed) {
  X86ManagedRegister scratch = mscratch.AsX86();
  CHECK(scratch.IsCpuRegister());
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
  }
  Store(out_off, scratch, 4);
}

// Given a handle scope entry, load the associated reference.
void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                        ManagedRegister min_reg) {
  X86ManagedRegister out_reg = mout_reg.AsX86();
  X86ManagedRegister in_reg = min_reg.AsX86();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void X86JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86ManagedRegister base = mbase.AsX86();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call.
}

void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Register scratch = mscratch.AsX86().AsCpuRegister();
  __ movl(scratch, Address(ESP, base));
  __ call(Address(scratch, offset));
}

void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
  __ fs()->call(Address::Absolute(offset));
}

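// Thread::Current() is available at a fixed %fs-relative offset, so fetching
// the current thread is a single segment-prefixed load.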
void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  __ fs()->movl(tr.AsX86().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
}

void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset,
                                            ManagedRegister mscratch) {
  X86ManagedRegister scratch = mscratch.AsX86();
  __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
  __ movl(Address(ESP, offset), scratch.AsCpuRegister());
}

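// Compares Thread::Current()->exception_ against null and branches to the
// slow path (emitted at the bottom of this file) if an exception is pending.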
void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
  X86ExceptionSlowPath* slow = new (__ GetAllocator()) X86ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                JNIMacroUnaryCondition condition,
                                ManagedRegister test) {
  CHECK(label != nullptr);

  art::x86::Condition x86_cond;
  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      x86_cond = art::x86::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_cond = art::x86::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }

  // TEST reg, reg
  // Jcc <Offset>
  __ testl(test.AsX86().AsCpuRegister(), test.AsX86().AsCpuRegister());
  __ j(x86_cond, X86JNIMacroLabel::Cast(label)->AsX86());

  // x86 also has JCXZ/JECXZ, but they are not worth implementing because we
  // are unlikely to codegen an ECX + kZero check.
}

void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86JNIMacroLabel::Cast(label)->AsX86());
}

#undef __

void X86ExceptionSlowPath::Emit(Assembler* sasm) {
  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead.
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
  }
  // Pass the exception as an argument in EAX.
  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
  // This call should never return.
  __ int3();
#undef __
}

}  // namespace x86
}  // namespace art