/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jni_macro_assembler_x86_64.h"

#include "base/casts.h"
#include "base/memory_region.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "thread.h"

namespace art {
namespace x86_64 {

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::X86_64Core(static_cast<int>(reg));
}
static dwarf::Reg DWARFReg(FloatRegister reg) {
  return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

static constexpr size_t kNativeStackAlignment = 16;
static_assert(kNativeStackAlignment == kStackAlignment);

#define __ asm_.

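// Emits the stub frame: the core registers in `spill_regs` are pushed first, the
// remainder of `frame_size` is reserved with a single subq, the XMM registers in
// `spill_regs` are spilled at the top of that area, and the method pointer from
// `method_reg` (if present) is stored at offset 0 from RSP. Register arguments
// listed in `entry_spills` are written to their stack slots just above the frame.
// Every push and RSP adjustment is mirrored in the CFI so the frame can be unwound
// at any point.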
void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
                                         ManagedRegister method_reg,
                                         ArrayRef<const ManagedRegister> spill_regs,
                                         const ManagedRegisterEntrySpills& entry_spills) {
  DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
  cfi().SetCurrentCFAOffset(8);  // Return address on stack.
  // Note: @CriticalNative tail call is not used (would have frame_size == kFramePointerSize).
  if (method_reg.IsNoRegister()) {
    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  } else {
    CHECK_ALIGNED(frame_size, kStackAlignment);
  }
  size_t gpr_count = 0u;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ pushq(spill.AsCpuRegister());
      gpr_count++;
      cfi().AdjustCFAOffset(kFramePointerSize);
      cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
    }
  }
  // return address then method on stack.
  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                          - (gpr_count * kFramePointerSize)
                          - kFramePointerSize /*return address*/;
  if (rest_of_frame != 0) {
    __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
    cfi().AdjustCFAOffset(rest_of_frame);
  }

  // spill xmms
  int64_t offset = rest_of_frame;
  for (int i = spill_regs.size() - 1; i >= 0; --i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      offset -= sizeof(double);
      __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
      cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
    }
  }

  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                "Unexpected frame pointer size.");

  if (method_reg.IsRegister()) {
    __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
  }

  for (const ManagedRegisterSpill& spill : entry_spills) {
    if (spill.AsX86_64().IsCpuRegister()) {
      if (spill.getSize() == 8) {
        __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                spill.AsX86_64().AsCpuRegister());
      }
    } else {
      if (spill.getSize() == 8) {
        __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      } else {
        CHECK_EQ(spill.getSize(), 4);
        __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
                 spill.AsX86_64().AsXmmRegister());
      }
    }
  }
}

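// Mirror image of BuildFrame: reload the spilled XMM registers, release the bulk
// of the frame with a single addq, pop the saved core registers, and return.
// RememberState()/RestoreState() snapshot the CFI around the ret so that code
// emitted after this exit still unwinds with the full frame description.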
void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                          ArrayRef<const ManagedRegister> spill_regs,
                                          bool may_suspend ATTRIBUTE_UNUSED) {
  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
  cfi().RememberState();
  int gpr_count = 0;
  // unspill xmms
  int64_t offset = static_cast<int64_t>(frame_size)
                   - (spill_regs.size() * kFramePointerSize)
                   - kFramePointerSize;
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsXmmRegister()) {
      __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
      cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
      offset += sizeof(double);
    } else {
      gpr_count++;
    }
  }
  DCHECK_EQ(static_cast<size_t>(offset),
            frame_size - (gpr_count * kFramePointerSize) - kFramePointerSize);
  if (offset != 0) {
    __ addq(CpuRegister(RSP), Immediate(offset));
    cfi().AdjustCFAOffset(-offset);
  }
  for (size_t i = 0; i < spill_regs.size(); ++i) {
    x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
    if (spill.IsCpuRegister()) {
      __ popq(spill.AsCpuRegister());
      cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
      cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
    }
  }
  __ ret();
  // The CFI should be restored for any code that follows the exit block.
  cfi().RestoreState();
  cfi().DefCFAOffset(frame_size);
}

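// Grows the current frame: RSP is lowered by `adjust` (emitted as an addq of the
// negated value) and the CFA offset is increased to match, keeping unwind info
// valid.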
void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
    cfi().AdjustCFAOffset(adjust);
  }
}

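// Kept as a file-local helper (rather than a member) so that
// X86_64ExceptionSlowPath::Emit() at the end of this file can reuse it on a plain
// X86_64Assembler.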
static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
  if (adjust != 0u) {
    CHECK_ALIGNED(adjust, kNativeStackAlignment);
    assembler->addq(CpuRegister(RSP), Immediate(adjust));
    assembler->cfi().AdjustCFAOffset(-adjust);
  }
}

void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
  DecreaseFrameSizeImpl(adjust, &asm_);
}

void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    } else {
      CHECK_EQ(8u, size);
      __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
    }
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
            src.AsRegisterPairHigh());
  } else if (src.IsX87Register()) {
    if (size == 4) {
      __ fstps(Address(CpuRegister(RSP), offs));
    } else {
      __ fstpl(Address(CpuRegister(RSP), offs));
    }
  } else {
    CHECK(src.IsXmmRegister());
    if (size == 4) {
      __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    } else {
      __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
    }
  }
}

void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  X86_64ManagedRegister src = msrc.AsX86_64();
  CHECK(src.IsCpuRegister());
  __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
                                                    uint32_t imm,
                                                    ManagedRegister) {
  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
}

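// The *Thread() helpers below address thread-local data through the gs segment:
// on x86-64 the current Thread* is reachable via the gs segment base, so a
// ThreadOffset64 can be read or written with a single gs()-prefixed instruction
// using Address::Absolute.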
void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                                       FrameOffset fr_offs,
                                                       ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
}

void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
                                            ManagedRegister /*src*/,
                                            FrameOffset /*in_off*/,
                                            ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
}

void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size);
      __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    } else {
      CHECK_EQ(8u, size);
      __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(0u, size);
    __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
    __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ flds(Address(CpuRegister(RSP), src));
    } else {
      __ fldl(Address(CpuRegister(RSP), src));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    } else {
      __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
    }
  }
}

void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
                                             ThreadOffset64 src, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (dest.IsCpuRegister()) {
    if (size == 1u) {
      __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true));
    } else {
      CHECK_EQ(4u, size);
      __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
    }
  } else if (dest.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
  } else if (dest.IsX87Register()) {
    if (size == 4) {
      __ gs()->flds(Address::Absolute(src, true));
    } else {
      __ gs()->fldl(Address::Absolute(src, true));
    }
  } else {
    CHECK(dest.IsXmmRegister());
    if (size == 4) {
      __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
    } else {
      __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
    }
  }
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
}

void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
                                      ManagedRegister mbase,
                                      MemberOffset offs,
                                      bool unpoison_reference) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
  if (unpoison_reference) {
    __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
                                         ManagedRegister mbase,
                                         Offset offs) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(base.IsCpuRegister());
  CHECK(dest.IsCpuRegister());
  __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
}

void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  CHECK(dest.IsCpuRegister());
  __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
}

void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  X86_64ManagedRegister reg = mreg.AsX86_64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsCpuRegister()) << reg;
  if (size == 1) {
    __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
  } else {
    __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
  }
}

void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  X86_64ManagedRegister dest = mdest.AsX86_64();
  X86_64ManagedRegister src = msrc.AsX86_64();
  if (!dest.Equals(src)) {
    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
      __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
      // Pass via stack and pop X87 register
      __ subl(CpuRegister(RSP), Immediate(16));
      if (size == 4) {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstps(Address(CpuRegister(RSP), 0));
        __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      } else {
        CHECK_EQ(src.AsX87Register(), ST0);
        __ fstpl(Address(CpuRegister(RSP), 0));
        __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
      }
      __ addq(CpuRegister(RSP), Immediate(16));
    } else {
      // TODO: x87, SSE
      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
    }
  }
}

void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
  __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
                                                   ThreadOffset64 thr_offs,
                                                   ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
  Store(fr_offs, scratch, 8);
}

void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  Load(scratch, fr_offs, 8);
  __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src,
                                   ManagedRegister mscratch,
                                   size_t size) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  if (scratch.IsCpuRegister() && size == 8) {
    Load(scratch, src, 4);
    Store(dest, scratch, 4);
    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
  } else {
    Load(scratch, src, size);
    Store(dest, scratch, size);
  }
}

void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
                                   ManagedRegister /*src_base*/,
                                   Offset /*src_offset*/,
                                   ManagedRegister /*scratch*/,
                                   size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

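// Several Copy() overloads below move the value with a pushq/popq pair: x86-64
// allows both push and pop to take a memory operand, so a memory-to-memory
// transfer needs no (or only an addressing) scratch register.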
void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK(scratch.IsNoRegister());
  CHECK_EQ(size, 4u);
  __ pushq(Address(CpuRegister(RSP), src));
  __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   FrameOffset src_base,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  __ movq(scratch, Address(CpuRegister(RSP), src_base));
  __ movq(scratch, Address(scratch, src_offset));
  __ movq(Address(CpuRegister(RSP), dest), scratch);
}

void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
                                   Offset dest_offset,
                                   ManagedRegister src,
                                   Offset src_offset,
                                   ManagedRegister scratch,
                                   size_t size) {
  CHECK_EQ(size, 4u);
  CHECK(scratch.IsNoRegister());
  __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
  __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
}

void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
                                   Offset dest_offset,
                                   FrameOffset src,
                                   Offset src_offset,
                                   ManagedRegister mscratch,
                                   size_t size) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  CHECK_EQ(size, 4u);
  CHECK_EQ(dest.Int32Value(), src.Int32Value());
  __ movq(scratch, Address(CpuRegister(RSP), src));
  __ pushq(Address(scratch, src_offset));
  __ popq(Address(scratch, dest_offset));
}

void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
  __ mfence();
}

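// Creates the handle scope entry that is passed to native code in place of a raw
// object reference: the result is either the address of the handle scope slot at
// handle_scope_offset or null. With null_allowed, the reference is tested first so
// that a null argument produces a null entry rather than a pointer to a slot
// containing null.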
void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister min_reg,
                                                     bool null_allowed) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
    // Use out_reg as indicator of null.
    in_reg = out_reg;
    // TODO: movzwl
    __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  CHECK(in_reg.IsCpuRegister());
  CHECK(out_reg.IsCpuRegister());
  VerifyObject(in_reg, null_allowed);
  if (null_allowed) {
    Label null_arg;
    if (!out_reg.Equals(in_reg)) {
      __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
    }
    __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
}

void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                                     FrameOffset handle_scope_offset,
                                                     ManagedRegister mscratch,
                                                     bool null_allowed) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  CHECK(scratch.IsCpuRegister());
  if (null_allowed) {
    Label null_arg;
    __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
    __ j(kZero, &null_arg);
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
    __ Bind(&null_arg);
  } else {
    __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
  }
  Store(out_off, scratch, 8);
}

// Given a handle scope entry, load the associated reference.
void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                           ManagedRegister min_reg) {
  X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
  X86_64ManagedRegister in_reg = min_reg.AsX86_64();
  CHECK(out_reg.IsCpuRegister());
  CHECK(in_reg.IsCpuRegister());
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
  }
  __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
  __ j(kZero, &null_arg);
  __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
  __ Bind(&null_arg);
}

void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void X86_64JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
}

void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
  X86_64ManagedRegister base = mbase.AsX86_64();
  CHECK(base.IsCpuRegister());
  __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
  // TODO: place reference map on call
}

void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
  __ movq(scratch, Address(CpuRegister(RSP), base));
  __ call(Address(scratch, offset));
}

void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
  __ gs()->call(Address::Absolute(offset, true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
  __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
}

void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
  X86_64ManagedRegister scratch = mscratch.AsX86_64();
  __ gs()->movq(scratch.AsCpuRegister(),
                Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
  __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
}

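// ExceptionPoll() below emits a gs-relative cmpl of Thread::ExceptionOffset against
// zero and branches to this slow path when an exception is pending. The slow path
// first undoes any outstanding stack adjustment, then loads the exception object
// into RDI and calls the pDeliverException quick entrypoint, which does not return
// (an int3 follows as a guard).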
// Slowpath entered when Thread::Current()->_exception is non-null
class X86_64ExceptionSlowPath final : public SlowPath {
 public:
  explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
  void Emit(Assembler *sp_asm) override;
 private:
  const size_t stack_adjust_;
};

void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
  X86_64ExceptionSlowPath* slow = new (__ GetAllocator()) X86_64ExceptionSlowPath(stack_adjust);
  __ GetBuffer()->EnqueueSlowPath(slow);
  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
                Immediate(0));
  __ j(kNotEqual, slow->Entry());
}

std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
  return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ jmp(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label,
                                   JNIMacroUnaryCondition condition,
                                   ManagedRegister test) {
  CHECK(label != nullptr);

  art::x86_64::Condition x86_64_cond;
  switch (condition) {
    case JNIMacroUnaryCondition::kZero:
      x86_64_cond = art::x86_64::kZero;
      break;
    case JNIMacroUnaryCondition::kNotZero:
      x86_64_cond = art::x86_64::kNotZero;
      break;
    default:
      LOG(FATAL) << "Not implemented condition: " << static_cast<int>(condition);
      UNREACHABLE();
  }

  // TEST reg, reg
  // Jcc <Offset>
  __ testq(test.AsX86_64().AsCpuRegister(), test.AsX86_64().AsCpuRegister());
  __ j(x86_64_cond, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
  CHECK(label != nullptr);
  __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

#undef __

void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
  X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  // Note: the return value is dead
  if (stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
  }
  // Pass exception as argument in RDI
  __ gs()->movq(CpuRegister(RDI),
                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
  __ gs()->call(
      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
  // this call should never return
  __ int3();
#undef __
}

}  // namespace x86_64
}  // namespace art