/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_

#include "arch/x86/instruction_set_features_x86.h"
#include "base/enums.h"
#include "code_generator.h"
#include "dex/dex_file_types.h"
#include "driver/compiler_options.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
#include "utils/x86/assembler_x86.h"

namespace art {
namespace x86 {

// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86WordSize = static_cast<size_t>(kX86PointerSize);

class CodeGeneratorX86;

static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
static constexpr XmmRegister kParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);

static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
    arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1, XMM2, XMM3 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
    arraysize(kRuntimeParameterFpuRegisters);

class InvokeRuntimeCallingConvention : public CallingConvention<Register, XmmRegister> {
 public:
  InvokeRuntimeCallingConvention()
      : CallingConvention(kRuntimeParameterCoreRegisters,
                          kRuntimeParameterCoreRegistersLength,
                          kRuntimeParameterFpuRegisters,
                          kRuntimeParameterFpuRegistersLength,
                          kX86PointerSize) {}

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};

class InvokeDexCallingConvention : public CallingConvention<Register, XmmRegister> {
 public:
  InvokeDexCallingConvention() : CallingConvention(
      kParameterCoreRegisters,
      kParameterCoreRegistersLength,
      kParameterFpuRegisters,
      kParameterFpuRegistersLength,
      kX86PointerSize) {}

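  // Returns the register pair holding a 64-bit ("wide") argument whose low
  // word would occupy core register `argument_index`. For illustration: with
  // the core registers { ECX, EDX, EBX } above, a long starting at index 0
  // is passed in ECX_EDX, and one starting at index 1 in EDX_EBX.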
  RegisterPair GetRegisterPairAt(size_t argument_index) {
    DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
    return kParameterCorePairRegisters[argument_index];
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};

class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVisitor {
 public:
  InvokeDexCallingConventionVisitorX86() {}
  virtual ~InvokeDexCallingConventionVisitorX86() {}

  Location GetNextLocation(DataType::Type type) override;
  Location GetReturnLocation(DataType::Type type) const override;
  Location GetMethodLocation() const override;

 private:
  InvokeDexCallingConvention calling_convention;

  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};

class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
 public:
  FieldAccessCallingConventionX86() {}

  Location GetObjectLocation() const override {
    return Location::RegisterLocation(ECX);
  }
  Location GetFieldIndexLocation() const override {
    return Location::RegisterLocation(EAX);
  }
  Location GetReturnLocation(DataType::Type type) const override {
    return DataType::Is64BitType(type)
        ? Location::RegisterPairLocation(EAX, EDX)
        : Location::RegisterLocation(EAX);
  }
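  // For instance-field sets the object is already in ECX (see
  // GetObjectLocation() above), so the value to store is shifted to EDX
  // (or the EDX:EBX pair for 64-bit values); static sets can start at ECX.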
  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
    return DataType::Is64BitType(type)
        ? (is_instance
            ? Location::RegisterPairLocation(EDX, EBX)
            : Location::RegisterPairLocation(ECX, EDX))
        : (is_instance
            ? Location::RegisterLocation(EDX)
            : Location::RegisterLocation(ECX));
  }
  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
    return Location::FpuRegisterLocation(XMM0);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
};

class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
 public:
  ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}

  void EmitMove(size_t index) override;
  void EmitSwap(size_t index) override;
  void SpillScratch(int reg) override;
  void RestoreScratch(int reg) override;

  X86Assembler* GetAssembler() const;

 private:
  void Exchange(Register reg, int mem);
  void Exchange32(XmmRegister reg, int mem);
  void Exchange128(XmmRegister reg, int mem);
  void ExchangeMemory(int mem1, int mem2, int number_of_words);
  void MoveMemoryToMemory(int dst, int src, int number_of_words);

  CodeGeneratorX86* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverX86);
};

class LocationsBuilderX86 : public HGraphVisitor {
 public:
  LocationsBuilderX86(HGraph* graph, CodeGeneratorX86* codegen)
      : HGraphVisitor(graph), codegen_(codegen) {}

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

 private:
  void HandleBitwiseOperation(HBinaryOperation* instruction);
  void HandleInvoke(HInvoke* invoke);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* instruction);
  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
  bool CpuHasAvxFeatureFlag();
  bool CpuHasAvx2FeatureFlag();

  CodeGeneratorX86* const codegen_;
  InvokeDexCallingConventionVisitorX86 parameter_visitor_;

  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
};

class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
 public:
  InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);

#define DECLARE_VISIT_INSTRUCTION(name, super)     \
  void Visit##name(H##name* instr) override;

  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
  FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)

#undef DECLARE_VISIT_INSTRUCTION

  void VisitInstruction(HInstruction* instruction) override {
    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
               << " (id " << instruction->GetId() << ")";
  }

  X86Assembler* GetAssembler() const { return assembler_; }

  // The compare/jump sequence will generate about (1.5 * num_entries) instructions, while a
  // jump table version generates 7 instructions plus num_entries literals. The compare/jump
  // sequence therefore produces less code/data when num_entries is small.
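  // Sketch of the two lowerings (illustrative, not the exact emitted code):
  // the compare/jump sequence emits a `cmpl`/`je` pair per case plus a final
  // `jmp` to the default block, whereas the jump-table version range-checks
  // the value once, loads a 32-bit offset from the table indexed by the
  // value, adds the method's PC-relative base, and jumps indirectly.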
  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;

 private:
  // Generate code for the given suspend check. If not null, `successor`
  // is the block to branch to if the suspend check is not needed, and after
  // the suspend call.
  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
  void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, Register temp);
  void HandleBitwiseOperation(HBinaryOperation* instruction);
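  // Division/remainder lowering. The one/minus-one and power-of-two divisor
  // cases are strength-reduced (shifts plus a sign fix-up); the generic
  // constant case presumably uses the usual multiply-by-magic-number
  // reciprocal technique, and everything else falls back to hardware idiv.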
  void GenerateDivRemIntegral(HBinaryOperation* instruction);
  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
  void DivByPowerOfTwo(HDiv* instruction);
  void RemByPowerOfTwo(HRem* instruction);
  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
  void GenerateRemFP(HRem* rem);
  void HandleCondition(HCondition* condition);
  void HandleShift(HBinaryOperation* instruction);
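  // Helpers for 64-bit shifts on 32-bit x86: the Register overloads handle a
  // variable shift amount (conventionally kept in CL, using the shld/shrd
  // idiom), while the int overloads specialize constant distances below, at,
  // and above 32 bits.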
  void GenerateShlLong(const Location& loc, Register shifter);
  void GenerateShrLong(const Location& loc, Register shifter);
  void GenerateUShrLong(const Location& loc, Register shifter);
  void GenerateShlLong(const Location& loc, int shift);
  void GenerateShrLong(const Location& loc, int shift);
  void GenerateUShrLong(const Location& loc, int shift);
  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);

  void HandleFieldSet(HInstruction* instruction,
                      const FieldInfo& field_info,
                      bool value_can_be_null);
  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);

  // Generate a heap reference load using one register `out`:
  //
  //   out <- *(out + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a read barrier and
  // shall be a register in that case; it may be an invalid location
  // otherwise.
  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
                                        Location out,
                                        uint32_t offset,
                                        Location maybe_temp,
                                        ReadBarrierOption read_barrier_option);
  // Generate a heap reference load using two different registers
  // `out` and `obj`:
  //
  //   out <- *(obj + offset)
  //
  // while honoring heap poisoning and/or read barriers (if any).
  //
  // Location `maybe_temp` is used when generating a Baker's (fast
  // path) read barrier and shall be a register in that case; it may
  // be an invalid location otherwise.
  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
                                         Location out,
                                         Location obj,
                                         uint32_t offset,
                                         ReadBarrierOption read_barrier_option);
  // Generate a GC root reference load:
  //
  //   root <- *address
  //
  // while honoring read barriers based on read_barrier_option.
  void GenerateGcRootFieldLoad(HInstruction* instruction,
                               Location root,
                               const Address& address,
                               Label* fixup_label,
                               ReadBarrierOption read_barrier_option);

  // Push value to FPU stack. `is_fp` specifies whether the value is floating point or not.
  // `is_wide` specifies whether it is long/double or not.
  void PushOntoFPStack(Location source, uint32_t temp_offset,
                       uint32_t stack_adjustment, bool is_fp, bool is_wide);

  template<class LabelType>
  void GenerateTestAndBranch(HInstruction* instruction,
                             size_t condition_input_index,
                             LabelType* true_target,
                             LabelType* false_target);
  template<class LabelType>
  void GenerateCompareTestAndBranch(HCondition* condition,
                                    LabelType* true_target,
                                    LabelType* false_target);
  template<class LabelType>
  void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label);
  template<class LabelType>
  void GenerateLongComparesAndJumps(HCondition* cond,
                                    LabelType* true_label,
                                    LabelType* false_label);

  void HandleGoto(HInstruction* got, HBasicBlock* successor);
  void GenPackedSwitchWithCompares(Register value_reg,
                                   int32_t lower_bound,
                                   uint32_t num_entries,
                                   HBasicBlock* switch_block,
                                   HBasicBlock* default_block);

  void GenerateFPCompare(Location lhs, Location rhs, HInstruction* insn, bool is_double);
  bool CpuHasAvxFeatureFlag();
  bool CpuHasAvx2FeatureFlag();

  X86Assembler* const assembler_;
  CodeGeneratorX86* const codegen_;

  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};

class JumpTableRIPFixup;

class CodeGeneratorX86 : public CodeGenerator {
 public:
  CodeGeneratorX86(HGraph* graph,
                   const CompilerOptions& compiler_options,
                   OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGeneratorX86() {}

  void GenerateFrameEntry() override;
  void GenerateFrameExit() override;
  void Bind(HBasicBlock* block) override;
  void MoveConstant(Location destination, int32_t value) override;
  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
  void AddLocationAsTemp(Location location, LocationSummary* locations) override;

  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;

  // Generate code to invoke a runtime entry point.
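  // Illustrative call-site shape (a sketch; the entry point is an example):
  // a divide-by-zero slow path would emit roughly
  //   InvokeRuntime(kQuickThrowDivZero, instruction, instruction->GetDexPc(), this);
  // recording a stack map at that dex_pc (use InvokeRuntimeWithoutRecordingPcInfo
  // below to skip that).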
  void InvokeRuntime(QuickEntrypointEnum entrypoint,
                     HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr) override;

  // Generate code to invoke a runtime entry point, but do not record
  // PC-related information in a stack map.
  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
                                           HInstruction* instruction,
                                           SlowPathCode* slow_path);

  void GenerateInvokeRuntime(int32_t entry_point_offset);

  size_t GetWordSize() const override {
    return kX86WordSize;
  }

  size_t GetSlowPathFPWidth() const override {
    return GetGraph()->HasSIMD()
        ? 4 * kX86WordSize   // 16 bytes == 4 words for each spill
        : 2 * kX86WordSize;  //  8 bytes == 2 words for each spill
  }

  size_t GetCalleePreservedFPWidth() const override {
    return 2 * kX86WordSize;
  }

  HGraphVisitor* GetLocationBuilder() override {
    return &location_builder_;
  }

  HGraphVisitor* GetInstructionVisitor() override {
    return &instruction_visitor_;
  }

  X86Assembler* GetAssembler() override {
    return &assembler_;
  }

  const X86Assembler& GetAssembler() const override {
    return assembler_;
  }

  uintptr_t GetAddressOf(HBasicBlock* block) override {
    return GetLabelOf(block)->Position();
  }

  void SetupBlockedRegisters() const override;

  void DumpCoreRegister(std::ostream& stream, int reg) const override;
  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;

  ParallelMoveResolverX86* GetMoveResolver() override {
    return &move_resolver_;
  }

  InstructionSet GetInstructionSet() const override {
    return InstructionSet::kX86;
  }

  const X86InstructionSetFeatures& GetInstructionSetFeatures() const;

  // Helper method to move a 32-bit value between two locations.
  void Move32(Location destination, Location source);
  // Helper method to move a 64-bit value between two locations.
  void Move64(Location destination, Location source);

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) override;

  // Check if the desired_class_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  HLoadClass::LoadKind GetSupportedLoadClassKind(
      HLoadClass::LoadKind desired_class_load_kind) override;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      ArtMethod* method) override;

  // Generate a call to a static or direct method.
  void GenerateStaticOrDirectCall(
      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
  // Generate a call to a virtual method.
  void GenerateVirtualCall(
      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;

  void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                     uint32_t intrinsic_data);
  void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
                                 uint32_t boot_image_offset);
  void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
  void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
  void RecordBootImageTypePatch(HLoadClass* load_class);
  Label* NewTypeBssEntryPatch(HLoadClass* load_class);
  void RecordBootImageStringPatch(HLoadString* load_string);
  Label* NewStringBssEntryPatch(HLoadString* load_string);

  void LoadBootImageAddress(Register reg,
                            uint32_t boot_image_reference,
                            HInvokeStaticOrDirect* invoke);
  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);

  Label* NewJitRootStringPatch(const DexFile& dex_file,
                               dex::StringIndex string_index,
                               Handle<mirror::String> handle);
  Label* NewJitRootClassPatch(const DexFile& dex_file,
                              dex::TypeIndex type_index,
                              Handle<mirror::Class> handle);

  void MoveFromReturnRegister(Location trg, DataType::Type type) override;

  // Emit linker patches.
  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;

  void PatchJitRootUse(uint8_t* code,
                       const uint8_t* roots_data,
                       const PatchInfo<Label>& info,
                       uint64_t index_in_table) const;
  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;

  // Emit a write barrier.
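  // Conceptually (a sketch of the usual generational card-marking scheme):
  // if `value_can_be_null`, `value` is null-tested first; then the card byte
  // at card_table_base + (object >> kCardShift) is dirtied, using `temp` and
  // `card` as scratch registers.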
  void MarkGCCard(Register temp,
                  Register card,
                  Register object,
                  Register value,
                  bool value_can_be_null);

  void GenerateMemoryBarrier(MemBarrierKind kind);

  Label* GetLabelOf(HBasicBlock* block) const {
    return CommonGetLabelOf<Label>(block_labels_, block);
  }

  void Initialize() override {
    block_labels_ = CommonInitializeLabels<Label>();
  }

  bool NeedsTwoRegisters(DataType::Type type) const override {
    return type == DataType::Type::kInt64;
  }

  bool ShouldSplitLongMoves() const override { return true; }

  Label* GetFrameEntryLabel() { return &frame_entry_label_; }

  void AddMethodAddressOffset(HX86ComputeBaseMethodAddress* method_base, int32_t offset) {
    method_address_offset_.Put(method_base->GetId(), offset);
  }

  int32_t GetMethodAddressOffset(HX86ComputeBaseMethodAddress* method_base) const {
    return method_address_offset_.Get(method_base->GetId());
  }

  int32_t ConstantAreaStart() const {
    return constant_area_start_;
  }

  Address LiteralDoubleAddress(double v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralFloatAddress(float v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralInt32Address(int32_t v, HX86ComputeBaseMethodAddress* method_base, Register reg);
  Address LiteralInt64Address(int64_t v, HX86ComputeBaseMethodAddress* method_base, Register reg);

  // Load a 32-bit value into a register in the most efficient manner.
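  // (For instance, zero is cheaper to materialize as `xorl dest, dest` than
  // as `movl $0, dest`; other values presumably use a plain `movl`.)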
  void Load32BitValue(Register dest, int32_t value);

  // Compare a register with a 32-bit value in the most efficient manner.
  void Compare32BitValue(Register dest, int32_t value);

  // Compare int values. Supports only register locations for `lhs`.
  void GenerateIntCompare(Location lhs, Location rhs);
  void GenerateIntCompare(Register lhs, Location rhs);

  // Construct address for array access.
  static Address ArrayAddress(Register obj,
                              Location index,
                              ScaleFactor scale,
                              uint32_t data_offset);

  Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);

  void Finalize(CodeAllocator* allocator) override;

  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference field load when Baker's read barriers are used.
  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t offset,
                                             bool needs_null_check);
  // Fast path implementation of ReadBarrier::Barrier for a heap
  // reference array load when Baker's read barriers are used.
  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
                                             Location ref,
                                             Register obj,
                                             uint32_t data_offset,
                                             Location index,
                                             bool needs_null_check);
  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
  //
  // Load the object reference located at address `src`, held by
  // object `obj`, into `ref`, and mark it if needed.  The base of
  // address `src` must be `obj`.
  //
  // If `always_update_field` is true, the value of the reference is
  // atomically updated in the holder (`obj`).  This operation
  // requires a temporary register, which must be provided as a
  // non-null pointer (`temp`).
  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
                                                 Location ref,
                                                 Register obj,
                                                 const Address& src,
                                                 bool needs_null_check,
                                                 bool always_update_field = false,
                                                 Register* temp = nullptr);

  // Generate a read barrier for a heap reference within `instruction`
  // using a slow path.
  //
  // A read barrier for an object reference read from the heap is
  // implemented as a call to the artReadBarrierSlow runtime entry
  // point, which is passed the values in locations `ref`, `obj`, and
  // `offset`:
  //
  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
  //                                      mirror::Object* obj,
  //                                      uint32_t offset);
  //
  // The `out` location contains the value returned by
  // artReadBarrierSlow.
  //
  // When `index` is provided (i.e. for array accesses), the offset
  // value passed to artReadBarrierSlow is adjusted to take `index`
  // into account.
  void GenerateReadBarrierSlow(HInstruction* instruction,
                               Location out,
                               Location ref,
                               Location obj,
                               uint32_t offset,
                               Location index = Location::NoLocation());

  // If read barriers are enabled, generate a read barrier for a heap
  // reference using a slow path. If heap poisoning is enabled, also
  // unpoison the reference in `out`.
  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
                                    Location out,
                                    Location ref,
                                    Location obj,
                                    uint32_t offset,
                                    Location index = Location::NoLocation());

  // Generate a read barrier for a GC root within `instruction` using
  // a slow path.
  //
  // A read barrier for an object reference GC root is implemented as
  // a call to the artReadBarrierForRootSlow runtime entry point,
  // which is passed the value in location `root`:
  //
  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
  //
  // The `out` location contains the value returned by
  // artReadBarrierForRootSlow.
  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);

  // Ensure that prior stores complete to memory before subsequent loads.
  // The locked add implementation will avoid serializing device memory, but will
  // touch (but not change) the top of the stack.
  // The 'non_temporal' parameter should be used to ensure ordering of non-temporal stores.
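  // In assembly terms the common case below is (roughly) `lock addl $0, 0(%esp)`,
  // which acts as a full StoreLoad barrier on x86 at lower cost than `mfence`;
  // `mfence` is only needed to order non-temporal stores.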
  void MemoryFence(bool non_temporal = false) {
    if (!non_temporal) {
      assembler_.lock()->addl(Address(ESP, 0), Immediate(0));
    } else {
      assembler_.mfence();
    }
  }

  void GenerateNop() override;
  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
  void GenerateExplicitNullCheck(HNullCheck* instruction) override;

  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass);
  void MaybeIncrementHotness(bool is_frame_entry);

  // When we don't know the proper offset for the value, we use kDummy32BitOffset.
  // The correct value will be inserted when processing Assembler fixups.
  static constexpr int32_t kDummy32BitOffset = 256;

 private:
  struct X86PcRelativePatchInfo : PatchInfo<Label> {
    X86PcRelativePatchInfo(HX86ComputeBaseMethodAddress* address,
                           const DexFile* target_dex_file,
                           uint32_t target_index)
        : PatchInfo(target_dex_file, target_index),
          method_address(address) {}
    HX86ComputeBaseMethodAddress* method_address;
  };

  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
  void EmitPcRelativeLinkerPatches(const ArenaDeque<X86PcRelativePatchInfo>& infos,
                                   ArenaVector<linker::LinkerPatch>* linker_patches);

  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);

  // Labels for each block that will be compiled.
  Label* block_labels_;  // Indexed by block id.
  Label frame_entry_label_;
  LocationsBuilderX86 location_builder_;
  InstructionCodeGeneratorX86 instruction_visitor_;
  ParallelMoveResolverX86 move_resolver_;
  X86Assembler assembler_;

  // PC-relative method patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
  // PC-relative method patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> method_bss_entry_patches_;
  // PC-relative type patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_type_patches_;
  // PC-relative type patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> type_bss_entry_patches_;
  // PC-relative String patch info for kBootImageLinkTimePcRelative.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_string_patches_;
  // PC-relative String patch info for kBssEntry.
  ArenaDeque<X86PcRelativePatchInfo> string_bss_entry_patches_;
  // PC-relative patch info for IntrinsicObjects for the boot image,
  // and for method/type/string patches for kBootImageRelRo otherwise.
  ArenaDeque<X86PcRelativePatchInfo> boot_image_other_patches_;

  // Patches for string root accesses in JIT compiled code.
  ArenaDeque<PatchInfo<Label>> jit_string_patches_;
  // Patches for class root accesses in JIT compiled code.
  ArenaDeque<PatchInfo<Label>> jit_class_patches_;

  // Offset to the start of the constant area in the assembled code.
  // Used for fixups to the constant area.
  int32_t constant_area_start_;

  // Fixups for jump tables that need to be patched after the constant table is generated.
  ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;

  // Maps an HX86ComputeBaseMethodAddress instruction id to its offset in the
  // compiled code.
  ArenaSafeMap<uint32_t, int32_t> method_address_offset_;

  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};

}  // namespace x86
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_