root/src/x64/lithium-codegen-x64.cc


DEFINITIONS

This source file includes the following definitions.
  1. deopt_mode_
  2. BeforeCall
  3. AfterCall
  4. GenerateCode
  5. FinishCode
  6. Abort
  7. Comment
  8. GeneratePrologue
  9. GenerateBody
  10. GenerateJumpTable
  11. GenerateDeferredCode
  12. GenerateSafepointTable
  13. ToRegister
  14. ToDoubleRegister
  15. ToRegister
  16. ToDoubleRegister
  17. IsInteger32Constant
  18. IsTaggedConstant
  19. ToInteger32
  20. ToDouble
  21. ToHandle
  22. ToOperand
  23. WriteTranslation
  24. AddToTranslation
  25. CallCodeGeneric
  26. CallCode
  27. CallRuntime
  28. CallRuntimeFromDeferred
  29. RegisterEnvironmentForDeoptimization
  30. DeoptimizeIf
  31. PopulateDeoptimizationData
  32. DefineDeoptimizationLiteral
  33. PopulateDeoptimizationLiteralsWithInlinedFunctions
  34. RecordSafepointWithLazyDeopt
  35. RecordSafepoint
  36. RecordSafepoint
  37. RecordSafepoint
  38. RecordSafepointWithRegisters
  39. RecordPosition
  40. DoLabel
  41. DoParallelMove
  42. DoGap
  43. DoInstructionGap
  44. DoParameter
  45. DoCallStub
  46. DoUnknownOSRValue
  47. DoModI
  48. DoMathFloorOfDiv
  49. DoDivI
  50. DoMulI
  51. DoBitI
  52. DoShiftI
  53. DoSubI
  54. DoConstantI
  55. DoConstantD
  56. DoConstantT
  57. DoJSArrayLength
  58. DoFixedArrayBaseLength
  59. DoElementsKind
  60. DoValueOf
  61. DoDateField
  62. DoBitNotI
  63. DoThrow
  64. DoAddI
  65. DoArithmeticD
  66. DoArithmeticT
  67. GetNextEmittedBlock
  68. EmitBranch
  69. DoBranch
  70. EmitGoto
  71. DoGoto
  72. TokenToCondition
  73. DoCmpIDAndBranch
  74. DoCmpObjectEqAndBranch
  75. DoCmpConstantEqAndBranch
  76. DoIsNilAndBranch
  77. EmitIsObject
  78. DoIsObjectAndBranch
  79. EmitIsString
  80. DoIsStringAndBranch
  81. DoIsSmiAndBranch
  82. DoIsUndetectableAndBranch
  83. DoStringCompareAndBranch
  84. TestType
  85. BranchCondition
  86. DoHasInstanceTypeAndBranch
  87. DoGetCachedArrayIndex
  88. DoHasCachedArrayIndexAndBranch
  89. EmitClassOfTest
  90. DoClassOfTestAndBranch
  91. DoCmpMapAndBranch
  92. DoInstanceOf
  93. DoInstanceOfKnownGlobal
  94. instr_
  95. Generate
  96. instr
  97. map_check
  98. DoDeferredInstanceOfKnownGlobal
  99. DoCmpT
  100. DoReturn
  101. DoLoadGlobalCell
  102. DoLoadGlobalGeneric
  103. DoStoreGlobalCell
  104. DoStoreGlobalGeneric
  105. DoLoadContextSlot
  106. DoStoreContextSlot
  107. DoLoadNamedField
  108. EmitLoadFieldOrConstantFunction
  109. CompactEmit
  110. DoLoadNamedFieldPolymorphic
  111. DoLoadNamedGeneric
  112. DoLoadFunctionPrototype
  113. DoLoadElements
  114. DoLoadExternalArrayPointer
  115. DoAccessArgumentsAt
  116. DoLoadKeyedFastElement
  117. DoLoadKeyedFastDoubleElement
  118. BuildFastArrayOperand
  119. DoLoadKeyedSpecializedArrayElement
  120. DoLoadKeyedGeneric
  121. DoArgumentsElements
  122. DoArgumentsLength
  123. DoWrapReceiver
  124. DoApplyArguments
  125. DoPushArgument
  126. DoDrop
  127. DoThisFunction
  128. DoContext
  129. DoOuterContext
  130. DoDeclareGlobals
  131. DoGlobalObject
  132. DoGlobalReceiver
  133. CallKnownFunction
  134. DoCallConstantFunction
  135. DoDeferredMathAbsTaggedHeapNumber
  136. EmitIntegerMathAbs
  137. DoMathAbs
  138. instr_
  139. Generate
  140. instr
  141. DoMathFloor
  142. DoMathRound
  143. DoMathSqrt
  144. DoMathPowHalf
  145. DoPower
  146. DoRandom
  147. instr_
  148. Generate
  149. instr
  150. DoDeferredRandom
  151. DoMathLog
  152. DoMathTan
  153. DoMathCos
  154. DoMathSin
  155. DoUnaryMathOperation
  156. DoInvokeFunction
  157. DoCallKeyed
  158. DoCallNamed
  159. DoCallFunction
  160. DoCallGlobal
  161. DoCallKnownGlobal
  162. DoCallNew
  163. DoCallRuntime
  164. DoStoreNamedField
  165. DoStoreNamedGeneric
  166. DoStoreKeyedSpecializedArrayElement
  167. DoBoundsCheck
  168. DoStoreKeyedFastElement
  169. DoStoreKeyedFastDoubleElement
  170. DoStoreKeyedGeneric
  171. DoTransitionElementsKind
  172. DoStringAdd
  173. DoStringCharCodeAt
  174. instr_
  175. Generate
  176. instr
  177. DoDeferredStringCharCodeAt
  178. DoStringCharFromCode
  179. instr_
  180. Generate
  181. instr
  182. DoDeferredStringCharFromCode
  183. DoStringLength
  184. DoInteger32ToDouble
  185. DoNumberTagI
  186. DoNumberTagD
  187. instr_
  188. Generate
  189. instr
  190. DoDeferredNumberTagD
  191. DoSmiTag
  192. DoSmiUntag
  193. EmitNumberUntagD
  194. DoDeferredTaggedToI
  195. DoTaggedToI
  196. instr_
  197. Generate
  198. instr
  199. DoNumberUntagD
  200. DoDoubleToI
  201. DoCheckSmi
  202. DoCheckNonSmi
  203. DoCheckInstanceType
  204. DoCheckFunction
  205. DoCheckMapCommon
  206. DoCheckMaps
  207. DoClampDToUint8
  208. DoClampIToUint8
  209. DoClampTToUint8
  210. DoCheckPrototypeMaps
  211. DoAllocateObject
  212. instr_
  213. Generate
  214. instr
  215. DoDeferredAllocateObject
  216. DoArrayLiteral
  217. EmitDeepCopy
  218. DoFastLiteral
  219. DoObjectLiteral
  220. DoToFastProperties
  221. DoRegExpLiteral
  222. DoFunctionLiteral
  223. DoTypeof
  224. EmitPushTaggedOperand
  225. DoTypeofIsAndBranch
  226. EmitTypeofIs
  227. DoIsConstructCallAndBranch
  228. EmitIsConstructCall
  229. EnsureSpaceForLazyDeopt
  230. DoLazyBailout
  231. DoDeoptimize
  232. DoDeleteProperty
  233. DoIn
  234. DoDeferredStackCheck
  235. DoStackCheck
  236. instr_
  237. Generate
  238. instr
  239. DoOsrEntry
  240. DoForInPrepareMap
  241. DoForInCacheArray
  242. DoCheckMapValue
  243. DoLoadFieldByIndex

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "x64/lithium-codegen-x64.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {
    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
  }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack-allocated buffer goes out of scope.
  int length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  // Strict mode functions need to replace the receiver with undefined
  // when called as functions (without an explicit receiver
  // object). rcx is zero for method calls and non-zero for function
  // calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ testq(rcx, rcx);
    __ j(zero, &ok, Label::kNear);
    // +1 for return address.
    int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
    __ movq(Operand(rsp, receiver_offset), kScratchRegister);
    __ bind(&ok);
  }

  __ push(rbp);  // Caller's frame pointer.
  __ movq(rbp, rsp);
  __ push(rsi);  // Callee's context.
  __ push(rdi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
      Label loop;
      __ bind(&loop);
      __ push(kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ movq(Operand(rsp, offset), rax);
      }
#endif
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    __ push(rdi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both rax and rsi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in rsi.
    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movq(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movq(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
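  // A conditional jump cannot target an arbitrary 64-bit address, so
  // DeoptimizeIf jumps to an entry in this table, which then performs the
  // absolute jump to the deoptimization entry.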
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsTagged();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(constant->HasInteger32Value());
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return constant->handle();
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  // Does not handle registers. In X64 assembler, plain registers are not
  // representable as an Operand.
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
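    // For example, index 0 maps to Operand(rbp, -3 * kPointerSize), the
    // slot just below the context and function pushed in GeneratePrologue.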
    return Operand(rbp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(rbp, -(index - 1) * kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
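  // The frame of the function being compiled uses kSelfLiteralId; inlined
  // frames record their closure as a deoptimization literal.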
  int closure_id = *info()->closure() != *environment->closure()
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count,
                            environment->zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (cc == no_condition) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry) {
      jump_table_.Add(JumpTableEntry(entry), zone());
    }
    __ j(cc, &jump_table_.last().label);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
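  // A safepoint records which stack slots (and, for call-with-registers
  // safepoints, which registers) hold tagged pointers at this point, so the
  // GC can find and update them.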
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register rsi always contains a pointer to the context.
    safepoint.DefinePointerRegister(rsi, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->InputAt(0));

    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();

    if (divisor < 0) divisor = -divisor;

    Label positive_dividend, done;
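    // For a power-of-two divisor d, x % d == x & (d - 1) when x >= 0.  For a
    // negative dividend compute -((-x) & (d - 1)) so the result keeps the
    // sign of the dividend, as JavaScript requires.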
    __ testl(dividend, dividend);
    __ j(not_sign, &positive_dividend, Label::kNear);
    __ negl(dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ negl(dividend);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ j(not_zero, &done, Label::kNear);
      DeoptimizeIf(no_condition, instr->environment());
    } else {
      __ jmp(&done, Label::kNear);
    }
    __ bind(&positive_dividend);
    __ andl(dividend, Immediate(divisor - 1));
    __ bind(&done);
  } else {
    Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
    Register left_reg = ToRegister(instr->InputAt(0));
    Register right_reg = ToRegister(instr->InputAt(1));
    Register result_reg = ToRegister(instr->result());

    ASSERT(left_reg.is(rax));
    ASSERT(result_reg.is(rdx));
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    __ testl(left_reg, left_reg);
    __ j(zero, &remainder_eq_dividend, Label::kNear);
    __ j(sign, &slow, Label::kNear);

    __ testl(right_reg, right_reg);
    __ j(not_sign, &both_positive, Label::kNear);
    // The sign of the divisor doesn't matter.
    __ neg(right_reg);

    __ bind(&both_positive);
    // If the dividend is smaller than the nonnegative
    // divisor, the dividend is the result.
    __ cmpl(left_reg, right_reg);
    __ j(less, &remainder_eq_dividend, Label::kNear);

    // Check if the divisor is a power-of-two integer.
    Register scratch = ToRegister(instr->TempAt(0));
    __ movl(scratch, right_reg);
    __ subl(scratch, Immediate(1));
    __ testl(scratch, right_reg);
    __ j(not_zero, &do_subtraction, Label::kNear);
    __ andl(left_reg, scratch);
    __ jmp(&remainder_eq_dividend, Label::kNear);

    __ bind(&do_subtraction);
    const int kUnfolds = 3;
    // Try a few subtractions of the dividend.
    __ movl(scratch, left_reg);
    for (int i = 0; i < kUnfolds; i++) {
      // Reduce the dividend by the divisor.
      __ subl(left_reg, right_reg);
      // Check if the dividend is less than the divisor.
      __ cmpl(left_reg, right_reg);
      __ j(less, &remainder_eq_dividend, Label::kNear);
    }
    __ movl(left_reg, scratch);

    // Slow case, using idiv instruction.
    __ bind(&slow);
    // Sign extend eax to edx.
    // (We are using only the low 32 bits of the values.)
    __ cdq();

    // Check for (0 % -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      Label done;
      __ testl(left_reg, left_reg);
      __ j(not_sign, &positive_left, Label::kNear);
      __ idivl(right_reg);

      // Test the remainder for 0, because then the result would be -0.
      __ testl(result_reg, result_reg);
      __ j(not_zero, &done, Label::kNear);

      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&positive_left);
      __ idivl(right_reg);
      __ bind(&done);
    } else {
      __ idivl(right_reg);
    }
    __ jmp(&done, Label::kNear);

    __ bind(&remainder_eq_dividend);
    __ movl(result_reg, left_reg);

    __ bind(&done);
  }
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->InputAt(1)->IsConstantOperand());

  const Register dividend = ToRegister(instr->InputAt(0));
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
  const Register result = ToRegister(instr->result());

  switch (divisor) {
    case 0:
      DeoptimizeIf(no_condition, instr->environment());
      return;

    case 1:
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      return;

    case -1:
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ negl(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        DeoptimizeIf(overflow, instr->environment());
      }
      return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      __ movsxlq(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ sar(result, Immediate(power));
    } else {
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ sarl(result, Immediate(power));
    }
  } else {
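    // Non-power-of-two divisor: use reciprocal multiplication.  Precompute
    // multiplier ~= 2^shift / divisor_abs, multiply the sign-extended
    // dividend by it, and shift the 64-bit product right by 'shift' to get
    // the flooring quotient.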
    Register reg1 = ToRegister(instr->TempAt(0));
    Register reg2 = ToRegister(instr->result());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision: +1 bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    // The multiply is int64, so sign-extend to r64.
    __ movsxlq(reg1, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ neg(reg1);
      DeoptimizeIf(zero, instr->environment());
    }
    __ movq(reg2, multiplier, RelocInfo::NONE);
    // The result just fits in r64, because it's int32 * uint32.
    __ imul(reg2, reg1);

    __ addq(reg2, Immediate(1 << 30));
    __ sar(reg2, Immediate(shift));
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->InputAt(1);
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  // Deoptimize if remainder is not 0.
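  // A non-zero remainder means the true quotient is fractional and cannot
  // be represented by this instruction's int32 result.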
  __ testl(rdx, rdx);
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right = instr->InputAt(1);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ movl(kScratchRegister, left);
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    __ imull(left, ToOperand(right));
  } else {
    __ imull(left, ToRegister(right));
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
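    // The 32-bit product is zero; the result should have been -0 if either
    // operand was negative, so check the operands' signs (kScratchRegister
    // still holds the original left operand).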
    Label done;
    __ testl(left, left);
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      __ orl(kScratchRegister, ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      __ orl(kScratchRegister, ToRegister(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), Immediate(right_operand));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xorl(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
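        // A logical shift by zero leaves the value unchanged but
        // reinterprets it as uint32; if the sign bit is set the value does
        // not fit in an int32, so deoptimize.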
        if (shift_count == 0 && instr->can_deopt()) {
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shll(ToRegister(left), Immediate(shift_count));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ subl(ToRegister(left), ToRegister(right));
  } else {
    __ subl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->TempAt(0));
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ Move(ToRegister(instr->result()), value);
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte.
  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Immediate(Map::kElementsKindMask));
  __ shr(result, Immediate(Map::kElementsKindShift));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Label done;
  // If the object is a smi, return the object.
  __ JumpIfSmi(input, &done, Label::kNear);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);
  __ movq(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

#ifdef DEBUG
  __ AbortIfSmi(object);
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  __ Assert(equal, "Trying to get date field from non-date.");
#endif

  if (index->value() == 0) {
    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
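      // Cached date fields are only valid while the date cache stamp
      // matches; otherwise fall back to the runtime C function below.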
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ movq(kScratchRegister, stamp);
      __ cmpq(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
#ifdef _WIN64
    __ movq(rcx, object);
    __ movq(rdx, index, RelocInfo::NONE);
#else
    __ movq(rdi, object);
    __ movq(rsi, index, RelocInfo::NONE);
#endif
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->InputAt(0)));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ addl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    __ addl(ToRegister(left), ToRegister(right));
  } else {
    __ addl(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->InputAt(0));
  XMMRegister right = ToDoubleRegister(instr->InputAt(1));
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
       __ subsd(left, right);
       break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      break;
    case Token::MOD:
      __ PrepareCallCFunction(2);
      __ movaps(xmm0, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
      __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
      __ movaps(result, xmm0);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
  ASSERT(ToRegister(instr->InputAt(1)).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
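  // Emit at most one jump: when either target is the next emitted block we
  // simply fall through to it.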
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    __ testl(reg, reg);
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, equal);
    } else if (type.IsSmi()) {
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(true_block, false_block, not_equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, true_label);
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, false_label);
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, false_label);
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, false_label);
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, false_label);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, true_label);
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, true_label);
        __ jmp(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        __ xorps(xmm0, xmm0);
        __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, false_label);
        __ jmp(true_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(no_condition, instr->environment());
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
      EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                       : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        __ cmpl(ToRegister(left), Immediate(value));
      } else if (left->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(left));
        if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(true_block, false_block, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmpq(left, right);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmpq(left, Immediate(instr->hydrogen()->right()));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ CompareRoot(reg, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ CompareRoot(reg, other_nil_value);
    __ j(equal, true_label);
    __ JumpIfSmi(reg, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->TempAt(0));
    __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsObject(reg, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond = EmitIsString(reg, temp, false_label);

  EmitBranch(true_block, false_block, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Condition is_smi;
  if (instr->InputAt(0)->IsRegister()) {
    Register input = ToRegister(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->InputAt(0));
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(true_block, false_block, is_smi);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, not_zero);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testq(rax, rax);

  EmitBranch(true_block, false_block, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, equal);
}


// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do an unsigned compare with the width of the type range.
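    // Values below the lower bound wrap around to large unsigned values after
    // the subtraction, so the single unsigned 'above' check rejects both ends
    // of the range.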
    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movq(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are symbols it is sufficient to use an identity
  // comparison.
  ASSERT(class_name->IsSymbol());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->InputAt(0)));
  __ push(ToRegister(instr->InputAt(1)));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };


  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->TempAt(0));
  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<JSGlobalPropertyCell> cache_cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
  __ cmpq(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->InputAt(0)));
    __ PushHeapObject(instr->function());

    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ push_imm32(delta);
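    // The pushed delta is the code offset from the map-check label to the
    // return address of the stub call below; the stub presumably uses it to
    // locate and patch the inlined cache site.  The ASSERT after the call
    // verifies that push_imm32 and the call occupy exactly kAdditionalDelta
    // bytes.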

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because stub is going to
    // remove the third argument from the stack before jumping
    // to instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movq(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(rax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ movq(rsp, rbp);
  __ pop(rbp);
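  // Return, dropping the parameters plus the receiver (+1) from the caller's
  // stack; rcx is only used as a scratch register by Ret.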
  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();

  // If the cell we are storing to contains the hole, it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->TempAt(0));
    ASSERT(!value.is(cell));
    __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movq(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ movq(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movq(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->TempAt(0));
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name,
                                               LEnvironment* env) {
  LookupResult lookup(isolate());
  type->LookupDescriptor(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() || lookup.IsCacheable());
  if (lookup.IsField()) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
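      // For example, index -1 addresses the last in-object slot, located at
      // offset instance_size() - kPointerSize.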
      __ movq(result, FieldOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
      __ movq(result, FieldOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else if (lookup.IsConstantFunction()) {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  } else {
    // Negative lookup.
    // Check prototypes.
    Handle<HeapObject> current(HeapObject::cast((*type)->prototype()));
    Heap* heap = type->GetHeap();
    while (*current != heap->null_value()) {
      __ LoadHeapObject(result, current);
      __ Cmp(FieldOperand(result, HeapObject::kMapOffset),
                          Handle<Map>(current->map()));
      DeoptimizeIf(not_equal, env);
      current =
          Handle<HeapObject>(HeapObject::cast(current->map()->prototype()));
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
  }
}


// Check for cases where EmitLoadFieldOrConstantFunction needs to walk the
// prototype chain, which causes unbounded code generation.
static bool CompactEmit(SmallMapList* list,
                        Handle<String> name,
                        int i,
                        Isolate* isolate) {
  Handle<Map> map = list->at(i);
  // If the map has ElementsKind transitions, we will generate map checks
  // for each kind in __ CompareMap(..., ALLOW_ELEMENT_TRANSITION_MAPS).
  if (map->HasElementsTransition()) return false;
  LookupResult lookup(isolate);
  map->LookupDescriptor(NULL, *name, &lookup);
  return lookup.IsField() || lookup.IsConstantFunction();
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());

  int map_count = instr->hydrogen()->types()->length();
  bool need_generic = instr->hydrogen()->need_generic();

  if (map_count == 0 && !need_generic) {
    DeoptimizeIf(no_condition, instr->environment());
    return;
  }
  Handle<String> name = instr->hydrogen()->name();
  Label done;
  bool all_are_compact = true;
  for (int i = 0; i < map_count; ++i) {
    if (!CompactEmit(instr->hydrogen()->types(), name, i, isolate())) {
      all_are_compact = false;
      break;
    }
  }
  for (int i = 0; i < map_count; ++i) {
    bool last = (i == map_count - 1);
    Handle<Map> map = instr->hydrogen()->types()->at(i);
    Label check_passed;
    __ CompareMap(object, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS);
    if (last && !need_generic) {
      DeoptimizeIf(not_equal, instr->environment());
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
    } else {
      Label next;
      bool compact = all_are_compact ? true :
          CompactEmit(instr->hydrogen()->types(), name, i, isolate());
      __ j(not_equal, &next, compact ? Label::kNear : Label::kFar);
      __ bind(&check_passed);
      EmitLoadFieldOrConstantFunction(
          result, object, map, name, instr->environment());
      __ jmp(&done, all_are_compact ? Label::kNear : Label::kFar);
      __ bind(&next);
    }
  }
  if (need_generic) {
    __ Move(rcx, name);
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  }
  __ bind(&done);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movq(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, ok, fail;
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedArrayMapRootIndex);
    __ j(equal, &done, Label::kNear);
    __ CompareRoot(FieldOperand(result, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &done, Label::kNear);
    Register temp((result.is(rax)) ? rbx : rax);
    __ push(temp);
    __ movq(temp, FieldOperand(result, HeapObject::kMapOffset));
    __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset));
    __ and_(temp, Immediate(Map::kElementsKindMask));
    __ shr(temp, Immediate(Map::kElementsKindShift));
    __ cmpl(temp, Immediate(GetInitialFastElementsKind()));
    __ j(less, &fail, Label::kNear);
    __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND));
    __ j(less_equal, &ok, Label::kNear);
    __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ j(less, &fail, Label::kNear);
    __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ j(less_equal, &ok, Label::kNear);
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed");
    __ bind(&ok);
    __ pop(temp);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  __ movq(result, FieldOperand(input,
                               ExternalPixelArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register result = ToRegister(instr->result());

  if (instr->index()->IsRegister()) {
    __ subl(length, ToRegister(instr->index()));
  } else {
    __ subl(length, ToOperand(instr->index()));
  }
  DeoptimizeIf(below_equal, instr->environment());

  // There are two words between the frame pointer and the last argument.
  // Subtracting the index from length accounts for one of them; add one more.
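  // That is, result = *(arguments + (length - index + 1) * kPointerSize).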
  __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits.
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  // Load the result.
  __ movq(result,
          BuildFastArrayOperand(instr->elements(),
                                instr->key(),
                                FAST_ELEMENTS,
                                FixedArray::kHeaderSize - kHeapObjectTag,
                                instr->additional_index()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      Condition smi = __ CheckSmi(result);
      DeoptimizeIf(NegateCondition(smi), instr->environment());
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));

  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
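    // The hole is stored as a NaN with a distinguished upper word; adding
    // sizeof(kHoleNanLower32) to the offset addresses that upper word, so a
    // single 32-bit compare against kHoleNanUpper32 detects the hole.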
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(),
        instr->key(),
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movsd(result, double_load_operand);
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
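  // shift_size is log2 of the element size in bytes, which also matches the
  // x64 ScaleFactor encoding used in the register-keyed case below.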
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort("array index constant value too big");
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << shift_size));
  }
}


void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
                                        instr->key(),
                                        elements_kind,
                                        0,
                                        instr->additional_index()));
  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend key because it could be a 32 bit negative value
    // and the dehoisted address computation happens in 64 bits
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsxbq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      case EXTERNAL_PIXEL_ELEMENTS:
        __ movzxbq(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsxwq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzxwq(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ movsxlq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(result, operand);
        __ testl(result, result);
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generating code that returns a double rather than an int.
        DeoptimizeIf(negative, instr->environment());
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(rsp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ movq(result, rbp);
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the current frame if not adapted,
    // or for the real frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  if (instr->InputAt(0)->IsRegister()) {
    __ cmpq(rbp, ToRegister(instr->InputAt(0)));
  } else {
    __ cmpq(rbp, ToOperand(instr->InputAt(0)));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ movq(kScratchRegister,
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kStrictModeByteOffset),
           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Do not transform the receiver to object for builtins.
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kNativeByteOffset),
           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object.  See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(receiver,
          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
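  // Deoptimize if there are too many arguments for the inline push loop below;
  // the limit presumably bounds the stack space consumed here.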
  __ cmpq(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ movq(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, rsi);
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result,
          Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ push(rsi);  // The context is the first argument.
  __ PushHeapObject(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ movq(result, GlobalObjectOperand());
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 RDIState rdi_state) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    if (rdi_state == RDI_UNINITIALIZED) {
      __ LoadHeapObject(rdi, function);
    }

    // Change context if needed.
    bool change_context =
        (info()->closure()->context() != function->context()) ||
        scope()->contains_with() ||
        (scope()->num_heap_slots() > 0);
    if (change_context) {
      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
    }

    // Set rax to arguments count if adaptation is not needed. Assumes that rax
    // is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ Set(rax, arity);
    }

    // Invoke function.
    __ SetCallKind(rcx, call_kind);
    if (*function == *info()->closure()) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->function(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    RDI_UNINITIALIZED);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label negative;
  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) {
    __ movq(tmp, rax);
  }

  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
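  // Clear the IEEE-754 sign bit of the raw 64-bit value: shifting left and
  // then right by one drops bit 63, yielding the absolute value, which is
  // stored in the freshly allocated heap number.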
  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  __ testl(input_reg, input_reg);
  Label is_positive;
  __ j(not_sign, &is_positive);
  __ negl(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andpd(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    EmitIntegerMathAbs(instr);
    __ Integer32ToSmi(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatures::Scope scope(SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
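      // Only -0.0 has the raw bit pattern 0x8000000000000000 (the smallest
      // int64), so moving the double bits into a GP register and subtracting
      // 1 overflows exactly when the input is -0.0.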
      __ movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr->environment());
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, xmm_scratch);
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered (NaN) inputs.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ testq(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, 0);
      __ jmp(&done);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
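    // cvttsd2si rounds toward zero, which for a negative non-integer is one
    // above the floor; if the round-trip value differs from the input,
    // subtract one to compensate.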
    __ cvttsd2si(output_reg, input_reg);
    __ cvtlsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  const XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));

  Label done;
  // xmm_scratch = 0.5
  __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
  __ movq(xmm_scratch, kScratchRegister);
  Label below_half;
  __ ucomisd(xmm_scratch, input_reg);
  // If input_reg is NaN, this doesn't jump.
  __ j(above, &below_half, Label::kNear);
  // input = input + 0.5
  // This addition might give a result that isn't correct for
  // rounding, due to loss of precision, but only for a number that's
  // so big that the conversion below will overflow anyway.
  __ addsd(xmm_scratch, input_reg);
  // Compute Math.floor(input).
  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_half);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bailout if negative (including -0).
    __ movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    DeoptimizeIf(negative, instr->environment());
  } else {
    // Bailout if below -0.5, otherwise round to (positive) zero, even
    // if negative.
    // xmm_scratch = -0.5
    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
    __ movq(xmm_scratch, kScratchRegister);
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
  }
  __ xorl(output_reg, output_reg);

  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity.  According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal.  However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
  Register exponent = rdx;
#else
  Register exponent = rdi;
#endif
  ASSERT(!instr->InputAt(1)->IsRegister() ||
         ToRegister(instr->InputAt(1)).is(exponent));
  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

  // Having marked this instruction as a call, we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));

  // Choose the right register for the first argument depending on
  // calling convention.
#ifdef _WIN64
  ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
  Register global_object = rcx;
#else
  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
  Register global_object = rdi;
#endif

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);

  __ movq(global_object,
          FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
  // rbx: ByteArray holding the global context's random seeds

  // Load state[0].
  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
  // If state[0] == 0, call runtime to initialize seeds.
  __ testl(rax, rax);
  __ j(zero, deferred->entry());
  // Load state[1].
  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  // Only operate on the lower 32 bits of rax.
  __ movl(rdx, rax);
  __ andl(rdx, Immediate(0xFFFF));
  __ imull(rdx, rdx, Immediate(18273));
  __ shrl(rax, Immediate(16));
  __ addl(rax, rdx);
  // Save state[0].
  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ movl(rdx, rcx);
  __ andl(rdx, Immediate(0xFFFF));
  __ imull(rdx, rdx, Immediate(36969));
  __ shrl(rcx, Immediate(16));
  __ addl(rcx, rdx);
  // Save state[1].
  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ shll(rax, Immediate(14));
  __ andl(rcx, Immediate(0x3FFFF));
  __ addl(rax, rcx);

  __ bind(deferred->exit());
  // Convert 32 random bits in rax to 0.(32 random bits) in a double
  // by computing:
  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
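  // xorps merges the random bits into the mantissa of 2^20 (whose low word is
  // zero), so the subtraction leaves (random bits) / 2^32 in [0, 1).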
  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
  __ movd(xmm2, rcx);
  __ movd(xmm1, rax);
  __ cvtss2sd(xmm2, xmm2);
  __ xorps(xmm1, xmm2);
  __ subsd(xmm1, xmm2);
}


void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1);
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  // Return value is in rax.
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;

    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(instr->HasPointerMap());

  if (instr->known_function().is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    CallKnownFunction(instr->known_function(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      RDI_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ Move(rcx, instr->name());
  CallCode(ic, mode, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ Move(rcx, instr->name());
  CallCode(ic, mode, instr);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->target(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    RDI_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ Set(rax, instr->arity());
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset),
              instr->transition());
    } else {
      Register temp = ToRegister(instr->TempAt(0));
      __ Move(kScratchRegister, instr->transition());
      __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          kScratchRegister,
                          temp,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ movq(FieldOperand(object, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      Register temp = ToRegister(instr->TempAt(0));
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          temp,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    Register temp = ToRegister(instr->TempAt(0));
    __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ movq(FieldOperand(temp, offset), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(temp,
                          offset,
                          value,
                          object,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  Operand operand(BuildFastArrayOperand(instr->external_pointer(),
                                        instr->key(),
                                        elements_kind,
                                        0,
                                        instr->additional_index()));

  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend the key because it could be a 32-bit negative value and
    // the dehoisted address computation happens in 64 bits.
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(operand, value);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->length()->IsRegister()) {
    Register reg = ToRegister(instr->length());
    if (FLAG_debug_code) {
      __ AbortIfNotZeroExtended(reg);
    }
    if (instr->index()->IsConstantOperand()) {
      __ cmpq(reg,
              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
    } else {
      Register reg2 = ToRegister(instr->index());
      if (FLAG_debug_code) {
        __ AbortIfNotZeroExtended(reg2);
      }
      __ cmpq(reg, reg2);
    }
  } else {
    if (instr->index()->IsConstantOperand()) {
      __ cmpq(ToOperand(instr->length()),
              Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
    } else {
      __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
    }
  }
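  // The (unsigned) comparison is length vs. index, so deoptimize when
  // length <= index, i.e. the index is out of bounds.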
  DeoptimizeIf(below_equal, instr->environment());
}


void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand =
      BuildFastArrayOperand(instr->object(),
                            instr->key(),
                            FAST_ELEMENTS,
                            FixedArray::kHeaderSize - kHeapObjectTag,
                            instr->additional_index());

  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend the key because it could be a 32-bit negative value and
    // the dehoisted address computation happens in 64 bits.
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  __ movq(operand, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(!instr->key()->IsConstantOperand());
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());

  if (instr->NeedsCanonicalization()) {
    Label have_value;

    __ ucomisd(value, value);
    __ j(parity_odd, &have_value);  // Branch if value is not NaN.

    __ Set(kScratchRegister, BitCast<uint64_t>(
        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
    __ movq(value, kScratchRegister);

    __ bind(&have_value);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
    // Sign extend the key because it could be a 32-bit negative value and
    // the dehoisted address computation happens in 64 bits.
    Register key_reg = ToRegister(instr->key());
    __ movsxlq(key_reg, key_reg);
  }

  __ movsd(double_store_operand, value);
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->value()).is(rax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_reg());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    ASSERT_NE(instr->temp_reg(), NULL);
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
  } else if (IsFastSmiElementsKind(from_kind) &&
             IsFastDoubleElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(rdx));
    ASSERT(new_map_reg.is(rbx));
    __ movq(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (IsFastDoubleElementsKind(from_kind) &&
             IsFastObjectElementsKind(to_kind)) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(rdx));
    ASSERT(new_map_reg.is(rbx));
    __ movq(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  EmitPushTaggedOperand(instr->left());
  EmitPushTaggedOperand(instr->right());
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(rax);
  }
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
  __ j(above, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movq(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
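  // Cache entries that have not been created yet hold undefined; fall back to
  // the deferred runtime call in that case.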
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Integer32ToSmi(char_code, char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  __ movq(result, FieldOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  if (input->IsRegister()) {
    __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
  } else {
    __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
  }
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  __ Integer32ToSmi(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->TempAt(0));

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Ensure that value in rax survives popping registers.
    __ movq(kScratchRegister, rax);
  }
  __ movq(reg, kScratchRegister);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ Integer32ToSmi(input, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  if (instr->needs_check()) {
    Condition is_smi = __ CheckSmi(input);
    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
  } else {
    if (FLAG_debug_code) {
      __ AbortIfNotSmi(input);
    }
  }
  __ SmiToInteger32(input, input);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Label load_smi, done;

  // Smi check.
  __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(not_equal, env);
  } else {
    Label heap_number;
    __ j(equal, &heap_number, Label::kNear);

    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, env);

    // Convert undefined to NaN. Compute NaN as 0/0.
    __ xorps(result_reg, result_reg);
    __ divsd(result_reg, result_reg);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);
  }
  // Heap number to XMM conversion.
  __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
    XMMRegister xmm_scratch = xmm0;
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, result_reg);
    __ j(not_equal, &done, Label::kNear);
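    // The value compares equal to +0.0; movmskpd extracts the sign bits, so
    // bit 0 is set only for -0.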
    __ movmskpd(kScratchRegister, result_reg);
    __ testq(kScratchRegister, Immediate(1));
    DeoptimizeIf(not_zero, env);
  }
  __ jmp(&done, Label::kNear);

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Label done, heap_number;
  Register input_reg = ToRegister(instr->InputAt(0));

  // Heap number map check.
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    __ j(equal, &heap_number, Label::kNear);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(&done, Label::kNear);

    __ bind(&heap_number);

    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2siq(input_reg, xmm0);
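    // cvttsd2siq yields 0x8000000000000000 for NaN and out-of-range inputs.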
    __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
    __ cmpq(input_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, xmm0);
    __ cvtlsi2sd(xmm_temp, input_reg);
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ testl(input_reg, input_reg);
      __ j(not_zero, &done);
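      // The result is zero; check the sign bit of the double to catch -0.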
      __ movmskpd(input_reg, xmm0);
      __ andl(input_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);
  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
  __ JumpIfNotSmi(input_reg, deferred->entry());
  __ SmiToInteger32(input_reg, input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2siq(result_reg, input_reg);
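    // cvttsd2siq produces 0x8000000000000000 on overflow or NaN; that sentinel
    // is compared against below and triggers a deoptimization.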
    __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
    __ cmpq(result_reg, kScratchRegister);
    DeoptimizeIf(equal, instr->environment());
  } else {
    __ cvttsd2si(result_reg, input_reg);
    __ cvtlsi2sd(xmm0, result_reg);
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      // The integer converted back is equal to the original. We
      // only have to test if we got -0 as an input.
      __ testl(result_reg, result_reg);
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg.
      // If input was positive, we are ok and return 0, otherwise
      // deoptimize.
      __ andl(result_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(NegateCondition(cc), instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  Condition cc = masm()->CheckSmi(ToRegister(input));
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
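      // Deoptimize if the tested bit is set when tag == 0, or clear otherwise.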
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
    __ cmpq(reg, Operand(kScratchRegister, 0));
  } else {
    __ Cmp(reg, target);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMapCommon(Register reg,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label success;
  __ CompareMap(reg, map, &success, mode);
  DeoptimizeIf(not_equal, env);
  __ bind(&success);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
    __ j(equal, &success);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  Register temp_reg = ToRegister(instr->TempAt(0));
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  Register temp_reg = ToRegister(instr->TempAt(0));
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ movq(input_reg, Immediate(0));
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->TempAt(0));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  __ LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    __ LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}


void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred =
      new(zone()) DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object.  The initial map might change when
  // the constructor's prototype changes, but instance size and property
  // counts remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        no_reg,
                        scratch,
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(scratch, constructor);
  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));

  if (FLAG_debug_code) {
    __ AbortIfSmi(map);
    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
            Immediate(instance_size >> kPointerSizeLog2));
    __ Assert(equal, "Unexpected instance size");
    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
            Immediate(initial_map->pre_allocated_property_fields()));
    __ Assert(equal, "Unexpected pre-allocated property fields count");
    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
            Immediate(initial_map->unused_property_fields()));
    __ Assert(equal, "Unexpected unused property fields count");
    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
            Immediate(initial_map->inobject_properties()));
    __ Assert(equal, "Unexpected in-object property fields count");
  }

  // Initialize map and fields of the newly allocated object.
  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
  if (initial_map->inobject_properties() != 0) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ movq(FieldOperand(result, property_offset), scratch);
    }
  }
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ Push(Smi::FromInt(instance_size));
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // than the expected one. The check isn't necessary if the boilerplate has
  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    // Load the map's "bit field 2".
    __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ and_(rbx, Immediate(Map::kElementsKindMask));
    __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
                           Map::kElementsKindShift));
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  // Boilerplate already exists, constant elements are never accessed.
  // Pass an empty fixed array.
  __ Push(isolate()->factory()->empty_fixed_array());

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  ASSERT(!source.is(rcx));
  ASSERT(!result.is(rcx));

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ lea(rcx, Operand(result, elements_offset));
    } else {
      __ movq(rcx, FieldOperand(source, i));
    }
    __ movq(FieldOperand(result, object_offset + i), rcx);
  }

  // Copy in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ lea(rcx, Operand(result, *offset));
      __ movq(FieldOperand(result, total_offset), rcx);
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
      __ movq(FieldOperand(result, total_offset), rcx);
    } else {
      __ movq(rcx, value, RelocInfo::NONE);
      __ movq(FieldOperand(result, total_offset), rcx);
    }
  }

  if (has_elements) {
    // Copy elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ movq(rcx, FieldOperand(source, i));
      __ movq(FieldOperand(result, elements_offset + i), rcx);
    }

    // Copy elements backing store content.
    int elements_length = elements->length();
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ movq(rcx, value, RelocInfo::NONE);
        __ movq(FieldOperand(result, total_offset), rcx);
      }
    } else if (elements->IsFixedArray()) {
      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value(fast_elements->get(i));
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ lea(rcx, Operand(result, *offset));
          __ movq(FieldOperand(result, total_offset), rcx);
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
          __ movq(FieldOperand(result, total_offset), rcx);
        } else {
          __ movq(rcx, value, RelocInfo::NONE);
          __ movq(FieldOperand(result, total_offset), rcx);
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}


void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  int size = instr->hydrogen()->total_size();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate()->GetElementsKind();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // than the expected one. The check isn't necessary if the boilerplate has
  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
  if (CanTransitionToMoreGeneralFastElementsKind(
          boilerplate_elements_kind, true)) {
    __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
    __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
    // Load the map's "bit field 2".
    __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ and_(rcx, Immediate(Map::kElementsKindMask));
    __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
                           Map::kElementsKindShift));
    DeoptimizeIf(not_equal, instr->environment());
  }

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ PushHeapObject(literals);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(constant_properties);
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  flags |= instr->hydrogen()->has_function()
      ? ObjectLiteral::kHasFunction
      : ObjectLiteral::kNoFlags;
  __ Push(Smi::FromInt(flags));

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(rax));
  __ push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(rcx, instr->hydrogen()->literals());
  __ movq(rbx, FieldOperand(rcx, literal_offset));
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in rax.
  __ push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movq(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movq(rdx, FieldOperand(rbx, i));
    __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movq(FieldOperand(rax, i), rdx);
    __ movq(FieldOperand(rax, i + kPointerSize), rcx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movq(FieldOperand(rax, size - kPointerSize), rdx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ Push(shared_info);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(rsi);
    __ Push(shared_info);
    __ PushRoot(pretenure ?
                Heap::kTrueValueRootIndex :
                Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->InputAt(0);
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition =
      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
  if (final_branch_condition != no_condition) {
    EmitBranch(true_block, false_block, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    __ jmp(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp);
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
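  // For example, if the previous lazy-deopt point was recorded at offset 40
  // and |space_needed| is 13 but the current offset is only 45, 8 bytes of
  // nops are emitted so that a full patch still fits.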
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    __ Nop(padding_size);
  }
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(obj);
  EmitPushTaggedOperand(key);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  // Create a safepoint generator that also ensures there is enough space in
  // the reloc info for patching in lazy deoptimization (since this invokes a
  // builtin).
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
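  // The object and key pushed above, together with the strict mode flag
  // pushed below, are the arguments consumed by the DELETE builtin.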
  __ Push(Smi::FromInt(strict_mode_flag()));
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoIn(LIn* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  EmitPushTaggedOperand(key);
  EmitPushTaggedOperand(obj);
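  // The key and object pushed above are the arguments consumed by the IN
  // builtin.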
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
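  // Out-of-line continuation of DoStackCheck: save all registers, call the
  // stack guard runtime function, and record the safepoint and lazy-deopt
  // index for it.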
  PushSafepointRegistersScope scope(this);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
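  // At function entry the stack check stub is called inline; for backwards
  // branches the call is moved to deferred code so that the fast
  // (non-overflow) path falls through.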
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
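  // The object to iterate over is expected in rax.  Deoptimize if it is
  // undefined, null, a smi or a proxy; otherwise try to use the enum cache
  // and only fall back to the runtime when the cache cannot be used.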
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpq(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  __ LoadInstanceDescriptors(map, result);
  __ movq(result,
          FieldOperand(result, DescriptorArray::kLastAddedOffset));
  __ movq(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
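  // The cache entry must be a heap object; deoptimize if a smi was loaded
  // instead.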
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmpq(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
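  // |index| is a smi encoding the property's location: a non-negative value
  // n selects in-object field n, while a negative value selects a slot in
  // the out-of-object properties array.  After negation a value of k
  // addresses properties[k - 1], so e.g. an incoming index of -1 loads
  // properties[0].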

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object);
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64
