// nanojit/NativeX64.cpp (excerpt)

void Assembler::PUSHR(R r)  { emitr(X64_pushr,r); asm_output("push %s", RQ(r)); }
void Assembler::POPR( R r)  { emitr(X64_popr, r); asm_output("pop %s",  RQ(r)); }

void Assembler::SHRQ(R r)   { emitr(X64_shrq, r); asm_output("shrq %s, ecx", RQ(r)); }
void Assembler::SARQ(R r)   { emitr(X64_sarq, r); asm_output("sarq %s, ecx", RQ(r)); }
void Assembler::SHLQ(R r)   { emitr(X64_shlq, r); asm_output("shlq %s, ecx", RQ(r)); }

void Assembler::SHRQI(R r, I i) { emit8(rexrb(X64_shrqi | U64(r&7)<<48, (R)0, r), i); asm_output("shrq %s, %d", RQ(r), i); }
void Assembler::SARQI(R r, I i) { emit8(rexrb(X64_sarqi | U64(r&7)<<48, (R)0, r), i); asm_output("sarq %s, %d", RQ(r), i); }
void Assembler::SHLQI(R r, I i) { emit8(rexrb(X64_shlqi | U64(r&7)<<48, (R)0, r), i); asm_output("shlq %s, %d", RQ(r), i); }

void Assembler::ADDQRR( R l, R r)  { emitrr(X64_addqrr, l,r); asm_output("addq %s, %s",   RQ(l),RQ(r)); }
void Assembler::SUBQRR( R l, R r)  { emitrr(X64_subqrr, l,r); asm_output("subq %s, %s",   RQ(l),RQ(r)); }
void Assembler::ANDQRR( R l, R r)  { emitrr(X64_andqrr, l,r); asm_output("andq %s, %s",   RQ(l),RQ(r)); }
void Assembler::ORQRR(  R l, R r)  { emitrr(X64_orqrr,  l,r); asm_output("orq %s, %s",    RQ(l),RQ(r)); }
void Assembler::XORQRR( R l, R r)  { emitrr(X64_xorqrr, l,r); asm_output("xorq %s, %s",   RQ(l),RQ(r)); }
void Assembler::CMPQR(  R l, R r)  { emitrr(X64_cmpqr,  l,r); asm_output("cmpq %s, %s",   RQ(l),RQ(r)); }
void Assembler::MOVQR(  R l, R r)  { emitrr(X64_movqr,  l,r); asm_output("movq %s, %s",   RQ(l),RQ(r)); }
void Assembler::MOVAPSR(R l, R r)  { emitrr(X64_movapsr,l,r); asm_output("movaps %s, %s", RQ(l),RQ(r)); }

void Assembler::CMOVQNO( R l, R r) { emitrr(X64_cmovqno, l,r); asm_output("cmovqno %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNE( R l, R r) { emitrr(X64_cmovqne, l,r); asm_output("cmovqne %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNL( R l, R r) { emitrr(X64_cmovqnl, l,r); asm_output("cmovqnl %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNLE(R l, R r) { emitrr(X64_cmovqnle,l,r); asm_output("cmovqnle %s, %s", RQ(l),RQ(r)); }
void Assembler::CMOVQNG( R l, R r) { emitrr(X64_cmovqng, l,r); asm_output("cmovqng %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNGE(R l, R r) { emitrr(X64_cmovqnge,l,r); asm_output("cmovqnge %s, %s", RQ(l),RQ(r)); }
void Assembler::CMOVQNB( R l, R r) { emitrr(X64_cmovqnb, l,r); asm_output("cmovqnb %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNBE(R l, R r) { emitrr(X64_cmovqnbe,l,r); asm_output("cmovqnbe %s, %s", RQ(l),RQ(r)); }
void Assembler::CMOVQNA( R l, R r) { emitrr(X64_cmovqna, l,r); asm_output("cmovqna %s, %s",  RQ(l),RQ(r)); }
void Assembler::CMOVQNAE(R l, R r) { emitrr(X64_cmovqnae,l,r); asm_output("cmovqnae %s, %s", RQ(l),RQ(r)); }

void Assembler::MOVSXDR(R l, R r)  { emitrr(X64_movsxdr,l,r); asm_output("movsxd %s, %s", RQ(l),RL(r)); }
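Each emitter above writes one instruction and, in debug builds, one line of disassembly via asm_output. Keep in mind that nanojit's assembler fills its code buffer backwards (bottom-up), so a sequence is emitted in reverse execution order. A minimal usage sketch, assuming the Register names RAX/RBX/RCX/RDX from NativeX64.h and that the calls occur inside Assembler; illustrative only:

// Hedged sketch, not from the source: emit "cmpq rcx, rdx; cmovqne rax, rbx".
// nanojit assembles bottom-up, so the later instruction is emitted first.
CMOVQNE(RAX, RBX);   // executes second: rax = rbx if ZF == 0
CMPQR(RCX, RDX);     // executes first:  set flags from rcx - rdx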
asm_output("movsxd %s, %s",RQ(l),RL(r)); } RQ 440 nanojit/NativeX64.cpp void Assembler::MOVZX8(R l, R r) { emitrr8(X64_movzx8,l,r); asm_output("movzx %s, %s",RQ(l),RB(r)); } RQ 446 nanojit/NativeX64.cpp void Assembler::XORPS( R r) { emitrr(X64_xorps, r,r); asm_output("xorps %s, %s", RQ(r),RQ(r)); } RQ 447 nanojit/NativeX64.cpp void Assembler::DIVSD( R l, R r) { emitprr(X64_divsd, l,r); asm_output("divsd %s, %s", RQ(l),RQ(r)); } RQ 448 nanojit/NativeX64.cpp void Assembler::MULSD( R l, R r) { emitprr(X64_mulsd, l,r); asm_output("mulsd %s, %s", RQ(l),RQ(r)); } RQ 449 nanojit/NativeX64.cpp void Assembler::ADDSD( R l, R r) { emitprr(X64_addsd, l,r); asm_output("addsd %s, %s", RQ(l),RQ(r)); } RQ 450 nanojit/NativeX64.cpp void Assembler::SUBSD( R l, R r) { emitprr(X64_subsd, l,r); asm_output("subsd %s, %s", RQ(l),RQ(r)); } RQ 451 nanojit/NativeX64.cpp void Assembler::CVTSQ2SD(R l, R r) { emitprr(X64_cvtsq2sd,l,r); asm_output("cvtsq2sd %s, %s",RQ(l),RQ(r)); } RQ 452 nanojit/NativeX64.cpp void Assembler::CVTSI2SD(R l, R r) { emitprr(X64_cvtsi2sd,l,r); asm_output("cvtsi2sd %s, %s",RQ(l),RL(r)); } RQ 453 nanojit/NativeX64.cpp void Assembler::CVTSS2SD(R l, R r) { emitprr(X64_cvtss2sd,l,r); asm_output("cvtss2sd %s, %s",RQ(l),RL(r)); } RQ 454 nanojit/NativeX64.cpp void Assembler::CVTSD2SS(R l, R r) { emitprr(X64_cvtsd2ss,l,r); asm_output("cvtsd2ss %s, %s",RL(l),RQ(r)); } RQ 455 nanojit/NativeX64.cpp void Assembler::UCOMISD( R l, R r) { emitprr(X64_ucomisd, l,r); asm_output("ucomisd %s, %s", RQ(l),RQ(r)); } RQ 456 nanojit/NativeX64.cpp void Assembler::MOVQRX( R l, R r) { emitprr(X64_movqrx, r,l); asm_output("movq %s, %s", RQ(l),RQ(r)); } // Nb: r and l are deliberately reversed within the emitprr() call. RQ 457 nanojit/NativeX64.cpp void Assembler::MOVQXR( R l, R r) { emitprr(X64_movqxr, l,r); asm_output("movq %s, %s", RQ(l),RQ(r)); } RQ 468 nanojit/NativeX64.cpp void Assembler::ADDQRI( R r, I32 i32) { emitr_imm(X64_addqri, r,i32); asm_output("addq %s, %d", RQ(r),i32); } RQ 469 nanojit/NativeX64.cpp void Assembler::SUBQRI( R r, I32 i32) { emitr_imm(X64_subqri, r,i32); asm_output("subq %s, %d", RQ(r),i32); } RQ 470 nanojit/NativeX64.cpp void Assembler::ANDQRI( R r, I32 i32) { emitr_imm(X64_andqri, r,i32); asm_output("andq %s, %d", RQ(r),i32); } RQ 471 nanojit/NativeX64.cpp void Assembler::ORQRI( R r, I32 i32) { emitr_imm(X64_orqri, r,i32); asm_output("orq %s, %d", RQ(r),i32); } RQ 472 nanojit/NativeX64.cpp void Assembler::XORQRI( R r, I32 i32) { emitr_imm(X64_xorqri, r,i32); asm_output("xorq %s, %d", RQ(r),i32); } RQ 473 nanojit/NativeX64.cpp void Assembler::CMPQRI( R r, I32 i32) { emitr_imm(X64_cmpqri, r,i32); asm_output("cmpq %s, %d", RQ(r),i32); } RQ 474 nanojit/NativeX64.cpp void Assembler::MOVQI32(R r, I32 i32) { emitr_imm(X64_movqi32,r,i32); asm_output("movqi32 %s, %d",RQ(r),i32); } RQ 483 nanojit/NativeX64.cpp void Assembler::ADDQR8(R r, I32 i8) { emitr_imm8(X64_addqr8,r,i8); asm_output("addq %s, %d",RQ(r),i8); } RQ 484 nanojit/NativeX64.cpp void Assembler::SUBQR8(R r, I32 i8) { emitr_imm8(X64_subqr8,r,i8); asm_output("subq %s, %d",RQ(r),i8); } RQ 485 nanojit/NativeX64.cpp void Assembler::ANDQR8(R r, I32 i8) { emitr_imm8(X64_andqr8,r,i8); asm_output("andq %s, %d",RQ(r),i8); } RQ 486 nanojit/NativeX64.cpp void Assembler::ORQR8( R r, I32 i8) { emitr_imm8(X64_orqr8, r,i8); asm_output("orq %s, %d", RQ(r),i8); } RQ 487 nanojit/NativeX64.cpp void Assembler::XORQR8(R r, I32 i8) { emitr_imm8(X64_xorqr8,r,i8); asm_output("xorq %s, %d",RQ(r),i8); } RQ 488 nanojit/NativeX64.cpp void Assembler::CMPQR8(R r, I32 
void Assembler::MOVQI(R r, U64 u64) { emitr_imm64(X64_movqi,r,u64); asm_output("movq %s, %p", RQ(r),(void*)u64); }

void Assembler::LEARIP(R r, I32 d)  { emitrm(X64_learip,r,d,(Register)0); asm_output("lea %s, %d(rip)", RQ(r),d); }

void Assembler::LEAQRM(R r1, I d, R r2) { emitrm(X64_leaqrm,r1,d,r2);  asm_output("leaq %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVLRM(R r1, I d, R r2) { emitrm(X64_movlrm,r1,d,r2);  asm_output("movl %s, %d(%s)", RL(r1),d,RQ(r2)); }
void Assembler::MOVQRM(R r1, I d, R r2) { emitrm(X64_movqrm,r1,d,r2);  asm_output("movq %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVBMR(R r1, I d, R r2) { emitrm8(X64_movbmr,r1,d,r2); asm_output("movb %d(%s), %s", d,RQ(r1),RB(r2)); }
void Assembler::MOVSMR(R r1, I d, R r2) { emitprm(X64_movsmr,r1,d,r2); asm_output("movs %d(%s), %s", d,RQ(r1),RS(r2)); }
void Assembler::MOVLMR(R r1, I d, R r2) { emitrm(X64_movlmr,r1,d,r2);  asm_output("movl %d(%s), %s", d,RQ(r1),RL(r2)); }
void Assembler::MOVQMR(R r1, I d, R r2) { emitrm(X64_movqmr,r1,d,r2);  asm_output("movq %d(%s), %s", d,RQ(r1),RQ(r2)); }

void Assembler::MOVZX8M( R r1, I d, R r2) { emitrm_wide(X64_movzx8m, r1,d,r2); asm_output("movzxb %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVZX16M(R r1, I d, R r2) { emitrm_wide(X64_movzx16m,r1,d,r2); asm_output("movzxs %s, %d(%s)", RQ(r1),d,RQ(r2)); }

void Assembler::MOVSX8M( R r1, I d, R r2) { emitrm_wide(X64_movsx8m, r1,d,r2); asm_output("movsxb %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVSX16M(R r1, I d, R r2) { emitrm_wide(X64_movsx16m,r1,d,r2); asm_output("movsxs %s, %d(%s)", RQ(r1),d,RQ(r2)); }

void Assembler::MOVSDRM(R r1, I d, R r2) { emitprm(X64_movsdrm,r1,d,r2); asm_output("movsd %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVSDMR(R r1, I d, R r2) { emitprm(X64_movsdmr,r1,d,r2); asm_output("movsd %d(%s), %s", d,RQ(r1),RQ(r2)); }
void Assembler::MOVSSRM(R r1, I d, R r2) { emitprm(X64_movssrm,r1,d,r2); asm_output("movss %s, %d(%s)", RQ(r1),d,RQ(r2)); }
void Assembler::MOVSSMR(R r1, I d, R r2) { emitprm(X64_movssmr,r1,d,r2); asm_output("movss %d(%s), %s", d,RQ(r1),RQ(r2)); }

void Assembler::JMPX(R indexreg, NIns** table) { emitrxb_imm(X64_jmpx, (R)0, indexreg, (Register)5, (int32_t)(uintptr_t)table); asm_output("jmpq [%s*8 + %p]", RQ(indexreg), (void*)table); }

void Assembler::JMPXB(R indexreg, R tablereg) { emitxb(X64_jmpxb, indexreg, tablereg); asm_output("jmp [%s*8 + %s]", RQ(indexreg), RQ(tablereg)); }

void Assembler::MOVQSPR(I d, R r) { emit(X64_movqspr | U64(d) << 56 | U64((r&7)<<3) << 40 | U64((r&8)>>1) << 24); asm_output("movq %d(rsp), %s", d, RQ(r)); } // insert r into mod/rm and rex bytes

void Assembler::XORPSA(R r, I32 i32)   { emitxm_abs(X64_xorpsa, r, i32); asm_output("xorps %s, (0x%x)", RQ(r), i32); }
void Assembler::XORPSM(R r, NIns* a64) { emitxm_rel(X64_xorpsm, r, a64); asm_output("xorps %s, (%p)",   RQ(r), a64); }
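MOVQSPR is the one emitter here that splices its operands straight into the 64-bit instruction template instead of going through the emitrr()/emitrm() helpers. An annotated restatement of its packing (field positions read off the shifts in the source plus standard x86-64 encoding, where the mod/rm reg field is bits 3..5 of the mod/rm byte and REX.R is bit 2 of the REX prefix; a sketch, not authoritative):

// Hedged sketch: what MOVQSPR's shifts place where in the 64-bit template.
uint64_t ins = X64_movqspr
             | U64(d)        << 56   // bits 56..63: 8-bit displacement of d(rsp)
             | U64((r&7)<<3) << 40   // bits 40..47: low 3 register bits -> mod/rm reg field
             | U64((r&8)>>1) << 24;  // bits 24..31: high register bit   -> REX.R of the REX prefix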