R 168 MMgc/GC.cpp , R(R_INITIAL_VALUE) // bytes/second; will be updated on-line
R 280 MMgc/GC.cpp return P * R / W();
R 295 MMgc/GC.cpp R = double(bytesScannedTotal) / (double(adjustR_totalTime) / double(VMPI_getPerformanceFrequency()));
R 296 MMgc/GC.cpp if (R < R_LOWER_LIMIT)
R 297 MMgc/GC.cpp R = R_LOWER_LIMIT;
R 345 MMgc/GC.cpp R / (1024*1024),
R 656 MMgc/GC.h double R;
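
In the MMgc hits above, R is the policy manager's estimate of mark throughput in bytes per second: seeded with R_INITIAL_VALUE, re-estimated on-line from bytesScannedTotal over the elapsed tick time (GC.cpp:295), clamped from below by R_LOWER_LIMIT (GC.cpp:296-297), and folded into the budget expression P * R / W() at GC.cpp:280. A minimal standalone sketch of that rate arithmetic, assuming the semantics the listing suggests; the function and parameter names here are invented, not GCPolicyManager's:

    #include <algorithm>
    #include <cstdint>

    // Sketch only. bytesScannedTotal: bytes marked since the last adjustment;
    // elapsedTicks/ticksPerSecond: timing in the units implied by
    // VMPI_getPerformanceFrequency().
    static double estimateMarkRate(uint64_t bytesScannedTotal,
                                   uint64_t elapsedTicks,
                                   uint64_t ticksPerSecond,
                                   double lowerLimit /* plays the role of R_LOWER_LIMIT */)
    {
        double seconds = double(elapsedTicks) / double(ticksPerSecond);
        if (seconds <= 0.0)
            return lowerLimit;                          // guard for the sketch; avoid divide-by-zero
        double R = double(bytesScannedTotal) / seconds; // bytes/second, as at GC.cpp:295
        return std::max(R, lowerLimit);                 // clamp, as at GC.cpp:296-297
    }
    // GC.cpp:280 combines the rate with the other policy quantities as P * R / W(),
    // and GC.cpp:345 reports it scaled to MB/s (R / (1024*1024)).
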
R 644 core/CdeclThunk.cpp #define ASM_FUNC_BEGIN(R, N, A) static R __declspec(naked) N A {
R 653 core/CdeclThunk.cpp #define ASM_FUNC_BEGIN(R, N, A) typedef R (* N##_type)A; static void* N##_container () { \
R 670 core/CdeclThunk.cpp typedef R (* N##_type)A; \
R 1228 core/WordcodeEmitter.cpp R[k++] = I[n][j];
R 1274 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1275 core/WordcodeEmitter.cpp int32_t offset = int32_t(R[i++]);
R 1288 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1290 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1298 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1301 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1302 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1305 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1306 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1307 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1310 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1311 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1312 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1313 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 1314 core/WordcodeEmitter.cpp *dest++ = R[i++];
R 216 core/WordcodeEmitter.h uintptr_t R[30]; // replacement data
R 205 core/peephole.cpp R[0] = NEW_OPCODE(WOP_getlocal0 + I[0][1]);
R 213 core/peephole.cpp R[0] = NEW_OPCODE(WOP_get2locals);
R 214 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 222 core/peephole.cpp R[0] = NEW_OPCODE(WOP_get3locals);
R 223 core/peephole.cpp R[1] = (I[2][1] << 20) | (I[1][1] << 10) | I[0][1];
R 231 core/peephole.cpp R[0] = NEW_OPCODE(WOP_get4locals);
R 232 core/peephole.cpp R[1] = (I[3][1] << 24) | (I[2][1] << 16) | (I[1][1] << 8) | I[0][1];
R 240 core/peephole.cpp R[0] = NEW_OPCODE(WOP_get5locals);
R 241 core/peephole.cpp R[1] = (I[4][1] << 24) | (I[3][1] << 18) | (I[2][1] << 12) | (I[1][1] << 6) | I[0][1];
R 249 core/peephole.cpp R[0] = NEW_OPCODE(WOP_add_ll);
R 250 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 258 core/peephole.cpp R[0] = NEW_OPCODE(WOP_add_set_lll);
R 259 core/peephole.cpp R[1] = (I[3][1] << 20) | (I[1][1] << 10) | I[0][1];
R 267 core/peephole.cpp R[0] = NEW_OPCODE(WOP_subtract_ll);
R 268 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 276 core/peephole.cpp R[0] = NEW_OPCODE(WOP_multiply_ll);
R 277 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 285 core/peephole.cpp R[0] = NEW_OPCODE(WOP_divide_ll);
R 286 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 294 core/peephole.cpp R[0] = NEW_OPCODE(WOP_modulo_ll);
R 295 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 303 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitand_ll);
R 304 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 312 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitor_ll);
R 313 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 321 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitxor_ll);
R 322 core/peephole.cpp R[1] = (I[1][1] << 16) | I[0][1];
R 331 core/peephole.cpp R[0] = NEW_OPCODE(WOP_iflt_ll);
R 332 core/peephole.cpp R[1] = I[2][1];
R 333 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 342 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnlt_ll);
R 343 core/peephole.cpp R[1] = I[2][1];
R 344 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 353 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifle_ll);
R 354 core/peephole.cpp R[1] = I[2][1];
R 355 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 364 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnle_ll);
R 365 core/peephole.cpp R[1] = I[2][1];
R 366 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 375 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifgt_ll);
R 376 core/peephole.cpp R[1] = I[2][1];
R 377 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 386 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifngt_ll);
R 387 core/peephole.cpp R[1] = I[2][1];
R 388 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 397 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifge_ll);
R 398 core/peephole.cpp R[1] = I[2][1];
R 399 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 408 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnge_ll);
R 409 core/peephole.cpp R[1] = I[2][1];
R 410 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 419 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifeq_ll);
R 420 core/peephole.cpp R[1] = I[2][1];
R 421 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 430 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifne_ll);
R 431 core/peephole.cpp R[1] = I[2][1];
R 432 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 441 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifstricteq_ll);
R 442 core/peephole.cpp R[1] = I[2][1];
R 443 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 452 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifstrictne_ll);
R 453 core/peephole.cpp R[1] = I[2][1];
R 454 core/peephole.cpp R[2] = (I[1][1] << 16) | I[0][1];
R 462 core/peephole.cpp R[0] = NEW_OPCODE(WOP_add_lb);
R 463 core/peephole.cpp R[1] = I[0][1];
R 464 core/peephole.cpp R[2] = I[1][1];
R 472 core/peephole.cpp R[0] = NEW_OPCODE(WOP_subtract_lb);
R 473 core/peephole.cpp R[1] = I[0][1];
R 474 core/peephole.cpp R[2] = I[1][1];
R 482 core/peephole.cpp R[0] = NEW_OPCODE(WOP_multiply_lb);
R 483 core/peephole.cpp R[1] = I[0][1];
R 484 core/peephole.cpp R[2] = I[1][1];
R 492 core/peephole.cpp R[0] = NEW_OPCODE(WOP_divide_lb);
R 493 core/peephole.cpp R[1] = I[0][1];
R 494 core/peephole.cpp R[2] = I[1][1];
R 502 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitand_lb);
R 503 core/peephole.cpp R[1] = I[0][1];
R 504 core/peephole.cpp R[2] = I[1][1];
R 512 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitor_lb);
R 513 core/peephole.cpp R[1] = I[0][1];
R 514 core/peephole.cpp R[2] = I[1][1];
R 522 core/peephole.cpp R[0] = NEW_OPCODE(WOP_bitxor_lb);
R 523 core/peephole.cpp R[1] = I[0][1];
R 524 core/peephole.cpp R[2] = I[1][1];
R 533 core/peephole.cpp R[0] = NEW_OPCODE(WOP_iflt_lb);
R 534 core/peephole.cpp R[1] = I[2][1];
R 535 core/peephole.cpp R[2] = I[0][1];
R 536 core/peephole.cpp R[3] = I[1][1];
R 545 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnlt_lb);
R 546 core/peephole.cpp R[1] = I[2][1];
R 547 core/peephole.cpp R[2] = I[0][1];
R 548 core/peephole.cpp R[3] = I[1][1];
R 557 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifle_lb);
R 558 core/peephole.cpp R[1] = I[2][1];
R 559 core/peephole.cpp R[2] = I[0][1];
R 560 core/peephole.cpp R[3] = I[1][1];
R 569 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnle_lb);
R 570 core/peephole.cpp R[1] = I[2][1];
R 571 core/peephole.cpp R[2] = I[0][1];
R 572 core/peephole.cpp R[3] = I[1][1];
R 581 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifgt_lb);
R 582 core/peephole.cpp R[1] = I[2][1];
R 583 core/peephole.cpp R[2] = I[0][1];
R 584 core/peephole.cpp R[3] = I[1][1];
R 593 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifngt_lb);
R 594 core/peephole.cpp R[1] = I[2][1];
R 595 core/peephole.cpp R[2] = I[0][1];
R 596 core/peephole.cpp R[3] = I[1][1];
R 605 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifge_lb);
R 606 core/peephole.cpp R[1] = I[2][1];
R 607 core/peephole.cpp R[2] = I[0][1];
R 608 core/peephole.cpp R[3] = I[1][1];
R 617 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifnge_lb);
R 618 core/peephole.cpp R[1] = I[2][1];
R 619 core/peephole.cpp R[2] = I[0][1];
R 620 core/peephole.cpp R[3] = I[1][1];
R 629 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifeq_lb);
R 630 core/peephole.cpp R[1] = I[2][1];
R 631 core/peephole.cpp R[2] = I[0][1];
R 632 core/peephole.cpp R[3] = I[1][1];
R 641 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifne_lb);
R 642 core/peephole.cpp R[1] = I[2][1];
R 643 core/peephole.cpp R[2] = I[0][1];
R 644 core/peephole.cpp R[3] = I[1][1];
R 653 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifstricteq_lb);
R 654 core/peephole.cpp R[1] = I[2][1];
R 655 core/peephole.cpp R[2] = I[0][1];
R 656 core/peephole.cpp R[3] = I[1][1];
R 665 core/peephole.cpp R[0] = NEW_OPCODE(WOP_ifstrictne_lb);
R 666 core/peephole.cpp R[1] = I[2][1];
R 667 core/peephole.cpp R[2] = I[0][1];
R 668 core/peephole.cpp R[3] = I[1][1];
R 676 core/peephole.cpp R[0] = NEW_OPCODE(WOP_setlocal0 + I[0][1]);
R 684 core/peephole.cpp R[0] = NEW_OPCODE(WOP_storelocal);
R 685 core/peephole.cpp R[1] = I[0][1];
R 693 core/peephole.cpp R[0] = NEW_OPCODE(WOP_swap_pop);
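
In the core/ hits, R is the peephole optimizer's replacement buffer (uintptr_t R[30] at WordcodeEmitter.h:216): each rewrite rule writes a fused wordcode opcode into R[0] and packs the operands of the matched instructions I[0..n] into the following slots, and WordcodeEmitter.cpp:1274 ff. later copies R into the output stream, pulling branch offsets back out as it goes. The shift amounts above imply the operand field widths (16 bits per local index for the two-operand forms, roughly 10/8/6 bits for get3locals/get4locals/get5locals). A small sketch of the WOP_get2locals packing from peephole.cpp:213-214, with an invented unpack helper for the consumer side; only the packing formula is taken from the listing:

    #include <cassert>
    #include <cstdint>

    // Pack two local-variable indices into one operand word, the way the
    // WOP_get2locals rule does: R[1] = (I[1][1] << 16) | I[0][1].
    static uint32_t packTwoLocals(uint32_t first, uint32_t second)
    {
        assert(first < (1u << 16) && second < (1u << 16)); // each index must fit in 16 bits
        return (second << 16) | first;
    }

    // Hypothetical consumer-side helper; the real interpreter would perform the
    // equivalent split when executing the fused instruction.
    static void unpackTwoLocals(uint32_t operand, uint32_t& first, uint32_t& second)
    {
        first  = operand & 0xFFFFu;
        second = operand >> 16;
    }
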
R 422 nanojit/NativePPC.h #define SPR(spr) ((R##spr)>>5|(R##spr&31)<<5)
R 367 nanojit/NativeX64.cpp void Assembler::PUSHR(R r) { emitr(X64_pushr,r); asm_output("push %s", RQ(r)); }
R 368 nanojit/NativeX64.cpp void Assembler::POPR( R r) { emitr(X64_popr, r); asm_output("pop %s", RQ(r)); }
R 369 nanojit/NativeX64.cpp void Assembler::NOT( R r) { emitr(X64_not, r); asm_output("notl %s", RL(r)); }
R 370 nanojit/NativeX64.cpp void Assembler::NEG( R r) { emitr(X64_neg, r); asm_output("negl %s", RL(r)); }
R 371 nanojit/NativeX64.cpp void Assembler::IDIV( R r) { emitr(X64_idiv, r); asm_output("idivl edx:eax, %s",RL(r)); }
R 373 nanojit/NativeX64.cpp void Assembler::SHR( R r) { emitr(X64_shr, r); asm_output("shrl %s, ecx", RL(r)); }
R 374 nanojit/NativeX64.cpp void Assembler::SAR( R r) { emitr(X64_sar, r); asm_output("sarl %s, ecx", RL(r)); }
R 375 nanojit/NativeX64.cpp void Assembler::SHL( R r) { emitr(X64_shl, r); asm_output("shll %s, ecx", RL(r)); }
R 376 nanojit/NativeX64.cpp void Assembler::SHRQ(R r) { emitr(X64_shrq, r); asm_output("shrq %s, ecx", RQ(r)); }
R 377 nanojit/NativeX64.cpp void Assembler::SARQ(R r) { emitr(X64_sarq, r); asm_output("sarq %s, ecx", RQ(r)); }
R 378 nanojit/NativeX64.cpp void Assembler::SHLQ(R r) { emitr(X64_shlq, r); asm_output("shlq %s, ecx", RQ(r)); }
R 380 nanojit/NativeX64.cpp void Assembler::SHRI( R r, I i) { emit8(rexrb(X64_shri | U64(r&7)<<48, (R)0, r), i); asm_output("shrl %s, %d", RL(r), i); }
R 381 nanojit/NativeX64.cpp void Assembler::SARI( R r, I i) { emit8(rexrb(X64_sari | U64(r&7)<<48, (R)0, r), i); asm_output("sarl %s, %d", RL(r), i); }
R 382 nanojit/NativeX64.cpp void Assembler::SHLI( R r, I i) { emit8(rexrb(X64_shli | U64(r&7)<<48, (R)0, r), i); asm_output("shll %s, %d", RL(r), i); }
R 383 nanojit/NativeX64.cpp void Assembler::SHRQI(R r, I i) { emit8(rexrb(X64_shrqi | U64(r&7)<<48, (R)0, r), i); asm_output("shrq %s, %d", RQ(r), i); }
R 384 nanojit/NativeX64.cpp void Assembler::SARQI(R r, I i) { emit8(rexrb(X64_sarqi | U64(r&7)<<48, (R)0, r), i); asm_output("sarq %s, %d", RQ(r), i); }
R 385 nanojit/NativeX64.cpp void Assembler::SHLQI(R r, I i) { emit8(rexrb(X64_shlqi | U64(r&7)<<48, (R)0, r), i); asm_output("shlq %s, %d", RQ(r), i); }
R 387 nanojit/NativeX64.cpp void Assembler::SETE( R r) { emitr8(X64_sete, r); asm_output("sete %s", RB(r)); }
R 388 nanojit/NativeX64.cpp void Assembler::SETL( R r) { emitr8(X64_setl, r); asm_output("setl %s", RB(r)); }
R 389 nanojit/NativeX64.cpp void Assembler::SETLE(R r) { emitr8(X64_setle,r); asm_output("setle %s",RB(r)); }
R 390 nanojit/NativeX64.cpp void Assembler::SETG( R r) { emitr8(X64_setg, r); asm_output("setg %s", RB(r)); }
R 391 nanojit/NativeX64.cpp void Assembler::SETGE(R r) { emitr8(X64_setge,r); asm_output("setge %s",RB(r)); }
R 392 nanojit/NativeX64.cpp void Assembler::SETB( R r) { emitr8(X64_setb, r); asm_output("setb %s", RB(r)); }
R 393 nanojit/NativeX64.cpp void Assembler::SETBE(R r) { emitr8(X64_setbe,r); asm_output("setbe %s",RB(r)); }
R 394 nanojit/NativeX64.cpp void Assembler::SETA( R r) { emitr8(X64_seta, r); asm_output("seta %s", RB(r)); }
R 395 nanojit/NativeX64.cpp void Assembler::SETAE(R r) { emitr8(X64_setae,r); asm_output("setae %s",RB(r)); }
R 396 nanojit/NativeX64.cpp void Assembler::SETO( R r) { emitr8(X64_seto, r); asm_output("seto %s", RB(r)); }
R 398 nanojit/NativeX64.cpp void Assembler::ADDRR(R l, R r) { emitrr(X64_addrr,l,r); asm_output("addl %s, %s", RL(l),RL(r)); }
R 399 nanojit/NativeX64.cpp void Assembler::SUBRR(R l, R r) { emitrr(X64_subrr,l,r); asm_output("subl %s, %s", RL(l),RL(r)); }
R 400 nanojit/NativeX64.cpp void Assembler::ANDRR(R l, R r) { emitrr(X64_andrr,l,r); asm_output("andl %s, %s", RL(l),RL(r)); }
R 401 nanojit/NativeX64.cpp void Assembler::ORLRR(R l, R r) { emitrr(X64_orlrr,l,r); asm_output("orl %s, %s", RL(l),RL(r)); }
R 402 nanojit/NativeX64.cpp void Assembler::XORRR(R l, R r) { emitrr(X64_xorrr,l,r); asm_output("xorl %s, %s", RL(l),RL(r)); }
R 403 nanojit/NativeX64.cpp void Assembler::IMUL( R l, R r) { emitrr(X64_imul, l,r); asm_output("imull %s, %s",RL(l),RL(r)); }
R 404 nanojit/NativeX64.cpp void Assembler::CMPLR(R l, R r) { emitrr(X64_cmplr,l,r); asm_output("cmpl %s, %s", RL(l),RL(r)); }
R 405 nanojit/NativeX64.cpp void Assembler::MOVLR(R l, R r) { emitrr(X64_movlr,l,r); asm_output("movl %s, %s", RL(l),RL(r)); }
R 407 nanojit/NativeX64.cpp void Assembler::ADDQRR( R l, R r) { emitrr(X64_addqrr, l,r); asm_output("addq %s, %s", RQ(l),RQ(r)); }
R 408 nanojit/NativeX64.cpp void Assembler::SUBQRR( R l, R r) { emitrr(X64_subqrr, l,r); asm_output("subq %s, %s", RQ(l),RQ(r)); }
R 409 nanojit/NativeX64.cpp void Assembler::ANDQRR( R l, R r) { emitrr(X64_andqrr, l,r); asm_output("andq %s, %s", RQ(l),RQ(r)); }
R 410 nanojit/NativeX64.cpp void Assembler::ORQRR( R l, R r) { emitrr(X64_orqrr, l,r); asm_output("orq %s, %s", RQ(l),RQ(r)); }
R 411 nanojit/NativeX64.cpp void Assembler::XORQRR( R l, R r) { emitrr(X64_xorqrr, l,r); asm_output("xorq %s, %s", RQ(l),RQ(r)); }
R 412 nanojit/NativeX64.cpp void Assembler::CMPQR( R l, R r) { emitrr(X64_cmpqr, l,r); asm_output("cmpq %s, %s", RQ(l),RQ(r)); }
R 413 nanojit/NativeX64.cpp void Assembler::MOVQR( R l, R r) { emitrr(X64_movqr, l,r); asm_output("movq %s, %s", RQ(l),RQ(r)); }
R 414 nanojit/NativeX64.cpp void Assembler::MOVAPSR(R l, R r) { emitrr(X64_movapsr,l,r); asm_output("movaps %s, %s",RQ(l),RQ(r)); }
R 416 nanojit/NativeX64.cpp void Assembler::CMOVNO( R l, R r) { emitrr(X64_cmovno, l,r); asm_output("cmovlno %s, %s", RL(l),RL(r)); }
R 417 nanojit/NativeX64.cpp void Assembler::CMOVNE( R l, R r) { emitrr(X64_cmovne, l,r); asm_output("cmovlne %s, %s", RL(l),RL(r)); }
R 418 nanojit/NativeX64.cpp void Assembler::CMOVNL( R l, R r) { emitrr(X64_cmovnl, l,r); asm_output("cmovlnl %s, %s", RL(l),RL(r)); }
R 419 nanojit/NativeX64.cpp void Assembler::CMOVNLE(R l, R r) { emitrr(X64_cmovnle,l,r); asm_output("cmovlnle %s, %s", RL(l),RL(r)); }
R 420 nanojit/NativeX64.cpp void Assembler::CMOVNG( R l, R r) { emitrr(X64_cmovng, l,r); asm_output("cmovlng %s, %s", RL(l),RL(r)); }
R 421 nanojit/NativeX64.cpp void Assembler::CMOVNGE(R l, R r) { emitrr(X64_cmovnge,l,r); asm_output("cmovlnge %s, %s", RL(l),RL(r)); }
R 422 nanojit/NativeX64.cpp void Assembler::CMOVNB( R l, R r) { emitrr(X64_cmovnb, l,r); asm_output("cmovlnb %s, %s", RL(l),RL(r)); }
R 423 nanojit/NativeX64.cpp void Assembler::CMOVNBE(R l, R r) { emitrr(X64_cmovnbe,l,r); asm_output("cmovlnbe %s, %s", RL(l),RL(r)); }
R 424 nanojit/NativeX64.cpp void Assembler::CMOVNA( R l, R r) { emitrr(X64_cmovna, l,r); asm_output("cmovlna %s, %s", RL(l),RL(r)); }
R 425 nanojit/NativeX64.cpp void Assembler::CMOVNAE(R l, R r) { emitrr(X64_cmovnae,l,r); asm_output("cmovlnae %s, %s", RL(l),RL(r)); }
R 427 nanojit/NativeX64.cpp void Assembler::CMOVQNO( R l, R r) { emitrr(X64_cmovqno, l,r); asm_output("cmovqno %s, %s", RQ(l),RQ(r)); }
R 428 nanojit/NativeX64.cpp void Assembler::CMOVQNE( R l, R r) { emitrr(X64_cmovqne, l,r); asm_output("cmovqne %s, %s", RQ(l),RQ(r)); }
R 429 nanojit/NativeX64.cpp void Assembler::CMOVQNL( R l, R r) { emitrr(X64_cmovqnl, l,r); asm_output("cmovqnl %s, %s", RQ(l),RQ(r)); }
R 430 nanojit/NativeX64.cpp void Assembler::CMOVQNLE(R l, R r) { emitrr(X64_cmovqnle,l,r); asm_output("cmovqnle %s, %s", RQ(l),RQ(r)); }
R 431 nanojit/NativeX64.cpp void Assembler::CMOVQNG( R l, R r) { emitrr(X64_cmovqng, l,r); asm_output("cmovqng %s, %s", RQ(l),RQ(r)); }
R 432 nanojit/NativeX64.cpp void Assembler::CMOVQNGE(R l, R r) { emitrr(X64_cmovqnge,l,r); asm_output("cmovqnge %s, %s", RQ(l),RQ(r)); }
R 433 nanojit/NativeX64.cpp void Assembler::CMOVQNB( R l, R r) { emitrr(X64_cmovqnb, l,r); asm_output("cmovqnb %s, %s", RQ(l),RQ(r)); }
R 434 nanojit/NativeX64.cpp void Assembler::CMOVQNBE(R l, R r) { emitrr(X64_cmovqnbe,l,r); asm_output("cmovqnbe %s, %s", RQ(l),RQ(r)); }
R 435 nanojit/NativeX64.cpp void Assembler::CMOVQNA( R l, R r) { emitrr(X64_cmovqna, l,r); asm_output("cmovqna %s, %s", RQ(l),RQ(r)); }
R 436 nanojit/NativeX64.cpp void Assembler::CMOVQNAE(R l, R r) { emitrr(X64_cmovqnae,l,r); asm_output("cmovqnae %s, %s", RQ(l),RQ(r)); }
R 438 nanojit/NativeX64.cpp void Assembler::MOVSXDR(R l, R r) { emitrr(X64_movsxdr,l,r); asm_output("movsxd %s, %s",RQ(l),RL(r)); }
R 440 nanojit/NativeX64.cpp void Assembler::MOVZX8(R l, R r) { emitrr8(X64_movzx8,l,r); asm_output("movzx %s, %s",RQ(l),RB(r)); }
R 446 nanojit/NativeX64.cpp void Assembler::XORPS( R r) { emitrr(X64_xorps, r,r); asm_output("xorps %s, %s", RQ(r),RQ(r)); }
R 447 nanojit/NativeX64.cpp void Assembler::DIVSD( R l, R r) { emitprr(X64_divsd, l,r); asm_output("divsd %s, %s", RQ(l),RQ(r)); }
R 448 nanojit/NativeX64.cpp void Assembler::MULSD( R l, R r) { emitprr(X64_mulsd, l,r); asm_output("mulsd %s, %s", RQ(l),RQ(r)); }
R 449 nanojit/NativeX64.cpp void Assembler::ADDSD( R l, R r) { emitprr(X64_addsd, l,r); asm_output("addsd %s, %s", RQ(l),RQ(r)); }
R 450 nanojit/NativeX64.cpp void Assembler::SUBSD( R l, R r) { emitprr(X64_subsd, l,r); asm_output("subsd %s, %s", RQ(l),RQ(r)); }
R 451 nanojit/NativeX64.cpp void Assembler::CVTSQ2SD(R l, R r) { emitprr(X64_cvtsq2sd,l,r); asm_output("cvtsq2sd %s, %s",RQ(l),RQ(r)); }
R 452 nanojit/NativeX64.cpp void Assembler::CVTSI2SD(R l, R r) { emitprr(X64_cvtsi2sd,l,r); asm_output("cvtsi2sd %s, %s",RQ(l),RL(r)); }
R 453 nanojit/NativeX64.cpp void Assembler::CVTSS2SD(R l, R r) { emitprr(X64_cvtss2sd,l,r); asm_output("cvtss2sd %s, %s",RQ(l),RL(r)); }
R 454 nanojit/NativeX64.cpp void Assembler::CVTSD2SS(R l, R r) { emitprr(X64_cvtsd2ss,l,r); asm_output("cvtsd2ss %s, %s",RL(l),RQ(r)); }
R 455 nanojit/NativeX64.cpp void Assembler::UCOMISD( R l, R r) { emitprr(X64_ucomisd, l,r); asm_output("ucomisd %s, %s", RQ(l),RQ(r)); }
R 456 nanojit/NativeX64.cpp void Assembler::MOVQRX( R l, R r) { emitprr(X64_movqrx, r,l); asm_output("movq %s, %s", RQ(l),RQ(r)); } // Nb: r and l are deliberately reversed within the emitprr() call.
R 457 nanojit/NativeX64.cpp void Assembler::MOVQXR( R l, R r) { emitprr(X64_movqxr, l,r); asm_output("movq %s, %s", RQ(l),RQ(r)); }
R 460 nanojit/NativeX64.cpp void Assembler::MOVI( R r, I32 i32) { emitr_imm(X64_movi, r,i32); asm_output("movl %s, %d",RL(r),i32); }
R 461 nanojit/NativeX64.cpp void Assembler::ADDLRI(R r, I32 i32) { emitr_imm(X64_addlri,r,i32); asm_output("addl %s, %d",RL(r),i32); }
R 462 nanojit/NativeX64.cpp void Assembler::SUBLRI(R r, I32 i32) { emitr_imm(X64_sublri,r,i32); asm_output("subl %s, %d",RL(r),i32); }
R 463 nanojit/NativeX64.cpp void Assembler::ANDLRI(R r, I32 i32) { emitr_imm(X64_andlri,r,i32); asm_output("andl %s, %d",RL(r),i32); }
R 464 nanojit/NativeX64.cpp void Assembler::ORLRI( R r, I32 i32) { emitr_imm(X64_orlri, r,i32); asm_output("orl %s, %d", RL(r),i32); }
R 465 nanojit/NativeX64.cpp void Assembler::XORLRI(R r, I32 i32) { emitr_imm(X64_xorlri,r,i32); asm_output("xorl %s, %d",RL(r),i32); }
R 466 nanojit/NativeX64.cpp void Assembler::CMPLRI(R r, I32 i32) { emitr_imm(X64_cmplri,r,i32); asm_output("cmpl %s, %d",RL(r),i32); }
R 468 nanojit/NativeX64.cpp void Assembler::ADDQRI( R r, I32 i32) { emitr_imm(X64_addqri, r,i32); asm_output("addq %s, %d", RQ(r),i32); }
R 469 nanojit/NativeX64.cpp void Assembler::SUBQRI( R r, I32 i32) { emitr_imm(X64_subqri, r,i32); asm_output("subq %s, %d", RQ(r),i32); }
R 470 nanojit/NativeX64.cpp void Assembler::ANDQRI( R r, I32 i32) { emitr_imm(X64_andqri, r,i32); asm_output("andq %s, %d", RQ(r),i32); }
R 471 nanojit/NativeX64.cpp void Assembler::ORQRI( R r, I32 i32) { emitr_imm(X64_orqri, r,i32); asm_output("orq %s, %d", RQ(r),i32); }
R 472 nanojit/NativeX64.cpp void Assembler::XORQRI( R r, I32 i32) { emitr_imm(X64_xorqri, r,i32); asm_output("xorq %s, %d", RQ(r),i32); }
R 473 nanojit/NativeX64.cpp void Assembler::CMPQRI( R r, I32 i32) { emitr_imm(X64_cmpqri, r,i32); asm_output("cmpq %s, %d", RQ(r),i32); }
R 474 nanojit/NativeX64.cpp void Assembler::MOVQI32(R r, I32 i32) { emitr_imm(X64_movqi32,r,i32); asm_output("movqi32 %s, %d",RQ(r),i32); }
R 476 nanojit/NativeX64.cpp void Assembler::ADDLR8(R r, I32 i8) { emitr_imm8(X64_addlr8,r,i8); asm_output("addl %s, %d", RL(r),i8); }
R 477 nanojit/NativeX64.cpp void Assembler::SUBLR8(R r, I32 i8) { emitr_imm8(X64_sublr8,r,i8); asm_output("subl %s, %d", RL(r),i8); }
R 478 nanojit/NativeX64.cpp void Assembler::ANDLR8(R r, I32 i8) { emitr_imm8(X64_andlr8,r,i8); asm_output("andl %s, %d", RL(r),i8); }
R 479 nanojit/NativeX64.cpp void Assembler::ORLR8( R r, I32 i8) { emitr_imm8(X64_orlr8, r,i8); asm_output("orl %s, %d", RL(r),i8); }
R 480 nanojit/NativeX64.cpp void Assembler::XORLR8(R r, I32 i8) { emitr_imm8(X64_xorlr8,r,i8); asm_output("xorl %s, %d", RL(r),i8); }
R 481 nanojit/NativeX64.cpp void Assembler::CMPLR8(R r, I32 i8) { emitr_imm8(X64_cmplr8,r,i8); asm_output("cmpl %s, %d", RL(r),i8); }
R 483 nanojit/NativeX64.cpp void Assembler::ADDQR8(R r, I32 i8) { emitr_imm8(X64_addqr8,r,i8); asm_output("addq %s, %d",RQ(r),i8); }
R 484 nanojit/NativeX64.cpp void Assembler::SUBQR8(R r, I32 i8) { emitr_imm8(X64_subqr8,r,i8); asm_output("subq %s, %d",RQ(r),i8); }
R 485 nanojit/NativeX64.cpp void Assembler::ANDQR8(R r, I32 i8) { emitr_imm8(X64_andqr8,r,i8); asm_output("andq %s, %d",RQ(r),i8); }
R 486 nanojit/NativeX64.cpp void Assembler::ORQR8( R r, I32 i8) { emitr_imm8(X64_orqr8, r,i8); asm_output("orq %s, %d", RQ(r),i8); }
R 487 nanojit/NativeX64.cpp void Assembler::XORQR8(R r, I32 i8) { emitr_imm8(X64_xorqr8,r,i8); asm_output("xorq %s, %d",RQ(r),i8); }
R 488 nanojit/NativeX64.cpp void Assembler::CMPQR8(R r, I32 i8) { emitr_imm8(X64_cmpqr8,r,i8); asm_output("cmpq %s, %d",RQ(r),i8); }
R 490 nanojit/NativeX64.cpp void Assembler::IMULI(R l, R r, I32 i32) { emitrr_imm(X64_imuli,l,r,i32); asm_output("imuli %s, %s, %d",RL(l),RL(r),i32); }
R 492 nanojit/NativeX64.cpp void Assembler::MOVQI(R r, U64 u64) { emitr_imm64(X64_movqi,r,u64); asm_output("movq %s, %p",RQ(r),(void*)u64); }
R 494 nanojit/NativeX64.cpp void Assembler::LEARIP(R r, I32 d) { emitrm(X64_learip,r,d,(Register)0); asm_output("lea %s, %d(rip)",RQ(r),d); }
R 496 nanojit/NativeX64.cpp void Assembler::LEAQRM(R r1, I d, R r2) { emitrm(X64_leaqrm,r1,d,r2); asm_output("leaq %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 497 nanojit/NativeX64.cpp void Assembler::MOVLRM(R r1, I d, R r2) { emitrm(X64_movlrm,r1,d,r2); asm_output("movl %s, %d(%s)",RL(r1),d,RQ(r2)); }
R 498 nanojit/NativeX64.cpp void Assembler::MOVQRM(R r1, I d, R r2) { emitrm(X64_movqrm,r1,d,r2); asm_output("movq %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 499 nanojit/NativeX64.cpp void Assembler::MOVBMR(R r1, I d, R r2) { emitrm8(X64_movbmr,r1,d,r2); asm_output("movb %d(%s), %s",d,RQ(r1),RB(r2)); }
R 500 nanojit/NativeX64.cpp void Assembler::MOVSMR(R r1, I d, R r2) { emitprm(X64_movsmr,r1,d,r2); asm_output("movs %d(%s), %s",d,RQ(r1),RS(r2)); }
R 501 nanojit/NativeX64.cpp void Assembler::MOVLMR(R r1, I d, R r2) { emitrm(X64_movlmr,r1,d,r2); asm_output("movl %d(%s), %s",d,RQ(r1),RL(r2)); }
R 502 nanojit/NativeX64.cpp void Assembler::MOVQMR(R r1, I d, R r2) { emitrm(X64_movqmr,r1,d,r2); asm_output("movq %d(%s), %s",d,RQ(r1),RQ(r2)); }
R 504 nanojit/NativeX64.cpp void Assembler::MOVZX8M( R r1, I d, R r2) { emitrm_wide(X64_movzx8m, r1,d,r2); asm_output("movzxb %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 505 nanojit/NativeX64.cpp void Assembler::MOVZX16M(R r1, I d, R r2) { emitrm_wide(X64_movzx16m,r1,d,r2); asm_output("movzxs %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 507 nanojit/NativeX64.cpp void Assembler::MOVSX8M( R r1, I d, R r2) { emitrm_wide(X64_movsx8m, r1,d,r2); asm_output("movsxb %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 508 nanojit/NativeX64.cpp void Assembler::MOVSX16M(R r1, I d, R r2) { emitrm_wide(X64_movsx16m,r1,d,r2); asm_output("movsxs %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 510 nanojit/NativeX64.cpp void Assembler::MOVSDRM(R r1, I d, R r2) { emitprm(X64_movsdrm,r1,d,r2); asm_output("movsd %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 511 nanojit/NativeX64.cpp void Assembler::MOVSDMR(R r1, I d, R r2) { emitprm(X64_movsdmr,r1,d,r2); asm_output("movsd %d(%s), %s",d,RQ(r1),RQ(r2)); }
R 512 nanojit/NativeX64.cpp void Assembler::MOVSSRM(R r1, I d, R r2) { emitprm(X64_movssrm,r1,d,r2); asm_output("movss %s, %d(%s)",RQ(r1),d,RQ(r2)); }
R 513 nanojit/NativeX64.cpp void Assembler::MOVSSMR(R r1, I d, R r2) { emitprm(X64_movssmr,r1,d,r2); asm_output("movss %d(%s), %s",d,RQ(r1),RQ(r2)); }
R 519 nanojit/NativeX64.cpp void Assembler::JMPX(R indexreg, NIns** table) { emitrxb_imm(X64_jmpx, (R)0, indexreg, (Register)5, (int32_t)(uintptr_t)table); asm_output("jmpq [%s*8 + %p]", RQ(indexreg), (void*)table); }
R 521 nanojit/NativeX64.cpp void Assembler::JMPXB(R indexreg, R tablereg) { emitxb(X64_jmpxb, indexreg, tablereg); asm_output("jmp [%s*8 + %s]", RQ(indexreg), RQ(tablereg)); }
R 574 nanojit/NativeX64.cpp void Assembler::MOVQSPR(I d, R r) { emit(X64_movqspr | U64(d) << 56 | U64((r&7)<<3) << 40 | U64((r&8)>>1) << 24); asm_output("movq %d(rsp), %s", d, RQ(r)); } // insert r into mod/rm and rex bytes
R 576 nanojit/NativeX64.cpp void Assembler::XORPSA(R r, I32 i32) { emitxm_abs(X64_xorpsa, r, i32); asm_output("xorps %s, (0x%x)",RQ(r), i32); }
R 577 nanojit/NativeX64.cpp void Assembler::XORPSM(R r, NIns* a64) { emitxm_rel(X64_xorpsm, r, a64); asm_output("xorps %s, (%p)", RQ(r), a64); }
R 579 nanojit/NativeX64.cpp void Assembler::X86_AND8R(R r) { emit(X86_and8r | U64(r<<3|(r|4))<<56); asm_output("andb %s, %s", RB(r), RBhi(r)); }
R 580 nanojit/NativeX64.cpp void Assembler::X86_SETNP(R r) { emit(X86_setnp | U64(r|4)<<56); asm_output("setnp %s", RBhi(r)); }
R 581 nanojit/NativeX64.cpp void Assembler::X86_SETE(R r) { emit(X86_sete | U64(r)<<56); asm_output("sete %s", RB(r)); }
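
The nanojit/NativeX64.cpp hits are all one-line instruction wrappers, and R there is evidently a file-local shorthand for Register (note the (R)0 and (Register)5 casts side by side at line 519), with I/I32/U64 playing the same role for immediates; RL/RQ/RB select the 32-bit, 64-bit, or 8-bit register name for the asm_output disassembly text. A generic sketch of that wrapper shape under those assumptions; the Register enum, emit helper, and name tables below are placeholders, not nanojit's:

    #include <cstdint>
    #include <cstdio>

    enum Register { RAX, RCX, RDX, RBX };  // placeholder register numbering
    typedef Register R;                    // the shorthand the one-line wrappers rely on
    typedef int32_t  I32;

    static const char* const QNAME[] = { "rax", "rcx", "rdx", "rbx" };  // 64-bit names (role of RQ)
    static const char* const LNAME[] = { "eax", "ecx", "edx", "ebx" };  // 32-bit names (role of RL)

    // Placeholder encoder: a real one splices the register bits into the
    // REX/ModRM bytes of an opcode template before emitting it.
    static void emit_rr(uint64_t opcodeTemplate, R l, R r)
    {
        (void)opcodeTemplate; (void)l; (void)r;
    }

    // The wrapper pattern from the listing: emit the bytes, then print the
    // disassembly text (asm_output in the real code).
    static void ADDRR(R l, R r)  { emit_rr(/* X64_addrr  */ 0, l, r); printf("addl %s, %s\n", LNAME[l], LNAME[r]); }
    static void ADDQRR(R l, R r) { emit_rr(/* X64_addqrr */ 0, l, r); printf("addq %s, %s\n", QNAME[l], QNAME[r]); }
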