#include "../../../cmd/ld/textflag.h"
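
// ARM atomic operations, for use by asm_$(GOOS)_arm.s.
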
TEXT ·armCompareAndSwapUint32(SB),NOSPLIT,$0-13
	MOVW	addr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R3
casloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R0
	CMP	R0, R2
	BNE	casfail
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	casloop
	MOVW	$1, R0
	MOVBU	R0, ret+12(FP)
	RET

casfail:
	MOVW	$0, R0
	MOVBU	R0, ret+12(FP)
	RET

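// Each 64-bit routine below first calls fastCheck64<> to verify that
// LDREXD/STREXD work on this machine.  It then checks alignment: if the
// address is not 8-aligned, R2 holds its low bits (1..7) and the
// MOVW R2, (R2) store to that near-nil address faults, so misuse panics
// instead of silently doing a non-atomic access.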
TEXT ·armCompareAndSwapUint64(SB),NOSPLIT,$0-21
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	oldlo+4(FP), R2
	MOVW	oldhi+8(FP), R3
	MOVW	newlo+12(FP), R4
	MOVW	newhi+16(FP), R5
cas64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R6	// loads R6 and R7
	CMP	R2, R6
	BNE	cas64fail
	CMP	R3, R7
	BNE	cas64fail
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	cas64loop
	MOVW	$1, R0
	MOVBU	R0, ret+20(FP)
	RET

cas64fail:
	MOVW	$0, R0
	MOVBU	R0, ret+20(FP)
	RET

TEXT ·armAddUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	delta+4(FP), R2
addloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	ADD	R2, R3
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	addloop
	MOVW	R3, ret+8(FP)
	RET

TEXT ·armAddUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	deltalo+4(FP), R2
	MOVW	deltahi+8(FP), R3
add64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	ADD.S	R2, R4		// add low words, set carry
	ADC	R3, R5		// add high words with carry
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	add64loop
	MOVW	R4, retlo+12(FP)
	MOVW	R5, rethi+16(FP)
	RET

TEXT ·armSwapUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	new+4(FP), R2
swaploop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	STREX	R2, (R1), R0
	CMP	$0, R0
	BNE	swaploop
	MOVW	R3, old+8(FP)
	RET

TEXT ·armSwapUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	newlo+4(FP), R2
	MOVW	newhi+8(FP), R3
swap64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	swap64loop
	MOVW	R4, oldlo+12(FP)
	MOVW	R5, oldhi+16(FP)
	RET

TEXT ·armLoadUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
load64loop:
	LDREXD	(R1), R2	// loads R2 and R3
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	load64loop
	MOVW	R2, vallo+4(FP)
	MOVW	R3, valhi+8(FP)
	RET

TEXT ·armStoreUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	vallo+4(FP), R2
	MOVW	valhi+8(FP), R3
store64loop:
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	store64loop
	RET

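// check64 verifies that LDREXD/STREXD actually work on this machine:
// an LDREXD followed immediately by STREXD on an 8-aligned stack slot
// should succeed.  It retries a few times in case the thread was
// rescheduled between the two instructions, and panics via ·panic64 if
// the store never succeeds (seen on some emulators), since the 64-bit
// atomics above would otherwise spin forever.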
TEXT check64<>(SB),NOSPLIT,$16-0
	MOVW	$10, R1		// retry count
	// 8-aligned stack address scratch space.
	MOVW	$8(R13), R5
	AND	$~7, R5
loop:
	LDREXD	(R5), R2
	STREXD	R2, (R5), R0
	CMP	$0, R0
	BEQ	ok
	SUB	$1, R1
	CMP	$0, R1
	BNE	loop
	// Could not get STREXD to succeed; the 64-bit atomics would loop forever.
	BL	·panic64(SB)
ok:
	RET

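// fastCheck64 is the fast, cached version of the check: once ok64<> is
// nonzero, it is just a load, a compare, and a conditional return, with
// no stack frame.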
TEXT fastCheck64<>(SB),NOSPLIT,$-4
	MOVW	ok64<>(SB), R0
	CMP	$0, R0	// have we been here before?
	RET.NE
	B	slowCheck64<>(SB)

TEXT slowCheck64<>(SB),NOSPLIT,$0-0
	BL	check64<>(SB)
	// Still here, so LDREXD/STREXD work; remember that in ok64<>.
	MOVW	$1, R0
	MOVW	R0, ok64<>(SB)
	RET

// ok64 is nonzero once the LDREXD/STREXD check has passed.
GLOBL ok64<>(SB), $4