#include "libavutil/arm/asm.S"
#include "asm-offsets.h"
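
@ Per-sample FIR resampling helpers (NEON).  Each function computes one output
@ sample: it selects a filter phase from the filter bank, multiply-accumulates
@ it against the source, and stores the result.  Register/stack usage below is
@ inferred from how the values are used, with struct offsets from asm-offsets.h:
@   r0        ResampleContext pointer (PHASE_SHIFT, FILTER_LENGTH, FILTER_BANK, SRC_INCR)
@   r1, r2    destination buffer and destination index
@   r3        source buffer
@   [sp, #8]  index into the filter bank (after the push below)
@ The per-format macros LOAD*/MUL4/MLA*/INIT4/STORE are (re)defined before each
@ instantiation of resample_one and resample_linear.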
.macro resample_one     fmt, es=2
function ff_resample_one_\fmt\()_neon, export=1
        push            {r4, r5}
        add             r1,  r1,  r2,  lsl #\es         @ dst += r2 samples
        ldr             r2,  [r0, #PHASE_SHIFT+4]       @ phase_mask (field after phase_shift)
        ldr             ip,  [sp, #8]                   @ index (first stack argument)
        ldr             r5,  [r0, #FILTER_LENGTH]
        and             r2,  ip,  r2                    @ index & phase_mask
        ldr             r4,  [r0, #PHASE_SHIFT]
        lsr             r4,  ip,  r4                    @ sample_index = index >> phase_shift
        mul             r2,  r2,  r5                    @ filter offset = (index & phase_mask) * filter_length
        ldr             ip,  [r0, #FILTER_BANK]
        add             r3,  r3,  r4,  lsl #\es         @ src += sample_index samples
        cmp             r5,  #8
        add             r0,  ip,  r2,  lsl #\es         @ r0 = selected filter phase
        blt             5f
8:                                                      @ filter_length >= 8: two groups of 4 taps per pass
        subs            r5,  r5,  #8
        LOAD4
        MUL4
7:
        LOAD4
        beq             6f
        cmp             r5,  #8
        MLA4
        blt             4f
        subs            r5,  r5,  #8
        LOAD4
        MLA4
        b               7b
6:                                                      @ last group of 4 taps
        MLA4
        STORE
        pop             {r4, r5}
        bx              lr
5:                                                      @ filter_length < 8: start from zeroed accumulators
        INIT4
4:                                                      @ 1 to 7 taps remaining
        cmp             r5,  #4
        blt             2f
        subs            r5,  r5,  #4
        LOAD4
        MLA4
        beq             0f
2:                                                      @ 1 to 3 taps remaining
        cmp             r5,  #2
        blt             1f
        subs            r5,  r5,  #2
        LOAD2
        MLA2
        beq             0f
1:                                                      @ last tap
        LOAD1
        MLA1
0:
        STORE
        pop             {r4, r5}
        bx              lr
endfunc

        .purgem         LOAD1
        .purgem         LOAD2
        .purgem         LOAD4
        .purgem         MLA1
        .purgem         MLA2
        .purgem         MLA4
        .purgem         MUL4
        .purgem         INIT4
        .purgem         STORE
.endm
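
@ float (flt) helper macros: 32-bit float samples and coefficients,
@ accumulated in q8 (d16/d17).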
.macro LOAD1
        veor.32         d0,  d0
        vld1.32         {d0[0]}, [r0]!
        vld1.32         {d4[0]}, [r3]!
.endm

.macro LOAD2
        vld1.32         {d0}, [r0]!
        vld1.32         {d4}, [r3]!
.endm

.macro LOAD4
        vld1.32         {d0,d1}, [r0]!
        vld1.32         {d4,d5}, [r3]!
.endm

.macro MLA1
        vmla.f32        d16, d0,  d4[0]
.endm

.macro MLA2
        vmla.f32        d16, d0,  d4
.endm

.macro MLA4
        vmla.f32        d16, d0,  d4
        vmla.f32        d17, d1,  d5
.endm

.macro MUL4
        vmul.f32        d16, d0,  d4
        vmul.f32        d17, d1,  d5
.endm

.macro INIT4
        veor.f32        q8,  q8
.endm

.macro STORE
        vpadd.f32       d16, d16, d17           @ horizontal sum of the 4-lane accumulator
        vpadd.f32       d16, d16, d16
        vst1.32         {d16[0]}, [r1]
.endm

resample_one flt, 2
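
@ s32 helper macros: 32-bit integer samples with widening multiply-accumulate
@ into 64-bit accumulators (q8/q9), narrowed back in STORE with a rounding,
@ saturating 30-bit shift.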
.macro LOAD1
        veor.32         d0,  d0
        vld1.32         {d0[0]}, [r0]!
        vld1.32         {d4[0]}, [r3]!
.endm

.macro LOAD2
        vld1.32         {d0}, [r0]!
        vld1.32         {d4}, [r3]!
.endm

.macro LOAD4
        vld1.32         {d0,d1}, [r0]!
        vld1.32         {d4,d5}, [r3]!
.endm

.macro MLA1
        vmlal.s32       q8,  d0,  d4[0]
.endm

.macro MLA2
        vmlal.s32       q8,  d0,  d4
.endm

.macro MLA4
        vmlal.s32       q8,  d0,  d4
        vmlal.s32       q9,  d1,  d5
.endm

.macro MUL4
        vmull.s32       q8,  d0,  d4
        vmull.s32       q9,  d1,  d5
.endm

.macro INIT4
        veor.s64        q8,  q8
        veor.s64        q9,  q9
.endm

.macro STORE
        vadd.s64        q8,  q8,  q9            @ combine the 64-bit partial sums
        vadd.s64        d16, d16, d17
        vqrshrn.s64     d16, q8,  #30           @ >> 30 with rounding and saturation
        vst1.32         {d16[0]}, [r1]
.endm

resample_one s32, 2
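
@ s16 helper macros: 16-bit integer samples with widening multiply-accumulate
@ into 32-bit accumulators (q8), narrowed back in STORE with a rounding,
@ saturating 15-bit shift.  Note the element-size argument is 1 (2-byte
@ samples) in the instantiation below.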
.macro LOAD1
        veor.16         d0,  d0
        vld1.16         {d0[0]}, [r0]!
        vld1.16         {d4[0]}, [r3]!
.endm

.macro LOAD2
        veor.16         d0,  d0
        vld1.32         {d0[0]}, [r0]!
        veor.16         d4,  d4
        vld1.32         {d4[0]}, [r3]!
.endm

.macro LOAD4
        vld1.16         {d0}, [r0]!
        vld1.16         {d4}, [r3]!
.endm

.macro MLA1
        vmlal.s16       q8,  d0,  d4[0]
.endm

.macro MLA2
        vmlal.s16       q8,  d0,  d4
.endm

.macro MLA4
        vmlal.s16       q8,  d0,  d4
.endm

.macro MUL4
        vmull.s16       q8,  d0,  d4
.endm

.macro INIT4
        veor.s32        q8,  q8
.endm

.macro STORE
        vpadd.s32       d16, d16, d17           @ horizontal sum of the 32-bit accumulator
        vpadd.s32       d16, d16, d16
        vqrshrn.s32     d16, q8,  #15           @ >> 15 with rounding and saturation
        vst1.16         {d16[0]}, [r1]
.endm

resample_one s16, 1
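
@ Same skeleton as resample_one, but it also walks the next filter phase
@ (r2 = current filter + filter_length elements) and loads SRC_INCR into r4 so
@ that STORE can linearly interpolate between the two filtered results using
@ the fractional position at [sp, #12] (after the push).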
.macro resample_linear  fmt, es=2
function ff_resample_linear_\fmt\()_neon, export=1
        push            {r4, r5}
        add             r1,  r1,  r2,  lsl #\es         @ dst += r2 samples
        ldr             r2,  [r0, #PHASE_SHIFT+4]       @ phase_mask (field after phase_shift)
        ldr             ip,  [sp, #8]                   @ index (first stack argument)
        ldr             r5,  [r0, #FILTER_LENGTH]
        and             r2,  ip,  r2                    @ index & phase_mask
        ldr             r4,  [r0, #PHASE_SHIFT]
        lsr             r4,  ip,  r4                    @ sample_index = index >> phase_shift
        mul             r2,  r2,  r5                    @ filter offset = (index & phase_mask) * filter_length
        ldr             ip,  [r0, #FILTER_BANK]
        add             r3,  r3,  r4,  lsl #\es         @ src += sample_index samples
        cmp             r5,  #8
        ldr             r4,  [r0, #SRC_INCR]
        add             r0,  ip,  r2,  lsl #\es         @ r0 = selected filter phase
        add             r2,  r0,  r5,  lsl #\es         @ r2 = next filter phase
        blt             5f
8:                                                      @ filter_length >= 8: two groups of 4 taps per pass
        subs            r5,  r5,  #8
        LOAD4
        MUL4
7:
        LOAD4
        beq             6f
        cmp             r5,  #8
        MLA4
        blt             4f
        subs            r5,  r5,  #8
        LOAD4
        MLA4
        b               7b
6:                                                      @ last group of 4 taps
        MLA4
        STORE
        pop             {r4, r5}
        bx              lr
5:                                                      @ filter_length < 8: start from zeroed accumulators
        INIT4
4:                                                      @ 1 to 7 taps remaining
        cmp             r5,  #4
        blt             2f
        subs            r5,  r5,  #4
        LOAD4
        MLA4
        beq             0f
2:                                                      @ 1 to 3 taps remaining
        cmp             r5,  #2
        blt             1f
        subs            r5,  r5,  #2
        LOAD2
        MLA2
        beq             0f
1:                                                      @ last tap
        LOAD1
        MLA1
0:
        STORE
        pop             {r4, r5}
        bx              lr
endfunc

        .purgem         LOAD1
        .purgem         LOAD2
        .purgem         LOAD4
        .purgem         MLA1
        .purgem         MLA2
        .purgem         MLA4
        .purgem         MUL4
        .purgem         INIT4
        .purgem         STORE
.endm
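
@ float (flt) helper macros for the linear variant: q9 accumulates the
@ current filter phase, q8 the next one; STORE blends the two.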
.macro LOAD1
        veor.32         d0,  d0
        veor.32         d2,  d2
        vld1.32         {d0[0]}, [r0]!
        vld1.32         {d2[0]}, [r2]!
        vld1.32         {d4[0]}, [r3]!
.endm

.macro LOAD2
        vld1.32         {d0}, [r0]!
        vld1.32         {d2}, [r2]!
        vld1.32         {d4}, [r3]!
.endm

.macro LOAD4
        vld1.32         {d0,d1}, [r0]!
        vld1.32         {d2,d3}, [r2]!
        vld1.32         {d4,d5}, [r3]!
.endm

.macro MLA1
        vmla.f32        d18, d0,  d4[0]
        vmla.f32        d16, d2,  d4[0]
.endm

.macro MLA2
        vmla.f32        d18, d0,  d4
        vmla.f32        d16, d2,  d4
.endm

.macro MLA4
        vmla.f32        q9,  q0,  q2
        vmla.f32        q8,  q1,  q2
.endm

.macro MUL4
        vmul.f32        q9,  q0,  q2
        vmul.f32        q8,  q1,  q2
.endm

.macro INIT4
        veor.f32        q9,  q9
        veor.f32        q8,  q8
.endm

.macro STORE
        vldr            s0,  [sp, #12]          @ frac (second stack argument)
        vmov            s1,  r4                 @ src_incr
        vcvt.f32.s32    d0,  d0                 @ convert frac and src_incr to float
        vsub.f32        q8,  q8,  q9            @ next - current
        vpadd.f32       d18, d18, d19
        vpadd.f32       d16, d16, d17
        vpadd.f32       d2,  d18, d18           @ s4 = current
        vpadd.f32       d1,  d16, d16           @ s2 = next - current
        vmul.f32        s2,  s2,  s0            @ (next - current) * frac
        vdiv.f32        s2,  s2,  s1            @ (next - current) * frac / src_incr
        vadd.f32        s4,  s4,  s2            @ current + (next - current) * frac / src_incr
        vstr            s4,  [r1]
.endm

resample_linear flt, 2