// linker define sc25519_barrett
// linker use MU0
// linker use MU1
// linker use MU2
// linker use MU3
// linker use MU4
// linker use ORDER0
// linker use ORDER1
// linker use ORDER2
// linker use ORDER3

# qhasm: int64 rp

# qhasm: int64 xp

# qhasm: input rp

# qhasm: input xp

# qhasm: int64 caller1

# qhasm: int64 caller2

# qhasm: int64 caller3

# qhasm: int64 caller4

# qhasm: int64 caller5

# qhasm: int64 caller6

# qhasm: int64 caller7

# qhasm: caller caller1

# qhasm: caller caller2

# qhasm: caller caller3

# qhasm: caller caller4

# qhasm: caller caller5

# qhasm: caller caller6

# qhasm: caller caller7

# qhasm: stack64 caller1_stack

# qhasm: stack64 caller2_stack

# qhasm: stack64 caller3_stack

# qhasm: stack64 caller4_stack

# qhasm: stack64 caller5_stack

# qhasm: stack64 caller6_stack

# qhasm: stack64 caller7_stack

# qhasm: int64 q23

# qhasm: int64 q24

# qhasm: int64 q30

# qhasm: int64 q31

# qhasm: int64 q32

# qhasm: int64 q33

# qhasm: int64 r20

# qhasm: int64 r21

# qhasm: int64 r22

# qhasm: int64 r23

# qhasm: int64 r24

# qhasm: int64 r0

# qhasm: int64 r1

# qhasm: int64 r2

# qhasm: int64 r3

# qhasm: int64 t0

# qhasm: int64 t1

# qhasm: int64 t2

# qhasm: int64 t3

# qhasm: int64 rax

# qhasm: int64 rdx

# qhasm: int64 c

# qhasm: int64 zero

# qhasm: int64 mask

# qhasm: int64 nmask

# qhasm: stack64 q30_stack

# qhasm: stack64 q31_stack

# qhasm: stack64 q32_stack

# qhasm: stack64 q33_stack

# qhasm: enter CRYPTO_SHARED_NAMESPACE(sc25519_barrett)
.text
.p2align 5
.globl _CRYPTO_SHARED_NAMESPACE(sc25519_barrett)
.globl CRYPTO_SHARED_NAMESPACE(sc25519_barrett)
_CRYPTO_SHARED_NAMESPACE(sc25519_barrett):
CRYPTO_SHARED_NAMESPACE(sc25519_barrett):
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm: caller1_stack = caller1
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
movq %rbp,48(%rsp)

# qhasm: zero ^= zero
xor %rcx,%rcx

# qhasm: q30 ^= q30
xor %r8,%r8

# qhasm: q31 ^= q31
xor %r9,%r9

# qhasm: q32 ^= q32
xor %r10,%r10

# qhasm: q33 ^= q33
xor %r11,%r11

# qhasm: rax = *(uint64 *)(xp + 24)
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU3)
mulq CRYPTO_SHARED_NAMESPACE(MU3)(%rip)

# qhasm: q23 = rax
mov %rax,%r12

# qhasm: c = rdx
mov %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 24)
movq 24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU4)
mulq CRYPTO_SHARED_NAMESPACE(MU4)(%rip)

# qhasm: q24 = rax
mov %rax,%r14

# qhasm: carry? q24 += c
add %r13,%r14

# qhasm: q30 += rdx + carry
adc %rdx,%r8
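# sc25519_barrett(rp, xp): Barrett reduction of the 512-bit value
# xp[0..7] modulo the 253-bit group order.  MU0..MU4 are expected to
# hold the five limbs of the precomputed constant mu = floor(2^512 /
# order), and ORDER0..ORDER3 the four limbs of the order itself.
#
# Step 1 (started above, continued below): q3 = floor(q1*mu / 2^320),
# where q1 = floor(x / 2^192) = x[3..7].  Only columns 3..8 of the
# ten-limb product q1*mu are formed: q30..q33 become q3, while q23 and
# q24 (columns 3 and 4) are computed solely so that the carries
# flowing into column 5 are exact; their values are discarded.
#
# The rows for x4..x7 fold each 64x64->128-bit product into its
# column with the recurring pattern
#
#   carry? LIMB += rax      low half of the product
#   rdx += zero + carry     fold the add's carry into the high half
#   carry? LIMB += c        pending carry from the previous column
#   c = 0
#   c += rdx + carry        pending carry for the next column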
# qhasm: rax = *(uint64 *)(xp + 32)
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU2)
mulq CRYPTO_SHARED_NAMESPACE(MU2)(%rip)

# qhasm: carry? q23 += rax
add %rax,%r12

# qhasm: c = 0
mov $0,%r13

# qhasm: c += rdx + carry
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU3)
mulq CRYPTO_SHARED_NAMESPACE(MU3)(%rip)

# qhasm: carry? q24 += rax
add %rax,%r14

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q24 += c
add %r13,%r14

# qhasm: c = 0
mov $0,%r13

# qhasm: c += rdx + carry
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
movq 32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU4)
mulq CRYPTO_SHARED_NAMESPACE(MU4)(%rip)

# qhasm: carry? q30 += rax
add %rax,%r8

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q30 += c
add %r13,%r8

# qhasm: q31 += rdx + carry
adc %rdx,%r9

# qhasm: rax = *(uint64 *)(xp + 40)
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU1)
mulq CRYPTO_SHARED_NAMESPACE(MU1)(%rip)

# qhasm: carry? q23 += rax
add %rax,%r12

# qhasm: c = 0
mov $0,%r13

# qhasm: c += rdx + carry
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU2)
mulq CRYPTO_SHARED_NAMESPACE(MU2)(%rip)

# qhasm: carry? q24 += rax
add %rax,%r14

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q24 += c
add %r13,%r14

# qhasm: c = 0
mov $0,%r13

# qhasm: c += rdx + carry
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU3)
mulq CRYPTO_SHARED_NAMESPACE(MU3)(%rip)

# qhasm: carry? q30 += rax
add %rax,%r8

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q30 += c
add %r13,%r8

# qhasm: c = 0
mov $0,%r13

# qhasm: c += rdx + carry
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
movq 40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU4)
mulq CRYPTO_SHARED_NAMESPACE(MU4)(%rip)

# qhasm: carry? q31 += rax
add %rax,%r9

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q31 += c
add %r13,%r9

# qhasm: q32 += rdx + carry
adc %rdx,%r10

# qhasm: rax = *(uint64 *)(xp + 48)
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU0)
mulq CRYPTO_SHARED_NAMESPACE(MU0)(%rip)

# qhasm: carry? q23 += rax
add %rax,%r12

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU1)
mulq CRYPTO_SHARED_NAMESPACE(MU1)(%rip)

# qhasm: carry? q24 += rax
add %rax,%r14

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q24 += c
add %r12,%r14

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU2)
mulq CRYPTO_SHARED_NAMESPACE(MU2)(%rip)

# qhasm: carry? q30 += rax
add %rax,%r8

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q30 += c
add %r12,%r8

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU3)
mulq CRYPTO_SHARED_NAMESPACE(MU3)(%rip)

# qhasm: carry? q31 += rax
add %rax,%r9

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q31 += c
add %r12,%r9

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
movq 48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU4)
mulq CRYPTO_SHARED_NAMESPACE(MU4)(%rip)

# qhasm: carry? q32 += rax
add %rax,%r10

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q32 += c
add %r12,%r10

# qhasm: q33 += rdx + carry
adc %rdx,%r11
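# Final row of step 1: fold in x7 = *(uint64 *)(xp + 56).  As each
# limb of q3 completes (q30..q33 = columns 5..8 of q1*mu), it is
# spilled to the stack to free registers for the multiplication by
# the group order that follows.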
# qhasm: rax = *(uint64 *)(xp + 56)
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU0)
mulq CRYPTO_SHARED_NAMESPACE(MU0)(%rip)

# qhasm: carry? q24 += rax
add %rax,%r14

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 56)
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU1)
mulq CRYPTO_SHARED_NAMESPACE(MU1)(%rip)

# qhasm: carry? q30 += rax
add %rax,%r8

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q30 += c
add %r12,%r8

# qhasm: c = 0
mov $0,%r12

# qhasm: c += rdx + carry
adc %rdx,%r12

# qhasm: q30_stack = q30
movq %r8,56(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU2)
mulq CRYPTO_SHARED_NAMESPACE(MU2)(%rip)

# qhasm: carry? q31 += rax
add %rax,%r9

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q31 += c
add %r12,%r9

# qhasm: c = 0
mov $0,%r8

# qhasm: c += rdx + carry
adc %rdx,%r8

# qhasm: q31_stack = q31
movq %r9,64(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU3)
mulq CRYPTO_SHARED_NAMESPACE(MU3)(%rip)

# qhasm: carry? q32 += rax
add %rax,%r10

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? q32 += c
add %r8,%r10

# qhasm: c = 0
mov $0,%r8

# qhasm: c += rdx + carry
adc %rdx,%r8

# qhasm: q32_stack = q32
movq %r10,72(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
movq 56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(MU4)
mulq CRYPTO_SHARED_NAMESPACE(MU4)(%rip)

# qhasm: carry? q33 += rax
add %rax,%r11

# qhasm: q33 += c + carry
adc %r8,%r11

# qhasm: free rdx

# qhasm: q33_stack = q33
movq %r11,80(%rsp)
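# Step 2: r2 = (q3 * order) mod 2^256, accumulated in r20..r23 from
# the limbs of q3 reloaded off the stack; high halves that would only
# feed columns 4 and up are discarded (free rdx).  Step 3 then forms
# r = (x - r2) mod 2^256, which is congruent to x modulo the order
# and, by the standard Barrett bound, less than 3*order, so the two
# conditional-subtraction rounds at the end (sub/sbb followed by
# cmovae) suffice to land in [0, order).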
# qhasm: rax = q30_stack
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
mulq CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip)

# qhasm: r20 = rax
mov %rax,%r8

# qhasm: c = rdx
mov %rdx,%r9

# qhasm: rax = q30_stack
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER1)
mulq CRYPTO_SHARED_NAMESPACE(ORDER1)(%rip)

# qhasm: r21 = rax
mov %rax,%r10

# qhasm: carry? r21 += c
add %r9,%r10

# qhasm: c = 0
mov $0,%r9

# qhasm: c += rdx + carry
adc %rdx,%r9

# qhasm: rax = q30_stack
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER2)
mulq CRYPTO_SHARED_NAMESPACE(ORDER2)(%rip)

# qhasm: r22 = rax
mov %rax,%r11

# qhasm: carry? r22 += c
add %r9,%r11

# qhasm: c = 0
mov $0,%r9

# qhasm: c += rdx + carry
adc %rdx,%r9

# qhasm: rax = q30_stack
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER3)
mulq CRYPTO_SHARED_NAMESPACE(ORDER3)(%rip)

# qhasm: free rdx

# qhasm: r23 = rax
mov %rax,%r12

# qhasm: r23 += c
add %r9,%r12

# qhasm: rax = q31_stack
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
mulq CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip)

# qhasm: carry? r21 += rax
add %rax,%r10

# qhasm: c = 0
mov $0,%r9

# qhasm: c += rdx + carry
adc %rdx,%r9

# qhasm: rax = q31_stack
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER1)
mulq CRYPTO_SHARED_NAMESPACE(ORDER1)(%rip)

# qhasm: carry? r22 += rax
add %rax,%r11

# qhasm: rdx += zero + carry
adc %rcx,%rdx

# qhasm: carry? r22 += c
add %r9,%r11

# qhasm: c = 0
mov $0,%rcx

# qhasm: c += rdx + carry
adc %rdx,%rcx

# qhasm: rax = q31_stack
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER2)
mulq CRYPTO_SHARED_NAMESPACE(ORDER2)(%rip)

# qhasm: free rdx

# qhasm: r23 += rax
add %rax,%r12

# qhasm: r23 += c
add %rcx,%r12

# qhasm: rax = q32_stack
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
mulq CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip)

# qhasm: carry? r22 += rax
add %rax,%r11

# qhasm: c = 0
mov $0,%rcx

# qhasm: c += rdx + carry
adc %rdx,%rcx

# qhasm: rax = q32_stack
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER1)
mulq CRYPTO_SHARED_NAMESPACE(ORDER1)(%rip)

# qhasm: free rdx

# qhasm: r23 += rax
add %rax,%r12

# qhasm: r23 += c
add %rcx,%r12

# qhasm: rax = q33_stack
movq 80(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
mulq CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip)

# qhasm: free rdx

# qhasm: r23 += rax
add %rax,%r12

# qhasm: r0 = *(uint64 *)(xp + 0)
movq 0(%rsi),%rdx

# qhasm: carry? r0 -= r20
sub %r8,%rdx

# qhasm: t0 = r0
mov %rdx,%rcx

# qhasm: r1 = *(uint64 *)(xp + 8)
movq 8(%rsi),%r8

# qhasm: carry? r1 -= r21 - carry
sbb %r10,%r8

# qhasm: t1 = r1
mov %r8,%r9

# qhasm: r2 = *(uint64 *)(xp + 16)
movq 16(%rsi),%rax

# qhasm: carry? r2 -= r22 - carry
sbb %r11,%rax

# qhasm: t2 = r2
mov %rax,%r10

# qhasm: r3 = *(uint64 *)(xp + 24)
movq 24(%rsi),%rsi

# qhasm: r3 -= r23 - carry
sbb %r12,%rsi

# qhasm: t3 = r3
mov %rsi,%r11

# qhasm: carry? t0 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
sub CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip),%rcx

# qhasm: carry? t1 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER1) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER1)(%rip),%r9

# qhasm: carry? t2 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER2) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER2)(%rip),%r10

# qhasm: carry? t3 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER3) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER3)(%rip),%r11

# qhasm: r0 = t0 if !unsigned<
cmovae %rcx,%rdx

# qhasm: t0 = r0
mov %rdx,%rcx

# qhasm: r1 = t1 if !unsigned<
cmovae %r9,%r8

# qhasm: t1 = r1
mov %r8,%r9

# qhasm: r2 = t2 if !unsigned<
cmovae %r10,%rax

# qhasm: t2 = r2
mov %rax,%r10

# qhasm: r3 = t3 if !unsigned<
cmovae %r11,%rsi

# qhasm: t3 = r3
mov %rsi,%r11

# qhasm: carry? t0 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER0)
sub CRYPTO_SHARED_NAMESPACE(ORDER0)(%rip),%rcx

# qhasm: carry? t1 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER1) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER1)(%rip),%r9

# qhasm: carry? t2 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER2) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER2)(%rip),%r10

# qhasm: carry? t3 -= *(uint64 *) &CRYPTO_SHARED_NAMESPACE(ORDER3) - carry
sbb CRYPTO_SHARED_NAMESPACE(ORDER3)(%rip),%r11

# qhasm: r0 = t0 if !unsigned<
cmovae %rcx,%rdx

# qhasm: r1 = t1 if !unsigned<
cmovae %r9,%r8

# qhasm: r2 = t2 if !unsigned<
cmovae %r10,%rax

# qhasm: r3 = t3 if !unsigned<
cmovae %r11,%rsi

# qhasm: *(uint64 *)(rp + 0) = r0
movq %rdx,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = r1
movq %r8,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
movq %rax,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
movq %rsi,24(%rdi)

# qhasm: caller1 = caller1_stack
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret