Lines Matching +full:0 +full:x14

5 #define __has_feature(x) 0
18 .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
20 .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
22 .quad 1,0,0,0
23 …9,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
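For reference, the data tables above decode as follows; the visible bytes of line 23 are the tail of the usual CRYPTOGAMS credit string, "...GAMS by <appro@openssl.org>". A plain-C view of lines 18-22, assuming the file's little-endian 64-bit limb order (the array names are mine, not the file's):

#include <stdint.h>

/* Line 18: the NIST P-256 prime, least-significant limb first:
 * p = 2^256 - 2^224 + 2^192 + 2^96 - 1 */
static const uint64_t P256_P[4] = {
    0xffffffffffffffffULL, 0x00000000ffffffffULL,
    0x0000000000000000ULL, 0xffffffff00000001ULL,
};

/* Line 20: 1 in Montgomery form, i.e. 2^256 mod p */
static const uint64_t P256_ONE_MONT[4] = {
    0x0000000000000001ULL, 0xffffffff00000000ULL,
    0xffffffffffffffffULL, 0x00000000fffffffeULL,
};

/* Line 22: the plain integer 1 */
static const uint64_t P256_ONE[4] = { 1, 0, 0, 0 };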
34 add x29,sp,#0
37 ldr x3,[x2] // bp[0]
57 add x29,sp,#0
80 add x29,sp,#0
82 ldp x14,x15,[x1]
102 add x29,sp,#0
105 mov x14,xzr // a = 0
118 // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
119 // to x4-x7 and b[0] to x3
123 mul x14,x4,x3 // a[0]*b[0]
126 mul x15,x5,x3 // a[1]*b[0]
129 mul x16,x6,x3 // a[2]*b[0]
132 mul x17,x7,x3 // a[3]*b[0]
137 lsl x8,x14,#32
139 lsr x9,x14,#32
143 subs x10,x14,x8 // "*0xffffffff00000001"
144 sbc x11,x14,x9
145 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
146 mul x8,x4,x3 // lo(a[0]*b[i])
149 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
155 adds x14,x14,x8 // accumulate low parts of multiplication
156 umulh x8,x4,x3 // hi(a[0]*b[i])
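Lines 137-156 are one pass of the word-by-word Montgomery reduction, interleaved with the next multiplication column. Two properties of p make the pass multiplier-free: p mod 2^64 = -1, so the per-limb Montgomery quotient is acc[0] itself; and acc[0]*p = (acc[0]*0xffffffff00000001)<<192 + (acc[0]<<96) - acc[0], which is exactly what the comments "+=acc[0]<<96 and omit acc[0]" and "+=acc[0]*0xffffffff00000001" describe. The lsl/lsr/subs/sbc four-pack synthesizes the 128-bit product by the top limb of p without a multiply. A checkable C rendering of that identity (unsigned __int128 is a GCC/Clang extension):

#include <assert.h>
#include <stdint.h>

typedef unsigned __int128 u128; /* GCC/Clang extension */

/* With t0 = a<<32 and t1 = a>>32, the 128-bit difference (a:a) - (t1:t0)
 * is exactly a * 0xffffffff00000001, a times the top limb of p. */
static u128 times_top_limb(uint64_t a)
{
    uint64_t t0 = a << 32, t1 = a >> 32;
    u128 a_a   = ((u128)a  << 64) | a;   /* a*2^64 + a */
    u128 t1_t0 = ((u128)t1 << 64) | t0;  /* a*2^32     */
    return a_a - t1_t0;                  /* a*(2^64 - 2^32 + 1) */
}

int main(void)
{
    uint64_t a = 0x0123456789abcdefULL;
    assert(times_top_limb(a) == (u128)a * 0xffffffff00000001ULL);
    return 0;
}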
166 lsl x8,x14,#32
168 lsr x9,x14,#32
172 subs x10,x14,x8 // "*0xffffffff00000001"
173 sbc x11,x14,x9
174 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
175 mul x8,x4,x3 // lo(a[0]*b[i])
178 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
184 adds x14,x14,x8 // accumulate low parts of multiplication
185 umulh x8,x4,x3 // hi(a[0]*b[i])
195 lsl x8,x14,#32
197 lsr x9,x14,#32
201 subs x10,x14,x8 // "*0xffffffff00000001"
202 sbc x11,x14,x9
203 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
204 mul x8,x4,x3 // lo(a[0]*b[i])
207 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
213 adds x14,x14,x8 // accumulate low parts of multiplication
214 umulh x8,x4,x3 // hi(a[0]*b[i])
223 lsl x8,x14,#32
225 lsr x9,x14,#32
230 subs x10,x14,x8 // "*0xffffffff00000001"
231 sbc x11,x14,x9
232 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
234 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
238 adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
244 csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
247 stp x14,x15,[x0]
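Lines 238-247 are the standard Montgomery tail: the accumulated value is below 2p, so subtracting p at most once finishes the reduction. The subtraction runs limb-wise against p (adds x8,x14,#1 is ret minus p's all-ones low limb) and csel keeps the un-subtracted value when a borrow occurs. A branch-free C sketch of the same tail (the function name is mine; 'carry' is the bit that overflowed limb 3 during accumulation):

#include <stdint.h>

/* Subtract p at most once, assuming the input is below 2*p as the
 * callers guarantee; like the csel sequence, both candidates are
 * computed and the choice is made by masking, not branching. */
void p256_reduce_once(uint64_t r[4], const uint64_t a[4], uint64_t carry)
{
    static const uint64_t P[4] = {
        0xffffffffffffffffULL, 0x00000000ffffffffULL,
        0x0000000000000000ULL, 0xffffffff00000001ULL,
    };
    uint64_t t[4], borrow = 0;
    for (int i = 0; i < 4; i++) {
        uint64_t lo = a[i] - P[i];
        uint64_t b  = a[i] < P[i];
        t[i] = lo - borrow;
        borrow = b | (lo < borrow);
    }
    /* keep a only when the subtraction borrowed and nothing overflowed */
    uint64_t keep = (uint64_t)0 - (borrow & (carry ^ 1));
    for (int i = 0; i < 4; i++)
        r[i] = (a[i] & keep) | (t[i] & ~keep);
}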
254 // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
273 mul x15,x5,x4 // a[1]*a[0]
275 mul x16,x6,x4 // a[2]*a[0]
277 mul x17,x7,x4 // a[3]*a[0]
292 mul x14,x4,x4 // a[0]*a[0]
320 lsl x8,x14,#32
322 lsr x9,x14,#32
324 subs x10,x14,x8 // "*0xffffffff00000001"
325 sbc x11,x14,x9
326 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
328 lsl x8,x14,#32
329 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
330 lsr x9,x14,#32
332 subs x10,x14,x8 // "*0xffffffff00000001"
333 sbc x11,x14,x9
334 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
336 lsl x8,x14,#32
337 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
338 lsr x9,x14,#32
340 subs x10,x14,x8 // "*0xffffffff00000001"
341 sbc x11,x14,x9
342 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
344 lsl x8,x14,#32
345 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
346 lsr x9,x14,#32
348 subs x10,x14,x8 // "*0xffffffff00000001"
349 sbc x11,x14,x9
350 adds x14,x15,x8 // +=acc[0]<<96 and omit acc[0]
352 adcs x16,x17,x10 // +=acc[0]*0xffffffff00000001
355 adds x14,x14,x19 // accumulate upper half
361 adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
367 csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
370 stp x14,x15,[x0]
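The squaring path (lines 273-292 above) uses the classic shortcut: form each cross product a[i]*a[j] with i < j only once, double the whole triangle, then drop the squares a[i]*a[i] onto the even limb positions. A standalone C sketch of that structure, without the Montgomery reduction the real routine fuses in (the name is mine):

#include <stdint.h>

typedef unsigned __int128 u128; /* GCC/Clang extension */

/* Plain 256x256 -> 512-bit squaring with the doubled-triangle trick. */
void sqr_4x64(uint64_t r[8], const uint64_t a[4])
{
    uint64_t t[8] = { 0 };
    u128 acc;

    /* off-diagonal products a[i]*a[j], i < j, each computed once */
    for (int i = 0; i < 4; i++) {
        acc = 0;
        for (int j = i + 1; j < 4; j++) {
            acc += (u128)a[i] * a[j] + t[i + j];
            t[i + j] = (uint64_t)acc;
            acc >>= 64;
        }
        t[i + 4] = (uint64_t)acc;
    }

    /* double the triangle */
    uint64_t carry = 0;
    for (int i = 0; i < 8; i++) {
        uint64_t v = t[i];
        t[i] = (v << 1) | carry;
        carry = v >> 63;
    }

    /* add the diagonal a[i]^2 onto each even/odd limb pair */
    acc = 0;
    for (int i = 0; i < 4; i++) {
        u128 sq = (u128)a[i] * a[i];
        acc += (u128)t[2 * i] + (uint64_t)sq;
        r[2 * i] = (uint64_t)acc;
        acc >>= 64;
        acc += (u128)t[2 * i + 1] + (uint64_t)(sq >> 64);
        r[2 * i + 1] = (uint64_t)acc;
        acc >>= 64;
    }
}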
383 adds x14,x14,x8 // ret = a+b
389 adds x8,x14,#1 // subs x8,x14,#-1 // tmp = ret-modulus
395 csel x14,x14,x8,lo // ret = borrow ? ret : ret-modulus
398 stp x14,x15,[x0]
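Lines 383-398 are modular addition: a plain 256-bit add followed by the same subtract-p-once tail as the multiplier. A sketch reusing the p256_reduce_once helper from above:

#include <stdint.h>

typedef unsigned __int128 u128;

void p256_reduce_once(uint64_t r[4], const uint64_t a[4], uint64_t carry);

void p256_add(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
{
    uint64_t t[4], carry = 0;
    for (int i = 0; i < 4; i++) {
        u128 s = (u128)a[i] + b[i] + carry;
        t[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
    p256_reduce_once(r, t, carry); /* a+b < 2p, one subtraction suffices */
}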
410 subs x14,x14,x8 // ret = a-b
416 subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
422 csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
425 stp x14,x15,[x0]
437 subs x14,x8,x14 // ret = b-a
443 subs x8,x14,#1 // adds x8,x14,#-1 // tmp = ret+modulus
449 csel x14,x14,x8,eq // ret = borrow ? ret+modulus : ret
452 stp x14,x15,[x0]
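Lines 410-425 and 437-452 are the two subtraction flavors, ret = a-b and the operand-reversed ret = b-a; in both, p is added back when the raw subtraction borrows (the commented-out adds x8,...,#-1 forms show the intended limb-wise ret+modulus). One C sketch covers both, since the second variant only swaps its arguments:

#include <stdint.h>

typedef unsigned __int128 u128;

/* a - b mod p; the 437-452 variant is the same call with a and b swapped */
void p256_sub(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
{
    static const uint64_t P[4] = {
        0xffffffffffffffffULL, 0x00000000ffffffffULL,
        0x0000000000000000ULL, 0xffffffff00000001ULL,
    };
    uint64_t t[4], borrow = 0;
    for (int i = 0; i < 4; i++) {
        uint64_t lo = a[i] - b[i];
        uint64_t bw = a[i] < b[i];
        t[i] = lo - borrow;
        borrow = bw | (lo < borrow);
    }
    /* add p back when the subtraction borrowed, without branching */
    uint64_t mask = (uint64_t)0 - borrow;
    uint64_t carry = 0;
    for (int i = 0; i < 4; i++) {
        u128 s = (u128)t[i] + (P[i] & mask) + carry;
        r[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
}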
462 subs x8,x14,#1 // adds x8,x14,#-1 // tmp = a+modulus
467 tst x14,#1 // is a even?
469 csel x14,x14,x8,eq // ret = even ? a : a+modulus
475 lsr x14,x14,#1 // ret >>= 1
476 orr x14,x14,x15,lsl#63
482 stp x14,x15,[x0]
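Lines 462-482 halve a field element: if a is odd, adding p makes the sum even without changing its residue mod p, and the 257-bit result is then shifted right one bit, with the top bit coming from the carry. A C sketch:

#include <stdint.h>

typedef unsigned __int128 u128;

/* a/2 mod p, mirroring the lsr/orr chain at lines 475-476 */
void p256_div_by_2(uint64_t r[4], const uint64_t a[4])
{
    static const uint64_t P[4] = {
        0xffffffffffffffffULL, 0x00000000ffffffffULL,
        0x0000000000000000ULL, 0xffffffff00000001ULL,
    };
    uint64_t mask = (uint64_t)0 - (a[0] & 1); /* tst x14,#1 */
    uint64_t t[4], carry = 0;
    for (int i = 0; i < 4; i++) {
        u128 s = (u128)a[i] + (P[i] & mask) + carry;
        t[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
    for (int i = 0; i < 3; i++)
        r[i] = (t[i] >> 1) | (t[i + 1] << 63);
    r[3] = (t[3] >> 1) | (carry << 63);
}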
494 add x29,sp,#0
500 ldp x14,x15,[x1,#32]
505 mov x8,x14
512 add x0,sp,#0
520 mov x4,x14 // put Zsqr aside for p256_sub
527 add x2,x22,#0
528 mov x14,x4 // restore Zsqr
530 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
533 ldp x6,x7,[sp,#0+16]
537 add x0,sp,#0
547 mov x8,x14
549 ldp x4,x5,[sp,#0] // forward load for p256_sqr_mont
552 ldp x6,x7,[sp,#0+16]
569 mov x8,x14 // duplicate M
573 mov x4,x14 // put M aside
583 ldp x4,x5,[sp,#0]
585 ldp x6,x7,[sp,#0+16]
588 add x2,x22,#0
589 add x0,sp,#0
592 mov x8,x14
601 add x0,x21,#0
607 add x2,sp,#0
608 add x0,sp,#0
612 mov x4,x14 // copy S
623 add sp,x29,#0 // destroy frame
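This block (lines 494-623) is the point-doubling routine. The temporaries its comments name (Zsqr, S, M) are the standard Jacobian doubling quantities for the a = -3 curve: M = 3(X1^2 - Z1^4), S = 4*X1*Y1^2, X3 = M^2 - 2S, Y3 = M(S - X3) - 8*Y1^4, Z3 = 2*Y1*Z1. A C composition of the field sketches above, where p256_mul_mont and p256_sqr_mont stand in for the __ecp_nistz256_* routines; the asm's register reuse and "forward loads" are omitted:

#include <stdint.h>

void p256_mul_mont(uint64_t r[4], const uint64_t a[4], const uint64_t b[4]);
void p256_sqr_mont(uint64_t r[4], const uint64_t a[4]);
void p256_add(uint64_t r[4], const uint64_t a[4], const uint64_t b[4]);
void p256_sub(uint64_t r[4], const uint64_t a[4], const uint64_t b[4]);
void p256_div_by_2(uint64_t r[4], const uint64_t a[4]);

void point_double_sketch(uint64_t X3[4], uint64_t Y3[4], uint64_t Z3[4],
                         const uint64_t X1[4], const uint64_t Y1[4],
                         const uint64_t Z1[4])
{
    uint64_t S[4], M[4], Zsqr[4], T[4];

    p256_add(S, Y1, Y1);          /* S = 2*Y1                       */
    p256_sqr_mont(Zsqr, Z1);      /* Zsqr = Z1^2                    */
    p256_sqr_mont(S, S);          /* S = 4*Y1^2                     */
    p256_mul_mont(Z3, Z1, Y1);    /* Z3 = Y1*Z1                     */
    p256_add(Z3, Z3, Z3);         /* Z3 = 2*Y1*Z1                   */

    p256_add(M, X1, Zsqr);        /* M = X1 + Z1^2                  */
    p256_sub(Zsqr, X1, Zsqr);     /* Zsqr = X1 - Z1^2 (Zsqr reused) */
    p256_sqr_mont(Y3, S);         /* Y3 = 16*Y1^4                   */
    p256_div_by_2(Y3, Y3);        /* Y3 = 8*Y1^4                    */
    p256_mul_mont(M, M, Zsqr);    /* M = X1^2 - Z1^4                */
    p256_add(T, M, M);
    p256_add(M, T, M);            /* M = 3*(X1^2 - Z1^4)            */

    p256_mul_mont(S, S, X1);      /* S = 4*X1*Y1^2                  */
    p256_add(T, S, S);            /* T = 2*S                        */
    p256_sqr_mont(X3, M);         /* X3 = M^2                       */
    p256_sub(X3, X3, T);          /* X3 = M^2 - 2*S                 */

    p256_sub(S, S, X3);           /* S = S - X3                     */
    p256_mul_mont(S, S, M);       /* S = M*(S - X3)                 */
    p256_sub(Y3, S, Y3);          /* Y3 = M*(S - X3) - 8*Y1^4       */
}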
635 add x29,sp,#0
653 cmp x24,#0
656 ldp x14,x15,[x2] // in2_x
660 orr x14,x14,x15
664 orr x14,x14,x16
666 orr x25,x14,x8
667 cmp x25,#0
673 mov x4,x14
678 add x2,x23,#0
682 add x2,x22,#0
731 add x2,x22,#0
735 mov x8,x14
743 add x0,sp,#0
770 ldp x4,x5,[sp,#0] // res
771 ldp x6,x7,[sp,#0+16]
774 ldp x14,x15,[x22,#0] // in1
775 cmp x24,#0 // !in1infty, remember?
776 ldp x16,x17,[x22,#0+16]
779 ldp x4,x5,[sp,#0+0+32] // res
782 cmp x25,#0 // !in2infty, remember?
783 ldp x6,x7,[sp,#0+0+48]
784 csel x14,x8,x14,ne
786 ldp x8,x9,[x23,#0+32] // in2
789 ldp x10,x11,[x23,#0+48]
790 stp x14,x15,[x21,#0]
791 stp x16,x17,[x21,#0+16]
793 ldp x14,x15,[x22,#32] // in1
794 cmp x24,#0 // !in1infty, remember?
798 ldp x4,x5,[sp,#0+32+32] // res
801 cmp x25,#0 // !in2infty, remember?
802 ldp x6,x7,[sp,#0+32+48]
803 csel x14,x8,x14,ne
809 stp x14,x15,[x21,#32]
811 ldp x14,x15,[x22,#64] // in1
812 cmp x24,#0 // !in1infty, remember?
818 cmp x25,#0 // !in2infty, remember?
819 csel x14,x8,x14,ne
823 stp x14,x15,[x21,#64]
826 add sp,x29,#0 // destroy frame
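The tail of point addition (lines 653-823) handles the point at infinity without branching: each input's infinity flag is the OR of its stored x and y limbs (lines 656-667 for in2; infinity is encoded as all-zero words), and the final result is then chosen limb by limb with csel, so timing does not depend on the inputs. A C sketch of the selection primitive (names are mine):

#include <stdint.h>

/* all-ones when v == 0, else 0; the asm gets the same effect from
 * cmp #0 plus csel, which is branchless by construction, while a C
 * compiler is expected, though not guaranteed, to stay branchless */
uint64_t is_zero_mask(uint64_t v)
{
    return (uint64_t)0 - (uint64_t)(v == 0);
}

/* r = take_b ? b : a, limb by limb, the csel pattern above */
void select_limbs(uint64_t r[4], const uint64_t a[4],
                  const uint64_t b[4], uint64_t take_b_mask)
{
    for (int i = 0; i < 4; i++)
        r[i] = (a[i] & ~take_b_mask) | (b[i] & take_b_mask);
}

Per coordinate, the loads and stores at lines 770-823 then amount to res = in1infty ? in2 : res followed by res = in2infty ? in1 : res, selecting the other operand whenever one input is the point at infinity and the computed sum otherwise.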