Diffstat (limited to 'vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6')
-rw-r--r--  vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/aorsmul_1.asm      185
-rw-r--r--  vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/gmp-mparam.h       160
-rw-r--r--  vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/mul_basecase.asm   589
3 files changed, 934 insertions, 0 deletions
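
Note on the first file: aorsmul_1.asm implements mpn_addmul_1 and mpn_submul_1, which multiply the n-limb operand at up by the single limb v0 and then add the product to (or subtract it from) the n limbs at rp, returning the final carry (or borrow) limb. As a reading aid, here is a minimal C sketch of that contract; it is illustrative only, not GMP's build code, the name ref_addmul_1 is invented for this note, and it assumes 64-bit limbs and a compiler providing unsigned __int128.

    /* Reference sketch of the mpn_addmul_1 contract (illustrative only). */
    typedef unsigned long long mp_limb_t;      /* 64-bit limb assumed */
    typedef long mp_size_t;

    mp_limb_t
    ref_addmul_1 (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n, mp_limb_t v0)
    {
      mp_limb_t cy = 0;                        /* running carry limb */
      for (mp_size_t i = 0; i < n; i++)
        {
          unsigned __int128 p = (unsigned __int128) up[i] * v0 + cy;
          mp_limb_t lo = (mp_limb_t) p;        /* low half of the product */
          cy = (mp_limb_t) (p >> 64);          /* high half becomes the carry */
          rp[i] += lo;                         /* accumulate into destination */
          cy += rp[i] < lo;                    /* ripple the carry from that add */
        }
      return cy;                               /* carry out (r3 in the asm) */
    }

mpn_submul_1 is the same loop with the low product subtracted from rp[i] and a borrow limb returned instead; the assembly selects between the two variants with the OPERATION_addmul_1 / OPERATION_submul_1 m4 switches visible below, and runs at about 12.25 and 12.8 cycles per limb respectively on POWER6.
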
| diff --git a/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/aorsmul_1.asm b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/aorsmul_1.asm new file mode 100644 index 0000000..c572b91 --- /dev/null +++ b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/aorsmul_1.asm @@ -0,0 +1,185 @@ +dnl  PowerPC-64 mpn_addmul_1 and mpn_submul_1 optimised for power6. + +dnl  Copyright 1999-2001, 2003-2006, 2008, 2010, 2011 Free Software Foundation, +dnl  Inc. + +dnl  This file is part of the GNU MP Library. +dnl +dnl  The GNU MP Library is free software; you can redistribute it and/or modify +dnl  it under the terms of either: +dnl +dnl    * the GNU Lesser General Public License as published by the Free +dnl      Software Foundation; either version 3 of the License, or (at your +dnl      option) any later version. +dnl +dnl  or +dnl +dnl    * the GNU General Public License as published by the Free Software +dnl      Foundation; either version 2 of the License, or (at your option) any +dnl      later version. +dnl +dnl  or both in parallel, as here. +dnl +dnl  The GNU MP Library is distributed in the hope that it will be useful, but +dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License +dnl  for more details. +dnl +dnl  You should have received copies of the GNU General Public License and the +dnl  GNU Lesser General Public License along with the GNU MP Library.  If not, +dnl  see https://www.gnu.org/licenses/. + +include(`../config.m4') + +C               mpn_addmul_1    mpn_submul_1 +C               cycles/limb     cycles/limb +C POWER3/PPC630     ?               ? +C POWER4/PPC970     ?               ? +C POWER5            ?               ? +C POWER6           12.25           12.8 +C POWER7            ?               ? + +C TODO +C  * Reduce register usage. +C  * Schedule function entry code. +C  * Unroll more.  8-way unrolling would bring us to 10 c/l, 16-way unrolling +C    would bring us to 9 c/l. +C  * Handle n = 1 and perhaps n = 2 separately, without saving any registers. + +C INPUT PARAMETERS +define(`rp',  `r3') +define(`up',  `r4') +define(`n',   `r5') +define(`v0',  `r6') + +ifdef(`OPERATION_addmul_1',` +  define(ADDSUBC,	adde) +  define(ADDSUB,	addc) +  define(func,		mpn_addmul_1) +  define(func_nc,	mpn_addmul_1c)	C FIXME: not really supported +  define(AM,		`$1') +  define(SM,		`') +  define(CLRRSC,	`addic	$1, r0, 0') +') +ifdef(`OPERATION_submul_1',` +  define(ADDSUBC,	subfe) +  define(ADDSUB,	subfc) +  define(func,		mpn_submul_1) +  define(func_nc,	mpn_submul_1c)	C FIXME: not really supported +  define(AM,		`') +  define(SM,		`$1') +  define(CLRRSC,	`subfc	$1, r0, r0') +') + +MULFUNC_PROLOGUE(mpn_addmul_1 mpn_submul_1) + +ASM_START() +PROLOGUE(func) +	std	r31, -8(r1) +	std	r30, -16(r1) +	std	r29, -24(r1) +	std	r28, -32(r1) +	std	r27, -40(r1) + +	rldicl.	r0, n, 0,62	C r0 = n & 3, set cr0 +	cmpdi	cr6, r0, 2 +	addi	n, n, 3		C compute count... 
+	srdi	n, n, 2		C ...for ctr +	mtctr	n		C copy loop count into ctr +	beq	cr0, L(b0) +	blt	cr6, L(b1) +	beq	cr6, L(b2) + +L(b3):	ld	r8, 0(up) +	ld	r7, 8(up) +	ld	r27, 16(up) +	addi	up, up, 16 +	addi	rp, rp, 16 +	mulld	r5,  r8, v0 +	mulhdu	r8,  r8, v0 +	mulld	r9,  r7, v0 +	mulhdu	r7,  r7, v0 +	mulld	r11, r27, v0 +	mulhdu	r27, r27, v0 +	ld	r29, -16(rp) +	ld	r30, -8(rp) +	ld	r31, 0(rp) +	addc	r9, r9, r8 +	adde	r11, r11, r7 +	addze	r12, r27 +	ADDSUB	r5, r5, r29 +	b	L(l3) + +L(b2):	ld	r7, 0(up) +	ld	r27, 8(up) +	addi	up, up, 8 +	addi	rp, rp, 8 +	mulld	r9,  r7, v0 +	mulhdu	r7,  r7, v0 +	mulld	r11, r27, v0 +	mulhdu	r27, r27, v0 +	ld	r30, -8(rp) +	ld	r31, 0(rp) +	addc	r11, r11, r7 +	addze	r12, r27 +	ADDSUB	r9, r9, r30 +	b	L(l2) + +L(b1):	ld	r27, 0(up) +	ld	r31, 0(rp) +	mulld	r11, r27, v0 +	mulhdu	r12, r27, v0 +	ADDSUB	r11, r11, r31 +	b	L(l1) + +L(b0):	addi	up, up, -8 +	addi	rp, rp, -8 +	CLRRSC(	r12)		C clear r12 and clr/set cy + +	ALIGN(32) +L(top): +SM(`	subfe	r11, r0, r0')	C complement... +SM(`	addic	r11, r11, 1')	C ...carry flag +	ld	r10, 8(up) +	ld	r8, 16(up) +	ld	r7, 24(up) +	ld	r27, 32(up) +	addi	up, up, 32 +	addi	rp, rp, 32 +	mulld	r0,  r10, v0 +	mulhdu	r10, r10, v0 +	mulld	r5,  r8, v0 +	mulhdu	r8,  r8, v0 +	mulld	r9,  r7, v0 +	mulhdu	r7,  r7, v0 +	mulld	r11, r27, v0 +	mulhdu	r27, r27, v0 +	ld	r28, -24(rp) +	adde	r0, r0, r12 +	ld	r29, -16(rp) +	adde	r5, r5, r10 +	ld	r30, -8(rp) +	ld	r31, 0(rp) +	adde	r9, r9, r8 +	adde	r11, r11, r7 +	addze	r12, r27 +	ADDSUB	r0, r0, r28 +	std	r0, -24(rp) +	ADDSUBC	r5, r5, r29 +L(l3):	std	r5, -16(rp) +	ADDSUBC	r9, r9, r30 +L(l2):	std	r9, -8(rp) +	ADDSUBC	r11, r11, r31 +L(l1):	std	r11, 0(rp) +	bdnz	L(top) + +AM(`	addze	r3, r12') +SM(`	subfe	r11, r0, r0')		C complement... +	ld	r31, -8(r1) +SM(`	subf	r3, r11, r12') +	ld	r30, -16(r1) +	ld	r29, -24(r1) +	ld	r28, -32(r1) +	ld	r27, -40(r1) +	blr +EPILOGUE() diff --git a/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/gmp-mparam.h new file mode 100644 index 0000000..c7e2f89 --- /dev/null +++ b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/gmp-mparam.h @@ -0,0 +1,160 @@ +/* POWER6 gmp-mparam.h -- Compiler/machine parameter header file. + +Copyright 1991, 1993, 1994, 1999-2003, 2009-2011 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + +  * the GNU Lesser General Public License as published by the Free +    Software Foundation; either version 3 of the License, or (at your +    option) any later version. + +or + +  * the GNU General Public License as published by the Free Software +    Foundation; either version 2 of the License, or (at your option) any +    later version. + +or both in parallel, as here. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library.  If not, +see https://www.gnu.org/licenses/.  
*/ + +#define GMP_LIMB_BITS 64 +#define GMP_LIMB_BYTES 8 + +/* 3500 MHz POWER6 (kolga.bibsys.no) */ + +#define MOD_1_NORM_THRESHOLD                 0  /* always */ +#define MOD_1_UNNORM_THRESHOLD               0  /* always */ +#define MOD_1N_TO_MOD_1_1_THRESHOLD          3 +#define MOD_1U_TO_MOD_1_1_THRESHOLD          3 +#define MOD_1_1_TO_MOD_1_2_THRESHOLD        12 +#define MOD_1_2_TO_MOD_1_4_THRESHOLD         0  /* never mpn_mod_1s_2p */ +#define PREINV_MOD_1_TO_MOD_1_THRESHOLD      6 +#define USE_PREINV_DIVREM_1                  0 +#define DIV_QR_2_PI2_THRESHOLD           MP_SIZE_T_MAX  /* never */ +#define DIVEXACT_1_THRESHOLD                 0  /* always (native) */ +#define BMOD_1_TO_MOD_1_THRESHOLD           21 + +#define MUL_TOOM22_THRESHOLD                20 +#define MUL_TOOM33_THRESHOLD                50 +#define MUL_TOOM44_THRESHOLD               106 +#define MUL_TOOM6H_THRESHOLD               274 +#define MUL_TOOM8H_THRESHOLD               339 + +#define MUL_TOOM32_TO_TOOM43_THRESHOLD      62 +#define MUL_TOOM32_TO_TOOM53_THRESHOLD      76 +#define MUL_TOOM42_TO_TOOM53_THRESHOLD      73 +#define MUL_TOOM42_TO_TOOM63_THRESHOLD      66 +#define MUL_TOOM43_TO_TOOM54_THRESHOLD      88 + +#define SQR_BASECASE_THRESHOLD               0  /* always (native) */ +#define SQR_TOOM2_THRESHOLD                 24 +#define SQR_TOOM3_THRESHOLD                 49 +#define SQR_TOOM4_THRESHOLD                130 +#define SQR_TOOM6_THRESHOLD                226 +#define SQR_TOOM8_THRESHOLD                272 + +#define MULMID_TOOM42_THRESHOLD             36 + +#define MULMOD_BNM1_THRESHOLD               14 +#define SQRMOD_BNM1_THRESHOLD               14 + +#define MUL_FFT_MODF_THRESHOLD             380  /* k = 5 */ +#define MUL_FFT_TABLE3                                      \ +  { {    340, 5}, {     19, 6}, {     10, 5}, {     21, 6}, \ +    {     11, 5}, {     23, 6}, {     21, 7}, {     11, 6}, \ +    {     23, 7}, {     12, 6}, {     25, 7}, {     21, 8}, \ +    {     11, 7}, {     24, 8}, {     13, 7}, {     27, 8}, \ +    {     21, 9}, {     11, 8}, {     25, 9}, {     15, 8}, \ +    {     33, 9}, {     23, 8}, {     47, 9}, {     27,10}, \ +    {     15, 9}, {     39,10}, {     23, 9}, {     47,11}, \ +    {     15,10}, {     31, 9}, {     63,10}, {     47,11}, \ +    {     31,10}, {     71,11}, {     47,12}, {     31,11}, \ +    {     63,10}, {    127, 9}, {    255, 8}, {    511,10}, \ +    {    135, 9}, {    271,11}, {     79, 9}, {    319, 8}, \ +    {    639,10}, {    175,11}, {     95,10}, {    191, 9}, \ +    {    383,10}, {    207,12}, {     63,10}, {    255, 9}, \ +    {    511,10}, {    271, 9}, {    543,11}, {    143,10}, \ +    {    287, 9}, {    575,10}, {    303, 9}, {    607,10}, \ +    {    319, 9}, {    639,11}, {    175,12}, {     95,11}, \ +    {    191,10}, {    383,11}, {    207,10}, {    415,13}, \ +    {   8192,14}, {  16384,15}, {  32768,16}, {  65536,17}, \ +    { 131072,18}, { 262144,19}, { 524288,20}, {1048576,21}, \ +    {2097152,22}, {4194304,23}, {8388608,24} } +#define MUL_FFT_TABLE3_SIZE 79 +#define MUL_FFT_THRESHOLD                 3520 + +#define SQR_FFT_MODF_THRESHOLD             308  /* k = 5 */ +#define SQR_FFT_TABLE3                                      \ +  { {    280, 5}, {     17, 6}, {      9, 5}, {     19, 6}, \ +    {     21, 7}, {     11, 6}, {     23, 7}, {     21, 8}, \ +    {     11, 7}, {     24, 8}, {     13, 7}, {     27, 8}, \ +    {     21, 9}, {     11, 8}, {     25, 9}, {     15, 8}, \ +    {     33, 9}, {     19, 8}, {     39, 9}, {    
 23, 8}, \ +    {     47, 9}, {     27,10}, {     15, 9}, {     39,10}, \ +    {     23, 9}, {     47,11}, {     15,10}, {     31, 9}, \ +    {     63,10}, {     47,11}, {     31,10}, {     71, 9}, \ +    {    143,11}, {     47,12}, {     31,11}, {     63, 9}, \ +    {    255, 8}, {    511, 9}, {    271,10}, {    143,11}, \ +    {     79,10}, {    159, 9}, {    319,10}, {    175, 9}, \ +    {    351,11}, {     95,10}, {    191, 9}, {    383,10}, \ +    {    207,12}, {     63,11}, {    127,10}, {    255, 9}, \ +    {    511, 8}, {   1023,10}, {    271, 9}, {    543,11}, \ +    {    143,10}, {    287, 9}, {    575,11}, {    159,10}, \ +    {    319, 9}, {    639,11}, {    175,10}, {    351,12}, \ +    {     95,11}, {    191,10}, {    383,11}, {    207,10}, \ +    {    415,13}, {   8192,14}, {  16384,15}, {  32768,16}, \ +    {  65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \ +    {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} } +#define SQR_FFT_TABLE3_SIZE 80 +#define SQR_FFT_THRESHOLD                 2752 + +#define MULLO_BASECASE_THRESHOLD             5 +#define MULLO_DC_THRESHOLD                  62 +#define MULLO_MUL_N_THRESHOLD             2995 + +#define DC_DIV_QR_THRESHOLD                 59 +#define DC_DIVAPPR_Q_THRESHOLD             200 +#define DC_BDIV_QR_THRESHOLD                70 +#define DC_BDIV_Q_THRESHOLD                168 + +#define INV_MULMOD_BNM1_THRESHOLD           53 +#define INV_NEWTON_THRESHOLD               170 +#define INV_APPR_THRESHOLD                 166 + +#define BINV_NEWTON_THRESHOLD              220 +#define REDC_1_TO_REDC_N_THRESHOLD          67 + +#define MU_DIV_QR_THRESHOLD                998 +#define MU_DIVAPPR_Q_THRESHOLD             942 +#define MUPI_DIV_QR_THRESHOLD               57 +#define MU_BDIV_QR_THRESHOLD               889 +#define MU_BDIV_Q_THRESHOLD               1078 + +#define POWM_SEC_TABLE  4,26,216,804,1731 + +#define MATRIX22_STRASSEN_THRESHOLD         13 +#define HGCD_THRESHOLD                     106 +#define HGCD_APPR_THRESHOLD                109 +#define HGCD_REDUCE_THRESHOLD             2205 +#define GCD_DC_THRESHOLD                   492 +#define GCDEXT_DC_THRESHOLD                327 +#define JACOBI_BASE_METHOD                   4 + +#define GET_STR_DC_THRESHOLD                16 +#define GET_STR_PRECOMPUTE_THRESHOLD        28 +#define SET_STR_DC_THRESHOLD               537 +#define SET_STR_PRECOMPUTE_THRESHOLD      1576 + +#define FAC_DSC_THRESHOLD                  426 +#define FAC_ODD_THRESHOLD                    0  /* always */ diff --git a/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/mul_basecase.asm new file mode 100644 index 0000000..3d32b46 --- /dev/null +++ b/vendor/gmp-6.3.0/mpn/powerpc64/mode64/p6/mul_basecase.asm @@ -0,0 +1,589 @@ +dnl  PowerPC-64 mpn_mul_basecase. + +dnl  Copyright 1999-2001, 2003-2006, 2008, 2010 Free Software Foundation, Inc. + +dnl  This file is part of the GNU MP Library. +dnl +dnl  The GNU MP Library is free software; you can redistribute it and/or modify +dnl  it under the terms of either: +dnl +dnl    * the GNU Lesser General Public License as published by the Free +dnl      Software Foundation; either version 3 of the License, or (at your +dnl      option) any later version. +dnl +dnl  or +dnl +dnl    * the GNU General Public License as published by the Free Software +dnl      Foundation; either version 2 of the License, or (at your option) any +dnl      later version. +dnl +dnl  or both in parallel, as here. 
+dnl +dnl  The GNU MP Library is distributed in the hope that it will be useful, but +dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License +dnl  for more details. +dnl +dnl  You should have received copies of the GNU General Public License and the +dnl  GNU Lesser General Public License along with the GNU MP Library.  If not, +dnl  see https://www.gnu.org/licenses/. + +include(`../config.m4') + +C		    cycles/limb +C POWER3/PPC630		 ? +C POWER4/PPC970		 ? +C POWER5		 ? +C POWER6		12.25 + +C TODO +C  * Reduce register usage.  At least 4 register less can be used. +C  * Unroll more.  8-way unrolling would bring us to 10 c/l, 16-way unrolling +C    would bring us to 9 c/l. +C  * The bdz insns for b1 and b2 will never branch, +C  * Align things better, perhaps by moving things like pointer updates from +C    before to after loops. + +C INPUT PARAMETERS +define(`rp', `r3') +define(`up', `r4') +define(`un', `r5') +define(`vp', `r6') +define(`vn', `r7') + +define(`v0',	   `r25') +define(`outer_rp', `r22') +define(`outer_up', `r23') + +ASM_START() +PROLOGUE(mpn_mul_basecase) + +C Special code for un <= 2, for efficiency of these important cases, +C and since it simplifies the default code. +	cmpdi	cr0, un, 2 +	bgt	cr0, L(un_gt2) +	cmpdi	cr6, vn, 1 +	ld	r7, 0(vp) +	ld	r5, 0(up) +	mulld	r8, r5, r7	C weight 0 +	mulhdu	r9, r5, r7	C weight 1 +	std	r8, 0(rp) +	beq	cr0, L(2x) +	std	r9, 8(rp) +	blr +	ALIGN(16) +L(2x):	ld	r0, 8(up) +	mulld	r8, r0, r7	C weight 1 +	mulhdu	r10, r0, r7	C weight 2 +	addc	r9, r9, r8 +	addze	r10, r10 +	bne	cr6, L(2x2) +	std	r9, 8(rp) +	std	r10, 16(rp) +	blr +	ALIGN(16) +L(2x2):	ld	r6, 8(vp) +	nop +	mulld	r8, r5, r6	C weight 1 +	mulhdu	r11, r5, r6	C weight 2 +	mulld	r12, r0, r6	C weight 2 +	mulhdu	r0, r0, r6	C weight 3 +	addc	r9, r9, r8 +	std	r9, 8(rp) +	adde	r11, r11, r10 +	addze	r0, r0 +	addc	r11, r11, r12 +	addze	r0, r0 +	std	r11, 16(rp) +	std	r0, 24(rp) +	blr + +L(un_gt2): +	std	r31, -8(r1) +	std	r30, -16(r1) +	std	r29, -24(r1) +	std	r28, -32(r1) +	std	r27, -40(r1) +	std	r26, -48(r1) +	std	r25, -56(r1) +	std	r24, -64(r1) +	std	r23, -72(r1) +	std	r22, -80(r1) +	std	r21, -88(r1) +	std	r20, -96(r1) + +	mr	outer_rp, rp +	mr	outer_up, up + +	ld	v0, 0(vp)	C new v limb +	addi	vp, vp, 8 +	ld	r26, 0(up) + +	rldicl.	r0, un, 0,62	C r0 = n & 3, set cr0 +	cmpdi	cr6, r0, 2 +	addi	un, un, 4	C compute count... +	srdi	un, un, 2	C ...for ctr +	mtctr	un		C copy inner loop count into ctr +	beq	cr0, L(b0) +	blt	cr6, L(b1) +	beq	cr6, L(b2) + + +	ALIGN(16) +L(b3): +	ld	r27, 8(up) +	ld	r20, 16(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r10, r20, v0 +	addc	r24, r24, r31 +	adde	r9, r9, r8 +	addze	r12, r10 +	std	r0, 0(rp) +	std	r24, 8(rp) +	std	r9, 16(rp) +	addi	up, up, 16 +	addi	rp, rp, 16 +	bdz	L(end_m_3) + +	ALIGN(32) +L(lo_m_3): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up) +	ld	r21, 32(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r27, r20, v0 +	mulld	r11, r21, v0 +	mulhdu	r26, r21, v0 +	adde	r0, r0, r12 +	adde	r24, r24, r31 +	std	r0, 8(rp) +	adde	r9, r9, r8 +	std	r24, 16(rp) +	adde	r11, r11, r27 +	std	r9, 24(rp) +	addi	up, up, 32 +	std	r11, 32(rp) +	addi	rp, rp, 32 +	mr	r12, r26 +	bdnz	L(lo_m_3) + +	ALIGN(16) +L(end_m_3): +	addze	r12, r12 +	addic.	
vn, vn, -1 +	std	r12, 8(rp) +	beq	L(ret) + +	ALIGN(16) +L(outer_lo_3): +	mtctr	un		C copy inner loop count into ctr +	addi	rp, outer_rp, 24 +	addi	up, outer_up, 16 +	addi	outer_rp, outer_rp, 8 +	ld	v0, 0(vp)	C new v limb +	addi	vp, vp, 8 +	ld	r26, -16(up) +	ld	r27, -8(up) +	ld	r20, 0(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r10, r20, v0 +	ld	r28, -16(rp) +	ld	r29, -8(rp) +	ld	r30, 0(rp) +	addc	r24, r24, r31 +	adde	r9, r9, r8 +	addze	r12, r10 +	addc	r0, r0, r28 +	std	r0, -16(rp) +	adde	r24, r24, r29 +	std	r24, -8(rp) +	adde	r9, r9, r30 +	std	r9, 0(rp) +	bdz	L(end_3) + +	ALIGN(32)		C registers dying +L(lo_3): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up)	C +	ld	r21, 32(up)	C +	addi	up, up, 32	C +	addi	rp, rp, 32	C +	mulld	r0, r26, v0	C +	mulhdu	r10, r26, v0	C 26 +	mulld	r24, r27, v0	C +	mulhdu	r8, r27, v0	C 27 +	mulld	r9, r20, v0	C +	mulhdu	r27, r20, v0	C 26 +	mulld	r11, r21, v0	C +	mulhdu	r26, r21, v0	C 27 +	ld	r28, -24(rp)	C +	adde	r0, r0, r12	C 0 12 +	ld	r29, -16(rp)	C +	adde	r24, r24, r10	C 24 10 +	ld	r30, -8(rp)	C +	ld	r31, 0(rp)	C +	adde	r9, r9, r8	C 8 9 +	adde	r11, r11, r27	C 27 11 +	addze	r12, r26	C 26 +	addc	r0, r0, r28	C 0 28 +	std	r0, -24(rp)	C 0 +	adde	r24, r24, r29	C 7 29 +	std	r24, -16(rp)	C 7 +	adde	r9, r9, r30	C 9 30 +	std	r9, -8(rp)	C 9 +	adde	r11, r11, r31	C 11 31 +	std	r11, 0(rp)	C 11 +	bdnz	L(lo_3)		C + +	ALIGN(16) +L(end_3): +	addze	r12, r12 +	addic.	vn, vn, -1 +	std	r12, 8(rp) +	bne	L(outer_lo_3) +	b	L(ret) + + +	ALIGN(16) +L(b1): +	mulld	r0, r26, v0 +	mulhdu	r12, r26, v0 +	addic	r0, r0, 0 +	std	r0, 0(rp) +	bdz	L(end_m_1) + +	ALIGN(16) +L(lo_m_1): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up) +	ld	r21, 32(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r27, r20, v0 +	mulld	r11, r21, v0 +	mulhdu	r26, r21, v0 +	adde	r0, r0, r12 +	adde	r24, r24, r31 +	std	r0, 8(rp) +	adde	r9, r9, r8 +	std	r24, 16(rp) +	adde	r11, r11, r27 +	std	r9, 24(rp) +	addi	up, up, 32 +	std	r11, 32(rp) +	addi	rp, rp, 32 +	mr	r12, r26 +	bdnz	L(lo_m_1) + +	ALIGN(16) +L(end_m_1): +	addze	r12, r12 +	addic.	vn, vn, -1 +	std	r12, 8(rp) +	beq	L(ret) + +	ALIGN(16) +L(outer_lo_1): +	mtctr	un		C copy inner loop count into ctr +	addi	rp, outer_rp, 8 +	mr	up, outer_up +	addi	outer_rp, outer_rp, 8 +	ld	v0, 0(vp)	C new v limb +	addi	vp, vp, 8 +	ld	r26, 0(up) +	ld	r28, 0(rp) +	mulld	r0, r26, v0 +	mulhdu	r12, r26, v0 +	addc	r0, r0, r28 +	std	r0, 0(rp) +	bdz	L(end_1) + +	ALIGN(32)		C registers dying +L(lo_1): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up)	C +	ld	r21, 32(up)	C +	addi	up, up, 32	C +	addi	rp, rp, 32	C +	mulld	r0, r26, v0	C +	mulhdu	r10, r26, v0	C 26 +	mulld	r24, r27, v0	C +	mulhdu	r8, r27, v0	C 27 +	mulld	r9, r20, v0	C +	mulhdu	r27, r20, v0	C 26 +	mulld	r11, r21, v0	C +	mulhdu	r26, r21, v0	C 27 +	ld	r28, -24(rp)	C +	adde	r0, r0, r12	C 0 12 +	ld	r29, -16(rp)	C +	adde	r24, r24, r10	C 24 10 +	ld	r30, -8(rp)	C +	ld	r31, 0(rp)	C +	adde	r9, r9, r8	C 8 9 +	adde	r11, r11, r27	C 27 11 +	addze	r12, r26	C 26 +	addc	r0, r0, r28	C 0 28 +	std	r0, -24(rp)	C 0 +	adde	r24, r24, r29	C 7 29 +	std	r24, -16(rp)	C 7 +	adde	r9, r9, r30	C 9 30 +	std	r9, -8(rp)	C 9 +	adde	r11, r11, r31	C 11 31 +	std	r11, 0(rp)	C 11 +	bdnz	L(lo_1)		C + +	ALIGN(16) +L(end_1): +	addze	r12, r12 +	addic.	
vn, vn, -1 +	std	r12, 8(rp) +	bne	L(outer_lo_1) +	b	L(ret) + + +	ALIGN(16) +L(b0): +	addi	up, up, -8 +	addi	rp, rp, -8 +	li	r12, 0 +	addic	r12, r12, 0 +	bdz	L(end_m_0) + +	ALIGN(16) +L(lo_m_0): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up) +	ld	r21, 32(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r27, r20, v0 +	mulld	r11, r21, v0 +	mulhdu	r26, r21, v0 +	adde	r0, r0, r12 +	adde	r24, r24, r31 +	std	r0, 8(rp) +	adde	r9, r9, r8 +	std	r24, 16(rp) +	adde	r11, r11, r27 +	std	r9, 24(rp) +	addi	up, up, 32 +	std	r11, 32(rp) +	addi	rp, rp, 32 +	mr	r12, r26 +	bdnz	L(lo_m_0) + +	ALIGN(16) +L(end_m_0): +	addze	r12, r12 +	addic.	vn, vn, -1 +	std	r12, 8(rp) +	beq	L(ret) + +	ALIGN(16) +L(outer_lo_0): +	mtctr	un		C copy inner loop count into ctr +	addi	rp, outer_rp, 0 +	addi	up, outer_up, -8 +	addi	outer_rp, outer_rp, 8 +	ld	v0, 0(vp)	C new v limb +	addi	vp, vp, 8 +	li	r12, 0 +	addic	r12, r12, 0 +	bdz	L(end_0) + +	ALIGN(32)		C registers dying +L(lo_0): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up)	C +	ld	r21, 32(up)	C +	addi	up, up, 32	C +	addi	rp, rp, 32	C +	mulld	r0, r26, v0	C +	mulhdu	r10, r26, v0	C 26 +	mulld	r24, r27, v0	C +	mulhdu	r8, r27, v0	C 27 +	mulld	r9, r20, v0	C +	mulhdu	r27, r20, v0	C 26 +	mulld	r11, r21, v0	C +	mulhdu	r26, r21, v0	C 27 +	ld	r28, -24(rp)	C +	adde	r0, r0, r12	C 0 12 +	ld	r29, -16(rp)	C +	adde	r24, r24, r10	C 24 10 +	ld	r30, -8(rp)	C +	ld	r31, 0(rp)	C +	adde	r9, r9, r8	C 8 9 +	adde	r11, r11, r27	C 27 11 +	addze	r12, r26	C 26 +	addc	r0, r0, r28	C 0 28 +	std	r0, -24(rp)	C 0 +	adde	r24, r24, r29	C 7 29 +	std	r24, -16(rp)	C 7 +	adde	r9, r9, r30	C 9 30 +	std	r9, -8(rp)	C 9 +	adde	r11, r11, r31	C 11 31 +	std	r11, 0(rp)	C 11 +	bdnz	L(lo_0)		C + +	ALIGN(16) +L(end_0): +	addze	r12, r12 +	addic.	vn, vn, -1 +	std	r12, 8(rp) +	bne	L(outer_lo_0) +	b	L(ret) + + +	ALIGN(16) +L(b2):	ld	r27, 8(up) +	addi	up, up, 8 +	mulld	r0, r26, v0 +	mulhdu	r10, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	addc	r24, r24, r10 +	addze	r12, r8 +	std	r0, 0(rp) +	std	r24, 8(rp) +	addi	rp, rp, 8 +	bdz	L(end_m_2) + +	ALIGN(16) +L(lo_m_2): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up) +	ld	r21, 32(up) +	mulld	r0, r26, v0 +	mulhdu	r31, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	mulld	r9, r20, v0 +	mulhdu	r27, r20, v0 +	mulld	r11, r21, v0 +	mulhdu	r26, r21, v0 +	adde	r0, r0, r12 +	adde	r24, r24, r31 +	std	r0, 8(rp) +	adde	r9, r9, r8 +	std	r24, 16(rp) +	adde	r11, r11, r27 +	std	r9, 24(rp) +	addi	up, up, 32 +	std	r11, 32(rp) +	addi	rp, rp, 32 +	mr	r12, r26 +	bdnz	L(lo_m_2) + +	ALIGN(16) +L(end_m_2): +	addze	r12, r12 +	addic.	
vn, vn, -1 +	std	r12, 8(rp) +	beq	L(ret) + +	ALIGN(16) +L(outer_lo_2): +	mtctr	un		C copy inner loop count into ctr +	addi	rp, outer_rp, 16 +	addi	up, outer_up, 8 +	addi	outer_rp, outer_rp, 8 +	ld	v0, 0(vp)	C new v limb +	addi	vp, vp, 8 +	ld	r26, -8(up) +	ld	r27, 0(up) +	ld	r28, -8(rp) +	ld	r29, 0(rp) +	mulld	r0, r26, v0 +	mulhdu	r10, r26, v0 +	mulld	r24, r27, v0 +	mulhdu	r8, r27, v0 +	addc	r24, r24, r10 +	addze	r12, r8 +	addc	r0, r0, r28 +	std	r0, -8(rp) +	adde	r24, r24, r29 +	std	r24, 0(rp) +	bdz	L(end_2) + +	ALIGN(16)		C registers dying +L(lo_2): +	ld	r26, 8(up) +	ld	r27, 16(up) +	ld	r20, 24(up)	C +	ld	r21, 32(up)	C +	addi	up, up, 32	C +	addi	rp, rp, 32	C +	mulld	r0, r26, v0	C +	mulhdu	r10, r26, v0	C 26 +	mulld	r24, r27, v0	C +	mulhdu	r8, r27, v0	C 27 +	mulld	r9, r20, v0	C +	mulhdu	r27, r20, v0	C 26 +	mulld	r11, r21, v0	C +	mulhdu	r26, r21, v0	C 27 +	ld	r28, -24(rp)	C +	adde	r0, r0, r12	C 0 12 +	ld	r29, -16(rp)	C +	adde	r24, r24, r10	C 24 10 +	ld	r30, -8(rp)	C +	ld	r31, 0(rp)	C +	adde	r9, r9, r8	C 8 9 +	adde	r11, r11, r27	C 27 11 +	addze	r12, r26	C 26 +	addc	r0, r0, r28	C 0 28 +	std	r0, -24(rp)	C 0 +	adde	r24, r24, r29	C 7 29 +	std	r24, -16(rp)	C 7 +	adde	r9, r9, r30	C 9 30 +	std	r9, -8(rp)	C 9 +	adde	r11, r11, r31	C 11 31 +	std	r11, 0(rp)	C 11 +	bdnz	L(lo_2)		C + +	ALIGN(16) +L(end_2): +	addze	r12, r12 +	addic.	vn, vn, -1 +	std	r12, 8(rp) +	bne	L(outer_lo_2) +C	b	L(ret) + +L(ret):	ld	r31, -8(r1) +	ld	r30, -16(r1) +	ld	r29, -24(r1) +	ld	r28, -32(r1) +	ld	r27, -40(r1) +	ld	r26, -48(r1) +	ld	r25, -56(r1) +	ld	r24, -64(r1) +	ld	r23, -72(r1) +	ld	r22, -80(r1) +	ld	r21, -88(r1) +	ld	r20, -96(r1) +	blr +EPILOGUE() |
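
Note on the third file: mul_basecase.asm is the schoolbook O(un x vn) multiply used for operand sizes below the Toom thresholds listed in gmp-mparam.h above. Its organisation is one plain multiply pass for vp[0] followed by one accumulating pass per remaining v limb, each shifted one limb further along rp. A minimal C sketch of that organisation, reusing the hypothetical ref_addmul_1 from the note before the diff (again illustrative only, not GMP's build code):

    /* Reference sketch of the mpn_mul_basecase contract (illustrative only).
       Requires un >= vn >= 1 and room for un + vn limbs at rp. */
    void
    ref_mul_basecase (mp_limb_t *rp, const mp_limb_t *up, mp_size_t un,
                      const mp_limb_t *vp, mp_size_t vn)
    {
      mp_limb_t cy = 0;
      for (mp_size_t i = 0; i < un; i++)       /* first pass: rp[] = up[] * vp[0] */
        {
          unsigned __int128 p = (unsigned __int128) up[i] * vp[0] + cy;
          rp[i] = (mp_limb_t) p;
          cy = (mp_limb_t) (p >> 64);
        }
      rp[un] = cy;

      for (mp_size_t j = 1; j < vn; j++)       /* one accumulating pass per v limb */
        rp[un + j] = ref_addmul_1 (rp + j, up, un, vp[j]);
    }

The assembly gets its speed from 4-way unrolling of every pass rather than from a different algorithm: the rldicl./cmpdi sequence at the top dispatches on un mod 4 to one of the b0/b1/b2/b3 entry points, and the outer_lo_* blocks restart the unrolled inner loop for each new v limb.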