Diffstat (limited to 'vendor/gmp-6.3.0/mpn/x86_64/alderlake')
-rw-r--r--   vendor/gmp-6.3.0/mpn/x86_64/alderlake/addmul_1.asm       168
-rw-r--r--   vendor/gmp-6.3.0/mpn/x86_64/alderlake/gmp-mparam.h       225
-rw-r--r--   vendor/gmp-6.3.0/mpn/x86_64/alderlake/mul_basecase.asm   474
-rw-r--r--   vendor/gmp-6.3.0/mpn/x86_64/alderlake/submul_1.asm       140
4 files changed, 1007 insertions, 0 deletions
diff --git a/vendor/gmp-6.3.0/mpn/x86_64/alderlake/addmul_1.asm b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/addmul_1.asm
new file mode 100644
index 0000000..d105da6
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/addmul_1.asm
@@ -0,0 +1,168 @@
+dnl  AMD64 mpn_addmul_1 for CPUs with mulx and adx.
+
+dnl  Contributed to the GNU project by Torbjörn Granlund.
+
+dnl  Copyright 2012, 2013, 2022 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+dnl
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of either:
+dnl
+dnl    * the GNU Lesser General Public License as published by the Free
+dnl      Software Foundation; either version 3 of the License, or (at your
+dnl      option) any later version.
+dnl
+dnl  or
+dnl
+dnl    * the GNU General Public License as published by the Free Software
+dnl      Foundation; either version 2 of the License, or (at your option) any
+dnl      later version.
+dnl
+dnl  or both in parallel, as here.
+dnl
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+dnl  for more details.
+dnl
+dnl  You should have received copies of the GNU General Public License and the
+dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
+dnl  see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C            cycles/limb
+C AMD K8,K9      -
+C AMD K10        -
+C AMD bd1        -
+C AMD bd2        -
+C AMD bd3        -
+C AMD bd4        -
+C AMD zn1        ?
+C AMD zn2        ?
+C AMD zn3        ?
+C AMD bt1        -
+C AMD bt2        -
+C Intel P4       -
+C Intel CNR      -
+C Intel PNR      -
+C Intel NHM      -
+C Intel WSM      -
+C Intel SBR      -
+C Intel IBR      -
+C Intel HWL      -
+C Intel BWL      ?
+C Intel SKL      ?
+C Intel RKL      ?
+C Intel ALD      1.29
+C Intel atom     -
+C Intel SLM      -
+C Intel GLM      -
+C VIA nano       -
+
+define(`rp', `%rdi')   dnl rcx
+define(`up', `%rsi')   dnl rdx
+define(`n_param', `%rdx')   dnl r8
+define(`v0_param',`%rcx')   dnl r9
+
+define(`n', `%rcx')   dnl
+define(`v0', `%rdx')   dnl
+
+
+ASM_START()
+	TEXT
+	ALIGN(16)
+PROLOGUE(mpn_addmul_1)
+	mov (up), %r8
+
+	push %rbx
+	push %r12
+	push %r13
+
+	mov %rdx, %rax
+	mov %rcx, v0
+	mov %rax, n
+
+	and $3, R8(%rax)
+	jz L(b0)
+	cmp $2, R8(%rax)
+	jl L(b1)
+	jz L(b2)
+
+L(b3):	mulx( %r8, %r11, %r10)
+	mulx( 8,(up), %r13, %r12)
+	mulx( 16,(up), %rbx, %rax)
+	inc n
+	lea -8(up), up
+	lea -24(rp), rp
+	jmp L(lo3)
+
+L(b0):	mulx( %r8, %r9, %r8)
+	mulx( 8,(up), %r11, %r10)
+	mulx( 16,(up), %r13, %r12)
+	lea -16(rp), rp
+	jmp L(lo0)
+
+L(b2):	mulx( %r8, %r13, %r12)
+	mulx( 8,(up), %rbx, %rax)
+	lea -2(n), n
+	jrcxz L(n2)
+	mulx( 16,(up), %r9, %r8)
+	lea 16(up), up
+	jmp L(lo2)
+L(n2):	jmp L(wd2)
+
+L(b1):	mulx( %r8, %rbx, %rax)
+	sub $1, n
+	jrcxz L(n1)
+	mulx( 8,(up), %r9, %r8)
+	mulx( 16,(up), %r11, %r10)
+	lea 8(up), up
+	lea -8(rp), rp
+	jmp L(lo1)
+L(n1):	add (rp), %rbx
+	adc %rcx, %rax
+	mov %rbx, (rp)
+	pop %r13
+	pop %r12
+	pop %rbx
+	ret
+
+L(top):	mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+L(lo2):	adox( (rp), %r13)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+L(lo1):	adox( 8,(rp), %rbx)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+L(lo0):	adox( 16,(rp), %r9)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+L(lo3):	adox( 24,(rp), %r11)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea -4(n), n
+	jrcxz L(end)
+	jmp L(top)
+
+L(end):	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+L(wd2):	adox( (rp), %r13)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	adcx( %rcx, %rax)
+	adox( %rcx, %rax)
+	mov %rbx, 8(rp)
+	pop %r13
+	pop %r12
+	pop %rbx
+	ret
+EPILOGUE()
+ASM_END()
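For orientation, the routine above computes {rp,n} += {up,n} * v0 and returns the carried-out limb, unrolled four limbs per iteration and using mulx together with adcx/adox so that two independent carry chains (CF and OF) run in parallel. Below is a minimal portable C model of that operation; it is illustrative only and not part of this diff, it assumes a compiler with unsigned __int128 (GCC/Clang), and the names mp_limb_t and ref_addmul_1 are placeholders rather than GMP's own.

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t mp_limb_t;   /* one 64-bit limb, matching GMP_LIMB_BITS 64 */

    /* Reference model of mpn_addmul_1: {rp,n} += {up,n} * v0, return the carry limb.
       Hypothetical helper for illustration; the tuned routine is the assembly above. */
    static mp_limb_t ref_addmul_1(mp_limb_t *rp, const mp_limb_t *up, size_t n, mp_limb_t v0)
    {
        mp_limb_t carry = 0;
        for (size_t i = 0; i < n; i++) {
            unsigned __int128 p = (unsigned __int128)up[i] * v0 + rp[i] + carry;
            rp[i] = (mp_limb_t)p;           /* low limb of the accumulated product */
            carry = (mp_limb_t)(p >> 64);   /* high limb propagates to the next position */
        }
        return carry;
    }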
diff --git a/vendor/gmp-6.3.0/mpn/x86_64/alderlake/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/gmp-mparam.h
new file mode 100644
index 0000000..0bffc3d
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/gmp-mparam.h
@@ -0,0 +1,225 @@
+/* Intel Alder Lake gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 2022 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+  * the GNU Lesser General Public License as published by the Free
+    Software Foundation; either version 3 of the License, or (at your
+    option) any later version.
+
+or
+
+  * the GNU General Public License as published by the Free Software
+    Foundation; either version 2 of the License, or (at your option) any
+    later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library.  If not,
+see https://www.gnu.org/licenses/.  */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+/* Disable use of slow functions.  FIXME: We should disable lib inclusion.  */
+#undef HAVE_NATIVE_mpn_mul_2
+#undef HAVE_NATIVE_mpn_addmul_2
+
+/* 3700-4900 MHz Alder Lake */
+/* FFT tuning limit = 10,000,000 */
+/* Generated by tuneup.c, 2022-03-15, gcc 11.2 */
+
+#define MOD_1_NORM_THRESHOLD             MP_SIZE_T_MAX  /* never */
+#define MOD_1_UNNORM_THRESHOLD           MP_SIZE_T_MAX  /* never */
+#define MOD_1N_TO_MOD_1_1_THRESHOLD          5
+#define MOD_1U_TO_MOD_1_1_THRESHOLD          4
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD        12
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD        23
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD      8
+#define USE_PREINV_DIVREM_1                  1  /* native */
+#define DIV_QR_1_NORM_THRESHOLD             34
+#define DIV_QR_1_UNNORM_THRESHOLD        MP_SIZE_T_MAX  /* never */
+#define DIV_QR_2_PI2_THRESHOLD              30
+#define DIVEXACT_1_THRESHOLD                 0  /* always (native) */
+#define BMOD_1_TO_MOD_1_THRESHOLD           23
+
+#define DIV_1_VS_MUL_1_PERCENT             559
+
+#define MUL_TOOM22_THRESHOLD                13
+#define MUL_TOOM33_THRESHOLD                97
+#define MUL_TOOM44_THRESHOLD               148
+#define MUL_TOOM6H_THRESHOLD               562
+#define MUL_TOOM8H_THRESHOLD               608
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD      97
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD     259
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD      98
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD      98
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD     144
+
+#define SQR_BASECASE_THRESHOLD               0  /* always (native) */
+#define SQR_TOOM2_THRESHOLD                 24
+#define SQR_TOOM3_THRESHOLD                 86
+#define SQR_TOOM4_THRESHOLD                582
+#define SQR_TOOM6_THRESHOLD                  0  /* always */
+#define SQR_TOOM8_THRESHOLD                753
+
+#define MULMID_TOOM42_THRESHOLD             40
+
+#define MULMOD_BNM1_THRESHOLD               13
+#define SQRMOD_BNM1_THRESHOLD               16
+
+#define MUL_FFT_MODF_THRESHOLD             384  /* k = 5 */
+#define MUL_FFT_TABLE3 \
+  { { 384, 5}, { 21, 6}, { 11, 5}, { 23, 6}, \
+    { 21, 7}, { 11, 6}, { 24, 7}, { 24, 8}, \
+    { 13, 7}, { 27, 8}, { 15, 7}, { 31, 8}, \
+    { 23, 7}, { 47, 8}, { 27, 9}, { 15, 8}, \
+    { 33, 9}, { 19, 8}, { 39, 9}, { 23, 8}, \
+    { 49, 9}, { 27,10}, { 15, 9}, { 31, 8}, \
+    { 63, 9}, { 39,10}, { 23, 9}, { 51,11}, \
+    { 15,10}, { 31, 9}, { 71,10}, { 39, 9}, \
+    { 83,10}, { 47, 9}, { 95,10}, { 55,11}, \
+    { 31,10}, { 79,11}, { 47,10}, { 95,12}, \
+    { 31,11}, { 63,10}, { 127, 9}, { 255, 8}, \
+    { 511,10}, { 135,11}, { 79, 9}, { 319, 8}, \
+    { 639, 9}, { 335, 8}, { 671,11}, { 95,12}, \
+    { 63,11}, { 127,10}, { 255, 9}, { 511,10}, \
+    { 271, 9}, { 543, 8}, { 1087, 9}, { 575,10}, \
+    { 303, 9}, { 607,10}, { 319, 9}, { 639,10}, \
+    { 335, 9}, { 671,10}, { 351,12}, { 95,11}, \
+    { 191,10}, { 383,13}, { 63,12}, { 127,11}, \
+    { 255,10}, { 511,11}, { 271,10}, { 543, 9}, \
+    { 1087,11}, { 287,10}, { 575,11}, { 303,10}, \
+    { 607, 9}, { 1215,11}, { 319,10}, { 671,11}, \
+    { 351,10}, { 703,11}, { 367,10}, { 735, 9}, \
+    { 1471, 8}, { 2943,12}, { 191,11}, { 383,10}, \
+    { 767,11}, { 415,10}, { 831,12}, { 223,11}, \
+    { 447,10}, { 895,11}, { 479,10}, { 959,13}, \
+    { 127,12}, { 255,11}, { 511,10}, { 1023,11}, \
+    { 543,10}, { 1087, 9}, { 2175,12}, { 287,11}, \
+    { 575,10}, { 1151,11}, { 607,12}, { 319,11}, \
+    { 639,10}, { 1279,11}, { 671,12}, { 351,11}, \
+    { 703,10}, { 1407,11}, { 735,10}, { 1471, 9}, \
+    { 2943, 8}, { 5887,12}, { 383,11}, { 767,10}, \
+    { 1535,12}, { 415,11}, { 831,10}, { 1663,12}, \
+    { 447,11}, { 895,10}, { 1791,12}, { 479,11}, \
+    { 959,14}, { 127,13}, { 255,12}, { 511,11}, \
+    { 1023,12}, { 543,11}, { 1087,12}, { 575,11}, \
+    { 1151,12}, { 607,11}, { 1215,13}, { 319,12}, \
+    { 639,11}, { 1279,12}, { 671,11}, { 1343,12}, \
+    { 703,11}, { 1407,12}, { 735,11}, { 1471,10}, \
+    { 2943,13}, { 383,12}, { 767,11}, { 1535,12}, \
+    { 831,13}, { 447,12}, { 959,11}, { 1919,13}, \
+    { 511,12}, { 1087,13}, { 575,12}, { 1215,13}, \
+    { 639,12}, { 1343,13}, { 703,12}, { 1471,11}, \
+    { 2943,14}, { 383,13}, { 767,12}, { 1535,13}, \
+    { 831,12}, { 1663,13}, { 959,12}, { 1919,14}, \
+    { 511,13}, { 1087,12}, { 2175,13}, { 1215,14}, \
+    { 16384,15}, { 32768,16}, { 65536,17}, { 131072,18}, \
+    { 262144,19}, { 524288,20}, {1048576,21}, {2097152,22}, \
+    {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 190
+#define MUL_FFT_THRESHOLD 2496
+
+#define SQR_FFT_MODF_THRESHOLD             344  /* k = 5 */
+#define SQR_FFT_TABLE3 \
+  { { 344, 5}, { 21, 6}, { 11, 5}, { 23, 6}, \
+    { 25, 7}, { 13, 6}, { 27, 7}, { 25, 8}, \
+    { 13, 7}, { 27, 8}, { 15, 7}, { 31, 8}, \
+    { 21, 9}, { 11, 8}, { 27, 9}, { 15, 8}, \
+    { 35, 9}, { 19, 8}, { 41, 9}, { 23, 8}, \
+    { 47, 9}, { 27,10}, { 15, 9}, { 39,10}, \
+    { 23, 9}, { 51,11}, { 15,10}, { 31, 9}, \
+    { 63,10}, { 39, 9}, { 79,10}, { 47,11}, \
+    { 31,10}, { 79,11}, { 47,10}, { 95,12}, \
+    { 31,11}, { 63,10}, { 127, 9}, { 255, 8}, \
+    { 511,11}, { 79, 9}, { 319,11}, { 95,10}, \
+    { 191, 9}, { 383,12}, { 63,11}, { 127,10}, \
+    { 255, 9}, { 511,10}, { 271, 9}, { 543,11}, \
+    { 143,10}, { 287, 9}, { 575,10}, { 303, 9}, \
+    { 607,10}, { 319, 9}, { 639,12}, { 95,11}, \
+    { 191,10}, { 383,13}, { 63,12}, { 127,11}, \
+    { 255,10}, { 511,11}, { 271,10}, { 543,11}, \
+    { 287,10}, { 575,11}, { 303,10}, { 607,11}, \
+    { 319,10}, { 639,11}, { 335,10}, { 671,11}, \
+    { 351,10}, { 703,12}, { 191,11}, { 383,10}, \
+    { 767,11}, { 415,10}, { 831,12}, { 223,11}, \
+    { 447,10}, { 895,11}, { 479,10}, { 959,13}, \
+    { 127,12}, { 255,11}, { 511,10}, { 1023,11}, \
+    { 543,10}, { 1087,12}, { 287,11}, { 575,10}, \
+    { 1151,11}, { 607,10}, { 1215,12}, { 319,11}, \
+    { 639,10}, { 1279,11}, { 671,12}, { 351,11}, \
+    { 703,10}, { 1407,13}, { 191,12}, { 383,11}, \
+    { 767,12}, { 415,11}, { 831,12}, { 447,11}, \
+    { 895,12}, { 479,11}, { 959,10}, { 1919,14}, \
+    { 127,13}, { 255,12}, { 511,11}, { 1023,12}, \
+    { 543,11}, { 1087,12}, { 575,11}, { 1151,12}, \
+    { 607,11}, { 1215,13}, { 319,12}, { 639,11}, \
+    { 1279,12}, { 671,11}, { 1343,12}, { 703,11}, \
+    { 1407,13}, { 383,12}, { 831,13}, { 447,12}, \
+    { 959,14}, { 255,13}, { 511,12}, { 1087,13}, \
+    { 575,12}, { 1215,13}, { 639,12}, { 1343,13}, \
+    { 703,12}, { 1407,14}, { 383,13}, { 767,12}, \
+    { 1535,13}, { 831,12}, { 1663,13}, { 959,14}, \
+    { 511,13}, { 1087,12}, { 2175,13}, { 1215,14}, \
+    { 16384,15}, { 32768,16}, { 65536,17}, { 131072,18}, \
+    { 262144,19}, { 524288,20}, {1048576,21}, {2097152,22}, \
+    {4194304,23}, {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 166
+#define SQR_FFT_THRESHOLD 2240
+
+#define MULLO_BASECASE_THRESHOLD             0  /* always */
+#define MULLO_DC_THRESHOLD                  56
+#define MULLO_MUL_N_THRESHOLD             4940
+#define SQRLO_BASECASE_THRESHOLD            10
+#define SQRLO_DC_THRESHOLD                  73
+#define SQRLO_SQR_THRESHOLD               4392
+
+#define DC_DIV_QR_THRESHOLD                 19
+#define DC_DIVAPPR_Q_THRESHOLD             139
+#define DC_BDIV_QR_THRESHOLD                62
+#define DC_BDIV_Q_THRESHOLD                126
+
+#define INV_MULMOD_BNM1_THRESHOLD           24
+#define INV_NEWTON_THRESHOLD               108
+#define INV_APPR_THRESHOLD                 108
+
+#define BINV_NEWTON_THRESHOLD              208
+#define REDC_1_TO_REDC_2_THRESHOLD          36
+#define REDC_2_TO_REDC_N_THRESHOLD          53
+
+#define MU_DIV_QR_THRESHOLD                855
+#define MU_DIVAPPR_Q_THRESHOLD            1120
+#define MUPI_DIV_QR_THRESHOLD                0  /* always */
+#define MU_BDIV_QR_THRESHOLD               807
+#define MU_BDIV_Q_THRESHOLD               1470
+
+#define POWM_SEC_TABLE  1,11,70,702,2499
+
+#define GET_STR_DC_THRESHOLD                11
+#define GET_STR_PRECOMPUTE_THRESHOLD        17
+#define SET_STR_DC_THRESHOLD              2150
+#define SET_STR_PRECOMPUTE_THRESHOLD      2943
+
+#define FAC_DSC_THRESHOLD                  298
+#define FAC_ODD_THRESHOLD                   51
+
+#define MATRIX22_STRASSEN_THRESHOLD         17
+#define HGCD2_DIV1_METHOD                    1  /* 2.38% faster than 3 */
+#define HGCD_THRESHOLD                     142
+#define HGCD_APPR_THRESHOLD                159
+#define HGCD_REDUCE_THRESHOLD             2384
+#define GCD_DC_THRESHOLD                   483
+#define GCDEXT_DC_THRESHOLD                492
+#define JACOBI_BASE_METHOD                   1  /* 0.94% faster than 3 */
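The constants above are crossover points, measured in 64-bit limbs, at which GMP's generic code is tuned to switch algorithms on this CPU, and the FFT tables map operand sizes to an FFT split parameter k. As a hedged sketch of how such tuned thresholds are typically consulted (simplified; the real dispatch lives in GMP's mpn/generic sources, and choose_mul_algorithm below is an illustrative name, not a GMP function):

    #include <stddef.h>

    /* Crossovers copied from the table above (limb counts); illustrative subset. */
    #define MUL_TOOM22_THRESHOLD   13
    #define MUL_TOOM33_THRESHOLD   97
    #define MUL_FFT_THRESHOLD    2496

    typedef enum { MUL_BASECASE, MUL_TOOM22, MUL_TOOM33_OR_HIGHER, MUL_FFT } mul_algo;

    /* Pick a multiplication algorithm for an n-limb x n-limb product, the way
       GMP-style thresholds are meant to be read: below a crossover, the simpler
       (here assembly-backed) method is fastest. */
    static mul_algo choose_mul_algorithm(size_t n)
    {
        if (n < MUL_TOOM22_THRESHOLD) return MUL_BASECASE;       /* schoolbook asm wins for small n */
        if (n < MUL_TOOM33_THRESHOLD) return MUL_TOOM22;         /* Karatsuba-style 2-way split */
        if (n < MUL_FFT_THRESHOLD)    return MUL_TOOM33_OR_HIGHER;
        return MUL_FFT;                                          /* FFT range, per MUL_FFT_TABLE3 */
    }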
diff --git a/vendor/gmp-6.3.0/mpn/x86_64/alderlake/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/mul_basecase.asm
new file mode 100644
index 0000000..9400fe5
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/mul_basecase.asm
@@ -0,0 +1,474 @@
+dnl  AMD64 mpn_mul_basecase.
+
+dnl  Contributed to the GNU project by Torbjörn Granlund.
+
+dnl  Copyright 2012, 2013, 2022 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+dnl
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of either:
+dnl
+dnl    * the GNU Lesser General Public License as published by the Free
+dnl      Software Foundation; either version 3 of the License, or (at your
+dnl      option) any later version.
+dnl
+dnl  or
+dnl
+dnl    * the GNU General Public License as published by the Free Software
+dnl      Foundation; either version 2 of the License, or (at your option) any
+dnl      later version.
+dnl
+dnl  or both in parallel, as here.
+dnl
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+dnl  for more details.
+dnl
+dnl  You should have received copies of the GNU General Public License and the
+dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
+dnl  see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C            cycles/limb
+C AMD K8,K9      -
+C AMD K10        -
+C AMD bd1        -
+C AMD bd2        -
+C AMD bd3        -
+C AMD bd4        -
+C AMD zn1        ?
+C AMD zn2        ?
+C AMD zn3        ?
+C AMD bt1        -
+C AMD bt2        -
+C Intel P4       -
+C Intel CNR      -
+C Intel PNR      -
+C Intel NHM      -
+C Intel WSM      -
+C Intel SBR      -
+C Intel IBR      -
+C Intel HWL      -
+C Intel BWL      ?
+C Intel SKL      ?
+C Intel RKL      ?
+C Intel ALD      1.29
+C Intel atom     -
+C Intel SLM      -
+C Intel GLM      -
+C VIA nano       -
+
+C TODO
+C  * Do overlapped software pipelining.
+C  * Try shallower pipeline, which would result in using fewer registers.
+C  * There are false dependencies on CF/OF between iterations.  Try breaking
+C    them to see if it helps.
+
+define(`rp', `%rdi')   dnl rcx
+define(`up', `%rsi')   dnl rdx
+define(`un_arg',`%rdx')   dnl r8
+define(`vp_arg',`%rcx')   dnl r9
+define(`vn_arg',`%r8')    dnl stack
+
+define(`un', `%r14')
+define(`vp', `%r15')
+define(`vn', `%rbp')
+
+define(`n', `%rcx')
+define(`v0', `%rdx')
+
+
+ASM_START()
+	TEXT
+	ALIGN(16)
+PROLOGUE(mpn_mul_basecase)
+	cmp $2, un_arg
+	ja L(gen)
+	mov (vp_arg), %rdx
+	mulx( (up), %rax, %r9)
+	mov %rax, (rp)
+	je L(s2x)
+
+	mov %r9, 8(rp)
+	ret
+
+L(s2x):	mulx( 8,(up), %rax, %r10)
+	add %r9, %rax
+	adc $0, %r10
+	cmp $2, R32(vn_arg)
+	je L(s22)
+
+L(s21):	mov %rax, 8(rp)
+	mov %r10, 16(rp)
+	ret
+
+L(s22):	mov 8(vp_arg), %rdx
+	mulx( (up), %r8, %r9)
+	add %r8, %rax
+	adc %r10, %r9
+	mov %rax, 8(rp)
+	mulx( 8,(up), %rax, %r10)
+	adc $0, %r10
+	adc %r9, %rax
+	mov %rax, 16(rp)
+	adc $0, %r10
+	mov %r10, 24(rp)
+	ret
+
+L(gen):	push %rbx
+	push %rbp
+	push %r12
+	push %r13
+	push %r14
+	push %r15
+
+	mov un_arg, un
+	neg un
+	shl $3, un
+	mov vp_arg, vp
+	mov vn_arg, vn
+
+	test $1, R8(un_arg)
+	mov (vp), %rdx
+	jz L(bx0)
+
+L(bx1):	test $16, R8(un)
+	jnz L(b01)
+
+L(b11):	lea 24(un), n
+	mulx( (up), %r11, %r10)
+	mulx( 8,(up), %r13, %r12)
+	mulx( 16,(up), %rbx, %rax)
+	lea 8(rp), rp
+	lea 24(up), up
+	jrcxz L(med3)
+L(mtp3):mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(med3)
+	jmp L(mtp3)
+L(med3):adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adcx( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jz L(ret)
+L(out3):lea 32(rp,un), rp
+	lea 24(up,un), up
+	lea 8(vp), vp
+	xor R32(%rdx), R32(%rdx)
+	mov (vp), %rdx
+	mulx( -24,(up), %r11, %r10)
+	mulx( -16,(up), %r13, %r12)
+	mulx( -8,(up), %rbx, %rax)
+	lea 24(un), n
+	adox( -8,(rp), %r11)
+	jrcxz L(ed3)
+L(tp3):	mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	adox( 16,(rp), %r9)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	adox( 24,(rp), %r11)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(ed3)
+	jmp L(tp3)
+L(ed3):	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	adcx( %rcx, %rax)
+	adox( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jnz L(out3)
+	jmp L(ret)
+
+
+L(b01):	mulx( (up), %rbx, %rax)
+	lea 8(un), n
+	mulx( 8,(up), %r9, %r8)
+	mulx( 16,(up), %r11, %r10)
+	lea 8(up), up
+	lea -8(rp), rp
+	jmp L(ml1)
+L(mtp1):mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+L(ml1):	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(med1)
+	jmp L(mtp1)
+L(med1):adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adcx( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jz L(ret)
+L(out1):lea 16(rp,un), rp
+	lea 8(up,un), up
+	lea 8(vp), vp
+	xor R32(%rdx), R32(%rdx)
+	mov (vp), %rdx
+	lea 8(un), n
+	mulx( -8,(up), %rbx, %rax)
+	mulx( (up), %r9, %r8)
+	mulx( 8,(up), %r11, %r10)
+	jmp L(lo1)
+L(tp1):	mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+L(lo1):	adox( 8,(rp), %rbx)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	adox( 16,(rp), %r9)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	adox( 24,(rp), %r11)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(ed1)
+	jmp L(tp1)
+L(ed1):	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	adcx( %rcx, %rax)
+	adox( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jnz L(out1)
+	jmp L(ret)
+
+
+L(bx0):	test $16, R8(un)
+	jz L(b00)
+
+L(b10):	mulx( (up), %r13, %r12)
+	mulx( 8,(up), %rbx, %rax)
+	lea 16(un), n
+	mulx( 16,(up), %r9, %r8)
+	lea 16(up), up
+	jmp L(ml2)
+L(mtp2):mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+L(ml2):	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(med2)
+	jmp L(mtp2)
+L(med2):adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adcx( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jz L(ret)
+L(out2):lea 24(rp,un), rp
+	lea 16(up,un), up
+	lea 8(vp), vp
+	xor R32(%rdx), R32(%rdx)
+	mov (vp), %rdx
+	mulx( -16,(up), %r13, %r12)
+	mulx( -8,(up), %rbx, %rax)
+	lea 16(un), n
+	mulx( (up), %r9, %r8)
+	jmp L(lo2)
+L(tp2):	mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+L(lo2):	adox( (rp), %r13)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	adox( 16,(rp), %r9)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	adox( 24,(rp), %r11)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(ed2)
+	jmp L(tp2)
+L(ed2):	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	adcx( %rcx, %rax)
+	adox( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jnz L(out2)
+	jmp L(ret)
+
+
+L(b00):	lea 32(un), n
+	mulx( (up), %r9, %r8)
+	mulx( 8,(up), %r11, %r10)
+	mulx( 16,(up), %r13, %r12)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, (rp)
+	lea 32(up), up
+	lea 16(rp), rp
+	jrcxz L(med0)
+L(mtp0):mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(med0)
+	jmp L(mtp0)
+L(med0):adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adcx( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jz L(ret)
+L(out0):lea 40(rp,un), rp
+	lea 32(up,un), up
+	lea 8(vp), vp
+	xor R32(%rdx), R32(%rdx)
+	mov (vp), %rdx
+	lea 32(un), n
+	mulx( -32,(up), %r9, %r8)
+	mulx( -24,(up), %r11, %r10)
+	mulx( -16,(up), %r13, %r12)
+	adox( -16,(rp), %r9)
+	mulx( -8,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, -16(rp)
+	adox( -8,(rp), %r11)
+	jrcxz L(ed0)
+L(tp0):	mulx( (up), %r9, %r8)
+	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	mulx( 8,(up), %r11, %r10)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	mulx( 16,(up), %r13, %r12)
+	adcx( %rax, %r9)
+	mov %rbx, 8(rp)
+	adox( 16,(rp), %r9)
+	mulx( 24,(up), %rbx, %rax)
+	adcx( %r8, %r11)
+	mov %r9, 16(rp)
+	adox( 24,(rp), %r11)
+	lea 32(up), up
+	lea 32(rp), rp
+	lea 32(n), n
+	jrcxz L(ed0)
+	jmp L(tp0)
+L(ed0):	adcx( %r10, %r13)
+	mov %r11, -8(rp)
+	adox( (rp), %r13)
+	adcx( %r12, %rbx)
+	mov %r13, (rp)
+	adox( 8,(rp), %rbx)
+	adcx( %rcx, %rax)
+	adox( %rcx, %rax)
+	mov %rbx, 8(rp)
+	mov %rax, 16(rp)
+	dec vn
+	jnz L(out0)
+
+L(ret):	pop %r15
+	pop %r14
+	pop %r13
+	pop %r12
+	pop %rbp
+	pop %rbx
+	ret
+EPILOGUE()
+ASM_END()
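mpn_mul_basecase computes the full product {rp, un+vn} = {up,un} * {vp,vn} (with un >= vn >= 1) by schoolbook multiplication: one pass over up per limb of vp, which is the structure the outer loops L(out0)..L(out3) above drive with 4-way unrolled inner loops. A reference C model follows; it is illustrative only, ref_mul_basecase is a placeholder name, and unlike the real routine it pre-zeroes rp instead of overwriting it on the first pass.

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t mp_limb_t;

    /* Reference model of mpn_mul_basecase: {rp, un+vn} = {up,un} * {vp,vn}. */
    static void ref_mul_basecase(mp_limb_t *rp, const mp_limb_t *up, size_t un,
                                 const mp_limb_t *vp, size_t vn)
    {
        for (size_t i = 0; i < un + vn; i++)
            rp[i] = 0;
        for (size_t j = 0; j < vn; j++) {       /* one addmul_1-style pass per vp limb */
            mp_limb_t v0 = vp[j], carry = 0;
            for (size_t i = 0; i < un; i++) {
                unsigned __int128 p = (unsigned __int128)up[i] * v0 + rp[i + j] + carry;
                rp[i + j] = (mp_limb_t)p;
                carry     = (mp_limb_t)(p >> 64);
            }
            rp[un + j] = carry;                 /* carry-out becomes the next high limb */
        }
    }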
diff --git a/vendor/gmp-6.3.0/mpn/x86_64/alderlake/submul_1.asm b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/submul_1.asm
new file mode 100644
index 0000000..d7d6b0d
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86_64/alderlake/submul_1.asm
@@ -0,0 +1,140 @@
+dnl  AMD64 mpn_submul_1 for CPUs with mulx and adx.
+
+dnl  Contributed to the GNU project by Torbjörn Granlund.
+
+dnl  Copyright 2022 Free Software Foundation, Inc.
+
+dnl  This file is part of the GNU MP Library.
+dnl
+dnl  The GNU MP Library is free software; you can redistribute it and/or modify
+dnl  it under the terms of either:
+dnl
+dnl    * the GNU Lesser General Public License as published by the Free
+dnl      Software Foundation; either version 3 of the License, or (at your
+dnl      option) any later version.
+dnl
+dnl  or
+dnl
+dnl    * the GNU General Public License as published by the Free Software
+dnl      Foundation; either version 2 of the License, or (at your option) any
+dnl      later version.
+dnl
+dnl  or both in parallel, as here.
+dnl
+dnl  The GNU MP Library is distributed in the hope that it will be useful, but
+dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+dnl  for more details.
+dnl
+dnl  You should have received copies of the GNU General Public License and the
+dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
+dnl  see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C            cycles/limb
+C AMD K8,K9      -
+C AMD K10        -
+C AMD bd1        -
+C AMD bd2        -
+C AMD bd3        -
+C AMD bd4        -
+C AMD zn1        ?
+C AMD zn2        ?
+C AMD zn3        2.0
+C AMD bt1        -
+C AMD bt2        -
+C Intel P4       -
+C Intel CNR      -
+C Intel PNR      -
+C Intel NHM      -
+C Intel WSM      -
+C Intel SBR      -
+C Intel IBR      -
+C Intel HWL      -
+C Intel BWL      ?
+C Intel SKL      ?
+C Intel RKL      2.0
+C Intel ALD      1.53
+C Intel atom     -
+C Intel SLM      -
+C Intel GLM      -
+C VIA nano       -
+
+define(`rp', `%rdi')   dnl rcx
+define(`up', `%rsi')   dnl rdx
+define(`n_param', `%rdx')   dnl r8
+define(`v0_param',`%rcx')   dnl r9
+
+define(`n', `%rcx')   dnl
+define(`v0', `%rdx')   dnl
+
+
+ASM_START()
+	TEXT
+	ALIGN(16)
+PROLOGUE(mpn_submul_1)
+	mov n_param, %rax
+	mov v0_param, v0
+	mov %rax, n
+	test $1, R8(n)
+	jz L(bx0)
+
+L(bx1):	mulx( (up), %r9, %rax)
+	test $2, R8(n)
+	stc
+	jz L(b01)
+
+L(b11):	lea 1(n), n
+	lea 16(up), up
+	lea 16(rp), rp
+	jmp L(lo3)
+
+L(b01):	lea 3(n), n
+	jmp L(lo1)
+
+L(bx0):	mulx( (up), %r9, %r8)
+	test $2, R8(n)
+	stc
+	jz L(b00)
+
+L(b10):	lea 8(up), up
+	lea 8(rp), rp
+	lea 2(n), n
+	jmp L(lo2)
+
+L(b00):	lea 24(up), up
+	lea 24(rp), rp
+	jmp L(lo0)
+
+L(top):	lea 32(up), up
+	lea 32(rp), rp
+	mulx( -24,(up), %r9, %r8)
+	adox( %rax, %r9)
+L(lo0):	not %r9
+	adcx( -24,(rp), %r9)
+	mov %r9, -24(rp)
+	mulx( -16,(up), %r9, %rax)
+	adox( %r8, %r9)
+L(lo3):	not %r9
+	adcx( -16,(rp), %r9)
+	mov %r9, -16(rp)
+	mulx( -8,(up), %r9, %r8)
+	adox( %rax, %r9)
+L(lo2):	not %r9
+	adcx( -8,(rp), %r9)
+	mov %r9, -8(rp)
+	mulx( (up), %r9, %rax)
+	adox( %r8, %r9)
+L(lo1):	not %r9
+	adcx( (rp), %r9)
+	mov %r9, (rp)
+	lea -4(n), n
+	jrcxz L(end)
+	jmp L(top)
+
+L(end):	adox( %rcx, %rax)
+	sbb $-1, %rax
+	ret
+EPILOGUE()
+ASM_END()
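mpn_submul_1 is the subtractive counterpart of mpn_addmul_1: {rp,n} -= {up,n} * v0, returning the final borrow limb; the loop above folds the subtraction into the adcx/adox addition chains by storing the one's complement of each product limb (not %r9) and compensating at L(end). A minimal C model for reference, illustrative only (ref_submul_1 is a placeholder name; assumes unsigned __int128):

    #include <stdint.h>
    #include <stddef.h>

    typedef uint64_t mp_limb_t;

    /* Reference model of mpn_submul_1: {rp,n} -= {up,n} * v0, return the borrow limb. */
    static mp_limb_t ref_submul_1(mp_limb_t *rp, const mp_limb_t *up, size_t n, mp_limb_t v0)
    {
        mp_limb_t borrow = 0;
        for (size_t i = 0; i < n; i++) {
            unsigned __int128 p = (unsigned __int128)up[i] * v0 + borrow;
            mp_limb_t lo = (mp_limb_t)p;
            mp_limb_t hi = (mp_limb_t)(p >> 64);
            mp_limb_t r  = rp[i] - lo;
            borrow = hi + (r > rp[i]);   /* extra borrow when the limb subtraction wraps */
            rp[i] = r;
        }
        return borrow;
    }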