Diffstat (limited to 'vendor/gmp-6.3.0/mpn/x86/atom/sse2')
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/aorsmul_1.asm      174
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/bdiv_dbm1c.asm      34
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/divrem_1.asm        34
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_1.asm         34
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_4.asm         34
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_1.asm          124
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_basecase.asm   501
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/popcount.asm        35
-rw-r--r--  vendor/gmp-6.3.0/mpn/x86/atom/sse2/sqr_basecase.asm   634
9 files changed, 1604 insertions, 0 deletions
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/aorsmul_1.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/aorsmul_1.asm
new file mode 100644
index 0000000..969a14a
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/aorsmul_1.asm
@@ -0,0 +1,174 @@
+dnl x86-32 mpn_addmul_1 and mpn_submul_1 optimised for Intel Atom.
+
+dnl Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C                            cycles/limb
+C P5                              -
+C P6 model 0-8,10-12              -
+C P6 model 9   (Banias)
+C P6 model 13  (Dothan)
+C P4 model 0   (Willamette)
+C P4 model 1   (?)
+C P4 model 2   (Northwood)
+C P4 model 3   (Prescott)
+C P4 model 4   (Nocona)
+C Intel Atom                      8
+C AMD K6
+C AMD K7                          -
+C AMD K8
+C AMD K10
+
+define(`rp', `%edi')
+define(`up', `%esi')
+define(`n', `%ecx')
+
+ifdef(`OPERATION_addmul_1',`
+ define(ADDSUB, add)
+ define(func_1, mpn_addmul_1)
+ define(func_1c, mpn_addmul_1c)')
+ifdef(`OPERATION_submul_1',`
+ define(ADDSUB, sub)
+ define(func_1, mpn_submul_1)
+ define(func_1c, mpn_submul_1c)')
+
+MULFUNC_PROLOGUE(mpn_addmul_1 mpn_addmul_1c mpn_submul_1 mpn_submul_1c)
+
+ TEXT
+ ALIGN(16)
+PROLOGUE(func_1)
+ xor %edx, %edx
+L(ent): push %edi
+ push %esi
+ push %ebx
+ mov 16(%esp), rp
+ mov 20(%esp), up
+ mov 24(%esp), n
+ movd 28(%esp), %mm7
+ test $1, n
+ jz L(fi0or2)
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ shr $2, n
+ jnc L(fi1)
+
+L(fi3): lea -8(up), up
+ lea -8(rp), rp
+ movd 12(up), %mm1
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ add $1, n C increment and clear carry
+ jmp L(lo3)
+
+L(fi1): movd %mm0, %ebx
+ jz L(wd1)
+ movd 4(up), %mm1
+ pmuludq %mm7, %mm1
+ jmp L(lo1)
+
+L(fi0or2):
+ movd (up), %mm1
+ pmuludq %mm7, %mm1
+ shr $2, n
+ movd 4(up), %mm0
+ jc L(fi2)
+ lea -4(up), up
+ lea -4(rp), rp
+ movd %mm1, %eax
+ pmuludq %mm7, %mm0
+ jmp L(lo0)
+
+L(fi2): lea 4(up), up
+ add $1, n C increment and clear carry
+ movd %mm1, %eax
+ lea -12(rp), rp
+ jmp L(lo2)
+
+C ALIGN(16) C alignment seems irrelevant
+L(top): movd 4(up), %mm1
+ adc $0, %edx
+ ADDSUB %eax, 12(rp)
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+L(lo1): psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ ADDSUB %ebx, (rp)
+L(lo0): psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ movd %mm0, %ebx
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ ADDSUB %eax, 4(rp)
+L(lo3): psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ ADDSUB %ebx, 8(rp)
+L(lo2): psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ dec n
+ jnz L(top)
+
+L(end): adc n, %edx C n is zero here
+ ADDSUB %eax, 12(rp)
+ movd %mm0, %ebx
+ lea 16(rp), rp
+L(wd1): psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %eax
+ adc n, %eax
+ ADDSUB %ebx, (rp)
+ emms
+ adc n, %eax
+ pop %ebx
+ pop %esi
+ pop %edi
+ ret
+EPILOGUE()
+PROLOGUE(func_1c)
+ mov 20(%esp), %edx C carry
+ jmp L(ent)
+EPILOGUE()
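
For reference, a plain C sketch of what these two entry points compute: a
multiply-accumulate of a limb vector by a single limb, returning the carry
(or borrow) limb. This is illustrative only; the names addmul_1_ref and
submul_1_ref are not part of GMP's API, and the assembly above retires four
32-bit limbs per loop iteration via pmuludq rather than one.

    #include <stdint.h>
    #include <stddef.h>

    /* rp[0..n-1] += up[0..n-1] * v; returns the carry-out limb.
       Illustrative reference for mpn_addmul_1 with 32-bit limbs. */
    static uint32_t addmul_1_ref(uint32_t *rp, const uint32_t *up,
                                 size_t n, uint32_t v)
    {
        uint32_t cy = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t t = (uint64_t)up[i] * v + rp[i] + cy;  /* fits in 64 bits */
            rp[i] = (uint32_t)t;
            cy = (uint32_t)(t >> 32);
        }
        return cy;
    }

    /* rp[0..n-1] -= up[0..n-1] * v; returns the borrow-out limb.
       Illustrative reference for mpn_submul_1. */
    static uint32_t submul_1_ref(uint32_t *rp, const uint32_t *up,
                                 size_t n, uint32_t v)
    {
        uint32_t bw = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t t = (uint64_t)up[i] * v + bw;
            uint32_t lo = (uint32_t)t;
            bw = (uint32_t)(t >> 32) + (rp[i] < lo);  /* propagate borrow */
            rp[i] -= lo;
        }
        return bw;
    }

The _1c variants take an initial carry in %edx where func_1 establishes a
zero with xor before falling into L(ent).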
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/bdiv_dbm1c.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/bdiv_dbm1c.asm
new file mode 100644
index 0000000..782e914
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/bdiv_dbm1c.asm
@@ -0,0 +1,34 @@
+dnl Intel Atom mpn_bdiv_dbm1c.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+MULFUNC_PROLOGUE(mpn_bdiv_dbm1c)
+include_mpn(`x86/pentium4/sse2/bdiv_dbm1c.asm')
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/divrem_1.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/divrem_1.asm
new file mode 100644
index 0000000..f84709a
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/divrem_1.asm
@@ -0,0 +1,34 @@
+dnl Intel Atom mpn_divrem_1 -- mpn by limb division.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+MULFUNC_PROLOGUE(mpn_preinv_divrem_1 mpn_divrem_1c mpn_divrem_1)
+include_mpn(`x86/pentium4/sse2/divrem_1.asm')
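
This file is a forwarding stub: MULFUNC_PROLOGUE declares the entry points
and include_mpn pulls in the Pentium-4 SSE2 implementation. As a reminder of
the semantics, a simplified C sketch of limb-by-limb division follows; it
ignores the fraction-limb count and the preinverted/carry-in variants of the
real mpn_divrem_1 interface, and divrem_1_ref is an illustrative name.

    #include <stdint.h>
    #include <stddef.h>

    /* qp[0..n-1] = {up,n} / d; returns {up,n} % d.  Simplified sketch:
       the real mpn_divrem_1 also produces fraction limbs and has
       mpn_preinv_divrem_1 / mpn_divrem_1c variants. */
    static uint32_t divrem_1_ref(uint32_t *qp, const uint32_t *up,
                                 size_t n, uint32_t d)
    {
        uint64_t r = 0;
        for (size_t i = n; i-- > 0; ) {   /* most significant limb first */
            uint64_t t = (r << 32) | up[i];
            qp[i] = (uint32_t)(t / d);
            r = t % d;
        }
        return (uint32_t)r;
    }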
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_1.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_1.asm
new file mode 100644
index 0000000..ae6581d
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_1.asm
@@ -0,0 +1,34 @@
+dnl Intel Atom/SSE2 mpn_mod_1_1.
+
+dnl Copyright 2009, 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+MULFUNC_PROLOGUE(mpn_mod_1_1p)
+include_mpn(`x86/pentium4/sse2/mod_1_1.asm')
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_4.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_4.asm
new file mode 100644
index 0000000..31faa3f
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mod_1_4.asm
@@ -0,0 +1,34 @@
+dnl Intel Atom/SSE2 mpn_mod_1_4.
+
+dnl Copyright 2009, 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+MULFUNC_PROLOGUE(mpn_mod_1s_4p)
+include_mpn(`x86/pentium4/sse2/mod_1_4.asm')
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_1.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_1.asm
new file mode 100644
index 0000000..aa3bb97
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_1.asm
@@ -0,0 +1,124 @@
+dnl Intel Atom mpn_mul_1.
+
+dnl Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C                            cycles/limb
+C P5                              -
+C P6 model 0-8,10-12              -
+C P6 model 9   (Banias)
+C P6 model 13  (Dothan)
+C P4 model 0   (Willamette)
+C P4 model 1   (?)
+C P4 model 2   (Northwood)
+C P4 model 3   (Prescott)
+C P4 model 4   (Nocona)
+C Intel Atom                     7.5
+C AMD K6                          -
+C AMD K7                          -
+C AMD K8
+C AMD K10
+
+defframe(PARAM_CARRY,20)
+defframe(PARAM_MUL, 16)
+defframe(PARAM_SIZE, 12)
+defframe(PARAM_SRC, 8)
+defframe(PARAM_DST, 4)
+
+define(`rp', `%edx')
+define(`up', `%esi')
+define(`n', `%ecx')
+
+ASM_START()
+ TEXT
+ ALIGN(16)
+deflit(`FRAME',0)
+
+PROLOGUE(mpn_mul_1c)
+ movd PARAM_CARRY, %mm6 C carry
+ jmp L(ent)
+EPILOGUE()
+
+ ALIGN(8) C for compact code
+PROLOGUE(mpn_mul_1)
+ pxor %mm6, %mm6
+L(ent): push %esi FRAME_pushl()
+ mov PARAM_SRC, up
+ mov PARAM_SIZE, %eax C size
+ movd PARAM_MUL, %mm7
+ movd (up), %mm0
+ mov %eax, n
+ and $3, %eax
+ pmuludq %mm7, %mm0
+ mov PARAM_DST, rp
+ jz L(lo0)
+ cmp $2, %eax
+ lea -16(up,%eax,4),up
+ lea -16(rp,%eax,4),rp
+ jc L(lo1)
+ jz L(lo2)
+ jmp L(lo3)
+
+ ALIGN(16)
+L(top): movd (up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+L(lo0): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+L(lo3): paddq %mm0, %mm6
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 4(rp)
+ psrlq $32, %mm6
+L(lo2): paddq %mm0, %mm6
+ movd 12(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 8(rp)
+ psrlq $32, %mm6
+L(lo1): paddq %mm0, %mm6
+ sub $4, n
+ movd %mm6, 12(rp)
+ lea 16(up), up
+ ja L(top)
+
+ psrlq $32, %mm6
+ movd %mm6, %eax
+ emms
+ pop %esi FRAME_popl()
+ ret
+EPILOGUE()
+ASM_END()
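
A plain C sketch of the semantics, with an illustrative name: mpn_mul_1
writes up[]*v to rp[] and returns the final high limb, which the loop above
keeps as a running carry in %mm6 while retiring four limbs per iteration.

    #include <stdint.h>
    #include <stddef.h>

    /* rp[0..n-1] = up[0..n-1] * v; returns the high carry limb.
       Illustrative reference for mpn_mul_1 with 32-bit limbs. */
    static uint32_t mul_1_ref(uint32_t *rp, const uint32_t *up,
                              size_t n, uint32_t v)
    {
        uint32_t cy = 0;
        for (size_t i = 0; i < n; i++) {
            uint64_t t = (uint64_t)up[i] * v + cy;
            rp[i] = (uint32_t)t;
            cy = (uint32_t)(t >> 32);
        }
        return cy;
    }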
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_basecase.asm
new file mode 100644
index 0000000..97d3aeb
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/mul_basecase.asm
@@ -0,0 +1,501 @@
+dnl x86 mpn_mul_basecase -- Multiply two limb vectors and store the result in
+dnl a third limb vector.
+
+dnl Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C TODO
+C * Check if 'jmp N(%esp)' is well-predicted enough to allow us to combine the
+C 4 large loops into one; we could use it for the outer loop branch.
+C * Optimise code outside of inner loops.
+C * Write combined addmul_1 feed-in and wind-down code, and use it when
+C   iterating the outer loop. ("Overlapping software pipelining")
+C * Postpone push of ebx until we know vn > 1. Perhaps use caller-saves regs
+C for inlined mul_1, allowing us to postpone all pushes.
+C * Perhaps write special code for vn <= un < M, for some small M.
+
+C void mpn_mul_basecase (mp_ptr wp,
+C mp_srcptr xp, mp_size_t xn,
+C mp_srcptr yp, mp_size_t yn);
+C
+
+define(`rp', `%edi')
+define(`up', `%esi')
+define(`un', `%ecx')
+define(`vp', `%ebp')
+define(`vn', `36(%esp)')
+
+ TEXT
+ ALIGN(16)
+PROLOGUE(mpn_mul_basecase)
+ push %edi
+ push %esi
+ push %ebx
+ push %ebp
+ mov 20(%esp), rp
+ mov 24(%esp), up
+ mov 28(%esp), un
+ mov 32(%esp), vp
+
+ movd (up), %mm0
+ movd (vp), %mm7
+ pmuludq %mm7, %mm0
+ pxor %mm6, %mm6
+
+ mov un, %eax
+ and $3, %eax
+ jz L(of0)
+ cmp $2, %eax
+ jc L(of1)
+ jz L(of2)
+
+C ================================================================
+ jmp L(m3)
+ ALIGN(16)
+L(lm3): movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(m3): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 4(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ sub $4, un
+ movd %mm6, 8(rp)
+ lea 16(up), up
+ ja L(lm3)
+
+ psrlq $32, %mm6
+ movd %mm6, 12(rp)
+
+ decl vn
+ jz L(done)
+ lea -8(rp), rp
+
+L(ol3): mov 28(%esp), un
+ neg un
+ lea 4(vp), vp
+ movd (vp), %mm7 C read next V limb
+ mov 24(%esp), up
+ lea 16(rp,un,4), rp
+
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ sar $2, un
+ movd 4(up), %mm1
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea -8(up), up
+ xor %edx, %edx C zero edx and CF
+ jmp L(a3)
+
+L(la3): movd 4(up), %mm1
+ adc $0, %edx
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %ebx, (rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ movd %mm0, %ebx
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %eax, 4(rp)
+L(a3): psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %ebx, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ jnz L(la3)
+
+ adc un, %edx C un is zero here
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %eax
+ adc un, %eax
+ add %ebx, 16(rp)
+ adc un, %eax
+ mov %eax, 20(rp)
+
+ decl vn
+ jnz L(ol3)
+ jmp L(done)
+
+C ================================================================
+ ALIGN(16)
+L(lm0): movd (up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+L(of0): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 4(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 12(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ sub $4, un
+ movd %mm6, 12(rp)
+ lea 16(up), up
+ ja L(lm0)
+
+ psrlq $32, %mm6
+ movd %mm6, 16(rp)
+
+ decl vn
+ jz L(done)
+ lea -4(rp), rp
+
+L(ol0): mov 28(%esp), un
+ neg un
+ lea 4(vp), vp
+ movd (vp), %mm7 C read next V limb
+ mov 24(%esp), up
+ lea 20(rp,un,4), rp
+
+ movd (up), %mm1
+ pmuludq %mm7, %mm1
+ sar $2, un
+ movd 4(up), %mm0
+ lea -4(up), up
+ movd %mm1, %eax
+ pmuludq %mm7, %mm0
+ xor %edx, %edx C zero edx and CF
+ jmp L(a0)
+
+L(la0): movd 4(up), %mm1
+ adc $0, %edx
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %ebx, (rp)
+L(a0): psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ movd %mm0, %ebx
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %eax, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %ebx, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ jnz L(la0)
+
+ adc un, %edx C un is zero here
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %eax
+ adc un, %eax
+ add %ebx, 16(rp)
+ adc un, %eax
+ mov %eax, 20(rp)
+
+ decl vn
+ jnz L(ol0)
+ jmp L(done)
+
+C ================================================================
+ ALIGN(16)
+L(lm1): movd -12(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd -8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -12(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(of1): paddq %mm0, %mm6
+ sub $4, un
+ movd %mm6, (rp)
+ lea 16(up), up
+ ja L(lm1)
+
+ psrlq $32, %mm6
+ movd %mm6, 4(rp)
+
+ decl vn
+ jz L(done)
+ lea -16(rp), rp
+
+L(ol1): mov 28(%esp), un
+ neg un
+ lea 4(vp), vp
+ movd (vp), %mm7 C read next V limb
+ mov 24(%esp), up
+ lea 24(rp,un,4), rp
+
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ sar $2, un
+ movd %mm0, %ebx
+ movd 4(up), %mm1
+ pmuludq %mm7, %mm1
+ xor %edx, %edx C zero edx and CF
+ inc un
+ jmp L(a1)
+
+L(la1): movd 4(up), %mm1
+ adc $0, %edx
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+L(a1): psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %ebx, (rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ movd %mm0, %ebx
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %eax, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %ebx, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ jnz L(la1)
+
+ adc un, %edx C un is zero here
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %eax
+ adc un, %eax
+ add %ebx, 16(rp)
+ adc un, %eax
+ mov %eax, 20(rp)
+
+ decl vn
+ jnz L(ol1)
+ jmp L(done)
+
+C ================================================================
+ ALIGN(16)
+L(lm2): movd -8(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(of2): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ sub $4, un
+ movd %mm6, 4(rp)
+ lea 16(up), up
+ ja L(lm2)
+
+ psrlq $32, %mm6
+ movd %mm6, 8(rp)
+
+ decl vn
+ jz L(done)
+ lea -12(rp), rp
+
+L(ol2): mov 28(%esp), un
+ neg un
+ lea 4(vp), vp
+ movd (vp), %mm7 C read next V limb
+ mov 24(%esp), up
+ lea 12(rp,un,4), rp
+
+ movd (up), %mm1
+ pmuludq %mm7, %mm1
+ sar $2, un
+ movd 4(up), %mm0
+ lea 4(up), up
+ movd %mm1, %eax
+ xor %edx, %edx C zero edx and CF
+ jmp L(lo2)
+
+L(la2): movd 4(up), %mm1
+ adc $0, %edx
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %ebx, (rp)
+ psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ movd %mm0, %ebx
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %eax, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %edx
+ movd %mm1, %eax
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %ebx, 8(rp)
+L(lo2): psrlq $32, %mm1
+ adc %edx, %eax
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ jnz L(la2)
+
+ adc un, %edx C un is zero here
+ add %eax, 12(rp)
+ movd %mm0, %ebx
+ psrlq $32, %mm0
+ adc %edx, %ebx
+ movd %mm0, %eax
+ adc un, %eax
+ add %ebx, 16(rp)
+ adc un, %eax
+ mov %eax, 20(rp)
+
+ decl vn
+ jnz L(ol2)
+C jmp L(done)
+
+C ================================================================
+L(done):
+ emms
+ pop %ebp
+ pop %ebx
+ pop %esi
+ pop %edi
+ ret
+EPILOGUE()
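
Structurally, the code above is schoolbook multiplication: an inlined mul_1
pass for the first limb of vp (the entry points L(m3)/L(of0)/L(of1)/L(of2)
are feed-ins for un mod 4), then one addmul_1 pass per remaining vp limb
(the L(ol*)/L(la*) loops). A hedged C sketch of that shape, reusing the
illustrative mul_1_ref and addmul_1_ref helpers sketched earlier:

    #include <stdint.h>
    #include <stddef.h>

    /* {wp, xn+yn} = {xp, xn} * {yp, yn}; requires xn >= yn >= 1.
       mul_1_ref and addmul_1_ref are the reference helpers above. */
    static void mul_basecase_ref(uint32_t *wp, const uint32_t *xp, size_t xn,
                                 const uint32_t *yp, size_t yn)
    {
        wp[xn] = mul_1_ref(wp, xp, xn, yp[0]);        /* first row: plain mul */
        for (size_t i = 1; i < yn; i++)               /* remaining rows: addmul */
            wp[xn + i] = addmul_1_ref(wp + i, xp, xn, yp[i]);
    }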
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/popcount.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/popcount.asm
new file mode 100644
index 0000000..7847aec
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/popcount.asm
@@ -0,0 +1,35 @@
+dnl Intel Atom mpn_popcount -- population count.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+MULFUNC_PROLOGUE(mpn_popcount)
+include_mpn(`x86/pentium4/sse2/popcount.asm')
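
Again a forwarding stub to the Pentium-4 SSE2 code. The semantics are simply
the number of set bits across the limb vector; a scalar C sketch follows
(popcount_ref is an illustrative name):

    #include <stdint.h>
    #include <stddef.h>

    /* Number of 1 bits in {up, n}.  Scalar reference; the included SSE2
       code computes the same sum with vector operations. */
    static size_t popcount_ref(const uint32_t *up, size_t n)
    {
        size_t cnt = 0;
        for (size_t i = 0; i < n; i++) {
            uint32_t x = up[i];
            while (x != 0) {
                x &= x - 1;    /* clear the lowest set bit */
                cnt++;
            }
        }
        return cnt;
    }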
diff --git a/vendor/gmp-6.3.0/mpn/x86/atom/sse2/sqr_basecase.asm b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/sqr_basecase.asm
new file mode 100644
index 0000000..af19ed8
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/x86/atom/sse2/sqr_basecase.asm
@@ -0,0 +1,634 @@
+dnl x86 mpn_sqr_basecase -- square an mpn number, optimised for Atom.
+
+dnl Contributed to the GNU project by Torbjorn Granlund and Marco Bodrato.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C TODO
+C * Check if 'jmp N(%esp)' is well-predicted enough to allow us to combine the
+C 4 large loops into one; we could use it for the outer loop branch.
+C * Optimise code outside of inner loops.
+C * Write combined addmul_1 feed-in and wind-down code, and use it when
+C   iterating the outer loop. ("Overlapping software pipelining")
+C * Perhaps use caller-saves regs for inlined mul_1, allowing us to postpone
+C all pushes.
+C * Perhaps write special code for n < M, for some small M.
+C * Replace inlined addmul_1 with smaller code from aorsmul_1.asm, or perhaps
+C with even less pipelined code.
+C * We run the outer loop until we have a 2-limb by 1-limb addmul_1 left.
+C   Consider breaking out earlier, saving the high cost of short loops.
+
+C void mpn_sqr_basecase (mp_ptr wp,
+C mp_srcptr xp, mp_size_t xn);
+
+define(`rp', `%edi')
+define(`up', `%esi')
+define(`n', `%ecx')
+
+define(`un', `%ebp')
+
+ TEXT
+ ALIGN(16)
+PROLOGUE(mpn_sqr_basecase)
+ push %edi
+ push %esi
+ mov 12(%esp), rp
+ mov 16(%esp), up
+ mov 20(%esp), n
+
+ lea 4(rp), rp C write triangular product starting at rp[1]
+ dec n
+ movd (up), %mm7
+
+ jz L(one)
+ lea 4(up), up
+ push %ebx
+ push %ebp
+ mov n, %eax
+
+ movd (up), %mm0
+ neg n
+ pmuludq %mm7, %mm0
+ pxor %mm6, %mm6
+ mov n, un
+
+ and $3, %eax
+ jz L(of0)
+ cmp $2, %eax
+ jc L(of1)
+ jz L(of2)
+
+C ================================================================
+ jmp L(m3)
+ ALIGN(16)
+L(lm3): movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(m3): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 4(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ add $4, un
+ movd %mm6, 8(rp)
+ lea 16(up), up
+ js L(lm3)
+
+ psrlq $32, %mm6
+ movd %mm6, 12(rp)
+
+ inc n
+C jz L(done)
+ lea -12(up), up
+ lea 4(rp), rp
+ jmp L(ol2)
+
+C ================================================================
+ ALIGN(16)
+L(lm0): movd (up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+L(of0): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 4(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd 12(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, 8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ add $4, un
+ movd %mm6, 12(rp)
+ lea 16(up), up
+ js L(lm0)
+
+ psrlq $32, %mm6
+ movd %mm6, 16(rp)
+
+ inc n
+C jz L(done)
+ lea -8(up), up
+ lea 8(rp), rp
+ jmp L(ol3)
+
+C ================================================================
+ ALIGN(16)
+L(lm1): movd -12(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd -8(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -12(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(of1): paddq %mm0, %mm6
+ add $4, un
+ movd %mm6, (rp)
+ lea 16(up), up
+ js L(lm1)
+
+ psrlq $32, %mm6
+ movd %mm6, 4(rp)
+
+ inc n
+ jz L(done) C goes away when we add special n=2 code
+ lea -20(up), up
+ lea -4(rp), rp
+ jmp L(ol0)
+
+C ================================================================
+ ALIGN(16)
+L(lm2): movd -8(up), %mm0
+ pmuludq %mm7, %mm0
+ psrlq $32, %mm6
+ lea 16(rp), rp
+ paddq %mm0, %mm6
+ movd -4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -8(rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, -4(rp)
+ psrlq $32, %mm6
+L(of2): paddq %mm0, %mm6
+ movd 4(up), %mm0
+ pmuludq %mm7, %mm0
+ movd %mm6, (rp)
+ psrlq $32, %mm6
+ paddq %mm0, %mm6
+ add $4, un
+ movd %mm6, 4(rp)
+ lea 16(up), up
+ js L(lm2)
+
+ psrlq $32, %mm6
+ movd %mm6, 8(rp)
+
+ inc n
+C jz L(done)
+ lea -16(up), up
+C lea (rp), rp
+C jmp L(ol1)
+
+C ================================================================
+
+L(ol1): lea 4(up,n,4), up
+ movd (up), %mm7 C read next U invariant limb
+ lea 8(rp,n,4), rp
+ mov n, un
+
+ movd 4(up), %mm1
+ pmuludq %mm7, %mm1
+ sar $2, un
+ movd %mm1, %ebx
+ inc un
+ jz L(re1)
+
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ xor %edx, %edx C zero edx and CF
+ jmp L(a1)
+
+L(la1): adc $0, %edx
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %eax, (rp)
+L(a1): psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ movd %mm0, %eax
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %ebx, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %eax, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ movd 4(up), %mm1
+ jnz L(la1)
+
+ adc un, %edx C un is zero here
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ adc un, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %eax
+ adc un, %eax
+ add %ebx, 4(rp)
+ adc un, %eax
+ mov %eax, 8(rp)
+
+ inc n
+
+C ================================================================
+
+L(ol0): lea (up,n,4), up
+ movd 4(up), %mm7 C read next U invariant limb
+ lea 4(rp,n,4), rp
+ mov n, un
+
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ sar $2, un
+ movd 12(up), %mm1
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ xor %edx, %edx C zero edx and CF
+ jmp L(a0)
+
+L(la0): adc $0, %edx
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ movd %mm0, %eax
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %ebx, 4(rp)
+L(a0): psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %eax, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ movd 4(up), %mm1
+ jnz L(la0)
+
+ adc un, %edx C un is zero here
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ adc un, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %eax
+ adc un, %eax
+ add %ebx, 4(rp)
+ adc un, %eax
+ mov %eax, 8(rp)
+
+ inc n
+
+C ================================================================
+
+L(ol3): lea 12(up,n,4), up
+ movd -8(up), %mm7 C read next U invariant limb
+ lea (rp,n,4), rp C put rp back
+ mov n, un
+
+ movd -4(up), %mm1
+ pmuludq %mm7, %mm1
+ sar $2, un
+ movd %mm1, %ebx
+ movd (up), %mm0
+ xor %edx, %edx C zero edx and CF
+ jmp L(a3)
+
+L(la3): adc $0, %edx
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ movd %mm0, %eax
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %ebx, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %eax, 8(rp)
+L(a3): psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ movd 4(up), %mm1
+ jnz L(la3)
+
+ adc un, %edx C un is zero here
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ adc un, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %eax
+ adc un, %eax
+ add %ebx, 4(rp)
+ adc un, %eax
+ mov %eax, 8(rp)
+
+ inc n
+
+C ================================================================
+
+L(ol2): lea 8(up,n,4), up
+ movd -4(up), %mm7 C read next U invariant limb
+ lea 12(rp,n,4), rp
+ mov n, un
+
+ movd (up), %mm0
+ pmuludq %mm7, %mm0
+ xor %edx, %edx
+ sar $2, un
+ movd 4(up), %mm1
+ test un, un C clear carry
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ inc un
+ jnz L(a2)
+ jmp L(re2)
+
+L(la2): adc $0, %edx
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+L(a2): psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ movd 8(up), %mm0
+ pmuludq %mm7, %mm0
+ adc $0, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ movd %mm0, %eax
+ movd 12(up), %mm1
+ pmuludq %mm7, %mm1
+ adc $0, %edx
+ add %ebx, 4(rp)
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ lea 16(up), up
+ movd (up), %mm0
+ adc $0, %edx
+ add %eax, 8(rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %edx
+ pmuludq %mm7, %mm0
+ inc un
+ movd 4(up), %mm1
+ jnz L(la2)
+
+ adc un, %edx C un is zero here
+ add %ebx, 12(rp)
+ movd %mm0, %eax
+ pmuludq %mm7, %mm1
+ lea 16(rp), rp
+ psrlq $32, %mm0
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ adc un, %edx
+ add %eax, (rp)
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %eax
+ adc un, %eax
+ add %ebx, 4(rp)
+ adc un, %eax
+ mov %eax, 8(rp)
+
+ inc n
+ jmp L(ol1)
+
+C ================================================================
+L(re2): psrlq $32, %mm0
+ movd (up), %mm7 C read next U invariant limb
+ adc %edx, %eax
+ movd %mm0, %edx
+ movd %mm1, %ebx
+ adc un, %edx
+ add %eax, (rp)
+ lea 4(rp), rp
+ psrlq $32, %mm1
+ adc %edx, %ebx
+ movd %mm1, %eax
+ movd 4(up), %mm1
+ adc un, %eax
+ add %ebx, (rp)
+ pmuludq %mm7, %mm1
+ adc un, %eax
+ mov %eax, 4(rp)
+ movd %mm1, %ebx
+
+L(re1): psrlq $32, %mm1
+ add %ebx, 4(rp)
+ movd %mm1, %eax
+ adc un, %eax
+ xor n, n C make n zeroness assumption below true
+ mov %eax, 8(rp)
+
+L(done): C n is zero here
+ mov 24(%esp), up
+ mov 28(%esp), %eax
+
+ movd (up), %mm0
+ inc %eax
+ pmuludq %mm0, %mm0
+ lea 4(up), up
+ mov 20(%esp), rp
+ shr %eax
+ movd %mm0, (rp)
+ psrlq $32, %mm0
+ lea -12(rp), rp
+ mov %eax, 28(%esp)
+ jnc L(odd)
+
+ movd %mm0, %ebp
+ movd (up), %mm0
+ lea 8(rp), rp
+ pmuludq %mm0, %mm0
+ lea -4(up), up
+ add 8(rp), %ebp
+ movd %mm0, %edx
+ adc 12(rp), %edx
+ rcr n
+ jmp L(ent)
+
+C ALIGN(16) C alignment seems irrelevant
+L(top): movd (up), %mm1
+ adc n, n
+ movd %mm0, %eax
+ pmuludq %mm1, %mm1
+ movd 4(up), %mm0
+ adc (rp), %eax
+ movd %mm1, %ebx
+ pmuludq %mm0, %mm0
+ psrlq $32, %mm1
+ adc 4(rp), %ebx
+ movd %mm1, %ebp
+ movd %mm0, %edx
+ adc 8(rp), %ebp
+ adc 12(rp), %edx
+ rcr n C FIXME: isn't this awfully slow on atom???
+ adc %eax, (rp)
+ adc %ebx, 4(rp)
+L(ent): lea 8(up), up
+ adc %ebp, 8(rp)
+ psrlq $32, %mm0
+ adc %edx, 12(rp)
+L(odd): decl 28(%esp)
+ lea 16(rp), rp
+ jnz L(top)
+
+L(end): adc n, n
+ movd %mm0, %eax
+ adc n, %eax
+ mov %eax, (rp)
+
+L(rtn): emms
+ pop %ebp
+ pop %ebx
+ pop %esi
+ pop %edi
+ ret
+
+L(one): pmuludq %mm7, %mm7
+ movq %mm7, -4(rp)
+ emms
+ pop %esi
+ pop %edi
+ ret
+EPILOGUE()
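
The overall shape of the file: the mul_1/addmul_1 passes build the
off-diagonal triangular sum of up[i]*up[j] (i < j) starting at rp[1], and
the final L(top) loop doubles that triangle while adding in the diagonal
squares up[i]^2, threading the doubling bit through the rcr on n. A hedged
C sketch of the same two-phase structure, reusing the illustrative
addmul_1_ref helper from the aorsmul_1 sketch earlier:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* {wp, 2n} = {xp, n}^2 via triangle-then-double, mirroring the two
       phases of the assembly above.  addmul_1_ref is the reference
       helper sketched earlier. */
    static void sqr_basecase_ref(uint32_t *wp, const uint32_t *xp, size_t n)
    {
        memset(wp, 0, 2 * n * sizeof *wp);
        for (size_t i = 0; i + 1 < n; i++)            /* off-diagonal triangle */
            wp[n + i] = addmul_1_ref(wp + 2*i + 1, xp + i + 1,
                                     n - 1 - i, xp[i]);

        uint32_t cy = 0;
        for (size_t i = 0; i < n; i++) {              /* 2*triangle + squares */
            uint64_t sq = (uint64_t)xp[i] * xp[i];
            uint64_t lo = 2 * (uint64_t)wp[2*i]     + (uint32_t)sq + cy;
            uint64_t hi = 2 * (uint64_t)wp[2*i + 1] + (sq >> 32) + (lo >> 32);
            wp[2*i]     = (uint32_t)lo;
            wp[2*i + 1] = (uint32_t)hi;
            cy = (uint32_t)(hi >> 32);
        }
    }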