path: root/vendor/gmp-6.3.0/mpn/s390_64
author     Thomas Voss <mail@thomasvoss.com>  2024-06-21 23:36:36 +0200
committer  Thomas Voss <mail@thomasvoss.com>  2024-06-21 23:42:26 +0200
commit     a89a14ef5da44684a16b204e7a70460cc8c4922a
tree       b23b4c6b155977909ef508fdae2f48d33d802813 /vendor/gmp-6.3.0/mpn/s390_64
parent     1db63fcedab0b288820d66e100b1877b1a5a8851
Basic constant folding implementation
Diffstat (limited to 'vendor/gmp-6.3.0/mpn/s390_64')
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/README                |  88
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/addmul_1.asm          |  72
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/aorrlsh1_n.asm        | 168
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/aors_n.asm            | 136
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/bdiv_dbm1c.asm        |  65
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/copyd.asm             | 144
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/copyi.asm             |  68
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/gmp-mparam.h          | 181
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/invert_limb.asm       |  94
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/logops_n.asm          | 291
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/lshift.asm            | 196
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/lshiftc.asm           | 207
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/mod_34lsub1.asm       | 109
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/mul_1.asm             |  66
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/mul_basecase.asm      | 130
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/rshift.asm            | 195
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/sec_tabselect.asm     | 139
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/sqr_basecase.asm      | 203
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/sublsh1_n.asm         | 169
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/submul_1.asm          |  70
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z10/gmp-mparam.h      | 233
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm      | 173
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c        | 358
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c        | 476
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h      | 175
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h      | 162
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm       |  76
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm         | 149
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c           |  31
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm         | 121
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm  | 264
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c    | 124
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm      |  69
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c    |  82
-rw-r--r--  vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm      | 168
35 files changed, 5452 insertions(+), 0 deletions(-)
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/README b/vendor/gmp-6.3.0/mpn/s390_64/README
new file mode 100644
index 0000000..53702db
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/README
@@ -0,0 +1,88 @@
+Copyright 2011 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/.
+
+
+
+There are 5 generations of 64-bit s390 processors: z900, z990, z9,
+z10, and z196. The current GMP code was optimised for the two oldest,
+z900 and z990.
+
+
+mpn_copyi
+
+This code makes use of a loop around MVC. It almost surely runs very
+close to optimally. A small improvement could be made by using a single
+MVC for a 256-byte copy, where we currently use two (an extra MVC is
+issued when copying any multiple of 256 bytes).
+
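As a rough, editorial C model of the chunking described above (not part
of the GMP sources; memcpy stands in for MVC):

#include <string.h>
#include <stddef.h>

/* Copy nbytes as whole 256-byte blocks plus one remainder block of
   1..256 bytes; the asm performs the remainder with a single EX'd
   MVC of length ((nbytes-1) mod 256) + 1.  */
static void
copy_chunked (unsigned char *rp, const unsigned char *up, size_t nbytes)
{
  for (; nbytes >= 256; nbytes -= 256)
    {
      memcpy (rp, up, 256);       /* one maximal-length MVC */
      rp += 256;
      up += 256;
    }
  if (nbytes != 0)
    memcpy (rp, up, nbytes);      /* EX of MVC for the tail */
}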
+
+mpn_copyd
+
+We have tried several feed-in variants here: a branch tree, a jump
+table, and computed goto. The fastest (on z990) turned out to be
+computed goto.
+
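A computed-goto feed-in can be pictured with GCC's labels-as-values
extension; a hypothetical sketch (label and function names made up):

#include <stddef.h>

/* Jump directly to the handler for n mod 4 instead of walking a
   branch tree; each handler would fall into the unrolled main loop. */
static void
feed_in_demo (size_t n)
{
  static void *const entry[4] = { &&b0, &&b1, &&b2, &&b3 };
  goto *entry[n & 3];
 b0: return;  /* n = 0 (mod 4) */
 b1: return;  /* n = 1 (mod 4) */
 b2: return;  /* n = 2 (mod 4) */
 b3: return;  /* n = 3 (mod 4) */
}
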
+An approach not tried is EX of LMG and STMG, modifying the register set
+on the fly. Using that trick, we could completely avoid using
+separate feed-in paths.
+
+
+mpn_lshift, mpn_rshift
+
+The current code runs at pipeline decode bandwidth on z990.
+
+
+mpn_add_n, mpn_sub_n
+
+The current code is 4-way unrolled. It should be unrolled more, at
+least 8x, in order to reach 2.5 c/l.
+
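What the loop computes, independent of unrolling, can be modelled in
portable C (an editorial sketch assuming 64-bit limbs and a compiler
with unsigned __int128; the asm instead keeps the carry in the
condition code across ALCG):

typedef unsigned long mp_limb_t;

/* rp[] = up[] + vp[], returning the final carry. */
static mp_limb_t
add_n_model (mp_limb_t *rp, const mp_limb_t *up, const mp_limb_t *vp, long n)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < n; i++)
    {
      unsigned __int128 t = (unsigned __int128) up[i] + vp[i] + cy;
      rp[i] = (mp_limb_t) t;           /* low 64 bits */
      cy    = (mp_limb_t) (t >> 64);   /* carry out   */
    }
  return cy;
}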
+
+mpn_mul_1, mpn_addmul_1, mpn_submul_1
+
+The current code is very naive, but due to the non-pipelined nature of
+MLGR on z900 and z990, more sophisticated code would not gain much.
+
+On z10 one would need to cluster at least 4 MLGR instructions together
+in order to reduce stalling.
+
+On z196, one surely wants to use unrolling and pipelining, to perhaps
+reach around 12 c/l. A major issue here and on z10 is ALCGR's 3-cycle
+stall.
+
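The "naive" structure amounts to one 64x64->128 multiply (MLGR) per
limb plus carry accumulation; an editorial C model under the same
assumptions as the sketch above:

typedef unsigned long mp_limb_t;

/* rp[] += up[] * v0, returning the final carry limb.  The 128-bit
   sum cannot overflow: up[i]*v0 + rp[i] + cy <= 2^128 - 1. */
static mp_limb_t
addmul_1_model (mp_limb_t *rp, const mp_limb_t *up, long n, mp_limb_t v0)
{
  mp_limb_t cy = 0;
  for (long i = 0; i < n; i++)
    {
      unsigned __int128 t = (unsigned __int128) up[i] * v0 + rp[i] + cy;
      rp[i] = (mp_limb_t) t;
      cy = (mp_limb_t) (t >> 64);
    }
  return cy;
}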
+
+mpn_mul_2, mpn_addmul_2
+
+At least for older machines (z900, z990) with very slow MLGR, we
+should use Karatsuba's algorithm on 2-limb units, making mul_2 and
+addmul_2 the main multiplication primitives. The newer machines might
+benefit less from this approach, perhaps in particular z10, where MLGR
+clustering is more important.
+
+With Karatsuba, one could hope for around 16 cycles per accumulated
+128-bit cross product on z990.
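
The saving can be seen in a toy model scaled down to 16-bit "limbs" so
that every intermediate fits in uint64_t (real code would use 64-bit
limbs and must handle the carries this sidesteps):

#include <stdint.h>

/* 2x2-"limb" Karatsuba: three multiplies (p0, p2 and the middle one)
   replace the four of the schoolbook method. */
static uint64_t
mul2_karatsuba (uint32_t u, uint32_t v)
{
  uint64_t u0 = u & 0xffff, u1 = u >> 16;
  uint64_t v0 = v & 0xffff, v1 = v >> 16;
  uint64_t p0 = u0 * v0;                          /* low product   */
  uint64_t p2 = u1 * v1;                          /* high product  */
  uint64_t p1 = (u0 + u1) * (v0 + v1) - p0 - p2;  /* u0*v1 + u1*v0 */
  return (p2 << 32) + (p1 << 16) + p0;            /* == (uint64_t)u * v */
}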
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/addmul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/addmul_1.asm
new file mode 100644
index 0000000..84cca12
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/addmul_1.asm
@@ -0,0 +1,72 @@
+dnl S/390-64 mpn_addmul_1
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 34
+C z990 23
+C z9 ?
+C z10 28
+C z196 ?
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`v0', `%r5')
+
+define(`z', `%r9')
+
+ASM_START()
+PROLOGUE(mpn_addmul_1)
+ stmg %r9, %r12, 72(%r15)
+ lghi %r12, 0 C zero index register
+ aghi %r12, 0 C clear carry flag
+ lghi %r11, 0 C clear carry limb
+ lghi z, 0 C keep register zero
+
+L(top): lg %r1, 0(%r12,up)
+ lg %r10, 0(%r12,rp)
+ mlgr %r0, v0
+ alcgr %r1, %r10
+ alcgr %r0, z
+ algr %r1, %r11
+ lgr %r11, %r0
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg n, L(top)
+
+ lghi %r2, 0
+ alcgr %r2, %r11
+
+ lmg %r9, %r12, 72(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/aorrlsh1_n.asm b/vendor/gmp-6.3.0/mpn/s390_64/aorrlsh1_n.asm
new file mode 100644
index 0000000..697259e
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/aorrlsh1_n.asm
@@ -0,0 +1,168 @@
+dnl S/390-64 mpn_addlsh1_n and mpn_rsblsh1_n.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 9
+C z990 4.75
+C z9 ?
+C z10 11
+C z196 ?
+
+C TODO
+C * Optimise for small n, avoid 'la' like in aors_n.asm.
+C * Tune to reach 3.5 c/l. For addlsh1, we could let the main alcgr propagate
+C carry to the lsh1 alcgr.
+C * Compute RETVAL for sublsh1_n less stupidly.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`vp', `%r4')
+define(`n', `%r5')
+
+ifdef(`OPERATION_addlsh1_n',`
+ define(ADSB, alg)
+ define(ADSBC, alcg)
+ define(INITCY, `lghi %r9, -1')
+ define(RETVAL, `la %r2, 2(%r1,%r9)')
+ define(func, mpn_addlsh1_n)
+')
+ifdef(`OPERATION_rsblsh1_n',`
+ define(ADSB, slg)
+ define(ADSBC, slbg)
+ define(INITCY, `lghi %r9, 0')
+ define(RETVAL,`dnl
+ algr %r1, %r9
+ lghi %r2, 1
+ algr %r2, %r1')
+ define(func, mpn_rsblsh1_n)
+')
+
+MULFUNC_PROLOGUE(mpn_addlsh1_n mpn_rsblsh1_n)
+
+ASM_START()
+PROLOGUE(func)
+ stmg %r6, %r9, 48(%r15)
+
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r0, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(b0)
+
+L(b3): lmg %r5, %r7, 0(vp)
+ la vp, 24(vp)
+
+ algr %r5, %r5
+ alcgr %r6, %r6
+ alcgr %r7, %r7
+ slbgr %r1, %r1
+
+ ADSB %r5, 0(up)
+ ADSBC %r6, 8(up)
+ ADSBC %r7, 16(up)
+ la up, 24(up)
+ slbgr %r9, %r9
+
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(b0): lghi %r1, -1
+ INITCY
+ j L(top)
+
+L(b1): lg %r5, 0(vp)
+ la vp, 8(vp)
+
+ algr %r5, %r5
+ slbgr %r1, %r1
+ ADSB %r5, 0(up)
+ la up, 8(up)
+ slbgr %r9, %r9
+
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(b2): lmg %r5, %r6, 0(vp)
+ la vp, 16(vp)
+
+ algr %r5, %r5
+ alcgr %r6, %r6
+ slbgr %r1, %r1
+
+ ADSB %r5, 0(up)
+ ADSBC %r6, 8(up)
+ la up, 16(up)
+ slbgr %r9, %r9
+
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(top): lmg %r5, %r8, 0(vp)
+ la vp, 32(vp)
+
+ aghi %r1, 1 C restore carry
+
+ alcgr %r5, %r5
+ alcgr %r6, %r6
+ alcgr %r7, %r7
+ alcgr %r8, %r8
+
+ slbgr %r1, %r1 C save carry
+
+ aghi %r9, 1 C restore carry
+
+ ADSBC %r5, 0(up)
+ ADSBC %r6, 8(up)
+ ADSBC %r7, 16(up)
+ ADSBC %r8, 24(up)
+ la up, 32(up)
+
+ slbgr %r9, %r9 C save carry
+
+ stmg %r5, %r8, 0(rp)
+ la rp, 32(rp)
+ brctg %r0, L(top)
+
+L(end): RETVAL
+ lmg %r6, %r9, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/aors_n.asm b/vendor/gmp-6.3.0/mpn/s390_64/aors_n.asm
new file mode 100644
index 0000000..a3c3ca7
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/aors_n.asm
@@ -0,0 +1,136 @@
+dnl S/390-64 mpn_add_n and mpn_sub_n.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 5.5
+C z990 3
+C z9 ?
+C z10 6
+C z196 ?
+
+C TODO
+C * Optimise for small n
+C * Use r0 and save/restore one less register
+C * Using logops_n's v1 inner loop operand order makes the loop about 20%
+C faster, at the expense of highly alignment-dependent performance.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`vp', `%r4')
+define(`n', `%r5')
+
+ifdef(`OPERATION_add_n', `
+ define(ADSB, alg)
+ define(ADSBCR, alcgr)
+ define(ADSBC, alcg)
+ define(RETVAL,`dnl
+ lghi %r2, 0
+ alcgr %r2, %r2')
+ define(func, mpn_add_n)
+ define(func_nc, mpn_add_nc)')
+ifdef(`OPERATION_sub_n', `
+ define(ADSB, slg)
+ define(ADSBCR, slbgr)
+ define(ADSBC, slbg)
+ define(RETVAL,`dnl
+ slbgr %r2, %r2
+ lcgr %r2, %r2')
+ define(func, mpn_sub_n)
+ define(func_nc, mpn_sub_nc)')
+
+MULFUNC_PROLOGUE(mpn_add_n mpn_sub_n)
+
+ASM_START()
+PROLOGUE(func)
+ stmg %r6, %r8, 48(%r15)
+
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r1, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(b0)
+
+L(b3): lmg %r5, %r7, 0(up)
+ la up, 24(up)
+ ADSB %r5, 0(vp)
+ ADSBC %r6, 8(vp)
+ ADSBC %r7, 16(vp)
+ la vp, 24(vp)
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ brctg %r1, L(top)
+ j L(end)
+
+L(b0): lmg %r5, %r8, 0(up) C This redundant insn is no mistake,
+ la up, 32(up) C it is needed to make main loop run
+ ADSB %r5, 0(vp) C fast for n = 0 (mod 4).
+ ADSBC %r6, 8(vp)
+ j L(m0)
+
+L(b1): lg %r5, 0(up)
+ la up, 8(up)
+ ADSB %r5, 0(vp)
+ la vp, 8(vp)
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ brctg %r1, L(top)
+ j L(end)
+
+L(b2): lmg %r5, %r6, 0(up)
+ la up, 16(up)
+ ADSB %r5, 0(vp)
+ ADSBC %r6, 8(vp)
+ la vp, 16(vp)
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ brctg %r1, L(top)
+ j L(end)
+
+L(top): lmg %r5, %r8, 0(up)
+ la up, 32(up)
+ ADSBC %r5, 0(vp)
+ ADSBC %r6, 8(vp)
+L(m0): ADSBC %r7, 16(vp)
+ ADSBC %r8, 24(vp)
+ la vp, 32(vp)
+ stmg %r5, %r8, 0(rp)
+ la rp, 32(rp)
+ brctg %r1, L(top)
+
+L(end): RETVAL
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/bdiv_dbm1c.asm b/vendor/gmp-6.3.0/mpn/s390_64/bdiv_dbm1c.asm
new file mode 100644
index 0000000..35e900a
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/bdiv_dbm1c.asm
@@ -0,0 +1,65 @@
+dnl S/390-64 mpn_bdiv_dbm1c
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 29
+C z990 22
+C z9 ?
+C z10 19
+C z196 ?
+
+C INPUT PARAMETERS
+define(`qp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`bd', `%r5')
+define(`cy', `%r6')
+
+ASM_START()
+ TEXT
+ ALIGN(16)
+PROLOGUE(mpn_bdiv_dbm1c)
+ stmg %r6, %r7, 48(%r15)
+ lghi %r7, 0 C zero index register
+
+L(top): lg %r1, 0(%r7,up)
+ mlgr %r0, bd
+ slgr %r6, %r1
+ stg %r6, 0(%r7,qp)
+ la %r7, 8(%r7)
+ slbgr %r6, %r0
+ brctg n, L(top)
+
+ lgr %r2, %r6
+ lmg %r6, %r7, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/copyd.asm b/vendor/gmp-6.3.0/mpn/s390_64/copyd.asm
new file mode 100644
index 0000000..8631e19
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/copyd.asm
@@ -0,0 +1,144 @@
+dnl S/390-64 mpn_copyd
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 2.67
+C z990 1.5
+C z9 ?
+C z10 1.8
+C z196 ?
+
+C FIXME:
+C * Avoid saving/restoring callee-saves registers for n < 3. This could be
+C done by setting rp=r1, up=r2, i=r0, and using r3,r4,r5 as scratch regs.
+C We could then use r3...r10 in the main loop.
+C * Could we use some EX trick, modifying lmg/stmg, for the feed-in code?
+
+C INPUT PARAMETERS
+define(`rp_param', `%r2')
+define(`up_param', `%r3')
+define(`n', `%r4')
+
+define(`rp', `%r8')
+define(`up', `%r9')
+
+ASM_START()
+PROLOGUE(mpn_copyd)
+ stmg %r6, %r11, 48(%r15)
+
+ sllg %r1, n, 3
+ la %r10, 8(n)
+ aghi %r1, -64
+ srlg %r10, %r10, 3
+ lghi %r11, -64
+
+ la rp, 0(%r1,rp_param) C FIXME use lay on z990 and later
+ la up, 0(%r1,up_param) C FIXME use lay on z990 and later
+
+ lghi %r7, 7
+ ngr %r7, n C n mod 8
+ cghi %r7, 2
+ jh L(b34567)
+ cghi %r7, 1
+ je L(b1)
+ jh L(b2)
+
+L(b0): brctg %r10, L(top)
+ j L(end)
+
+L(b1): lg %r0, 56(up)
+ aghi up, -8
+ stg %r0, 56(rp)
+ aghi rp, -8
+ brctg %r10, L(top)
+ j L(end)
+
+L(b2): lmg %r0, %r1, 48(up)
+ aghi up, -16
+ stmg %r0, %r1, 48(rp)
+ aghi rp, -16
+ brctg %r10, L(top)
+ j L(end)
+
+L(b34567):
+ cghi %r7, 4
+ jl L(b3)
+ je L(b4)
+ cghi %r7, 6
+ je L(b6)
+ jh L(b7)
+
+L(b5): lmg %r0, %r4, 24(up)
+ aghi up, -40
+ stmg %r0, %r4, 24(rp)
+ aghi rp, -40
+ brctg %r10, L(top)
+ j L(end)
+
+L(b3): lmg %r0, %r2, 40(up)
+ aghi up, -24
+ stmg %r0, %r2, 40(rp)
+ aghi rp, -24
+ brctg %r10, L(top)
+ j L(end)
+
+L(b4): lmg %r0, %r3, 32(up)
+ aghi up, -32
+ stmg %r0, %r3, 32(rp)
+ aghi rp, -32
+ brctg %r10, L(top)
+ j L(end)
+
+L(b6): lmg %r0, %r5, 16(up)
+ aghi up, -48
+ stmg %r0, %r5, 16(rp)
+ aghi rp, -48
+ brctg %r10, L(top)
+ j L(end)
+
+L(b7): lmg %r0, %r6, 8(up)
+ aghi up, -56
+ stmg %r0, %r6, 8(rp)
+ aghi rp, -56
+ brctg %r10, L(top)
+ j L(end)
+
+L(top): lmg %r0, %r7, 0(up)
+ la up, 0(%r11,up)
+ stmg %r0, %r7, 0(rp)
+ la rp, 0(%r11,rp)
+ brctg %r10, L(top)
+
+L(end): lmg %r6, %r11, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/copyi.asm b/vendor/gmp-6.3.0/mpn/s390_64/copyi.asm
new file mode 100644
index 0000000..bfb8881
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/copyi.asm
@@ -0,0 +1,68 @@
+dnl S/390-64 mpn_copyi
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 1.25
+C z990 0.75
+C z9 ?
+C z10 1
+C z196 ?
+
+C NOTE
+C * This is based on GNU libc memcpy which was written by Martin Schwidefsky.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+
+ASM_START()
+PROLOGUE(mpn_copyi)
+ ltgr %r4, %r4
+ sllg %r4, %r4, 3
+ je L(rtn)
+ aghi %r4, -1
+ srlg %r5, %r4, 8
+ ltgr %r5, %r5 C < 256 bytes to copy?
+ je L(1)
+
+L(top): mvc 0(256, rp), 0(up)
+ la rp, 256(rp)
+ la up, 256(up)
+ brctg %r5, L(top)
+
+L(1): bras %r5, L(2) C make r5 point to mvc insn
+ mvc 0(1, rp), 0(up)
+L(2): ex %r4, 0(%r5) C execute mvc with length ((n-1) mod 256)+1
+L(rtn): br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/s390_64/gmp-mparam.h
new file mode 100644
index 0000000..062c3d2
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/gmp-mparam.h
@@ -0,0 +1,181 @@
+/* S/390-64 gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 1991, 1993, 1994, 2000-2011 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+/* 4400 MHz z196 */
+/* Generated by tuneup.c, 2017-01-02, gcc 4.9 */
+
+#define DIVREM_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIVREM_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_1P_METHOD 2
+#define MOD_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1N_TO_MOD_1_1_THRESHOLD 14
+#define MOD_1U_TO_MOD_1_1_THRESHOLD 15
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD 0 /* never mpn_mod_1_1p */
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD 31
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 2
+#define USE_PREINV_DIVREM_1 0
+#define DIV_QR_1N_PI1_METHOD 1
+#define DIV_QR_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_2_PI2_THRESHOLD 10
+#define DIVEXACT_1_THRESHOLD 4
+#define BMOD_1_TO_MOD_1_THRESHOLD 0 /* always */
+
+#define DIV_1_VS_MUL_1_PERCENT 317
+
+#define MUL_TOOM22_THRESHOLD 14
+#define MUL_TOOM33_THRESHOLD 45
+#define MUL_TOOM44_THRESHOLD 121
+#define MUL_TOOM6H_THRESHOLD 177
+#define MUL_TOOM8H_THRESHOLD 260
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD 81
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD 78
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD 81
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD 88
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD 118
+
+#define SQR_BASECASE_THRESHOLD 0 /* always (native) */
+#define SQR_TOOM2_THRESHOLD 13
+#define SQR_TOOM3_THRESHOLD 89
+#define SQR_TOOM4_THRESHOLD 242
+#define SQR_TOOM6_THRESHOLD 363
+#define SQR_TOOM8_THRESHOLD 482
+
+#define MULMID_TOOM42_THRESHOLD 38
+
+#define MULMOD_BNM1_THRESHOLD 9
+#define SQRMOD_BNM1_THRESHOLD 9
+
+#define MUL_FFT_MODF_THRESHOLD 236 /* k = 5 */
+#define MUL_FFT_TABLE3 \
+ { { 236, 5}, { 11, 6}, { 6, 5}, { 13, 6}, \
+ { 13, 7}, { 7, 6}, { 15, 7}, { 8, 6}, \
+ { 17, 7}, { 17, 8}, { 9, 7}, { 19, 8}, \
+ { 13, 9}, { 7, 8}, { 19, 9}, { 11, 8}, \
+ { 25,10}, { 7, 9}, { 15, 8}, { 33, 9}, \
+ { 19, 8}, { 39, 9}, { 23,10}, { 15, 9}, \
+ { 39,10}, { 23, 9}, { 47,11}, { 15,10}, \
+ { 31, 9}, { 67,10}, { 39, 9}, { 79,10}, \
+ { 47,11}, { 31,10}, { 63, 9}, { 127, 8}, \
+ { 255,10}, { 71, 9}, { 143, 8}, { 287, 7}, \
+ { 575, 9}, { 159,11}, { 47,12}, { 31,11}, \
+ { 63,10}, { 127, 9}, { 255, 8}, { 511,10}, \
+ { 143, 9}, { 287,11}, { 79,10}, { 159, 9}, \
+ { 319, 8}, { 639,10}, { 175, 9}, { 351, 8}, \
+ { 703,11}, { 95,10}, { 191, 9}, { 383, 8}, \
+ { 767,10}, { 207, 9}, { 415, 8}, { 831,10}, \
+ { 223,12}, { 63,11}, { 127,10}, { 255, 9}, \
+ { 511,11}, { 143,10}, { 287, 9}, { 575, 8}, \
+ { 1151,11}, { 159,10}, { 319, 9}, { 639,11}, \
+ { 175,10}, { 351, 9}, { 703, 8}, { 1407,12}, \
+ { 95,11}, { 191,10}, { 383, 9}, { 767,11}, \
+ { 207,10}, { 415, 9}, { 831,11}, { 223,13}, \
+ { 8192,14}, { 16384,15}, { 32768,16}, { 65536,17}, \
+ { 131072,18}, { 262144,19}, { 524288,20}, {1048576,21}, \
+ {2097152,22}, {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 99
+#define MUL_FFT_THRESHOLD 2240
+
+#define SQR_FFT_MODF_THRESHOLD 220 /* k = 5 */
+#define SQR_FFT_TABLE3 \
+ { { 220, 5}, { 7, 4}, { 15, 5}, { 13, 6}, \
+ { 7, 5}, { 15, 6}, { 8, 5}, { 17, 6}, \
+ { 13, 7}, { 7, 6}, { 15, 7}, { 8, 6}, \
+ { 17, 7}, { 9, 6}, { 19, 7}, { 13, 8}, \
+ { 7, 7}, { 17, 8}, { 9, 7}, { 20, 8}, \
+ { 11, 7}, { 23, 8}, { 13, 9}, { 7, 8}, \
+ { 19, 9}, { 11, 8}, { 23, 9}, { 15, 8}, \
+ { 31, 9}, { 19, 8}, { 39, 9}, { 23,10}, \
+ { 15, 9}, { 39,10}, { 23,11}, { 15,10}, \
+ { 31, 9}, { 63,10}, { 47,11}, { 31,10}, \
+ { 63, 9}, { 127, 8}, { 255,10}, { 71, 9}, \
+ { 143, 8}, { 287,11}, { 47,12}, { 31,11}, \
+ { 63,10}, { 127, 9}, { 255, 8}, { 511,10}, \
+ { 143, 9}, { 287, 8}, { 575, 7}, { 1151,10}, \
+ { 159, 9}, { 319, 8}, { 639,10}, { 175, 9}, \
+ { 351, 8}, { 703,11}, { 95,10}, { 191, 9}, \
+ { 383,12}, { 63,11}, { 127,10}, { 255, 9}, \
+ { 511,11}, { 143,10}, { 287, 9}, { 575, 8}, \
+ { 1151,11}, { 159,10}, { 319, 9}, { 639,11}, \
+ { 175,10}, { 351, 9}, { 703,12}, { 95,11}, \
+ { 191,10}, { 383,11}, { 207,13}, { 8192,14}, \
+ { 16384,15}, { 32768,16}, { 65536,17}, { 131072,18}, \
+ { 262144,19}, { 524288,20}, {1048576,21}, {2097152,22}, \
+ {4194304,23}, {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 94
+#define SQR_FFT_THRESHOLD 1728
+
+#define MULLO_BASECASE_THRESHOLD 0 /* always */
+#define MULLO_DC_THRESHOLD 38
+#define MULLO_MUL_N_THRESHOLD 4392
+#define SQRLO_BASECASE_THRESHOLD 0 /* always */
+#define SQRLO_DC_THRESHOLD 54
+#define SQRLO_SQR_THRESHOLD 3176
+
+#define DC_DIV_QR_THRESHOLD 42
+#define DC_DIVAPPR_Q_THRESHOLD 148
+#define DC_BDIV_QR_THRESHOLD 46
+#define DC_BDIV_Q_THRESHOLD 107
+
+#define INV_MULMOD_BNM1_THRESHOLD 34
+#define INV_NEWTON_THRESHOLD 163
+#define INV_APPR_THRESHOLD 131
+
+#define BINV_NEWTON_THRESHOLD 183
+#define REDC_1_TO_REDC_N_THRESHOLD 43
+
+#define MU_DIV_QR_THRESHOLD 807
+#define MU_DIVAPPR_Q_THRESHOLD 942
+#define MUPI_DIV_QR_THRESHOLD 78
+#define MU_BDIV_QR_THRESHOLD 680
+#define MU_BDIV_Q_THRESHOLD 828
+
+#define POWM_SEC_TABLE 3,35,285,1603
+
+#define GET_STR_DC_THRESHOLD 12
+#define GET_STR_PRECOMPUTE_THRESHOLD 21
+#define SET_STR_DC_THRESHOLD 1391
+#define SET_STR_PRECOMPUTE_THRESHOLD 2872
+
+#define FAC_DSC_THRESHOLD 151
+#define FAC_ODD_THRESHOLD 23
+
+#define MATRIX22_STRASSEN_THRESHOLD 15
+#define HGCD_THRESHOLD 135
+#define HGCD_APPR_THRESHOLD 169
+#define HGCD_REDUCE_THRESHOLD 1437
+#define GCD_DC_THRESHOLD 469
+#define GCDEXT_DC_THRESHOLD 342
+#define JACOBI_BASE_METHOD 4
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/invert_limb.asm b/vendor/gmp-6.3.0/mpn/s390_64/invert_limb.asm
new file mode 100644
index 0000000..edcebdd
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/invert_limb.asm
@@ -0,0 +1,94 @@
+dnl S/390-64 mpn_invert_limb
+
+dnl Contributed to the GNU project by Torbjorn Granlund.
+
+dnl Copyright 2011, 2013 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 142
+C z990 86
+C z9 ?
+C z10 120
+C z196 ?
+
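C Added note (a hedged, editorial reading; not from the original
C source): the sequence below appears to follow the usual GMP recipe
C for invert_limb.  approx_tab supplies an initial reciprocal
C approximation from the high bits of the normalized divisor d, and
C the multiply/shift steps refine it Newton-style into the full
C 64-bit inverse floor((B^2-1)/d) - B, where B = 2^64.
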
+ASM_START()
+ TEXT
+ ALIGN(16)
+PROLOGUE(mpn_invert_limb)
+ stg %r9, 72(%r15)
+ srlg %r9, %r2, 55
+ agr %r9, %r9
+ larl %r4, approx_tab-512
+ srlg %r3, %r2, 24
+ aghi %r3, 1
+ lghi %r5, 1
+ llgh %r4, 0(%r9, %r4)
+ sllg %r9, %r4, 11
+ msgr %r4, %r4
+ msgr %r4, %r3
+ srlg %r4, %r4, 40
+ aghi %r9, -1
+ sgr %r9, %r4
+ sllg %r0, %r9, 60
+ sllg %r1, %r9, 13
+ msgr %r9, %r9
+ msgr %r9, %r3
+ sgr %r0, %r9
+ ngr %r5, %r2
+ srlg %r4, %r2, 1
+ srlg %r3, %r0, 47
+ agr %r3, %r1
+ agr %r4, %r5
+ msgr %r4, %r3
+ srlg %r1, %r3, 1
+ lcgr %r5, %r5
+ ngr %r1, %r5
+ sgr %r1, %r4
+ mlgr %r0, %r3
+ srlg %r9, %r0, 1
+ sllg %r4, %r3, 31
+ agr %r4, %r9
+ lgr %r1, %r4
+ mlgr %r0, %r2
+ algr %r1, %r2
+ alcgr %r0, %r2
+ lgr %r2, %r4
+ sgr %r2, %r0
+ lg %r9, 72(%r15)
+ br %r14
+EPILOGUE()
+ RODATA
+ ALIGN(2)
+approx_tab:
+forloop(i,256,512-1,dnl
+` .word eval(0x7fd00/i)
+')dnl
+ASM_END()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/logops_n.asm b/vendor/gmp-6.3.0/mpn/s390_64/logops_n.asm
new file mode 100644
index 0000000..914cfb6
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/logops_n.asm
@@ -0,0 +1,291 @@
+dnl S/390-64 logops.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C            cycles/limb    variant 1       variant 2   variant 3
+C                         rp!=up   rp=up
+C z900                     4.5     2.25        5.5         5.5
+C z990                     2.75    2           3.25        3.25
+C z9                        ?                   ?           ?
+C z10                      3.25                3.75        3.75
+C z196                      ?                   ?           ?
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`vp', `%r4')
+define(`n', `%r5')
+
+ifdef(`OPERATION_and_n',`
+ define(`func',`mpn_and_n')
+ define(`VARIANT_1')
+ define(`LOGOPC',`nc')
+ define(`LOGOP',`ng')')
+ifdef(`OPERATION_andn_n',`
+ define(`func',`mpn_andn_n')
+ define(`VARIANT_2')
+ define(`LOGOP',`ng')')
+ifdef(`OPERATION_nand_n',`
+ define(`func',`mpn_nand_n')
+ define(`VARIANT_3')
+ define(`LOGOP',`ng')')
+ifdef(`OPERATION_ior_n',`
+ define(`func',`mpn_ior_n')
+ define(`VARIANT_1')
+ define(`LOGOPC',`oc')
+ define(`LOGOP',`og')')
+ifdef(`OPERATION_iorn_n',`
+ define(`func',`mpn_iorn_n')
+ define(`VARIANT_2')
+ define(`LOGOP',`og')')
+ifdef(`OPERATION_nior_n',`
+ define(`func',`mpn_nior_n')
+ define(`VARIANT_3')
+ define(`LOGOP',`og')')
+ifdef(`OPERATION_xor_n',`
+ define(`func',`mpn_xor_n')
+ define(`VARIANT_1')
+ define(`LOGOPC',`xc')
+ define(`LOGOP',`xg')')
+ifdef(`OPERATION_xnor_n',`
+ define(`func',`mpn_xnor_n')
+ define(`VARIANT_2')
+ define(`LOGOP',`xg')')
+
+MULFUNC_PROLOGUE(mpn_and_n mpn_andn_n mpn_nand_n mpn_ior_n mpn_iorn_n mpn_nior_n mpn_xor_n mpn_xnor_n)
+
+ASM_START()
+PROLOGUE(func)
+ifdef(`VARIANT_1',`
+ cgr rp, up
+ jne L(normal)
+
+ sllg n, n, 3
+ aghi n, -1
+ srlg %r1, n, 8
+ ltgr %r1, %r1 C < 256 bytes to copy?
+ je L(1)
+
+L(tp): LOGOPC 0(256, rp), 0(vp)
+ la rp, 256(rp)
+ la vp, 256(vp)
+ brctg %r1, L(tp)
+
+L(1): bras %r1, L(2) C make r1 point to logop insn
+ LOGOPC 0(1, rp), 0(vp)
+L(2): ex n, 0(%r1) C execute logop with length ((n-1) mod 256)+1
+L(rtn): br %r14
+
+
+L(normal):
+ stmg %r6, %r8, 48(%r15)
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r0, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(top)
+
+L(b3): lmg %r5, %r7, 0(up)
+ la up, 24(up)
+ LOGOP %r5, 0(vp)
+ LOGOP %r6, 8(vp)
+ LOGOP %r7, 16(vp)
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ la vp, 24(vp)
+ j L(mid)
+
+L(b1): lg %r5, 0(up)
+ la up, 8(up)
+ LOGOP %r5, 0(vp)
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ la vp, 8(vp)
+ j L(mid)
+
+L(b2): lmg %r5, %r6, 0(up)
+ la up, 16(up)
+ LOGOP %r5, 0(vp)
+ LOGOP %r6, 8(vp)
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ la vp, 16(vp)
+ j L(mid)
+
+L(top): lmg %r5, %r8, 0(up)
+ la up, 32(up)
+ LOGOP %r5, 0(vp)
+ LOGOP %r6, 8(vp)
+ LOGOP %r7, 16(vp)
+ LOGOP %r8, 24(vp)
+ stmg %r5, %r8, 0(rp)
+ la rp, 32(rp)
+ la vp, 32(vp)
+L(mid): brctg %r0, L(top)
+
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+')
+
+ifdef(`VARIANT_2',`
+ stmg %r6, %r8, 48(%r15)
+ lghi %r1, -1
+
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r0, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(top)
+
+L(b3): lmg %r5, %r7, 0(vp)
+ la vp, 24(vp)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ xgr %r7, %r1
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ LOGOP %r7, 16(up)
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ la up, 24(up)
+ j L(mid)
+
+L(b1): lg %r5, 0(vp)
+ la vp, 8(vp)
+ xgr %r5, %r1
+ LOGOP %r5, 0(up)
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ la up, 8(up)
+ j L(mid)
+
+L(b2): lmg %r5, %r6, 0(vp)
+ la vp, 16(vp)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ la up, 16(up)
+ j L(mid)
+
+L(top): lmg %r5, %r8, 0(vp)
+ la vp, 32(vp)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ xgr %r7, %r1
+ xgr %r8, %r1
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ LOGOP %r7, 16(up)
+ LOGOP %r8, 24(up)
+ la up, 32(up)
+ stmg %r5, %r8, 0(rp)
+ la rp, 32(rp)
+L(mid): brctg %r0, L(top)
+
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+')
+
+ifdef(`VARIANT_3',`
+ stmg %r6, %r8, 48(%r15)
+ lghi %r1, -1
+
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r0, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(top)
+
+L(b3): lmg %r5, %r7, 0(vp)
+ la vp, 24(vp)
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ LOGOP %r7, 16(up)
+ xgr %r7, %r1
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ la up, 24(up)
+ j L(mid)
+
+L(b1): lg %r5, 0(vp)
+ la vp, 8(vp)
+ LOGOP %r5, 0(up)
+ xgr %r5, %r1
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ la up, 8(up)
+ j L(mid)
+
+L(b2): lmg %r5, %r6, 0(vp)
+ la vp, 16(vp)
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ la up, 16(up)
+ j L(mid)
+
+L(top): lmg %r5, %r8, 0(vp)
+ la vp, 32(vp)
+ LOGOP %r5, 0(up)
+ LOGOP %r6, 8(up)
+ xgr %r5, %r1
+ xgr %r6, %r1
+ LOGOP %r7, 16(up)
+ LOGOP %r8, 24(up)
+ xgr %r7, %r1
+ xgr %r8, %r1
+ stmg %r5, %r8, 0(rp)
+ la up, 32(up)
+ la rp, 32(rp)
+L(mid): brctg %r0, L(top)
+
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+')
+
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/lshift.asm b/vendor/gmp-6.3.0/mpn/s390_64/lshift.asm
new file mode 100644
index 0000000..4dae035
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/lshift.asm
@@ -0,0 +1,196 @@
+dnl S/390-64 mpn_lshift.
+
+dnl Copyright 2011, 2012, 2014 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 7
+C z990 3
+C z9 ?
+C z10 6
+C z196 ?
+
+C NOTES
+C * This uses discrete loads and stores in a software pipeline. Using lmg and
+C stmg is not faster.
+C * One could assume more pipelining could approach 2.5 c/l, but we have not
+C found any 8-way loop that runs better than the current 4-way loop.
+C * Consider using the same feed-in code for 1 <= n <= 3 as for n mod 4,
+C similarly to the x86_64 sqr_basecase feed-in.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`cnt', `%r5')
+
+define(`tnc', `%r6')
+
+ASM_START()
+PROLOGUE(mpn_lshift)
+ cghi n, 3
+ jh L(gt1)
+
+ stmg %r6, %r7, 48(%r15)
+ larl %r1, L(tab)-4
+ lcgr tnc, cnt
+ sllg n, n, 2
+ b 0(n,%r1)
+L(tab): j L(n1)
+ j L(n2)
+ j L(n3)
+
+L(n1): lg %r1, 0(up)
+ sllg %r0, %r1, 0(cnt)
+ stg %r0, 0(rp)
+ srlg %r2, %r1, 0(tnc)
+ lg %r6, 48(%r15) C restoring r7 not needed
+ br %r14
+
+L(n2): lg %r1, 8(up)
+ srlg %r4, %r1, 0(tnc)
+ sllg %r0, %r1, 0(cnt)
+ j L(cj)
+
+L(n3): lg %r1, 16(up)
+ srlg %r4, %r1, 0(tnc)
+ sllg %r0, %r1, 0(cnt)
+ lg %r1, 8(up)
+ srlg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ sllg %r0, %r1, 0(cnt)
+ stg %r7, 16(rp)
+L(cj): lg %r1, 0(up)
+ srlg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ sllg %r0, %r1, 0(cnt)
+ stg %r7, 8(rp)
+ stg %r0, 0(rp)
+ lgr %r2, %r4
+ lmg %r6, %r7, 48(%r15)
+ br %r14
+
+L(gt1): stmg %r6, %r13, 48(%r15)
+ lcgr tnc, cnt C tnc = -cnt
+
+ sllg %r1, n, 3
+ srlg %r0, n, 2 C loop count
+
+ agr up, %r1 C point up at end of U
+ agr rp, %r1 C point rp at end of R
+ aghi up, -56
+ aghi rp, -40
+
+ lghi %r7, 3
+ ngr %r7, n
+ je L(b0)
+ cghi %r7, 2
+ jl L(b1)
+ je L(b2)
+
+L(b3): lg %r7, 48(up)
+ srlg %r9, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 40(up)
+ lg %r7, 32(up)
+ srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ la rp, 16(rp)
+ j L(lm3)
+
+L(b2): lg %r8, 48(up)
+ lg %r7, 40(up)
+ srlg %r9, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ la rp, 24(rp)
+ la up, 8(up)
+ j L(lm2)
+
+L(b1): lg %r7, 48(up)
+ srlg %r9, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 40(up)
+ lg %r7, 32(up)
+ srlg %r4, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ la rp, 32(rp)
+ la up, 16(up)
+ j L(lm1)
+
+L(b0): lg %r8, 48(up)
+ lg %r7, 40(up)
+ srlg %r9, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ la rp, 40(rp)
+ la up, 24(up)
+ j L(lm0)
+
+ ALIGN(8)
+L(top): srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r10, 24(rp)
+L(lm3): stg %r11, 16(rp)
+L(lm2): srlg %r12, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 24(up)
+ lg %r7, 16(up)
+ ogr %r13, %r12
+ srlg %r4, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r13, 8(rp)
+L(lm1): stg %r11, 0(rp)
+L(lm0): srlg %r12, %r7, 0(tnc)
+ aghi rp, -32
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 8(up)
+ lg %r7, 0(up)
+ aghi up, -32
+ ogr %r10, %r12
+ brctg %r0, L(top)
+
+L(end): srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r10, 24(rp)
+ stg %r11, 16(rp)
+ srlg %r12, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ ogr %r13, %r12
+ stg %r13, 8(rp)
+ stg %r11, 0(rp)
+ lgr %r2, %r9
+
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/lshiftc.asm b/vendor/gmp-6.3.0/mpn/s390_64/lshiftc.asm
new file mode 100644
index 0000000..92552d5
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/lshiftc.asm
@@ -0,0 +1,207 @@
+dnl S/390-64 mpn_lshiftc.
+
+dnl Copyright 2011, 2014 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 9
+C z990 3.5
+C z9 ?
+C z10 7
+C z196 ?
+
+C NOTES
+C * See notes in lshift.asm.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`cnt', `%r5')
+
+define(`tnc', `%r6')
+
+ASM_START()
+PROLOGUE(mpn_lshiftc)
+ cghi n, 3
+ jh L(gt1)
+
+ stmg %r6, %r8, 48(%r15)
+ larl %r1, L(tab)-4
+ lcgr tnc, cnt
+ sllg n, n, 2
+ lghi %r8, -1
+ b 0(n,%r1)
+L(tab): j L(n1)
+ j L(n2)
+ j L(n3)
+
+L(n1): lg %r1, 0(up)
+ sllg %r0, %r1, 0(cnt)
+ xgr %r0, %r8
+ stg %r0, 0(rp)
+ srlg %r2, %r1, 0(tnc)
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+
+L(n2): lg %r1, 8(up)
+ srlg %r4, %r1, 0(tnc)
+ sllg %r0, %r1, 0(cnt)
+ j L(cj)
+
+L(n3): lg %r1, 16(up)
+ srlg %r4, %r1, 0(tnc)
+ sllg %r0, %r1, 0(cnt)
+ lg %r1, 8(up)
+ srlg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ sllg %r0, %r1, 0(cnt)
+ xgr %r7, %r8
+ stg %r7, 16(rp)
+L(cj): lg %r1, 0(up)
+ srlg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ sllg %r0, %r1, 0(cnt)
+ xgr %r7, %r8
+ xgr %r0, %r8
+ stg %r7, 8(rp)
+ stg %r0, 0(rp)
+ lgr %r2, %r4
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+
+L(gt1): stmg %r6, %r14, 48(%r15)
+ lcgr tnc, cnt C tnc = -cnt
+
+ sllg %r1, n, 3
+ srlg %r0, n, 2 C loop count
+
+ agr up, %r1 C point up at end of U
+ agr rp, %r1 C point rp at end of R
+ aghi up, -56
+ aghi rp, -40
+
+ lghi %r7, 3
+ lghi %r14, -1
+ ngr %r7, n
+ je L(b0)
+ cghi %r7, 2
+ jl L(b1)
+ je L(b2)
+
+L(b3): lg %r7, 48(up)
+ srlg %r9, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 40(up)
+ lg %r7, 32(up)
+ srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ la rp, 16(rp)
+ xgr %r11, %r14
+ j L(lm3)
+
+L(b2): lg %r8, 48(up)
+ lg %r7, 40(up)
+ srlg %r9, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ la rp, 24(rp)
+ la up, 8(up)
+ j L(lm2)
+
+L(b1): lg %r7, 48(up)
+ srlg %r9, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 40(up)
+ lg %r7, 32(up)
+ srlg %r4, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ la rp, 32(rp)
+ la up, 16(up)
+ xgr %r11, %r14
+ j L(lm1)
+
+L(b0): lg %r8, 48(up)
+ lg %r7, 40(up)
+ srlg %r9, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ la rp, 40(rp)
+ la up, 24(up)
+ j L(lm0)
+
+ ALIGN(8)
+L(top): srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ xgr %r10, %r14
+ xgr %r11, %r14
+ stg %r10, 24(rp)
+L(lm3): stg %r11, 16(rp)
+L(lm2): srlg %r12, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 24(up)
+ lg %r7, 16(up)
+ ogr %r13, %r12
+ srlg %r4, %r8, 0(tnc)
+ sllg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ xgr %r13, %r14
+ xgr %r11, %r14
+ stg %r13, 8(rp)
+L(lm1): stg %r11, 0(rp)
+L(lm0): srlg %r12, %r7, 0(tnc)
+ aghi rp, -32
+ sllg %r11, %r7, 0(cnt)
+ lg %r8, 8(up)
+ lg %r7, 0(up)
+ aghi up, -32
+ ogr %r10, %r12
+ brctg %r0, L(top)
+
+L(end): srlg %r4, %r8, 0(tnc)
+ sllg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ xgr %r10, %r14
+ xgr %r11, %r14
+ stg %r10, 24(rp)
+ stg %r11, 16(rp)
+ srlg %r12, %r7, 0(tnc)
+ sllg %r11, %r7, 0(cnt)
+ ogr %r13, %r12
+ xgr %r13, %r14
+ xgr %r11, %r14
+ stg %r13, 8(rp)
+ stg %r11, 0(rp)
+ lgr %r2, %r9
+
+ lmg %r6, %r14, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/mod_34lsub1.asm b/vendor/gmp-6.3.0/mpn/s390_64/mod_34lsub1.asm
new file mode 100644
index 0000000..fd40011
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/mod_34lsub1.asm
@@ -0,0 +1,109 @@
+dnl S/390-64 mpn_mod_34lsub1
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 5.8
+C z990 2
+C z9 ?
+C z10 4.5
+C z196 ?
+
+C TODO
+C * Optimise summation code, see x86_64.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`n', `%r3')
+
+ASM_START()
+PROLOGUE(mpn_mod_34lsub1)
+ stmg %r7, %r12, 56(%r15)
+ lghi %r11, 0
+ lghi %r12, 0
+ lghi %r0, 0
+ lghi %r8, 0
+ lghi %r9, 0
+ lghi %r10, 0
+ lghi %r7, 0
+ aghi %r3, -3
+ jl .L3
+
+L(top): alg %r0, 0(%r2)
+ alcg %r12, 8(%r2)
+ alcg %r11, 16(%r2)
+ alcgr %r8, %r7
+ la %r2, 24(%r2)
+ aghi %r3, -3
+ jnl L(top)
+
+ lgr %r7, %r8
+ srlg %r1, %r11, 16
+ nihh %r7, 0 C 0xffffffffffff
+ agr %r7, %r1
+ srlg %r8, %r8, 48
+ agr %r7, %r8
+ sllg %r11, %r11, 32
+ nihh %r11, 0
+ agr %r7, %r11
+.L3:
+ cghi %r3, -3
+ je .L6
+ alg %r0, 0(%r2)
+ alcgr %r10, %r10
+ cghi %r3, -2
+ je .L6
+ alg %r12, 8(%r2)
+ alcgr %r9, %r9
+.L6:
+ srlg %r1, %r0, 48
+ nihh %r0, 0 C 0xffffffffffff
+ agr %r0, %r1
+ agr %r0, %r7
+ srlg %r1, %r12, 32
+ agr %r0, %r1
+ srlg %r1, %r10, 32
+ agr %r0, %r1
+ llgfr %r12, %r12
+ srlg %r1, %r9, 16
+ sllg %r12, %r12, 16
+ llgfr %r10, %r10
+ agr %r0, %r1
+ llill %r2, 65535
+ agr %r0, %r12
+ sllg %r10, %r10, 16
+ ngr %r2, %r9
+ agr %r0, %r10
+ sllg %r2, %r2, 32
+ agr %r2, %r0
+ lmg %r7, %r12, 56(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/mul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/mul_1.asm
new file mode 100644
index 0000000..a8f6da9
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/mul_1.asm
@@ -0,0 +1,66 @@
+dnl S/390-64 mpn_mul_1
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 29
+C z990 22
+C z9 ?
+C z10 20
+C z196 ?
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`v0', `%r5')
+
+ASM_START()
+PROLOGUE(mpn_mul_1)
+ stmg %r11, %r12, 88(%r15)
+ lghi %r12, 0 C zero index register
+ aghi %r12, 0 C clear carry flag
+ lghi %r11, 0 C clear carry limb
+
+L(top): lg %r1, 0(%r12,up)
+ mlgr %r0, v0
+ alcgr %r1, %r11
+ lgr %r11, %r0 C copy high part to carry limb
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg n, L(top)
+
+ lghi %r2, 0
+ alcgr %r2, %r11
+
+ lmg %r11, %r12, 88(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/s390_64/mul_basecase.asm
new file mode 100644
index 0000000..7d14ea9
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/mul_basecase.asm
@@ -0,0 +1,130 @@
+dnl S/390-64 mpn_mul_basecase.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 ?
+C z990 23
+C z9 ?
+C z10 28
+C z196 ?
+
+C TODO
+C * Perhaps add special case for un <= 2.
+C * Replace loops by faster code. The mul_1 and addmul_1 loops could be sped
+C up by about 10%.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`un', `%r4')
+define(`vp', `%r5')
+define(`vn', `%r6')
+
+define(`zero', `%r8')
+
+ASM_START()
+PROLOGUE(mpn_mul_basecase)
+ cghi un, 2
+ jhe L(ge2)
+
+C un = vn = 1
+ lg %r1, 0(vp)
+ mlg %r0, 0(up)
+ stg %r1, 0(rp)
+ stg %r0, 8(rp)
+ br %r14
+
+L(ge2): C jne L(gen)
+
+
+L(gen):
+C mul_1 =======================================================================
+
+ stmg %r6, %r12, 48(%r15)
+ lghi zero, 0
+ aghi un, -1
+
+ lg %r7, 0(vp)
+ lg %r11, 0(up)
+ lghi %r12, 8 C init index register
+ mlgr %r10, %r7
+ lgr %r9, un
+ stg %r11, 0(rp)
+ cr %r15, %r15 C clear carry flag
+
+L(tm): lg %r1, 0(%r12,up)
+ mlgr %r0, %r7
+ alcgr %r1, %r10
+ lgr %r10, %r0 C copy high part to carry limb
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg %r9, L(tm)
+
+ alcgr %r0, zero
+ stg %r0, 0(%r12,rp)
+
+C addmul_1 loop ===============================================================
+
+ aghi vn, -1
+ je L(outer_end)
+L(outer_loop):
+
+ la rp, 8(rp) C rp += 1
+ la vp, 8(vp) C vp += 1
+ lg %r7, 0(vp)
+ lg %r11, 0(up)
+ lghi %r12, 8 C init index register
+ mlgr %r10, %r7
+ lgr %r9, un
+ alg %r11, 0(rp)
+ stg %r11, 0(rp)
+
+L(tam): lg %r1, 0(%r12,up)
+ lg %r11, 0(%r12,rp)
+ mlgr %r0, %r7
+ alcgr %r1, %r11
+ alcgr %r0, zero
+ algr %r1, %r10
+ lgr %r10, %r0
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg %r9, L(tam)
+
+ alcgr %r0, zero
+ stg %r0, 0(%r12,rp)
+
+ brctg vn, L(outer_loop)
+L(outer_end):
+
+ lmg %r6, %r12, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/rshift.asm b/vendor/gmp-6.3.0/mpn/s390_64/rshift.asm
new file mode 100644
index 0000000..e870971
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/rshift.asm
@@ -0,0 +1,195 @@
+dnl S/390-64 mpn_rshift.
+
+dnl Copyright 2011, 2014 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 7
+C z990 3
+C z9 ?
+C z10 6
+C z196 ?
+
+C NOTES
+C * See notes in lshift.asm.
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`cnt', `%r5')
+
+define(`tnc', `%r6')
+
+ASM_START()
+PROLOGUE(mpn_rshift)
+ cghi n, 3
+ jh L(gt1)
+
+ stmg %r6, %r7, 48(%r15)
+ larl %r1, L(tab)-4
+ lcgr tnc, cnt
+ sllg n, n, 2
+ b 0(n,%r1)
+L(tab): j L(n1)
+ j L(n2)
+ j L(n3)
+
+L(n1): lg %r1, 0(up)
+ srlg %r0, %r1, 0(cnt)
+ stg %r0, 0(rp)
+ sllg %r2, %r1, 0(tnc)
+ lg %r6, 48(%r15) C restoring r7 not needed
+ br %r14
+
+L(n2): lg %r1, 0(up)
+ sllg %r4, %r1, 0(tnc)
+ srlg %r0, %r1, 0(cnt)
+ lg %r1, 8(up)
+ sllg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ srlg %r0, %r1, 0(cnt)
+ stg %r7, 0(rp)
+ stg %r0, 8(rp)
+ lgr %r2, %r4
+ lmg %r6, %r7, 48(%r15)
+ br %r14
+
+
+L(n3): lg %r1, 0(up)
+ sllg %r4, %r1, 0(tnc)
+ srlg %r0, %r1, 0(cnt)
+ lg %r1, 8(up)
+ sllg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ srlg %r0, %r1, 0(cnt)
+ stg %r7, 0(rp)
+ lg %r1, 16(up)
+ sllg %r7, %r1, 0(tnc)
+ ogr %r7, %r0
+ srlg %r0, %r1, 0(cnt)
+ stg %r7, 8(rp)
+ stg %r0, 16(rp)
+ lgr %r2, %r4
+ lmg %r6, %r7, 48(%r15)
+ br %r14
+
+L(gt1): stmg %r6, %r13, 48(%r15)
+ lcgr tnc, cnt C tnc = -cnt
+
+ sllg %r1, n, 3
+ srlg %r0, n, 2 C loop count
+
+ lghi %r7, 3
+ ngr %r7, n
+ je L(b0)
+ cghi %r7, 2
+ jl L(b1)
+ je L(b2)
+
+L(b3): aghi rp, -8
+ lg %r7, 0(up)
+ sllg %r9, %r7, 0(tnc)
+ srlg %r11, %r7, 0(cnt)
+ lg %r8, 8(up)
+ lg %r7, 16(up)
+ sllg %r4, %r8, 0(tnc)
+ srlg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ la up, 24(up)
+ j L(lm3)
+
+L(b2): aghi rp, -16
+ lg %r8, 0(up)
+ lg %r7, 8(up)
+ sllg %r9, %r8, 0(tnc)
+ srlg %r13, %r8, 0(cnt)
+ la up, 16(up)
+ j L(lm2)
+
+L(b1): aghi rp, -24
+ lg %r7, 0(up)
+ sllg %r9, %r7, 0(tnc)
+ srlg %r11, %r7, 0(cnt)
+ lg %r8, 8(up)
+ lg %r7, 16(up)
+ sllg %r4, %r8, 0(tnc)
+ srlg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ la up, 8(up)
+ j L(lm1)
+
+L(b0): aghi rp, -32
+ lg %r8, 0(up)
+ lg %r7, 8(up)
+ sllg %r9, %r8, 0(tnc)
+ srlg %r10, %r8, 0(cnt)
+ j L(lm0)
+
+ ALIGN(8)
+L(top): sllg %r4, %r8, 0(tnc)
+ srlg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r10, 0(rp)
+L(lm3): stg %r11, 8(rp)
+L(lm2): sllg %r12, %r7, 0(tnc)
+ srlg %r11, %r7, 0(cnt)
+ lg %r8, 0(up)
+ lg %r7, 8(up)
+ ogr %r13, %r12
+ sllg %r4, %r8, 0(tnc)
+ srlg %r10, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r13, 16(rp)
+L(lm1): stg %r11, 24(rp)
+L(lm0): sllg %r12, %r7, 0(tnc)
+ aghi rp, 32
+ srlg %r11, %r7, 0(cnt)
+ lg %r8, 16(up)
+ lg %r7, 24(up)
+ aghi up, 32
+ ogr %r10, %r12
+ brctg %r0, L(top)
+
+L(end): sllg %r4, %r8, 0(tnc)
+ srlg %r13, %r8, 0(cnt)
+ ogr %r11, %r4
+ stg %r10, 0(rp)
+ stg %r11, 8(rp)
+ sllg %r12, %r7, 0(tnc)
+ srlg %r11, %r7, 0(cnt)
+ ogr %r13, %r12
+ stg %r13, 16(rp)
+ stg %r11, 24(rp)
+ lgr %r2, %r9
+
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/sec_tabselect.asm b/vendor/gmp-6.3.0/mpn/s390_64/sec_tabselect.asm
new file mode 100644
index 0000000..2c97423
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/sec_tabselect.asm
@@ -0,0 +1,139 @@
+dnl S/390-64 mpn_sec_tabselect
+
+dnl Copyright 2021 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 ?
+C z990 ?
+C z9 ?
+C z10 ?
+C z196 ?
+C z13 ?
+C z14 ?
+C z15 1.6
+
+dnl void
+dnl mpn_sec_tabselect (volatile mp_limb_t *rp, volatile const mp_limb_t *tab,
+dnl mp_size_t n, mp_size_t nents, mp_size_t which)
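+dnl
+dnl Reference sketch in plain C of what the loops below compute (not part
+dnl of the build): every table entry is scanned and combined under an
+dnl all-ones/all-zeros mask, so neither branches nor load addresses depend
+dnl on `which':
+dnl
+dnl   for (i = 0; i < n; i++) rp[i] = 0;
+dnl   for (k = 0; k < nents; k++) {
+dnl     mp_limb_t mask = -(mp_limb_t) (k == which);  /* ones iff selected */
+dnl     for (i = 0; i < n; i++) rp[i] += tab[k * n + i] & mask;
+dnl   }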
+
+define(`rp', `%r2')
+define(`tp', `%r3')
+define(`n', `%r4')
+define(`nents', `%r5')
+define(`which_arg',`%r6') C magicked to stack
+
+dnl r0 r1 r2 r3 r4 r5 r6 r7
+dnl r8 r9 r10 r11 r12 r13 r14 r15
+
+define(`mask', `%r14')
+define(`k', `%r1')
+define(`which', `%r0')
+
+define(`FRAME', 64)
+
+ASM_START()
+PROLOGUE(mpn_sec_tabselect)
+ stmg %r5, %r15, 40(%r15)
+ aghi %r15, -FRAME
+
+ sllg n, n, 3
+ msgr %r5, n
+ stg %r5, 16(%r15) C nents * n * LIMB_BYTES
+
+ srlg %r5, n, 2+3
+ ngr %r5, %r5
+ je L(end4)
+L(outer):
+ lg which, eval(48+FRAME)(%r15)
+ lg k, eval(40+FRAME)(%r15) C nents
+ lghi %r6, 0
+ lghi %r7, 0
+ lghi %r8, 0
+ lghi %r9, 0
+L(tp4): lghi mask, 1
+ slgr which, mask
+ slbgr mask, mask
+ lmg %r10, %r13, 0(tp)
+ ngr %r10, mask
+ ngr %r11, mask
+ ngr %r12, mask
+ ngr %r13, mask
+ agr %r6, %r10
+ agr %r7, %r11
+ agr %r8, %r12
+ agr %r9, %r13
+ agr tp, n
+ brctg k, L(tp4)
+ stmg %r6, %r9, 0(rp)
+ aghi rp, 32
+ slg tp, 16(%r15)
+ aghi tp, eval(4*8)
+ brctg %r5, L(outer)
+L(end4):
+ tmll n, 16
+ je L(end2)
+ lg which, eval(48+FRAME)(%r15)
+ lg k, eval(40+FRAME)(%r15) C nents
+ lghi %r6, 0
+ lghi %r7, 0
+L(tp2): lghi mask, 1
+ slgr which, mask
+ slbgr mask, mask
+ lmg %r10, %r11, 0(tp)
+ ngr %r10, mask
+ ngr %r11, mask
+ agr %r6, %r10
+ agr %r7, %r11
+ agr tp, n
+ brctg k, L(tp2)
+ stmg %r6, %r7, 0(rp)
+ aghi rp, 16
+ slg tp, 16(%r15)
+ aghi tp, eval(2*8)
+L(end2):
+ tmll n, 8
+ je L(end1)
+ lg which, eval(48+FRAME)(%r15)
+ lg k, eval(40+FRAME)(%r15) C nents
+ lghi %r6, 0
+L(tp1): lghi mask, 1
+ slgr which, mask
+ slbgr mask, mask
+ lg %r10, 0(tp)
+ ngr %r10, mask
+ agr %r6, %r10
+ agr tp, n
+ brctg k, L(tp1)
+ stg %r6, 0(rp)
+L(end1):
+ lmg %r5, %r15, eval(40+FRAME)(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/sqr_basecase.asm b/vendor/gmp-6.3.0/mpn/s390_64/sqr_basecase.asm
new file mode 100644
index 0000000..bf31bd5
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/sqr_basecase.asm
@@ -0,0 +1,203 @@
+dnl S/390-64 mpn_sqr_basecase.
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 ?
+C z990 23
+C z9 ?
+C z10 28
+C z196 ?
+
+C TODO
+C * Clean up.
+C * Stop iterating the addmul_1 loop at the latest for n = 2; implement a
+C   longer tail instead.  This will call for basecase handling of n = 3.
+C * Update counters and pointers more straightforwardly, possibly lowering
+C register usage.
+C * Should we use this allocation-free style for more sqr_basecase asm
+C implementations? The only disadvantage is that it requires R != U.
+C * Replace loops by faster code. The mul_1 and addmul_1 loops could be sped
+C up by about 10%. The sqr_diag_addlsh1 loop could probably be sped up even
+C more.
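+C
+C METHOD
+C   Accumulate the off-diagonal half product sum_{i<j} u[i]*u[j] with one
+C   mul_1 pass and a loop of addmul_1 passes; the sqr_diag_addlsh1 loop
+C   then doubles that triangle and adds in the diagonal squares u[i]^2.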
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+
+define(`zero', `%r8')
+define(`rp_saved', `%r9')
+define(`up_saved', `%r13')
+define(`n_saved', `%r14')
+
+ASM_START()
+PROLOGUE(mpn_sqr_basecase)
+ aghi n, -2
+ jhe L(ge2)
+
+C n = 1
+ lg %r5, 0(up)
+ mlgr %r4, %r5
+ stg %r5, 0(rp)
+ stg %r4, 8(rp)
+ br %r14
+
+L(ge2): jne L(gen)
+
+C n = 2
+ stmg %r6, %r8, 48(%r15)
+ lghi zero, 0
+
+ lg %r5, 0(up)
+ mlgr %r4, %r5 C u0 * u0
+ lg %r1, 8(up)
+ mlgr %r0, %r1 C u1 * u1
+ stg %r5, 0(rp)
+
+ lg %r7, 0(up)
+ mlg %r6, 8(up) C u0 * u1
+ algr %r7, %r7
+ alcgr %r6, %r6
+ alcgr %r0, zero
+
+ algr %r4, %r7
+ alcgr %r1, %r6
+ alcgr %r0, zero
+ stg %r4, 8(rp)
+ stg %r1, 16(rp)
+ stg %r0, 24(rp)
+
+ lmg %r6, %r8, 48(%r15)
+ br %r14
+
+L(gen):
+C mul_1 =======================================================================
+
+ stmg %r6, %r14, 48(%r15)
+ lghi zero, 0
+ lgr up_saved, up
+ lgr rp_saved, rp
+ lgr n_saved, n
+
+ lg %r6, 0(up)
+ lg %r11, 8(up)
+ lghi %r12, 16 C init index register
+ mlgr %r10, %r6
+ lgr %r5, n
+ stg %r11, 8(rp)
+ cr %r15, %r15 C clear carry flag
+
+L(tm): lg %r1, 0(%r12,up)
+ mlgr %r0, %r6
+ alcgr %r1, %r10
+ lgr %r10, %r0 C copy high part to carry limb
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg %r5, L(tm)
+
+ alcgr %r0, zero
+ stg %r0, 0(%r12,rp)
+
+C addmul_1 loop ===============================================================
+
+ aghi n, -1
+ je L(outer_end)
+L(outer_loop):
+
+ la rp, 16(rp) C rp += 2
+ la up, 8(up) C up += 1
+ lg %r6, 0(up)
+ lg %r11, 8(up)
+ lghi %r12, 16 C init index register
+ mlgr %r10, %r6
+ lgr %r5, n
+ alg %r11, 8(rp)
+ stg %r11, 8(rp)
+
+L(tam): lg %r1, 0(%r12,up)
+ lg %r7, 0(%r12,rp)
+ mlgr %r0, %r6
+ alcgr %r1, %r7
+ alcgr %r0, zero
+ algr %r1, %r10
+ lgr %r10, %r0
+ stg %r1, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg %r5, L(tam)
+
+ alcgr %r0, zero
+ stg %r0, 0(%r12,rp)
+
+ brctg n, L(outer_loop)
+L(outer_end):
+
+ lg %r6, 8(up)
+ lg %r1, 16(up)
+ lgr %r7, %r0 C Same as: lg %r7, 24(,rp)
+ mlgr %r0, %r6
+ algr %r1, %r7
+ alcgr %r0, zero
+ stg %r1, 24(rp)
+ stg %r0, 32(rp)
+
+C sqr_diag_addlsh1 ============================================================
+
+define(`up', `up_saved')
+define(`rp', `rp_saved')
+ la n, 1(n_saved)
+
+ lg %r1, 0(up)
+ mlgr %r0, %r1
+ stg %r1, 0(rp)
+C clr %r15, %r15 C clear carry (already clear per above)
+
+L(top): lg %r11, 8(up)
+ la up, 8(up)
+ lg %r6, 8(rp)
+ lg %r7, 16(rp)
+ mlgr %r10, %r11
+ alcgr %r6, %r6
+ alcgr %r7, %r7
+ alcgr %r10, zero C propagate carry to high product limb
+ algr %r6, %r0
+ alcgr %r7, %r11
+ stmg %r6, %r7, 8(rp)
+ la rp, 16(rp)
+ lgr %r0, %r10 C copy carry limb
+ brctg n, L(top)
+
+ alcgr %r0, zero
+ stg %r0, 8(rp)
+
+ lmg %r6, %r14, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/sublsh1_n.asm b/vendor/gmp-6.3.0/mpn/s390_64/sublsh1_n.asm
new file mode 100644
index 0000000..50f127a
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/sublsh1_n.asm
@@ -0,0 +1,169 @@
+dnl S/390-64 mpn_sublsh1_n
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 10
+C z990 5
+C z9 ?
+C z10 12
+C z196 ?
+
+C TODO
+C * Optimise for small n
+C * Compute RETVAL for sublsh1_n less stupidly
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`vp', `%r4')
+define(`n', `%r5')
+
+ifdef(`OPERATION_addlsh1_n',`
+ define(ADSBR, algr)
+ define(ADSBCR, alcgr)
+ define(INITCY, `lghi %r13, -1')
+ define(RETVAL, `la %r2, 2(%r1,%r13)')
+ define(func, mpn_addlsh1_n)
+')
+ifdef(`OPERATION_sublsh1_n',`
+ define(ADSBR, slgr)
+ define(ADSBCR, slbgr)
+ define(INITCY, `lghi %r13, 0')
+ define(RETVAL,`dnl
+ slgr %r1, %r13
+ lghi %r2, 1
+ algr %r2, %r1')
+ define(func, mpn_sublsh1_n)
+')
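+
+C Both carry chains are kept across iterations in biased form: %r1 holds
+C the lsh1 chain's state and %r13 the add/subtract chain's, each as 0 or
+C -1.  RETVAL recombines the two saved flags into the function result, the
+C net carry (addlsh1) or borrow (sublsh1) limb.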
+
+ASM_START()
+PROLOGUE(mpn_sublsh1_n)
+ stmg %r6, %r13, 48(%r15)
+
+ aghi n, 3
+ lghi %r7, 3
+ srlg %r0, n, 2
+ ngr %r7, n C n mod 4
+ je L(b1)
+ cghi %r7, 2
+ jl L(b2)
+ jne L(b0)
+
+L(b3): lmg %r5, %r7, 0(up)
+ la up, 24(up)
+ lmg %r9, %r11, 0(vp)
+ la vp, 24(vp)
+
+ algr %r9, %r9
+ alcgr %r10, %r10
+ alcgr %r11, %r11
+ slbgr %r1, %r1
+
+ ADSBR %r5, %r9
+ ADSBCR %r6, %r10
+ ADSBCR %r7, %r11
+ slbgr %r13, %r13
+
+ stmg %r5, %r7, 0(rp)
+ la rp, 24(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(b0): lghi %r1, -1
+ INITCY
+ j L(top)
+
+L(b1): lg %r5, 0(up)
+ la up, 8(up)
+ lg %r9, 0(vp)
+ la vp, 8(vp)
+
+ algr %r9, %r9
+ slbgr %r1, %r1
+ ADSBR %r5, %r9
+ slbgr %r13, %r13
+
+ stg %r5, 0(rp)
+ la rp, 8(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(b2): lmg %r5, %r6, 0(up)
+ la up, 16(up)
+ lmg %r9, %r10, 0(vp)
+ la vp, 16(vp)
+
+ algr %r9, %r9
+ alcgr %r10, %r10
+ slbgr %r1, %r1
+
+ ADSBR %r5, %r9
+ ADSBCR %r6, %r10
+ slbgr %r13, %r13
+
+ stmg %r5, %r6, 0(rp)
+ la rp, 16(rp)
+ brctg %r0, L(top)
+ j L(end)
+
+L(top): lmg %r9, %r12, 0(vp)
+ la vp, 32(vp)
+
+ aghi %r1, 1 C restore carry
+
+ alcgr %r9, %r9
+ alcgr %r10, %r10
+ alcgr %r11, %r11
+ alcgr %r12, %r12
+
+ slbgr %r1, %r1 C save carry
+
+ lmg %r5, %r8, 0(up)
+ la up, 32(up)
+
+ aghi %r13, 1 C restore carry
+
+ ADSBCR %r5, %r9
+ ADSBCR %r6, %r10
+ ADSBCR %r7, %r11
+ ADSBCR %r8, %r12
+
+ slbgr %r13, %r13 C save carry
+
+ stmg %r5, %r8, 0(rp)
+ la rp, 32(rp)
+ brctg %r0, L(top)
+
+L(end): RETVAL
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/submul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/submul_1.asm
new file mode 100644
index 0000000..3bb8b05
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/submul_1.asm
@@ -0,0 +1,70 @@
+dnl S/390-64 mpn_submul_1
+
+dnl Copyright 2011 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 35
+C z990 24
+C z9 ?
+C z10 28
+C z196 ?
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`n', `%r4')
+define(`v0', `%r5')
+
+ASM_START()
+PROLOGUE(mpn_submul_1)
+ stmg %r9, %r12, 72(%r15)
+ lghi %r12, 0
+ slgr %r11, %r11
+
+L(top): lg %r1, 0(%r12,up)
+ lg %r10, 0(%r12,rp)
+ mlgr %r0, v0
+ slbgr %r10, %r1
+ slbgr %r9, %r9
+ slgr %r0, %r9 C conditional incr
+ slgr %r10, %r11
+ lgr %r11, %r0
+ stg %r10, 0(%r12,rp)
+ la %r12, 8(%r12)
+ brctg %r4, L(top)
+
+ lgr %r2, %r11
+ slbgr %r9, %r9
+ slgr %r2, %r9
+
+ lmg %r9, %r12, 72(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z10/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/s390_64/z10/gmp-mparam.h
new file mode 100644
index 0000000..c3a9416
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z10/gmp-mparam.h
@@ -0,0 +1,233 @@
+/* S/390-64 gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 1991, 1993, 1994, 2000-2011, 2014, 2015 Free Software Foundation,
+Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+/* 4400 MHz IBM z10 */
+/* FFT tuning limit = 30 M */
+/* Generated by tuneup.c, 2015-10-09, gcc 4.8 */
+
+#define DIVREM_1_NORM_THRESHOLD 0 /* always */
+#define DIVREM_1_UNNORM_THRESHOLD 3
+#define MOD_1_1P_METHOD 2
+#define MOD_1_NORM_THRESHOLD 0 /* always */
+#define MOD_1_UNNORM_THRESHOLD 3
+#define MOD_1N_TO_MOD_1_1_THRESHOLD 6
+#define MOD_1U_TO_MOD_1_1_THRESHOLD 5
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD 15
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD 17
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 24
+#define USE_PREINV_DIVREM_1 1
+#define DIV_QR_1N_PI1_METHOD 1
+#define DIV_QR_1_NORM_THRESHOLD 2
+#define DIV_QR_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_2_PI2_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIVEXACT_1_THRESHOLD 0 /* always */
+#define BMOD_1_TO_MOD_1_THRESHOLD 48
+
+#define MUL_TOOM22_THRESHOLD 9
+#define MUL_TOOM33_THRESHOLD 65
+#define MUL_TOOM44_THRESHOLD 94
+#define MUL_TOOM6H_THRESHOLD 129
+#define MUL_TOOM8H_THRESHOLD 187
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD 65
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD 61
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD 62
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD 64
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD 85
+
+#define SQR_BASECASE_THRESHOLD 0 /* always (native) */
+#define SQR_TOOM2_THRESHOLD 11
+#define SQR_TOOM3_THRESHOLD 80
+#define SQR_TOOM4_THRESHOLD 118
+#define SQR_TOOM6_THRESHOLD 189
+#define SQR_TOOM8_THRESHOLD 236
+
+#define MULMID_TOOM42_THRESHOLD 24
+
+#define MULMOD_BNM1_THRESHOLD 7
+#define SQRMOD_BNM1_THRESHOLD 9
+
+#define MUL_FFT_MODF_THRESHOLD 252 /* k = 5 */
+#define MUL_FFT_TABLE3 \
+ { { 252, 5}, { 9, 6}, { 5, 5}, { 11, 6}, \
+ { 6, 5}, { 13, 6}, { 7, 5}, { 15, 6}, \
+ { 13, 7}, { 7, 6}, { 15, 7}, { 13, 8}, \
+ { 7, 7}, { 15, 8}, { 9, 7}, { 19, 8}, \
+ { 11, 7}, { 23, 8}, { 13, 9}, { 7, 8}, \
+ { 15, 7}, { 31, 8}, { 19, 9}, { 11, 8}, \
+ { 27,10}, { 7, 9}, { 15, 8}, { 31, 9}, \
+ { 19, 8}, { 41, 9}, { 27,10}, { 15, 9}, \
+ { 39,10}, { 23, 9}, { 47,11}, { 15,10}, \
+ { 31, 9}, { 67,10}, { 39, 9}, { 79,10}, \
+ { 47,11}, { 31,10}, { 63, 9}, { 127, 8}, \
+ { 255,10}, { 71, 9}, { 143, 8}, { 287, 7}, \
+ { 575, 6}, { 1151,10}, { 79,11}, { 47,12}, \
+ { 31,11}, { 63,10}, { 127, 9}, { 255, 8}, \
+ { 511,10}, { 143,11}, { 79,10}, { 159, 9}, \
+ { 319, 8}, { 639,10}, { 175, 8}, { 703,11}, \
+ { 95,10}, { 191, 9}, { 383, 8}, { 767, 9}, \
+ { 415, 8}, { 831, 7}, { 1663,10}, { 239, 9}, \
+ { 479,12}, { 63,11}, { 127,10}, { 255, 9}, \
+ { 511,11}, { 143,10}, { 287, 9}, { 575, 8}, \
+ { 1151,10}, { 319, 9}, { 639,11}, { 175,10}, \
+ { 351, 9}, { 703, 8}, { 1407, 7}, { 2815,11}, \
+ { 191,10}, { 383, 9}, { 767,10}, { 415,11}, \
+ { 223,10}, { 447, 9}, { 895,13}, { 63,11}, \
+ { 255,10}, { 575, 9}, { 1151,12}, { 159,11}, \
+ { 319,10}, { 639, 9}, { 1279,10}, { 703, 9}, \
+ { 1407,12}, { 191,10}, { 767,11}, { 415,12}, \
+ { 223,11}, { 447,10}, { 895,11}, { 479,13}, \
+ { 127,12}, { 255,11}, { 511,12}, { 287,10}, \
+ { 1151,12}, { 319,11}, { 703,10}, { 1407, 9}, \
+ { 2815,12}, { 383,11}, { 767,12}, { 415,11}, \
+ { 831,10}, { 1663,12}, { 447,11}, { 895,10}, \
+ { 1791, 9}, { 3583,12}, { 479,11}, { 959,10}, \
+ { 1919, 9}, { 3839,12}, { 511, 9}, { 4095, 6}, \
+ { 32767, 8}, { 8447,11}, { 1151,13}, { 319,12}, \
+ { 639,10}, { 2559,12}, { 703,10}, { 2815,12}, \
+ { 831,11}, { 1663,12}, { 895,11}, { 1791,12}, \
+ { 959,11}, { 1919,14}, { 255,13}, { 511,11}, \
+ { 2047,12}, { 1215,10}, { 4863,11}, { 2559,14}, \
+ { 383,12}, { 1535,13}, { 831,12}, { 1663,13}, \
+ { 895,12}, { 1791,11}, { 3583,15}, { 255,14}, \
+ { 511,13}, { 1151,14}, { 639,13}, { 1279,12}, \
+ { 2559,13}, { 1407,12}, { 2815,14}, { 767,13}, \
+ { 1663,10}, { 13311,14}, { 895,13}, { 1791,12}, \
+ { 3583,13}, { 1919,12}, { 3839,10}, { 15359,14}, \
+ { 1151,13}, { 2431,12}, { 4863,14}, { 1279,13}, \
+ { 2559,14}, { 1407,13}, { 2815,15}, { 767,14}, \
+ { 1791,13}, { 8192,14}, { 16384,15}, { 32768,16}, \
+ { 65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \
+ {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 200
+#define MUL_FFT_THRESHOLD 1728
+
+#define SQR_FFT_MODF_THRESHOLD 212 /* k = 5 */
+#define SQR_FFT_TABLE3 \
+ { { 212, 5}, { 7, 4}, { 15, 5}, { 9, 4}, \
+ { 19, 6}, { 5, 5}, { 11, 6}, { 6, 5}, \
+ { 13, 6}, { 7, 5}, { 15, 6}, { 9, 5}, \
+ { 19, 6}, { 13, 7}, { 7, 6}, { 15, 7}, \
+ { 9, 6}, { 19, 7}, { 13, 8}, { 7, 7}, \
+ { 16, 8}, { 9, 7}, { 19, 8}, { 11, 7}, \
+ { 23, 8}, { 13, 9}, { 7, 8}, { 19, 9}, \
+ { 11, 8}, { 25,10}, { 7, 9}, { 15, 8}, \
+ { 31, 9}, { 23,10}, { 15, 9}, { 39,10}, \
+ { 23,11}, { 15,10}, { 31, 9}, { 63,10}, \
+ { 47,11}, { 31,10}, { 63, 9}, { 127, 8}, \
+ { 255,10}, { 71, 9}, { 143, 8}, { 287, 7}, \
+ { 575,11}, { 47,12}, { 31,11}, { 63,10}, \
+ { 127, 9}, { 255, 8}, { 511,10}, { 143, 9}, \
+ { 287, 8}, { 575, 7}, { 1151,11}, { 79,10}, \
+ { 159, 9}, { 319,10}, { 175, 9}, { 351, 8}, \
+ { 703, 7}, { 1407,10}, { 191, 9}, { 383,10}, \
+ { 207,11}, { 111,10}, { 223,12}, { 63,11}, \
+ { 127,10}, { 255, 9}, { 511,11}, { 143,10}, \
+ { 287, 9}, { 575, 8}, { 1151,11}, { 159,10}, \
+ { 319, 9}, { 639,11}, { 175,10}, { 351, 9}, \
+ { 703, 8}, { 1407,11}, { 191,10}, { 383,11}, \
+ { 207,10}, { 415,11}, { 223,10}, { 447,13}, \
+ { 63,12}, { 127,11}, { 255,10}, { 511,11}, \
+ { 287,10}, { 575, 9}, { 1151,12}, { 159,11}, \
+ { 319,10}, { 639,11}, { 351,10}, { 703, 9}, \
+ { 1407,12}, { 191,11}, { 383,10}, { 767,11}, \
+ { 415,12}, { 223,11}, { 447,10}, { 895, 9}, \
+ { 1791,13}, { 127,12}, { 255,11}, { 511,12}, \
+ { 287,11}, { 575,10}, { 1151,11}, { 607,12}, \
+ { 319,11}, { 639,12}, { 351,11}, { 703,10}, \
+ { 1407,13}, { 191,12}, { 383,11}, { 767,12}, \
+ { 415,11}, { 831,10}, { 1663,12}, { 447,11}, \
+ { 895,10}, { 1791,14}, { 127,13}, { 255,12}, \
+ { 511,11}, { 1023,10}, { 2047,11}, { 1151,12}, \
+ { 607,13}, { 319,11}, { 1279, 9}, { 5119, 8}, \
+ { 10751, 4}, { 172031, 7}, { 22015,11}, { 1407,10}, \
+ { 2943, 8}, { 11775, 9}, { 6143,12}, { 831, 8}, \
+ { 13311,11}, { 1791,14}, { 255,11}, { 2047,13}, \
+ { 575,12}, { 1151,13}, { 639,12}, { 1279,13}, \
+ { 703,12}, { 1407,11}, { 2815,12}, { 1471, 9}, \
+ { 11775,13}, { 767,12}, { 1535,13}, { 831,12}, \
+ { 1663,13}, { 895,11}, { 3583,13}, { 959,12}, \
+ { 1919,10}, { 7679, 9}, { 15359,11}, { 3967,14}, \
+ { 511,13}, { 1151,12}, { 2303,13}, { 1215,14}, \
+ { 639,13}, { 1279,12}, { 2559,14}, { 767,13}, \
+ { 1663,14}, { 895,15}, { 511,13}, { 2047,14}, \
+ { 1279,13}, { 2815,15}, { 767,14}, { 1791,13}, \
+ { 3583,16}, { 65536,17}, { 131072,18}, { 262144,19}, \
+ { 524288,20}, {1048576,21}, {2097152,22}, {4194304,23}, \
+ {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 201
+#define SQR_FFT_THRESHOLD 1344
+
+#define MULLO_BASECASE_THRESHOLD 0 /* always */
+#define MULLO_DC_THRESHOLD 33
+#define MULLO_MUL_N_THRESHOLD 2586
+#define SQRLO_BASECASE_THRESHOLD 0 /* always */
+#define SQRLO_DC_THRESHOLD 63
+#define SQRLO_SQR_THRESHOLD 2663
+
+#define DC_DIV_QR_THRESHOLD 37
+#define DC_DIVAPPR_Q_THRESHOLD 143
+#define DC_BDIV_QR_THRESHOLD 37
+#define DC_BDIV_Q_THRESHOLD 86
+
+#define INV_MULMOD_BNM1_THRESHOLD 16
+#define INV_NEWTON_THRESHOLD 147
+#define INV_APPR_THRESHOLD 141
+
+#define BINV_NEWTON_THRESHOLD 141
+#define REDC_1_TO_REDC_N_THRESHOLD 39
+
+#define MU_DIV_QR_THRESHOLD 807
+#define MU_DIVAPPR_Q_THRESHOLD 807
+#define MUPI_DIV_QR_THRESHOLD 81
+#define MU_BDIV_QR_THRESHOLD 654
+#define MU_BDIV_Q_THRESHOLD 792
+
+#define POWM_SEC_TABLE 1,28,163,1083,2111
+
+#define GET_STR_DC_THRESHOLD 19
+#define GET_STR_PRECOMPUTE_THRESHOLD 33
+#define SET_STR_DC_THRESHOLD 898
+#define SET_STR_PRECOMPUTE_THRESHOLD 2031
+
+#define FAC_DSC_THRESHOLD 372
+#define FAC_ODD_THRESHOLD 23
+
+#define MATRIX22_STRASSEN_THRESHOLD 17
+#define HGCD_THRESHOLD 105
+#define HGCD_APPR_THRESHOLD 111
+#define HGCD_REDUCE_THRESHOLD 1137
+#define GCD_DC_THRESHOLD 285
+#define GCDEXT_DC_THRESHOLD 210
+#define JACOBI_BASE_METHOD 4
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm
new file mode 100644
index 0000000..2b00612
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm
@@ -0,0 +1,173 @@
+dnl S/390-64 mpn_addmul_1 and mpn_addmul_1c.
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in the inner loop instead of preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.55
+
+
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4')
+define(`b0', `%r5')
+define(`cy', `%r6')
+
+define(`idx', `%r4')
+
+ASM_START()
+
+PROLOGUE(mpn_addmul_1c)
+ stmg %r6, %r13, 48(%r15)
+ j L(ent)
+EPILOGUE()
+
+PROLOGUE(mpn_addmul_1)
+ stmg %r6, %r13, 48(%r15)
+ lghi %r6, 0
+L(ent): vzero %v0
+ vzero %v2
+ srlg %r11, an, 2
+
+ tmll an, 1
+ je L(bx0)
+L(bx1): tmll an, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ vleg %v2, 0(rp), 1
+ lg %r13, 0(ap)
+ vzero %v4
+ mlgr %r12, b0
+ algr %r13, %r6
+ lghi %r6, 0
+ alcgr %r12, %r6
+ vlvgg %v4, %r13, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ cgije %r11, 0, L(1)
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ vleg %v2, 0(rp), 1
+ lg %r9, 0(ap)
+ vzero %v4
+ mlgr %r8, b0
+ algr %r9, %r6
+ lghi %r6, 0
+ alcgr %r8, %r6
+ vlvgg %v4, %r9, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj1)
+
+L(bx0): tmll an, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lgr %r12, %r6
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lgr %r8, %r6
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vl %v4, 32(idx, rp), 3
+ vpdi %v4, %v4, %v4, 4
+ vacq %v5, %v6, %v4, %v0
+ vacccq %v0, %v6, %v4, %v0
+ vacq %v1, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vag %v2, %v0, %v2
+L(1): vlgvg %r2, %v2, 1
+ algr %r2, %r12
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c
new file mode 100644
index 0000000..022e5ed
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c
@@ -0,0 +1,358 @@
+/* Addmul_1 / mul_1 for IBM z13 and later
+ Contributed by Marius Hillenbrand
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+#include "s390_64/z13/common-vec.h"
+
+#undef FUNCNAME
+
+#ifdef DO_INLINE
+# ifdef OPERATION_addmul_1
+# define ADD
+# define FUNCNAME inline_addmul_1
+# elif defined(OPERATION_mul_1)
+# define FUNCNAME inline_mul_1
+# endif
+
+#else
+# ifdef OPERATION_addmul_1
+# define ADD
+# define FUNCNAME mpn_addmul_1
+# elif defined(OPERATION_mul_1)
+# define FUNCNAME mpn_mul_1
+# endif
+#endif
+
+#ifdef DO_INLINE
+static inline mp_limb_t
+FUNCNAME (mp_ptr rp, mp_srcptr s1p, mp_size_t n, mp_limb_t s2limb)
+ __attribute__ ((always_inline));
+
+static inline
+#endif
+mp_limb_t
+FUNCNAME (mp_ptr rp, mp_srcptr s1p, mp_size_t n, mp_limb_t s2limb)
+{
+ ASSERT (n >= 1);
+ ASSERT (MPN_SAME_OR_INCR_P(rp, s1p, n));
+
+ /* Combine 64x64 multiplication into GPR pairs (MLGR) with 128-bit adds in
+ VRs (using each VR as a single 128-bit accumulator).
+ The inner loop is unrolled to four limbs, with two blocks of four
+ multiplications each. Since the MLGR operation operates on even/odd GPR
+ pairs, pin the products appropriately. */
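+
+  /* In mpn terms: rp[0..n-1] gets rp[] + s1p[] * s2limb (addmul_1) or
+     s1p[] * s2limb (mul_1); the limb spilling out of the top is returned.  */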
+
+ /* products as GPR pairs */
+ register mp_limb_t p0_high asm("r0");
+ register mp_limb_t p0_low asm("r1");
+
+ register mp_limb_t p1_high asm("r8");
+ register mp_limb_t p1_low asm("r9");
+
+ register mp_limb_t p2_high asm("r6");
+ register mp_limb_t p2_low asm("r7");
+
+ register mp_limb_t p3_high asm("r10");
+ register mp_limb_t p3_low asm("r11");
+
+ /* carry flag for 128-bit add in VR for first carry chain */
+ vec_t carry_vec0 = { .dw = vec_splat_u64 (0) };
+ mp_limb_t carry_limb = 0;
+
+#ifdef ADD
+ /* 2nd carry flag for 2nd carry chain with addmul */
+ vec_t carry_vec1 = { .dw = vec_splat_u64 (0) };
+ vec_t sum0;
+ vec_t rp0_addend, rp1_addend;
+ rp0_addend.dw = vec_splat_u64 (0);
+ rp1_addend.dw = vec_splat_u64 (0);
+#endif
+ vec_t sum1;
+
+ vec_t carry_prod = { .dw = vec_splat_u64 (0) };
+
+ /* The scalar multiplications compete with pointer and index increments for
+ * issue ports. Thus, increment the loop index in the middle of the loop so
+   * that the operands for the next iteration's multiplications can be
+ * loaded in time (looks horrible, yet helps performance) and make sure we
+ * use addressing with base reg + index reg + immediate displacement
+ * (so that only the single index needs incrementing, instead of multiple
+ * pointers). */
+#undef LOOP_ADVANCE
+#undef IDX_OFFSET
+
+#define LOOP_ADVANCE 4 * sizeof (mp_limb_t)
+#define IDX_OFFSET (LOOP_ADVANCE)
+ register ssize_t idx = 0 - IDX_OFFSET;
+
+ /*
+   * branch-on-count implicitly hints to the branch predictor as taken, while
+ * compare-and-branch hints as not taken. currently, using branch-on-count
+ * has a performance advantage, but it is not clear that it is generally the
+ * better choice (e.g., branch-on-count requires decrementing the separate
+ * counter). so, allow switching the loop condition to enable either
+ * category of branch instructions:
+ * - idx is less than an upper bound, for compare-and-branch
+ * - iteration counter greater than zero, for branch-on-count
+ */
+#define BRCTG
+#ifdef BRCTG
+ ssize_t iterations = (size_t)n / 4;
+#else
+ ssize_t const idx_bound = n * sizeof (mp_limb_t) - IDX_OFFSET;
+#endif
+
+ /* products will be transferred into VRs before adding up.
+ * see main loop below for comments on accumulation scheme. */
+ vec_t product0, product1, product2;
+
+ product0.dw = vec_splat_u64 (0);
+
+ switch ((size_t)n % 4)
+ {
+ case 0:
+ break;
+
+ case 1:
+ idx = 1 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ p3_low = s1p[0];
+ s390_umul_ppmm (p3_high, p3_low, s2limb);
+
+#ifdef ADD
+ rp0_addend.dw[1] = rp[0];
+ product0.dw[1] = p3_low;
+
+ sum0.sw = vec_add_u128 (product0.sw, rp0_addend.sw);
+ carry_vec1.dw = vec_permi (sum0.dw, sum0.dw, 0);
+
+ rp[0] = sum0.dw[1];
+#else
+ rp[0] = p3_low;
+#endif
+
+ carry_limb = p3_high;
+ break;
+
+ case 2:
+ p0_low = s1p[0];
+ p3_low = s1p[1];
+ idx = 2 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ s390_double_umul_ppmm (p0_high, p0_low, p3_high, p3_low, s2limb);
+
+ carry_prod.dw[0] = p3_low;
+
+ product0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ carry_limb = p3_high;
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed (rp, 0);
+ sum0.sw = vec_add_u128 (carry_prod.sw, rp0_addend.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, rp0_addend.sw);
+
+ sum1.sw = vec_add_u128 (sum0.sw, product0.sw);
+ carry_vec1.sw = vec_addc_u128 (sum0.sw, product0.sw);
+#else
+ sum1.sw = vec_add_u128 (carry_prod.sw, product0.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, product0.sw);
+#endif
+
+ vec_store_elements_reversed (rp, 0, sum1);
+
+ break;
+
+ case 3:
+ idx = 3 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ p0_low = s1p[0];
+ s390_umul_ppmm (p0_high, p0_low, s2limb);
+
+#ifdef ADD
+ rp0_addend.dw[1] = rp[0];
+ product0.dw[1] = p0_low;
+
+ sum0.sw = vec_add_u128 (product0.sw, rp0_addend.sw);
+ carry_vec1.dw = vec_permi (sum0.dw, sum0.dw, 0);
+
+ rp[0] = sum0.dw[1];
+#else
+ rp[0] = p0_low;
+#endif
+ carry_limb = p0_high;
+
+ p1_low = s1p[1];
+ p3_low = s1p[2];
+
+ s390_double_umul_ppmm (p1_high, p1_low, p3_high, p3_low, s2limb);
+
+ carry_prod.dw = vec_load_2di_as_pair (p3_low, carry_limb);
+ product1.dw = vec_load_2di_as_pair (p1_high, p1_low);
+ carry_limb = p3_high;
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed (rp, 8);
+ sum0.sw = vec_add_u128 (carry_prod.sw, rp0_addend.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, rp0_addend.sw);
+
+ sum1.sw = vec_adde_u128 (sum0.sw, product1.sw, carry_vec1.sw);
+ carry_vec1.sw = vec_addec_u128 (sum0.sw, product1.sw, carry_vec1.sw);
+#else
+ sum1.sw = vec_adde_u128 (carry_prod.sw, product1.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (carry_prod.sw, product1.sw, carry_vec0.sw);
+#endif
+ vec_store_elements_reversed (rp, 8, sum1);
+ break;
+ }
+
+#ifdef BRCTG
+ for (; iterations > 0; iterations--)
+ {
+#else
+ while (idx < idx_bound)
+ {
+#endif
+ vec_t overlap_addend0;
+ vec_t overlap_addend1;
+
+ /* The 64x64->128 MLGR multiplies two factors in GPRs and stores the
+ * result in a GPR pair. One of the factors is taken from the GPR pair
+ * and overwritten.
+ * To reuse factors, it turned out cheaper to load limbs multiple times
+ * than copying GPR contents. Enforce that and the use of addressing by
+ * base + index gpr + immediate displacement via inline asm.
+ */
+ ASM_LOADGPR (p0_low, s1p, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, s1p, idx, 8 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, s1p, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, s1p, idx, 24 + IDX_OFFSET);
+
+ /*
+ * accumulate products as follows (for addmul):
+ * | rp[i+3] | rp[i+2] | rp[i+1] | rp[i] |
+ * p0_high | p0_low |
+ * p1_high | p1_low | carry-limb in
+ * p2_high | p2_low |
+ * c-limb out <- p3_high | p3_low |
+ * | < 128-bit VR > < 128-bit VR >
+ *
+ * < rp1_addend > < rp0_addend >
+ * carry-chain 0 <- + <- + <- carry_vec0[127]
+ * < product1 > < product0 >
+ * carry-chain 1 <- + <- + <- carry_vec1[127]
+ * < overlap_addend1 > < overlap_addend0 >
+ *
+ * note that a 128-bit add with carry in + out is built from two insns
+ * - vec_adde_u128 (vacq) provides sum
+ * - vec_addec_u128 (vacccq) provides the new carry bit
+ */
+
+ s390_double_umul_ppmm (p0_high, p0_low, p1_high, p1_low, s2limb);
+
+ /*
+ * "barrier" to enforce scheduling loads for all limbs and first round
+ * of MLGR before anything else.
+ */
+ asm volatile("");
+
+ product0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed_idx (rp, idx, 0 + IDX_OFFSET);
+ rp1_addend = vec_load_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET);
+#endif
+      /* increment loop index to unblock dependent loads of limbs for the next
+ * iteration (see above at #define LOOP_ADVANCE) */
+ idx += LOOP_ADVANCE;
+
+ s390_double_umul_ppmm (p2_high, p2_low, p3_high, p3_low, s2limb);
+
+ overlap_addend0.dw = vec_load_2di_as_pair (p1_low, carry_limb);
+ asm volatile("");
+
+#ifdef ADD
+ sum0.sw = vec_adde_u128 (product0.sw, rp0_addend.sw, carry_vec0.sw);
+ sum1.sw = vec_adde_u128 (sum0.sw, overlap_addend0.sw, carry_vec1.sw);
+
+ carry_vec0.sw
+ = vec_addec_u128 (product0.sw, rp0_addend.sw, carry_vec0.sw);
+ carry_vec1.sw
+ = vec_addec_u128 (sum0.sw, overlap_addend0.sw, carry_vec1.sw);
+#else
+ sum1.sw = vec_adde_u128 (product0.sw, overlap_addend0.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (product0.sw, overlap_addend0.sw, carry_vec0.sw);
+#endif
+
+ asm volatile("");
+ product2.dw = vec_load_2di_as_pair (p2_high, p2_low);
+ overlap_addend1.dw = vec_load_2di_as_pair (p3_low, p1_high);
+
+ vec_t sum4;
+
+#ifdef ADD
+ vec_t sum3;
+ sum3.sw = vec_adde_u128 (product2.sw, rp1_addend.sw, carry_vec0.sw);
+ sum4.sw = vec_adde_u128 (sum3.sw, overlap_addend1.sw, carry_vec1.sw);
+
+ carry_vec0.sw
+ = vec_addec_u128 (product2.sw, rp1_addend.sw, carry_vec0.sw);
+ carry_vec1.sw
+ = vec_addec_u128 (sum3.sw, overlap_addend1.sw, carry_vec1.sw);
+#else
+ sum4.sw = vec_adde_u128 (product2.sw, overlap_addend1.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (product2.sw, overlap_addend1.sw, carry_vec0.sw);
+#endif
+ vec_store_elements_reversed_idx (rp, idx, IDX_OFFSET - LOOP_ADVANCE,
+ sum1);
+ vec_store_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET - LOOP_ADVANCE,
+ sum4);
+
+ carry_limb = p3_high;
+ }
+
+#ifdef ADD
+ carry_vec0.dw += carry_vec1.dw;
+ carry_limb += carry_vec0.dw[1];
+#else
+ carry_limb += carry_vec0.dw[1];
+#endif
+
+ return carry_limb;
+}
+
+#undef OPERATION_addmul_1
+#undef OPERATION_mul_1
+#undef FUNCNAME
+#undef ADD
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c
new file mode 100644
index 0000000..9a69fc3
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c
@@ -0,0 +1,476 @@
+/* Addmul_2 / mul_2 for IBM z13 or later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+
+#include "s390_64/z13/common-vec.h"
+
+#undef FUNCNAME
+
+#ifdef DO_INLINE
+# ifdef OPERATION_addmul_2
+# define ADD
+# define FUNCNAME inline_addmul_2
+# elif defined(OPERATION_mul_2)
+# define FUNCNAME inline_mul_2
+# else
+# error Missing define for operation to perform
+# endif
+#else
+# ifdef OPERATION_addmul_2
+# define ADD
+# define FUNCNAME mpn_addmul_2
+# elif defined(OPERATION_mul_2)
+# define FUNCNAME mpn_mul_2
+# else
+# error Missing define for operation to perform
+# endif
+#endif
+
+#ifdef DO_INLINE
+static inline mp_limb_t
+FUNCNAME (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n, const mp_limb_t *vp)
+ __attribute__ ((always_inline));
+
+static inline
+#endif
+mp_limb_t
+FUNCNAME (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n,
+ const mp_limb_t *vp)
+{
+
+ /* Combine 64x64 multiplication into GPR pairs (MLGR) with 128-bit adds in
+ VRs (using each VR as a single 128-bit accumulator).
+ The inner loop is unrolled to four limbs, with two blocks of four
+ multiplications each. Since the MLGR operation operates on even/odd GPR
+ pairs, pin the products appropriately. */
+
+ register mp_limb_t p0_high asm("r0");
+ register mp_limb_t p0_low asm("r1");
+
+ register mp_limb_t p1_high asm("r8");
+ register mp_limb_t p1_low asm("r9");
+
+ register mp_limb_t p2_high asm("r6");
+ register mp_limb_t p2_low asm("r7");
+
+ register mp_limb_t p3_high asm("r10");
+ register mp_limb_t p3_low asm("r11");
+
+ vec_t carry_prod = { .dw = vec_splat_u64 (0) };
+ vec_t zero = { .dw = vec_splat_u64 (0) };
+
+ /* two carry-bits for the 128-bit VR adds - stored in VRs */
+#ifdef ADD
+ vec_t carry_vec0 = { .dw = vec_splat_u64 (0) };
+#endif
+ vec_t carry_vec1 = { .dw = vec_splat_u64 (0) };
+
+ vec_t tmp;
+
+ vec_t sum0, sum1;
+
+ /* products transferred into VRs for accumulating there */
+ vec_t pv0, pv3;
+ vec_t pv1_low, pv1_high, pv2_low, pv2_high;
+ vec_t low, middle, high;
+#ifdef ADD
+ vec_t rp0, rp1;
+#endif
+
+ register mp_limb_t v0 asm("r12");
+ register mp_limb_t v1 asm("r5");
+ v0 = vp[0];
+ v1 = vp[1];
+
+ /* The scalar multiplications compete with pointer and index increments for
+ * issue ports. Thus, increment the loop index in the middle of the loop so
+   * that the operands for the next iteration's multiplications can be
+ * loaded in time (looks horrible, yet helps performance) and make sure we
+ * use addressing with base reg + index reg + immediate displacement
+ * (so that only the single index needs incrementing, instead of multiple
+ * pointers). */
+#undef LOOP_ADVANCE
+#define LOOP_ADVANCE (4 * sizeof (mp_limb_t))
+#define IDX_OFFSET (LOOP_ADVANCE)
+
+ register ssize_t idx = 0 - IDX_OFFSET;
+#ifdef BRCTG
+ ssize_t iterations = (size_t)n / 4;
+#else
+ ssize_t const idx_bound = n * sizeof (mp_limb_t) - IDX_OFFSET;
+#endif
+
+ /*
+ * To minimize latency in the carry chain, accumulate in VRs with 128-bit
+ * adds with carry in and out. As a downside, these require two insns for
+ * each add - one to calculate the sum, one to deliver the carry out.
+ * To reduce the overall number of insns to execute, combine adding up
+ * product limbs such that there cannot be a carry out and one (for mul) or
+ * two (for addmul) adds with carry chains.
+ *
+ * Since (2^64-1) * (2^64-1) = (2^128-1) - 2 * (2^64-1), we can add two
+ * limbs into each 128-bit product without causing carry out.
+ *
+ * For each block of 2 limbs * 2 limbs
+ *
+ * | u[i] * v[0] (p2) |
+ * | u[i] * v[1] (p0) |
+ * | u[i+1] * v[0](p1) |
+ * | u[i+1] * v[1](p3) |
+ * < 128 bits > < 128 bits >
+ *
+ * we can begin accumulating with "simple" carry-oblivious 128-bit adds:
+ * - p0 + low limb of p1
+ * + high limb of p2
+ * and combine resulting low limb with p2's low limb
+ * - p3 + high limb of p1
+ * + high limb of sum above
+   * ... which will result in two 128-bit limbs to be fed into the carry
+ * chain(s).
+ * Overall, that scheme saves instructions and improves performance, despite
+ * slightly increasing latency between multiplications and carry chain (yet
+ * not in the carry chain).
+ */
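+
+  /* A quick check of the "no carry out" claim above: with B = 2^64, the
+     largest product of two limbs is (B-1)^2 = B^2 - 2B + 1, so adding two
+     further limbs of at most B-1 each yields at most B^2 - 1, which still
+     fits in 128 bits.  */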
+
+#define LOAD_LOW_LIMB(VEC, LIMB) \
+ do \
+ { \
+ asm("vzero\t%[vec]\n\t" \
+ "vlvgg\t%[vec],%[limb],1" \
+ : [vec] "=v"(VEC) \
+ : [limb] "r"(LIMB)); \
+ } \
+ while (0)
+
+ /* for the 128-bit adds in the carry chain, to calculate a + b + carry-in we
+ * need paired vec_adde_u128 (delivers sum) and vec_addec_u128 (delivers new
+ * carry) */
+#define ADD_UP2_CARRY_INOUT(SUMIDX, CARRYIDX, ADDEND1, ADDEND2) \
+ do \
+ { \
+ sum##SUMIDX.sw \
+ = vec_adde_u128 (ADDEND1.sw, ADDEND2.sw, carry_vec##CARRYIDX.sw); \
+ carry_vec##CARRYIDX.sw \
+ = vec_addec_u128 (ADDEND1.sw, ADDEND2.sw, carry_vec##CARRYIDX.sw); \
+ } \
+ while (0)
+
+#define ADD_UP_CARRY_INOUT(SUMIDX, ADDEND1, ADDEND2) \
+ ADD_UP2_CARRY_INOUT (SUMIDX, SUMIDX, ADDEND1, ADDEND2)
+
+ /* variant without carry-in for prologue */
+#define ADD_UP2_CARRY_OUT(SUMIDX, CARRYIDX, ADDEND1, ADDEND2) \
+ do \
+ { \
+ sum##SUMIDX.sw = vec_add_u128 (ADDEND1.sw, ADDEND2.sw); \
+ carry_vec##CARRYIDX.sw = vec_addc_u128 (ADDEND1.sw, ADDEND2.sw); \
+ } \
+ while (0)
+
+#define ADD_UP_CARRY_OUT(SUMIDX, ADDEND1, ADDEND2) \
+ ADD_UP2_CARRY_OUT (SUMIDX, SUMIDX, ADDEND1, ADDEND2)
+
+ /* prologue for 4x-unrolled main loop */
+ switch ((size_t)n % 4)
+ {
+ case 1:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 0);
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v0, v1);
+ carry_prod.dw = vec_load_2di_as_pair (p1_high, p1_low);
+
+/* gcc tries to be too clever and copies (vlr) from a reg that is already
+ * zero.  vzero is cheaper. */
+# define NEW_CARRY(VEC, LIMB) \
+ do \
+ { \
+ asm("vzero\t%[vec]\n\t" \
+ "vlvgg\t%[vec],%[limb],1" \
+ : [vec] "=v"(VEC) \
+ : [limb] "r"(LIMB)); \
+ } \
+ while (0)
+
+ NEW_CARRY (tmp, p0_high);
+
+ carry_prod.sw = vec_add_u128 (carry_prod.sw, tmp.sw);
+#ifdef ADD
+ carry_vec1.dw[1] = __builtin_add_overflow (rp[0], p0_low, rp);
+#else
+ rp[0] = p0_low;
+#endif
+ idx += sizeof (mp_limb_t);
+ break;
+
+ case 2:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 8);
+ ASM_LOADGPR_BASE (p2_low, up, 0);
+ ASM_LOADGPR_BASE (p3_low, up, 8);
+
+ asm(""
+ : "=r"(p0_low), "=r"(p2_low)
+ : "r"(p3_low), "0"(p0_low), "r"(p1_low), "1"(p2_low));
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+ low.dw = vec_permi (middle.dw, pv2_low.dw, 3);
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+#ifdef ADD
+ rp0 = vec_load_elements_reversed (rp, 0);
+ ADD_UP_CARRY_OUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_OUT (1, sum0, low);
+ vec_store_elements_reversed (rp, 0, sum1);
+ carry_prod = high;
+
+ idx += 2 * sizeof (mp_limb_t);
+ break;
+
+ case 3:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 0);
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v0, v1);
+ carry_prod.dw = vec_load_2di_as_pair (p1_high, p1_low);
+ NEW_CARRY (tmp, p0_high);
+ carry_prod.sw = vec_add_u128 (carry_prod.sw, tmp.sw);
+
+#ifdef ADD
+ carry_vec1.dw[1] = __builtin_add_overflow (rp[0], p0_low, rp);
+#else
+ rp[0] = p0_low;
+#endif
+
+ ASM_LOADGPR_BASE (p0_low, up, 8);
+ ASM_LOADGPR_BASE (p1_low, up, 16);
+ ASM_LOADGPR_BASE (p2_low, up, 8);
+ ASM_LOADGPR_BASE (p3_low, up, 16);
+
+ asm(""
+ : "=r"(p0_low), "=r"(p2_low)
+ : "r"(p3_low), "0"(p0_low), "r"(p1_low), "1"(p2_low));
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+
+ low.dw = vec_permi (middle.dw, pv2_low.dw, 3);
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+
+#ifdef ADD
+ vec_t rp0 = vec_load_elements_reversed (rp, 8);
+ ADD_UP_CARRY_OUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_INOUT (1, sum0, low);
+
+ vec_store_elements_reversed (rp, 8, sum1);
+
+ carry_prod = high;
+
+ idx += 3 * sizeof (mp_limb_t);
+ break;
+ }
+
+ /*
+   * branch-on-count implicitly hints to the branch predictor as taken, while
+ * compare-and-branch hints as not taken. currently, using branch-on-count
+ * has a performance advantage, but it is not clear that it is generally
+ * the better choice (e.g., branch-on-count requires decrementing the
+ * separate counter). so, allow switching the loop condition to enable
+ * either category of branch instructions:
+ * - idx is less than an upper bound, for compare-and-branch
+ * - iteration counter greater than zero, for branch-on-count
+ */
+#ifdef BRCTG
+ for (; iterations > 0; iterations--)
+ {
+#else
+ while (idx < idx_bound)
+ {
+#endif
+ /* The 64x64->128 MLGR multiplies two factors in GPRs and stores the
+ * result in a GPR pair. One of the factors is taken from the GPR pair
+ * and overwritten.
+ * To reuse factors, it turned out cheaper to load limbs multiple times
+ * than copying GPR contents. Enforce that and the use of addressing by
+ * base + index gpr + immediate displacement via inline asm.
+ */
+ ASM_LOADGPR (p0_low, up, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, up, idx, 8 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, up, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, up, idx, 8 + IDX_OFFSET);
+
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+
+ ASM_LOADGPR (p0_low, up, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, up, idx, 24 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, up, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, up, idx, 24 + IDX_OFFSET);
+
+ idx += LOOP_ADVANCE;
+
+ /*
+ * "barrier" to enforce scheduling the index increment before the second
+ * block of multiplications. not required for clang.
+ */
+#ifndef __clang__
+ asm(""
+ : "=r"(idx), "=r"(p0_high), "=r"(p2_high)
+ : "0"(idx), "1"(p0_high), "2"(p2_high));
+#endif
+
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ /*
+ * "barrier" to enforce scheduling all MLGRs first, before any adding
+ * up. note that clang produces better code without.
+ */
+#ifndef __clang__
+ asm(""
+ : "=v"(pv0.sw), "=v"(pv3.sw)
+ : "1"(pv3.sw), "0"(pv0.sw), "r"(p0_high), "r"(p2_high));
+#endif
+
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+
+ low.dw = vec_permi (middle.dw, pv2_low.dw,
+ 3); /* least-significant doubleword from both vectors */
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+
+#ifdef ADD
+ rp0 = vec_load_elements_reversed_idx (rp, idx,
+ 0 + IDX_OFFSET - LOOP_ADVANCE);
+ ADD_UP_CARRY_INOUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_INOUT (1, sum0, low);
+
+ vec_store_elements_reversed_idx (rp, idx, 0 + IDX_OFFSET - LOOP_ADVANCE,
+ sum1);
+
+ carry_prod = high;
+
+ vec_t pv0_2, pv3_2;
+ vec_t pv1_low_2, pv1_high_2, pv2_low_2, pv2_high_2;
+ vec_t low_2, middle_2, high_2;
+ vec_t sum2, sum3;
+
+ pv0_2.dw = vec_load_2di_as_pair (p0_high, p0_low);
+ LOAD_LOW_LIMB (pv1_low_2, p1_low);
+ LOAD_LOW_LIMB (pv1_high_2, p1_high);
+
+ pv0_2.sw = vec_add_u128 (pv0_2.sw, pv1_low_2.sw);
+ LOAD_LOW_LIMB (pv2_high_2, p2_high);
+ pv3_2.dw = vec_load_2di_as_pair (p3_high, p3_low);
+ pv3_2.sw = vec_add_u128 (pv3_2.sw, pv1_high_2.sw);
+ middle_2.sw = vec_add_u128 (pv0_2.sw, pv2_high_2.sw);
+
+ LOAD_LOW_LIMB (pv2_low_2, p2_low);
+ low_2.dw
+ = vec_permi (middle_2.dw, pv2_low_2.dw,
+ 3); /* least-significant doubleword from both vectors */
+ middle_2.dw = vec_permi (zero.dw, middle_2.dw, 0);
+ high_2.sw = vec_add_u128 (middle_2.sw, pv3_2.sw);
+
+ /*
+ * another "barrier" to influence scheduling. (also helps in clang)
+ */
+ asm("" : : "v"(pv0_2.sw), "r"(p2_high), "r"(p3_high), "v"(pv3_2.sw));
+
+#ifdef ADD
+ rp1 = vec_load_elements_reversed_idx (rp, idx,
+ 16 + IDX_OFFSET - LOOP_ADVANCE);
+ ADD_UP2_CARRY_INOUT (2, 0, rp1, carry_prod);
+#else
+ sum2 = carry_prod;
+#endif
+ ADD_UP2_CARRY_INOUT (3, 1, sum2, low_2);
+
+ vec_store_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET - LOOP_ADVANCE,
+ sum3);
+
+ carry_prod = high_2;
+ }
+
+#ifdef ADD
+ sum0.sw = vec_adde_u128 (carry_prod.sw, carry_vec0.sw, carry_vec1.sw);
+#else
+ sum0.sw = vec_add_u128 (carry_prod.sw, carry_vec1.sw);
+#endif
+
+ *(mp_ptr) (((char *)rp) + idx + 0 + IDX_OFFSET) = (mp_limb_t)sum0.dw[1];
+
+ return (mp_limb_t)sum0.dw[0];
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h b/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h
new file mode 100644
index 0000000..a59e6ee
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h
@@ -0,0 +1,175 @@
+/* Common vector helpers and macros for IBM z13 and later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#ifndef __S390_64_Z13_COMMON_VEC_H
+#define __S390_64_Z13_COMMON_VEC_H
+
+#include <unistd.h>
+#include <vecintrin.h>
+
+/*
+ * Vector intrinsics use vector element types that kind-of make sense for the
+ * specific operation (e.g., vec_permi permutes doublewords). To use VRs
+ * interchangeably with different intrinsics, typedef the two variants and wrap
+ * them in a union.
+ */
+#define VLEN_BYTES 16
+typedef unsigned long long v2di __attribute__ ((vector_size (VLEN_BYTES)));
+typedef unsigned char v16qi __attribute__ ((vector_size (VLEN_BYTES)));
+
+/*
+ * The Z vector intrinsics use vectors with different element types (e.g.,
+ * v16qi for the 128-bit adds and v2di for vec_permi).
+ */
+union vec
+{
+ v2di dw;
+ v16qi sw;
+};
+
+typedef union vec vec_t;
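+
+/* E.g., a value built through the v2di view can feed a 128-bit add through
+ * the v16qi view:
+ *
+ *   vec_t a = { .dw = vec_splat_u64 (1) }, b = { .dw = vec_splat_u64 (2) };
+ *   vec_t s;
+ *   s.sw = vec_add_u128 (a.sw, b.sw);
+ */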
+
+/*
+ * single-instruction combine of two GPRs into a VR
+ */
+static inline v2di
+vec_load_2di_as_pair (unsigned long a, unsigned long b)
+{
+ v2di res;
+ __asm__("vlvgp\t%0,%1,%2" : "=v"(res) : "r"(a), "r"(b));
+ return res;
+}
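+
+/* vlvgp places the first operand in dw[0] and the second in dw[1], so
+   vec_load_2di_as_pair (high, low) matches the high:low order of an MLGR
+   product.  */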
+
+/*
+ * 64x64 mult where caller needs to care about proper register allocation:
+ * multiply xl with m1, treating both as unsigned, and place the result in
+ * xh:xl.
+ * mlgr operates on register pairs, so xh must be an even gpr followed by xl
+ */
+#define s390_umul_ppmm(xh, xl, m1) \
+ do \
+ { \
+ asm("mlgr\t%0,%3" : "=r"(xh), "=r"(xl) : "%1"(xl), "r"(m1)); \
+ } \
+ while (0);
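+
+/*
+ * usage sketch (the even/odd pair is pinned by the caller, as in
+ * z13/addmul_1.c):
+ *
+ *   register mp_limb_t hi asm ("r0");
+ *   register mp_limb_t lo asm ("r1");
+ *   lo = a;
+ *   s390_umul_ppmm (hi, lo, b);   // now hi:lo = a * b
+ */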
+
+/*
+ * two 64x64 multiplications, scheduled so that they will dispatch and issue to
+ * different sides: each mlgr is dispatched alone in an instruction group and
+ * subsequent groups will issue on different execution sides.
+ * there is a variant where both products use the same multiplicand and one
+ * that uses two different multiplicands. constraints from s390_umul_ppmm apply
+ * here.
+ */
+#define s390_double_umul_ppmm(X0H, X0L, X1H, X1L, MX) \
+ do \
+ { \
+ asm("mlgr\t%[x0h],%[mx]\n\t" \
+ "mlgr\t%[x1h],%[mx]" \
+ : [x0h] "=&r"(X0H), [x0l] "=&r"(X0L), [x1h] "=r"(X1H), \
+ [x1l] "=r"(X1L) \
+ : "[x0l]"(X0L), "[x1l]"(X1L), [mx] "r"(MX)); \
+ } \
+ while (0);
+
+#define s390_double_umul_ppmm_distinct(X0H, X0L, X1H, X1L, MX0, MX1) \
+ do \
+ { \
+ asm("mlgr\t%[x0h],%[mx0]\n\t" \
+ "mlgr\t%[x1h],%[mx1]" \
+ : [x0h] "=&r"(X0H), [x0l] "=&r"(X0L), [x1h] "=r"(X1H), \
+ [x1l] "=r"(X1L) \
+ : "[x0l]"(X0L), "[x1l]"(X1L), [mx0] "r"(MX0), [mx1] "r"(MX1)); \
+ } \
+ while (0);
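+
+/* Illustrative use of the paired variants, with hypothetical limbs a0, a1 and
+   multiplicands m, m0, m1:
+
+     mp_limb_t h0, l0 = a0, h1, l1 = a1;
+     s390_double_umul_ppmm (h0, l0, h1, l1, m);               // h0:l0 = a0*m, h1:l1 = a1*m
+     s390_double_umul_ppmm_distinct (h0, l0, h1, l1, m0, m1); // h0:l0 = a0*m0, h1:l1 = a1*m1
+*/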
+
+#define ASM_LOADGPR_BASE(DST, BASE, OFFSET) \
+ asm volatile("lg\t%[r],%[off](%[b])" \
+ : [r] "=r"(DST) \
+ : [b] "a"(BASE), [off] "L"(OFFSET) \
+ : "memory");
+
+#define ASM_LOADGPR(DST, BASE, INDEX, OFFSET) \
+ asm volatile("lg\t%[r],%[off](%[b],%[x])" \
+ : [r] "=r"(DST) \
+ : [b] "a"(BASE), [x] "a"(INDEX), [off] "L"(OFFSET) \
+ : "memory");
+
+/*
+ * Load a vector register from memory and swap the two 64-bit doubleword
+ * elements.
+ */
+static inline vec_t
+vec_load_elements_reversed_idx (mp_limb_t const *base, ssize_t const index,
+ ssize_t const offset)
+{
+ vec_t res;
+ char *ptr = (char *)base;
+
+ res.sw = *(v16qi *)(ptr + index + offset);
+ res.dw = vec_permi (res.dw, res.dw, 2);
+
+ return res;
+}
+
+static inline vec_t
+vec_load_elements_reversed (mp_limb_t const *base, ssize_t const offset)
+{
+ return vec_load_elements_reversed_idx (base, 0, offset);
+}
+
+/*
+ * Store a vector register to memory and swap the two 64-bit doubleword
+ * elements.
+ */
+static inline void
+vec_store_elements_reversed_idx (mp_limb_t *base, ssize_t const index,
+ ssize_t const offset, vec_t vec)
+{
+ char *ptr = (char *)base;
+
+ vec.dw = vec_permi (vec.dw, vec.dw, 2);
+ *(v16qi *)(ptr + index + offset) = vec.sw;
+}
+
+static inline void
+vec_store_elements_reversed (mp_limb_t *base, ssize_t const offset, vec_t vec)
+{
+ vec_store_elements_reversed_idx (base, 0, offset, vec);
+}
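+
+/* Illustrative round trip: the load swaps the doublewords of ap[0..1] into
+   the order the 128-bit arithmetic intrinsics expect, and the store swaps
+   them back:
+
+     vec_t v = vec_load_elements_reversed (ap, 0);
+     vec_store_elements_reversed (rp, 0, v);   // rp[0..1] = ap[0..1]
+*/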
+
+#define ASM_VZERO(VEC) \
+ do \
+ { \
+ asm("vzero\t%[vec]" : [vec] "=v"(VEC)); \
+ } \
+ while (0)
+
+#endif
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h
new file mode 100644
index 0000000..50e7f39
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h
@@ -0,0 +1,162 @@
+/* S/390-64 for IBM z13 gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+#define HAVE_NATIVE_mpn_addmul_2 1
+#define HAVE_NATIVE_mpn_mul_2 1
+
+/* Generated by tuneup.c, 2021-07-30, gcc 10.2 */
+
+#define DIVREM_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIVREM_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_1P_METHOD 2
+#define MOD_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1N_TO_MOD_1_1_THRESHOLD 17
+#define MOD_1U_TO_MOD_1_1_THRESHOLD 15
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD 0 /* never mpn_mod_1_1p */
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD 0 /* never mpn_mod_1s_2p */
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 5
+#define USE_PREINV_DIVREM_1 0
+#define DIV_QR_1N_PI1_METHOD 3
+#define DIV_QR_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_2_PI2_THRESHOLD 996
+#define DIVEXACT_1_THRESHOLD 4
+#define BMOD_1_TO_MOD_1_THRESHOLD 0 /* always */
+
+#define DIV_1_VS_MUL_1_PERCENT 404
+
+#define MUL_TOOM22_THRESHOLD 23
+#define MUL_TOOM33_THRESHOLD 94
+#define MUL_TOOM44_THRESHOLD 166
+#define MUL_TOOM6H_THRESHOLD 286
+#define MUL_TOOM8H_THRESHOLD 626
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD 113
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD 138
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD 143
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD 145
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD 130
+
+#define SQR_BASECASE_THRESHOLD 0 /* always (native) */
+#define SQR_TOOM2_THRESHOLD 12
+#define SQR_TOOM3_THRESHOLD 84
+#define SQR_TOOM4_THRESHOLD 234
+#define SQR_TOOM6_THRESHOLD 318
+#define SQR_TOOM8_THRESHOLD 478
+
+#define MULMID_TOOM42_THRESHOLD 42
+
+#define MULMOD_BNM1_THRESHOLD 13
+#define SQRMOD_BNM1_THRESHOLD 7
+
+#define MUL_FFT_MODF_THRESHOLD 332 /* k = 5 */
+#define MUL_FFT_TABLE3 \
+ { { 332, 5}, { 19, 6}, { 10, 5}, { 21, 6}, \
+ { 21, 7}, { 21, 8}, { 11, 7}, { 24, 8}, \
+ { 13, 7}, { 27, 8}, { 15, 7}, { 31, 8}, \
+ { 17, 7}, { 35, 8}, { 19, 7}, { 39, 8}, \
+ { 21, 9}, { 11, 8}, { 27, 9}, { 15, 8}, \
+ { 35, 9}, { 19, 8}, { 41, 9}, { 23, 8}, \
+ { 47, 9}, { 27,10}, { 15, 9}, { 39,10}, \
+ { 23, 9}, { 47,11}, { 15,10}, { 31, 9}, \
+ { 67,10}, { 47,11}, { 2048,12}, { 4096,13}, \
+ { 8192,14}, { 16384,15}, { 32768,16}, { 65536,17}, \
+ { 131072,18}, { 262144,19}, { 524288,20}, {1048576,21}, \
+ {2097152,22}, {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 47
+#define MUL_FFT_THRESHOLD 2752
+
+#define SQR_FFT_MODF_THRESHOLD 240 /* k = 5 */
+#define SQR_FFT_TABLE3 \
+ { { 240, 5}, { 8, 4}, { 17, 5}, { 13, 6}, \
+ { 7, 5}, { 15, 6}, { 8, 5}, { 17, 6}, \
+ { 9, 5}, { 19, 6}, { 15, 7}, { 8, 6}, \
+ { 17, 7}, { 9, 6}, { 19, 7}, { 10, 6}, \
+ { 21, 7}, { 17, 8}, { 9, 7}, { 20, 8}, \
+ { 11, 7}, { 23, 8}, { 13, 9}, { 7, 8}, \
+ { 21, 9}, { 11, 8}, { 23, 9}, { 15, 8}, \
+ { 31, 9}, { 19, 8}, { 39, 9}, { 23,10}, \
+ { 15, 9}, { 39,10}, { 23,11}, { 15,10}, \
+ { 31, 9}, { 63,10}, { 47,11}, { 2048,12}, \
+ { 4096,13}, { 8192,14}, { 16384,15}, { 32768,16}, \
+ { 65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \
+ {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 52
+#define SQR_FFT_THRESHOLD 1856
+
+#define MULLO_BASECASE_THRESHOLD 0 /* always */
+#define MULLO_DC_THRESHOLD 25
+#define MULLO_MUL_N_THRESHOLD 5397
+#define SQRLO_BASECASE_THRESHOLD 0 /* always */
+#define SQRLO_DC_THRESHOLD 396
+#define SQRLO_SQR_THRESHOLD 3704
+
+#define DC_DIV_QR_THRESHOLD 15
+#define DC_DIVAPPR_Q_THRESHOLD 50
+#define DC_BDIV_QR_THRESHOLD 66
+#define DC_BDIV_Q_THRESHOLD 202
+
+#define INV_MULMOD_BNM1_THRESHOLD 46
+#define INV_NEWTON_THRESHOLD 29
+#define INV_APPR_THRESHOLD 13
+
+#define BINV_NEWTON_THRESHOLD 312
+#define REDC_1_TO_REDC_2_THRESHOLD 79
+#define REDC_2_TO_REDC_N_THRESHOLD 0 /* always */
+
+#define MU_DIV_QR_THRESHOLD 979
+#define MU_DIVAPPR_Q_THRESHOLD 979
+#define MUPI_DIV_QR_THRESHOLD 13
+#define MU_BDIV_QR_THRESHOLD 942
+#define MU_BDIV_Q_THRESHOLD 1367
+
+#define POWM_SEC_TABLE 3,19,215,1730
+
+#define GET_STR_DC_THRESHOLD 10
+#define GET_STR_PRECOMPUTE_THRESHOLD 15
+#define SET_STR_DC_THRESHOLD 882
+#define SET_STR_PRECOMPUTE_THRESHOLD 2520
+
+#define FAC_DSC_THRESHOLD 228
+#define FAC_ODD_THRESHOLD 24
+
+#define MATRIX22_STRASSEN_THRESHOLD 19
+#define HGCD2_DIV1_METHOD 1
+#define HGCD_THRESHOLD 61
+#define HGCD_APPR_THRESHOLD 51
+#define HGCD_REDUCE_THRESHOLD 1962
+#define GCD_DC_THRESHOLD 217
+#define GCDEXT_DC_THRESHOLD 263
+#define JACOBI_BASE_METHOD 4
+
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm
new file mode 100644
index 0000000..81c5174
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm
@@ -0,0 +1,76 @@
+dnl S/390-64 mpn_hamdist
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 ?
+
+define(`ap', `%r2')
+define(`bp', `%r3')
+define(`n', `%r4')
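+
+C XOR the operands limb-wise, then count bits as in popcount: vpopct with
+C element-size code 3 yields per-doubleword counts, which vsumqg folds into
+C one quadword sum at the end.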
+
+ASM_START()
+PROLOGUE(mpn_hamdist)
+ vzero %v30
+ tmll n, 1
+ srlg n, n, 1
+ je L(top)
+
+L(odd): vllezg %v16, 0(ap)
+ vllezg %v17, 0(bp)
+ vx %v16, %v16, %v17
+ vpopct %v30, %v16, 3
+ la ap, 8(ap)
+ la bp, 8(bp)
+ clgije n, 0, L(end)
+
+L(top): vl %v16, 0(ap), 3
+ vl %v17, 0(bp), 3
+ vx %v16, %v16, %v17
+ vpopct %v20, %v16, 3
+ vag %v30, %v30, %v20
+ la ap, 16(ap)
+ la bp, 16(bp)
+ brctg n, L(top)
+
+L(end): vzero %v29
+ vsumqg %v30, %v30, %v29
+ vlgvg %r2, %v30, 1(%r0)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm
new file mode 100644
index 0000000..04eb718
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm
@@ -0,0 +1,149 @@
+dnl S/390-64 mpn_mul_1 and mpn_mul_1c.
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in the inner loop instead of preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.25
+
+
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4')
+define(`b0', `%r5')
+define(`cy', `%r6')
+
+define(`idx', `%r4')
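+
+C Each 128-bit partial product is packed into a VR with vlvgp and accumulated
+C with the 128-bit add-with-carry ops vacq/vacccq; L(top) and L(mid) together
+C retire four limbs per pass.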
+
+ASM_START()
+
+PROLOGUE(mpn_mul_1c)
+ stmg %r6, %r13, 48(%r15)
+ j L(ent)
+EPILOGUE()
+
+PROLOGUE(mpn_mul_1)
+ stmg %r6, %r13, 48(%r15)
+ lghi %r6, 0
+L(ent): vzero %v2
+ srlg %r11, an, 2
+
+ tmll an, 1
+ je L(bx0)
+L(bx1): tmll an, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ lg %r13, 0(ap)
+ mlgr %r12, b0
+ algr %r13, %r6
+ lghi %r6, 0
+ alcgr %r12, %r6
+ stg %r13, 0(rp)
+ cgije %r11, 0, L(1)
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ lg %r9, 0(ap)
+ mlgr %r8, b0
+ algr %r9, %r6
+ lghi %r6, 0
+ alcgr %r8, %r6
+ stg %r9, 0(rp)
+ j L(cj1)
+
+L(bx0): tmll an, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lgr %r12, %r6
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lgr %r8, %r6
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vacq %v1, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+L(1): vlgvg %r2, %v2, 1
+ agr %r2, %r12
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c
new file mode 100644
index 0000000..7584dc8
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c
@@ -0,0 +1,31 @@
+/* mul_1 for IBM z13 or later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "s390_64/z13/addmul_1.c"
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm
new file mode 100644
index 0000000..ec61201
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm
@@ -0,0 +1,121 @@
+dnl S/390-64 mpn_mul_2
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 ?
+C z196 ?
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.8
+
+
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`un', `%r4')
+define(`vp', `%r5')
+
+define(`idx', `%r12')
+define(`v0', `%r11')
+define(`v1', `%r5')
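+
+C The W<k> comments appear to label limb weights: a VR built with vlvgp holds
+C the (W<k+1>:W<k>) halves of one 128-bit partial product.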
+
+ASM_START()
+PROLOGUE(mpn_mul_2)
+ stmg %r6, %r12, 48(%r15)
+
+ vzero %v27
+ vzero %v28
+ vzero %v29
+ vzero %v30
+ lghi %r10, 0
+ lg v0, 0(vp)
+ lg v1, 8(vp)
+ tmll un, 1
+ srlg un, un, 1
+ je L(evn)
+
+L(odd): lg %r7, 0(up)
+ mlgr %r6, v0 C W2 W1
+ lg %r1, 0(up)
+ stg %r7, 0(rp)
+ lghi idx, 8
+dnl clgije un, 0, L(end)
+ j L(top)
+
+L(evn): lghi %r6, 0
+ lghi idx, 0
+ lghi %r1, 0
+
+L(top): lg %r9, 0(idx, up)
+ mlgr %r0, v1 C W2 W1
+ mlgr %r8, v1 C W3 W2
+ vlvgp %v22, %r0, %r1 C W2 W1
+ vlvgp %v23, %r9, %r6 C W2 W1
+ lg %r1, 0(idx, up)
+ lg %r7, 8(idx, up)
+ mlgr %r0, v0 C W2 W1
+ mlgr %r6, v0 C W3 W2
+ vlvgp %v20, %r0, %r1 C W2 W1
+ vlvgp %v21, %r7, %r10 C W2 W1
+ vacq %v24, %v22, %v23, %v27 C
+ vacccq %v27, %v22, %v23, %v27 C carry critical path 1
+ vacq %v23, %v24, %v20, %v28 C
+ vacccq %v28, %v24, %v20, %v28 C carry critical path 2
+ vacq %v20, %v23, %v21, %v29 C
+ vacccq %v29, %v23, %v21, %v29 C carry critical path 3
+ vpdi %v20, %v20, %v20, 4
+ lg %r1, 8(idx, up)
+ vst %v20, 0(idx, rp), 3
+ lgr %r10, %r8
+ la idx, 16(idx)
+ brctg un, L(top)
+
+L(end): mlgr %r0, v1
+ algr %r1, %r6
+ alcgr %r0, un
+ algr %r1, %r8
+ alcgr %r0, un
+ vag %v27, %v27, %v28
+ vag %v29, %v29, %v30
+ vag %v27, %v27, %v29
+ vlgvg %r10, %v27, 1
+ algr %r1, %r10
+ stg %r1, 0(idx, rp)
+ alcgr %r0, un
+ lgr %r2, %r0
+
+ lmg %r6, %r12, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm
new file mode 100644
index 0000000..0de1150
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm
@@ -0,0 +1,264 @@
+dnl S/390-64 mpn_mul_basecase.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4') C 32
+define(`bp', `%r5') C 40
+define(`bn', `%r6') C 48
+
+define(`idx', `%r14')
+define(`b0', `%r10')
+
+dnl live in addmul_1:
+dnl r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14
+dnl xx xx rp ap an bp xx xx xx xx b0 i xx xx idx
+dnl stack: bn
+
+dnl TODO
+dnl * Have mul_1 start without initial (un mod 4) separation, instead handle
+dnl after loop. Then fall into 4 separate addmul_1 loops.
+dnl * Streamline handling of bn, an, %r11 to reduce the # of memops.
+
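+dnl MUL_1 and ADDMUL_1 expand full mul_1/addmul_1 bodies inline; the pushdef
+dnl of `L' suffixes every local label (_m1 resp. _am1) so that both
+dnl expansions can coexist within a single function.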
+define(`MUL_1',`
+pushdef(`L',
+defn(`L')$1`'_m1)
+ vzero %v2
+ srlg %r11, %r0, 2
+
+ tmll %r0, 1
+ je L(bx0)
+L(bx1): tmll %r0, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ lg %r13, 0(ap)
+ mlgr %r12, b0
+ stg %r13, 0(rp)
+ cgijne %r11, 0, L(cj0)
+
+L(1): stg %r12, 8(rp)
+ lmg %r6, %r14, 48(%r15)
+ br %r14
+
+L(b11): lghi idx, -8
+ lg %r9, 0(ap)
+ mlgr %r8, b0
+ stg %r9, 0(rp)
+ j L(cj1)
+
+L(bx0): tmll %r0, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lghi %r12, 0
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lghi %r8, 0
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vacq %v1, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vlgvg %r0, %v2, 1
+ algr %r0, %r12
+ stg %r0, 32(idx, rp)
+popdef(`L')
+')
+
+define(`ADDMUL_1',`
+pushdef(`L',
+defn(`L')$1`'_am1)
+ vzero %v0
+ vzero %v2
+ srlg %r11, %r0, 2
+
+ tmll %r0, 1
+ je L(bx0)
+L(bx1): tmll %r0, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ vleg %v2, 0(rp), 1
+ lg %r13, 0(ap)
+ vzero %v4
+ mlgr %r12, b0
+ vlvgg %v4, %r13, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ vleg %v2, 0(rp), 1
+ lg %r9, 0(ap)
+ vzero %v4
+ mlgr %r8, b0
+ vlvgg %v4, %r9, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj1)
+
+L(bx0): tmll %r0, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lghi %r12, 0
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lghi %r8, 0
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vl %v4, 32(idx, rp), 3
+ vpdi %v4, %v4, %v4, 4
+ vacq %v5, %v6, %v4, %v0
+ vacccq %v0, %v6, %v4, %v0
+ vacq %v1, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vag %v2, %v0, %v2
+ vlgvg %r0, %v2, 1
+ algr %r0, %r12
+ stg %r0, 32(idx, rp)
+popdef(`L')
+')
+
+
+ASM_START()
+
+PROLOGUE(mpn_mul_basecase)
+ stmg %r4, %r14, 32(%r15)
+
+ lgr %r4, bn
+
+ lg %r0, 32(%r15)
+ lg b0, 0(bp)
+ MUL_1() C implicitly pass r0 = an
+
+ aghi %r4, -1
+ je L(end)
+L(top): lg %r0, 32(%r15)
+ la bp, 8(bp)
+ la rp, 8(rp)
+ lg b0, 0(bp)
+ ADDMUL_1() C implicitly pass r0 = an
+ brctg %r4, L(top)
+
+L(end): lmg %r6, %r14, 48(%r15)
+ br %r14
+EPILOGUE()
+ .section .note.GNU-stack
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c
new file mode 100644
index 0000000..f1b7160
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c
@@ -0,0 +1,124 @@
+/* mpn_mul_basecase for IBM z13 and later -- Internal routine to multiply two
+ natural numbers of length m and n.
+
+ THIS IS AN INTERNAL FUNCTION WITH A MUTABLE INTERFACE. IT IS ONLY
+ SAFE TO REACH THIS FUNCTION THROUGH DOCUMENTED INTERFACES.
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include <stdlib.h>
+
+#include "gmp-impl.h"
+
+/* Note: we explicitly inline all mul and addmul routines here to reduce the
+ * number of branches in prologues of unrolled functions. That comes at the
+ * cost of duplicating common loop bodies in object code. */
+#define DO_INLINE
+
+/*
+ * Tweak loop conditions in the addmul subroutines to enable use of
+ * branch-relative-on-count (BRCTG) instructions, which currently results in
+ * better performance.
+ */
+#define BRCTG
+
+#include "s390_64/z13/common-vec.h"
+
+#define OPERATION_mul_1
+#include "s390_64/z13/addmul_1.c"
+#undef OPERATION_mul_1
+
+#define OPERATION_addmul_1
+#include "s390_64/z13/addmul_1.c"
+#undef OPERATION_addmul_1
+
+#define OPERATION_mul_2
+#include "s390_64/z13/aormul_2.c"
+#undef OPERATION_mul_2
+
+#define OPERATION_addmul_2
+#include "s390_64/z13/aormul_2.c"
+#undef OPERATION_addmul_2
+
+void
+mpn_mul_basecase (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp,
+ mp_size_t vn)
+{
+ ASSERT (un >= vn);
+ ASSERT (vn >= 1);
+ ASSERT (!MPN_OVERLAP_P (rp, un + vn, up, un));
+ ASSERT (!MPN_OVERLAP_P (rp, un + vn, vp, vn));
+
+ /* The implementations of (add)mul_1/2 are 4x-unrolled. Pull out the branch
+ * for un%4 and inline specific variants. */
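+
+  /* For example (hypothetical sizes): un = 7, vn = 5 runs one mul_2 pass,
+     one addmul_2 pass and one addmul_1 pass, all via the un%4 == 3
+     variants. */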
+
+#define BRANCH_FOR_MOD(N) \
+ do \
+ { \
+ if (vn >= 2) \
+ { \
+ rp[un + 1] = inline_mul_2 (rp, up, un, vp); \
+ rp += 2, vp += 2, vn -= 2; \
+ } \
+ else \
+ { \
+ rp[un] = inline_mul_1 (rp, up, un, vp[0]); \
+ return; \
+ } \
+ \
+ while (vn >= 2) \
+ { \
+ rp[un + 2 - 1] = inline_addmul_2 (rp, up, un, vp); \
+ rp += 2, vp += 2, vn -= 2; \
+ } \
+ \
+ while (vn >= 1) \
+ { \
+ rp[un] = inline_addmul_1 (rp, up, un, vp[0]); \
+ rp += 1, vp += 1, vn -= 1; \
+ } \
+ } \
+ while (0);
+
+ switch (((size_t)un) % 4)
+ {
+ case 0:
+ BRANCH_FOR_MOD (0);
+ break;
+ case 1:
+ BRANCH_FOR_MOD (1);
+ break;
+ case 2:
+ BRANCH_FOR_MOD (2);
+ break;
+ case 3:
+ BRANCH_FOR_MOD (3);
+ break;
+ }
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm
new file mode 100644
index 0000000..35b1fc4
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm
@@ -0,0 +1,69 @@
+dnl S/390-64 mpn_popcount
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 ?
+
+define(`ap', `%r2')
+define(`n', `%r3')
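+
+C vpopct with element-size code 3 counts bits per doubleword; vsumqg then
+C folds the two per-doubleword counts into a single quadword sum.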
+
+ASM_START()
+PROLOGUE(mpn_popcount)
+ vzero %v30
+ tmll n, 1
+ srlg n, n, 1
+ je L(top)
+
+L(odd): vllezg %v16, 0(ap)
+ vpopct %v30, %v16, 3
+ la ap, 8(ap)
+ clgije n, 0, L(end)
+
+L(top): vl %v16, 0(ap), 3
+ vpopct %v20, %v16, 3
+ vag %v30, %v30, %v20
+ la ap, 16(ap)
+ brctg n, L(top)
+
+L(end): vzero %v29
+ vsumqg %v30, %v30, %v29
+ vlgvg %r2, %v30, 1(%r0)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c
new file mode 100644
index 0000000..91dc47c
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c
@@ -0,0 +1,82 @@
+/* mpn_sqr_basecase -- Internal routine to square a natural number of length n.
+ This is a place-holder for z13 to suppress the use of the plain z/arch code.
+ FIXME: This should really be written in assembly with outer-loop early exit.
+
+ THIS IS AN INTERNAL FUNCTION WITH A MUTABLE INTERFACE. IT IS ONLY
+ SAFE TO REACH THIS FUNCTION THROUGH DOCUMENTED INTERFACES.
+
+
+Copyright 1991-1994, 1996, 1997, 2000-2005, 2008, 2010, 2011, 2017, 2023 Free
+Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+#include "longlong.h"
+
+void
+mpn_sqr_basecase (mp_ptr rp, mp_srcptr up, mp_size_t un)
+{
+ mp_limb_t u0;
+ mp_limb_t cin;
+
+ u0 = up[0];
+ umul_ppmm (cin, rp[0], u0, u0);
+ ++rp;
+
+ if (--un) {
+ u0 = u0 << 1;
+ up += 1;
+
+ rp[un] = mpn_mul_1c (rp, up, un, u0, cin);
+
+ for (;;) {
+ mp_limb_t ci, x0, c0, hi, lo, x1, c1;
+
+ u0 = up[0];
+      /* Correction term: the top bit shifted out when up[-1] was doubled is
+         worth B * up[0..]; its low limb, u0, lands on this diagonal position,
+         and the remainder cancels against the bit carried into the next
+         row's multiplier. */
+      ci = -(up[-1] >> (GMP_NUMB_BITS-1)) & u0;
+ x0 = rp[1] + ci;
+ c0 = x0 < ci;
+
+ umul_ppmm (hi, lo, u0, u0);
+ x1 = x0 + lo;
+ c1 = x1 < lo;
+ cin = hi + c0 + c1;
+ rp[1] = x1;
+ rp += 2;
+
+ if (--un == 0) break;
+ u0 = (up[-1] >> (GMP_NUMB_BITS-1)) + (u0 << 1);
+ up += 1;
+
+ rp[un] = mpn_addmul_1c (rp, up, un, u0, cin);
+ }
+ }
+
+ rp[0] = cin;
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm
new file mode 100644
index 0000000..64f0628
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm
@@ -0,0 +1,168 @@
+dnl S/390-64 mpn_submul_1
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in the inner loop instead of preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.55
+
+
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4')
+define(`b0', `%r5')
+define(`cy', `%r6')
+
+define(`idx', `%r4')
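+
+C Same loop structure as addmul_1, but rp[] is combined with the products via
+C the 128-bit subtract-with-borrow ops vsbiq/vsbcbiq (vone presets the
+C borrow-in indicator to "no borrow").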
+
+ASM_START()
+
+PROLOGUE(mpn_submul_1)
+ stmg %r6, %r13, 48(%r15)
+L(ent): vzero %v0
+ vone %v2
+ srlg %r11, an, 2
+
+ tmll an, 1
+ je L(bx0)
+L(bx1): tmll an, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ vleg %v2, 0(rp), 1
+ lg %r13, 0(ap)
+ vzero %v4
+ mlgr %r12, b0
+ vlvgg %v4, %r13, 1
+ vsq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ cgije %r11, 0, L(1)
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ vleg %v2, 0(rp), 1
+ lg %r9, 0(ap)
+ vzero %v4
+ mlgr %r8, b0
+ vlvgg %v4, %r9, 1
+ vsq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj1)
+
+L(bx0): tmll an, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lghi %r12, 0
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lghi %r8, 0
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v7, %v0
+ vacccq %v0, %v6, %v7, %v0
+ vsbiq %v3, %v1, %v5, %v2
+ vsbcbiq %v2, %v1, %v5, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vl %v4, 32(idx, rp), 3
+ vpdi %v4, %v4, %v4, 4
+ vacq %v5, %v6, %v7, %v0
+ vacccq %v0, %v6, %v7, %v0
+ vsbiq %v1, %v4, %v5, %v2
+ vsbcbiq %v2, %v4, %v5, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v7, %v0
+ vacccq %v0, %v6, %v7, %v0
+ vsbiq %v3, %v1, %v5, %v2
+ vsbcbiq %v2, %v1, %v5, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vsg %v2, %v0, %v2
+ vlgvg %r2, %v2, 1
+ algr %r2, %r12
+ aghi %r2, 1
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+L(1): vsg %v2, %v0, %v2
+ vlgvg %r2, %v2, 1
+ algr %r2, %r12
+ aghi %r2, -1
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()