aboutsummaryrefslogtreecommitdiff
path: root/vendor/gmp-6.3.0/mpn/s390_64/z13
diff options
context:
space:
mode:
authorThomas Voss <mail@thomasvoss.com> 2024-06-21 23:36:36 +0200
committerThomas Voss <mail@thomasvoss.com> 2024-06-21 23:42:26 +0200
commita89a14ef5da44684a16b204e7a70460cc8c4922a (patch)
treeb23b4c6b155977909ef508fdae2f48d33d802813 /vendor/gmp-6.3.0/mpn/s390_64/z13
parent1db63fcedab0b288820d66e100b1877b1a5a8851 (diff)
Basic constant folding implementation
Diffstat (limited to 'vendor/gmp-6.3.0/mpn/s390_64/z13')
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm173
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c358
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c476
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h175
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h162
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm76
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm149
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c31
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm121
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm264
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c124
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm69
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c82
-rw-r--r--vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm168
14 files changed, 2428 insertions, 0 deletions
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm
new file mode 100644
index 0000000..2b00612
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.asm
@@ -0,0 +1,173 @@
+dnl S/390-64 mpn_addmul_1 and mpn_addmul_1c.
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in innerloop instead of preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.55
+
+
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4')
+define(`b0', `%r5')
+define(`cy', `%r6')
+
+define(`idx', `%r4')
+
+ASM_START()
+
+PROLOGUE(mpn_addmul_1c)
+ stmg %r6, %r13, 48(%r15)
+ j L(ent)
+EPILOGUE()
+
+PROLOGUE(mpn_addmul_1)
+ stmg %r6, %r13, 48(%r15)
+ lghi %r6, 0
+L(ent): vzero %v0
+ vzero %v2
+ srlg %r11, an, 2
+
+ tmll an, 1
+ je L(bx0)
+L(bx1): tmll an, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ vleg %v2, 0(rp), 1
+ lg %r13, 0(ap)
+ vzero %v4
+ mlgr %r12, b0
+ algr %r13, %r6
+ lghi %r6, 0
+ alcgr %r12, %r6
+ vlvgg %v4, %r13, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ cgije %r11, 0, L(1)
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ vleg %v2, 0(rp), 1
+ lg %r9, 0(ap)
+ vzero %v4
+ mlgr %r8, b0
+ algr %r9, %r6
+ lghi %r6, 0
+ alcgr %r8, %r6
+ vlvgg %v4, %r9, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj1)
+
+L(bx0): tmll an, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lgr %r12, %r6
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lgr %r8, %r6
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vl %v4, 32(idx, rp), 3
+ vpdi %v4, %v4, %v4, 4
+ vacq %v5, %v6, %v4, %v0
+ vacccq %v0, %v6, %v4, %v0
+ vacq %v1, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vag %v2, %v0, %v2
+L(1): vlgvg %r2, %v2, 1
+ algr %r2, %r12
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c
new file mode 100644
index 0000000..022e5ed
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/addmul_1.c
@@ -0,0 +1,358 @@
+/* Addmul_1 / mul_1 for IBM z13 and later
+ Contributed by Marius Hillenbrand
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+#include "s390_64/z13/common-vec.h"
+
+#undef FUNCNAME
+
+#ifdef DO_INLINE
+# ifdef OPERATION_addmul_1
+# define ADD
+# define FUNCNAME inline_addmul_1
+# elif defined(OPERATION_mul_1)
+# define FUNCNAME inline_mul_1
+# endif
+
+#else
+# ifdef OPERATION_addmul_1
+# define ADD
+# define FUNCNAME mpn_addmul_1
+# elif defined(OPERATION_mul_1)
+# define FUNCNAME mpn_mul_1
+# endif
+#endif
+
+#ifdef DO_INLINE
+static inline mp_limb_t
+FUNCNAME (mp_ptr rp, mp_srcptr s1p, mp_size_t n, mp_limb_t s2limb)
+ __attribute__ ((always_inline));
+
+static inline
+#endif
+mp_limb_t
+FUNCNAME (mp_ptr rp, mp_srcptr s1p, mp_size_t n, mp_limb_t s2limb)
+{
+ ASSERT (n >= 1);
+ ASSERT (MPN_SAME_OR_INCR_P(rp, s1p, n));
+
+ /* Combine 64x64 multiplication into GPR pairs (MLGR) with 128-bit adds in
+ VRs (using each VR as a single 128-bit accumulator).
+ The inner loop is unrolled to four limbs, with two blocks of four
+ multiplications each. Since the MLGR operation operates on even/odd GPR
+ pairs, pin the products appropriately. */
+
+ /* products as GPR pairs */
+ register mp_limb_t p0_high asm("r0");
+ register mp_limb_t p0_low asm("r1");
+
+ register mp_limb_t p1_high asm("r8");
+ register mp_limb_t p1_low asm("r9");
+
+ register mp_limb_t p2_high asm("r6");
+ register mp_limb_t p2_low asm("r7");
+
+ register mp_limb_t p3_high asm("r10");
+ register mp_limb_t p3_low asm("r11");
+
+ /* carry flag for 128-bit add in VR for first carry chain */
+ vec_t carry_vec0 = { .dw = vec_splat_u64 (0) };
+ mp_limb_t carry_limb = 0;
+
+#ifdef ADD
+ /* 2nd carry flag for 2nd carry chain with addmul */
+ vec_t carry_vec1 = { .dw = vec_splat_u64 (0) };
+ vec_t sum0;
+ vec_t rp0_addend, rp1_addend;
+ rp0_addend.dw = vec_splat_u64 (0);
+ rp1_addend.dw = vec_splat_u64 (0);
+#endif
+ vec_t sum1;
+
+ vec_t carry_prod = { .dw = vec_splat_u64 (0) };
+
+ /* The scalar multiplications compete with pointer and index increments for
+ * issue ports. Thus, increment the loop index in the middle of the loop so
+ * that the operations for the next iteration's multiplications can be
+ * loaded in time (looks horrible, yet helps performance) and make sure we
+ * use addressing with base reg + index reg + immediate displacement
+ * (so that only the single index needs incrementing, instead of multiple
+ * pointers). */
+#undef LOOP_ADVANCE
+#undef IDX_OFFSET
+
+#define LOOP_ADVANCE 4 * sizeof (mp_limb_t)
+#define IDX_OFFSET (LOOP_ADVANCE)
+ register ssize_t idx = 0 - IDX_OFFSET;
+
+ /*
+   * branch-on-count implicitly hints to the branch prediction as taken, while
+ * compare-and-branch hints as not taken. currently, using branch-on-count
+ * has a performance advantage, but it is not clear that it is generally the
+ * better choice (e.g., branch-on-count requires decrementing the separate
+ * counter). so, allow switching the loop condition to enable either
+ * category of branch instructions:
+ * - idx is less than an upper bound, for compare-and-branch
+ * - iteration counter greater than zero, for branch-on-count
+ */
+#define BRCTG
+#ifdef BRCTG
+ ssize_t iterations = (size_t)n / 4;
+#else
+ ssize_t const idx_bound = n * sizeof (mp_limb_t) - IDX_OFFSET;
+#endif
+
+ /* products will be transferred into VRs before adding up.
+ * see main loop below for comments on accumulation scheme. */
+ vec_t product0, product1, product2;
+
+ product0.dw = vec_splat_u64 (0);
+
+ switch ((size_t)n % 4)
+ {
+ case 0:
+ break;
+
+ case 1:
+ idx = 1 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ p3_low = s1p[0];
+ s390_umul_ppmm (p3_high, p3_low, s2limb);
+
+#ifdef ADD
+ rp0_addend.dw[1] = rp[0];
+ product0.dw[1] = p3_low;
+
+ sum0.sw = vec_add_u128 (product0.sw, rp0_addend.sw);
+ carry_vec1.dw = vec_permi (sum0.dw, sum0.dw, 0);
+
+ rp[0] = sum0.dw[1];
+#else
+ rp[0] = p3_low;
+#endif
+
+ carry_limb = p3_high;
+ break;
+
+ case 2:
+ p0_low = s1p[0];
+ p3_low = s1p[1];
+ idx = 2 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ s390_double_umul_ppmm (p0_high, p0_low, p3_high, p3_low, s2limb);
+
+ carry_prod.dw[0] = p3_low;
+
+ product0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ carry_limb = p3_high;
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed (rp, 0);
+ sum0.sw = vec_add_u128 (carry_prod.sw, rp0_addend.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, rp0_addend.sw);
+
+ sum1.sw = vec_add_u128 (sum0.sw, product0.sw);
+ carry_vec1.sw = vec_addc_u128 (sum0.sw, product0.sw);
+#else
+ sum1.sw = vec_add_u128 (carry_prod.sw, product0.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, product0.sw);
+#endif
+
+ vec_store_elements_reversed (rp, 0, sum1);
+
+ break;
+
+ case 3:
+ idx = 3 * sizeof (mp_limb_t) - IDX_OFFSET;
+
+ p0_low = s1p[0];
+ s390_umul_ppmm (p0_high, p0_low, s2limb);
+
+#ifdef ADD
+ rp0_addend.dw[1] = rp[0];
+ product0.dw[1] = p0_low;
+
+ sum0.sw = vec_add_u128 (product0.sw, rp0_addend.sw);
+ carry_vec1.dw = vec_permi (sum0.dw, sum0.dw, 0);
+
+ rp[0] = sum0.dw[1];
+#else
+ rp[0] = p0_low;
+#endif
+ carry_limb = p0_high;
+
+ p1_low = s1p[1];
+ p3_low = s1p[2];
+
+ s390_double_umul_ppmm (p1_high, p1_low, p3_high, p3_low, s2limb);
+
+ carry_prod.dw = vec_load_2di_as_pair (p3_low, carry_limb);
+ product1.dw = vec_load_2di_as_pair (p1_high, p1_low);
+ carry_limb = p3_high;
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed (rp, 8);
+ sum0.sw = vec_add_u128 (carry_prod.sw, rp0_addend.sw);
+ carry_vec0.sw = vec_addc_u128 (carry_prod.sw, rp0_addend.sw);
+
+ sum1.sw = vec_adde_u128 (sum0.sw, product1.sw, carry_vec1.sw);
+ carry_vec1.sw = vec_addec_u128 (sum0.sw, product1.sw, carry_vec1.sw);
+#else
+ sum1.sw = vec_adde_u128 (carry_prod.sw, product1.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (carry_prod.sw, product1.sw, carry_vec0.sw);
+#endif
+ vec_store_elements_reversed (rp, 8, sum1);
+ break;
+ }
+
+#ifdef BRCTG
+ for (; iterations > 0; iterations--)
+ {
+#else
+ while (idx < idx_bound)
+ {
+#endif
+ vec_t overlap_addend0;
+ vec_t overlap_addend1;
+
+ /* The 64x64->128 MLGR multiplies two factors in GPRs and stores the
+ * result in a GPR pair. One of the factors is taken from the GPR pair
+ * and overwritten.
+ * To reuse factors, it turned out cheaper to load limbs multiple times
+ * than copying GPR contents. Enforce that and the use of addressing by
+ * base + index gpr + immediate displacement via inline asm.
+ */
+ ASM_LOADGPR (p0_low, s1p, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, s1p, idx, 8 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, s1p, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, s1p, idx, 24 + IDX_OFFSET);
+
+ /*
+ * accumulate products as follows (for addmul):
+ * | rp[i+3] | rp[i+2] | rp[i+1] | rp[i] |
+ * p0_high | p0_low |
+ * p1_high | p1_low | carry-limb in
+ * p2_high | p2_low |
+ * c-limb out <- p3_high | p3_low |
+ * | < 128-bit VR > < 128-bit VR >
+ *
+ * < rp1_addend > < rp0_addend >
+ * carry-chain 0 <- + <- + <- carry_vec0[127]
+ * < product1 > < product0 >
+ * carry-chain 1 <- + <- + <- carry_vec1[127]
+ * < overlap_addend1 > < overlap_addend0 >
+ *
+ * note that a 128-bit add with carry in + out is built from two insns
+ * - vec_adde_u128 (vacq) provides sum
+ * - vec_addec_u128 (vacccq) provides the new carry bit
+ */
+
+ s390_double_umul_ppmm (p0_high, p0_low, p1_high, p1_low, s2limb);
+
+ /*
+ * "barrier" to enforce scheduling loads for all limbs and first round
+ * of MLGR before anything else.
+ */
+ asm volatile("");
+
+ product0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+#ifdef ADD
+ rp0_addend = vec_load_elements_reversed_idx (rp, idx, 0 + IDX_OFFSET);
+ rp1_addend = vec_load_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET);
+#endif
+      /* increment loop index to unblock dependent loads of limbs for the next
+ * iteration (see above at #define LOOP_ADVANCE) */
+ idx += LOOP_ADVANCE;
+
+ s390_double_umul_ppmm (p2_high, p2_low, p3_high, p3_low, s2limb);
+
+ overlap_addend0.dw = vec_load_2di_as_pair (p1_low, carry_limb);
+ asm volatile("");
+
+#ifdef ADD
+ sum0.sw = vec_adde_u128 (product0.sw, rp0_addend.sw, carry_vec0.sw);
+ sum1.sw = vec_adde_u128 (sum0.sw, overlap_addend0.sw, carry_vec1.sw);
+
+ carry_vec0.sw
+ = vec_addec_u128 (product0.sw, rp0_addend.sw, carry_vec0.sw);
+ carry_vec1.sw
+ = vec_addec_u128 (sum0.sw, overlap_addend0.sw, carry_vec1.sw);
+#else
+ sum1.sw = vec_adde_u128 (product0.sw, overlap_addend0.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (product0.sw, overlap_addend0.sw, carry_vec0.sw);
+#endif
+
+ asm volatile("");
+ product2.dw = vec_load_2di_as_pair (p2_high, p2_low);
+ overlap_addend1.dw = vec_load_2di_as_pair (p3_low, p1_high);
+
+ vec_t sum4;
+
+#ifdef ADD
+ vec_t sum3;
+ sum3.sw = vec_adde_u128 (product2.sw, rp1_addend.sw, carry_vec0.sw);
+ sum4.sw = vec_adde_u128 (sum3.sw, overlap_addend1.sw, carry_vec1.sw);
+
+ carry_vec0.sw
+ = vec_addec_u128 (product2.sw, rp1_addend.sw, carry_vec0.sw);
+ carry_vec1.sw
+ = vec_addec_u128 (sum3.sw, overlap_addend1.sw, carry_vec1.sw);
+#else
+ sum4.sw = vec_adde_u128 (product2.sw, overlap_addend1.sw, carry_vec0.sw);
+ carry_vec0.sw
+ = vec_addec_u128 (product2.sw, overlap_addend1.sw, carry_vec0.sw);
+#endif
+ vec_store_elements_reversed_idx (rp, idx, IDX_OFFSET - LOOP_ADVANCE,
+ sum1);
+ vec_store_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET - LOOP_ADVANCE,
+ sum4);
+
+ carry_limb = p3_high;
+ }
+
+#ifdef ADD
+ carry_vec0.dw += carry_vec1.dw;
+ carry_limb += carry_vec0.dw[1];
+#else
+ carry_limb += carry_vec0.dw[1];
+#endif
+
+ return carry_limb;
+}
+
+#undef OPERATION_addmul_1
+#undef OPERATION_mul_1
+#undef FUNCNAME
+#undef ADD
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c
new file mode 100644
index 0000000..9a69fc3
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/aormul_2.c
@@ -0,0 +1,476 @@
+/* Addmul_2 / mul_2 for IBM z13 or later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+
+#include "s390_64/z13/common-vec.h"
+
+#undef FUNCNAME
+
+#ifdef DO_INLINE
+# ifdef OPERATION_addmul_2
+# define ADD
+# define FUNCNAME inline_addmul_2
+# elif defined(OPERATION_mul_2)
+# define FUNCNAME inline_mul_2
+# else
+# error Missing define for operation to perform
+# endif
+#else
+# ifdef OPERATION_addmul_2
+# define ADD
+# define FUNCNAME mpn_addmul_2
+# elif defined(OPERATION_mul_2)
+# define FUNCNAME mpn_mul_2
+# else
+# error Missing define for operation to perform
+# endif
+#endif
+
+#ifdef DO_INLINE
+static inline mp_limb_t
+FUNCNAME (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n, const mp_limb_t *vp)
+ __attribute__ ((always_inline));
+
+static inline
+#endif
+mp_limb_t
+FUNCNAME (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n,
+ const mp_limb_t *vp)
+{
+
+ /* Combine 64x64 multiplication into GPR pairs (MLGR) with 128-bit adds in
+ VRs (using each VR as a single 128-bit accumulator).
+ The inner loop is unrolled to four limbs, with two blocks of four
+ multiplications each. Since the MLGR operation operates on even/odd GPR
+ pairs, pin the products appropriately. */
+
+ register mp_limb_t p0_high asm("r0");
+ register mp_limb_t p0_low asm("r1");
+
+ register mp_limb_t p1_high asm("r8");
+ register mp_limb_t p1_low asm("r9");
+
+ register mp_limb_t p2_high asm("r6");
+ register mp_limb_t p2_low asm("r7");
+
+ register mp_limb_t p3_high asm("r10");
+ register mp_limb_t p3_low asm("r11");
+
+ vec_t carry_prod = { .dw = vec_splat_u64 (0) };
+ vec_t zero = { .dw = vec_splat_u64 (0) };
+
+ /* two carry-bits for the 128-bit VR adds - stored in VRs */
+#ifdef ADD
+ vec_t carry_vec0 = { .dw = vec_splat_u64 (0) };
+#endif
+ vec_t carry_vec1 = { .dw = vec_splat_u64 (0) };
+
+ vec_t tmp;
+
+ vec_t sum0, sum1;
+
+ /* products transferred into VRs for accumulating there */
+ vec_t pv0, pv3;
+ vec_t pv1_low, pv1_high, pv2_low, pv2_high;
+ vec_t low, middle, high;
+#ifdef ADD
+ vec_t rp0, rp1;
+#endif
+
+ register mp_limb_t v0 asm("r12");
+ register mp_limb_t v1 asm("r5");
+ v0 = vp[0];
+ v1 = vp[1];
+
+ /* The scalar multiplications compete with pointer and index increments for
+ * issue ports. Thus, increment the loop index in the middle of the loop so
+ * that the operations for the next iteration's multiplications can be
+ * loaded in time (looks horrible, yet helps performance) and make sure we
+ * use addressing with base reg + index reg + immediate displacement
+ * (so that only the single index needs incrementing, instead of multiple
+ * pointers). */
+#undef LOOP_ADVANCE
+#define LOOP_ADVANCE (4 * sizeof (mp_limb_t))
+#define IDX_OFFSET (LOOP_ADVANCE)
+
+ register ssize_t idx = 0 - IDX_OFFSET;
+#ifdef BRCTG
+ ssize_t iterations = (size_t)n / 4;
+#else
+ ssize_t const idx_bound = n * sizeof (mp_limb_t) - IDX_OFFSET;
+#endif
+
+ /*
+ * To minimize latency in the carry chain, accumulate in VRs with 128-bit
+ * adds with carry in and out. As a downside, these require two insns for
+ * each add - one to calculate the sum, one to deliver the carry out.
+ * To reduce the overall number of insns to execute, combine adding up
+ * product limbs such that there cannot be a carry out and one (for mul) or
+ * two (for addmul) adds with carry chains.
+ *
+ * Since (2^64-1) * (2^64-1) = (2^128-1) - 2 * (2^64-1), we can add two
+ * limbs into each 128-bit product without causing carry out.
+ *
+ * For each block of 2 limbs * 2 limbs
+ *
+ * | u[i] * v[0] (p2) |
+ * | u[i] * v[1] (p0) |
+ * | u[i+1] * v[0](p1) |
+ * | u[i+1] * v[1](p3) |
+ * < 128 bits > < 128 bits >
+ *
+ * we can begin accumulating with "simple" carry-oblivious 128-bit adds:
+ * - p0 + low limb of p1
+ * + high limb of p2
+ * and combine resulting low limb with p2's low limb
+ * - p3 + high limb of p1
+ * + high limb of sum above
+   * ... which will result in two 128-bit limbs to be fed into the carry
+ * chain(s).
+ * Overall, that scheme saves instructions and improves performance, despite
+ * slightly increasing latency between multiplications and carry chain (yet
+ * not in the carry chain).
+ */
+
+#define LOAD_LOW_LIMB(VEC, LIMB) \
+ do \
+ { \
+ asm("vzero\t%[vec]\n\t" \
+ "vlvgg\t%[vec],%[limb],1" \
+ : [vec] "=v"(VEC) \
+ : [limb] "r"(LIMB)); \
+ } \
+ while (0)
+
+ /* for the 128-bit adds in the carry chain, to calculate a + b + carry-in we
+ * need paired vec_adde_u128 (delivers sum) and vec_addec_u128 (delivers new
+ * carry) */
+#define ADD_UP2_CARRY_INOUT(SUMIDX, CARRYIDX, ADDEND1, ADDEND2) \
+ do \
+ { \
+ sum##SUMIDX.sw \
+ = vec_adde_u128 (ADDEND1.sw, ADDEND2.sw, carry_vec##CARRYIDX.sw); \
+ carry_vec##CARRYIDX.sw \
+ = vec_addec_u128 (ADDEND1.sw, ADDEND2.sw, carry_vec##CARRYIDX.sw); \
+ } \
+ while (0)
+
+#define ADD_UP_CARRY_INOUT(SUMIDX, ADDEND1, ADDEND2) \
+ ADD_UP2_CARRY_INOUT (SUMIDX, SUMIDX, ADDEND1, ADDEND2)
+
+ /* variant without carry-in for prologue */
+#define ADD_UP2_CARRY_OUT(SUMIDX, CARRYIDX, ADDEND1, ADDEND2) \
+ do \
+ { \
+ sum##SUMIDX.sw = vec_add_u128 (ADDEND1.sw, ADDEND2.sw); \
+ carry_vec##CARRYIDX.sw = vec_addc_u128 (ADDEND1.sw, ADDEND2.sw); \
+ } \
+ while (0)
+
+#define ADD_UP_CARRY_OUT(SUMIDX, ADDEND1, ADDEND2) \
+ ADD_UP2_CARRY_OUT (SUMIDX, SUMIDX, ADDEND1, ADDEND2)
+
+ /* prologue for 4x-unrolled main loop */
+ switch ((size_t)n % 4)
+ {
+ case 1:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 0);
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v0, v1);
+ carry_prod.dw = vec_load_2di_as_pair (p1_high, p1_low);
+
+/* gcc tries to be too clever and vlr from a reg that is already zero. vzero is
+ * cheaper. */
+# define NEW_CARRY(VEC, LIMB) \
+ do \
+ { \
+ asm("vzero\t%[vec]\n\t" \
+ "vlvgg\t%[vec],%[limb],1" \
+ : [vec] "=v"(VEC) \
+ : [limb] "r"(LIMB)); \
+ } \
+ while (0)
+
+ NEW_CARRY (tmp, p0_high);
+
+ carry_prod.sw = vec_add_u128 (carry_prod.sw, tmp.sw);
+#ifdef ADD
+ carry_vec1.dw[1] = __builtin_add_overflow (rp[0], p0_low, rp);
+#else
+ rp[0] = p0_low;
+#endif
+ idx += sizeof (mp_limb_t);
+ break;
+
+ case 2:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 8);
+ ASM_LOADGPR_BASE (p2_low, up, 0);
+ ASM_LOADGPR_BASE (p3_low, up, 8);
+
+ asm(""
+ : "=r"(p0_low), "=r"(p2_low)
+ : "r"(p3_low), "0"(p0_low), "r"(p1_low), "1"(p2_low));
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+ low.dw = vec_permi (middle.dw, pv2_low.dw, 3);
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+#ifdef ADD
+ rp0 = vec_load_elements_reversed (rp, 0);
+ ADD_UP_CARRY_OUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_OUT (1, sum0, low);
+ vec_store_elements_reversed (rp, 0, sum1);
+ carry_prod = high;
+
+ idx += 2 * sizeof (mp_limb_t);
+ break;
+
+ case 3:
+ ASM_LOADGPR_BASE (p0_low, up, 0);
+ ASM_LOADGPR_BASE (p1_low, up, 0);
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v0, v1);
+ carry_prod.dw = vec_load_2di_as_pair (p1_high, p1_low);
+ NEW_CARRY (tmp, p0_high);
+ carry_prod.sw = vec_add_u128 (carry_prod.sw, tmp.sw);
+
+#ifdef ADD
+ carry_vec1.dw[1] = __builtin_add_overflow (rp[0], p0_low, rp);
+#else
+ rp[0] = p0_low;
+#endif
+
+ ASM_LOADGPR_BASE (p0_low, up, 8);
+ ASM_LOADGPR_BASE (p1_low, up, 16);
+ ASM_LOADGPR_BASE (p2_low, up, 8);
+ ASM_LOADGPR_BASE (p3_low, up, 16);
+
+ asm(""
+ : "=r"(p0_low), "=r"(p2_low)
+ : "r"(p3_low), "0"(p0_low), "r"(p1_low), "1"(p2_low));
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+
+ low.dw = vec_permi (middle.dw, pv2_low.dw, 3);
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+
+#ifdef ADD
+ vec_t rp0 = vec_load_elements_reversed (rp, 8);
+ ADD_UP_CARRY_OUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_INOUT (1, sum0, low);
+
+ vec_store_elements_reversed (rp, 8, sum1);
+
+ carry_prod = high;
+
+ idx += 3 * sizeof (mp_limb_t);
+ break;
+ }
+
+ /*
+   * branch-on-count implicitly hints to the branch prediction as taken, while
+ * compare-and-branch hints as not taken. currently, using branch-on-count
+ * has a performance advantage, but it is not clear that it is generally
+ * the better choice (e.g., branch-on-count requires decrementing the
+ * separate counter). so, allow switching the loop condition to enable
+ * either category of branch instructions:
+ * - idx is less than an upper bound, for compare-and-branch
+ * - iteration counter greater than zero, for branch-on-count
+ */
+#ifdef BRCTG
+ for (; iterations > 0; iterations--)
+ {
+#else
+ while (idx < idx_bound)
+ {
+#endif
+ /* The 64x64->128 MLGR multiplies two factors in GPRs and stores the
+ * result in a GPR pair. One of the factors is taken from the GPR pair
+ * and overwritten.
+ * To reuse factors, it turned out cheaper to load limbs multiple times
+ * than copying GPR contents. Enforce that and the use of addressing by
+ * base + index gpr + immediate displacement via inline asm.
+ */
+ ASM_LOADGPR (p0_low, up, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, up, idx, 8 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, up, idx, 0 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, up, idx, 8 + IDX_OFFSET);
+
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+
+ pv0.dw = vec_load_2di_as_pair (p0_high, p0_low);
+
+ LOAD_LOW_LIMB (pv1_low, p1_low);
+ LOAD_LOW_LIMB (pv1_high, p1_high);
+
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ pv0.sw = vec_add_u128 (pv0.sw, pv1_low.sw);
+ LOAD_LOW_LIMB (pv2_high, p2_high);
+ pv3.dw = vec_load_2di_as_pair (p3_high, p3_low);
+
+ LOAD_LOW_LIMB (pv2_low, p2_low);
+
+ ASM_LOADGPR (p0_low, up, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p1_low, up, idx, 24 + IDX_OFFSET);
+ ASM_LOADGPR (p2_low, up, idx, 16 + IDX_OFFSET);
+ ASM_LOADGPR (p3_low, up, idx, 24 + IDX_OFFSET);
+
+ idx += LOOP_ADVANCE;
+
+ /*
+ * "barrier" to enforce scheduling the index increment before the second
+ * block of multiplications. not required for clang.
+ */
+#ifndef __clang__
+ asm(""
+ : "=r"(idx), "=r"(p0_high), "=r"(p2_high)
+ : "0"(idx), "1"(p0_high), "2"(p2_high));
+#endif
+
+ s390_double_umul_ppmm_distinct (p0_high, p0_low, p1_high, p1_low, v1, v0);
+ s390_double_umul_ppmm_distinct (p2_high, p2_low, p3_high, p3_low, v0, v1);
+
+ /*
+ * "barrier" to enforce scheduling all MLGRs first, before any adding
+ * up. note that clang produces better code without.
+ */
+#ifndef __clang__
+ asm(""
+ : "=v"(pv0.sw), "=v"(pv3.sw)
+ : "1"(pv3.sw), "0"(pv0.sw), "r"(p0_high), "r"(p2_high));
+#endif
+
+ pv3.sw = vec_add_u128 (pv3.sw, pv1_high.sw);
+ middle.sw = vec_add_u128 (pv0.sw, pv2_high.sw);
+
+ low.dw = vec_permi (middle.dw, pv2_low.dw,
+ 3); /* least-significant doubleword from both vectors */
+ middle.dw = vec_permi (zero.dw, middle.dw, 0);
+ high.sw = vec_add_u128 (middle.sw, pv3.sw);
+
+#ifdef ADD
+ rp0 = vec_load_elements_reversed_idx (rp, idx,
+ 0 + IDX_OFFSET - LOOP_ADVANCE);
+ ADD_UP_CARRY_INOUT (0, rp0, carry_prod);
+#else
+ sum0 = carry_prod;
+#endif
+ ADD_UP_CARRY_INOUT (1, sum0, low);
+
+ vec_store_elements_reversed_idx (rp, idx, 0 + IDX_OFFSET - LOOP_ADVANCE,
+ sum1);
+
+ carry_prod = high;
+
+ vec_t pv0_2, pv3_2;
+ vec_t pv1_low_2, pv1_high_2, pv2_low_2, pv2_high_2;
+ vec_t low_2, middle_2, high_2;
+ vec_t sum2, sum3;
+
+ pv0_2.dw = vec_load_2di_as_pair (p0_high, p0_low);
+ LOAD_LOW_LIMB (pv1_low_2, p1_low);
+ LOAD_LOW_LIMB (pv1_high_2, p1_high);
+
+ pv0_2.sw = vec_add_u128 (pv0_2.sw, pv1_low_2.sw);
+ LOAD_LOW_LIMB (pv2_high_2, p2_high);
+ pv3_2.dw = vec_load_2di_as_pair (p3_high, p3_low);
+ pv3_2.sw = vec_add_u128 (pv3_2.sw, pv1_high_2.sw);
+ middle_2.sw = vec_add_u128 (pv0_2.sw, pv2_high_2.sw);
+
+ LOAD_LOW_LIMB (pv2_low_2, p2_low);
+ low_2.dw
+ = vec_permi (middle_2.dw, pv2_low_2.dw,
+ 3); /* least-significant doubleword from both vectors */
+ middle_2.dw = vec_permi (zero.dw, middle_2.dw, 0);
+ high_2.sw = vec_add_u128 (middle_2.sw, pv3_2.sw);
+
+ /*
+ * another "barrier" to influence scheduling. (also helps in clang)
+ */
+ asm("" : : "v"(pv0_2.sw), "r"(p2_high), "r"(p3_high), "v"(pv3_2.sw));
+
+#ifdef ADD
+ rp1 = vec_load_elements_reversed_idx (rp, idx,
+ 16 + IDX_OFFSET - LOOP_ADVANCE);
+ ADD_UP2_CARRY_INOUT (2, 0, rp1, carry_prod);
+#else
+ sum2 = carry_prod;
+#endif
+ ADD_UP2_CARRY_INOUT (3, 1, sum2, low_2);
+
+ vec_store_elements_reversed_idx (rp, idx, 16 + IDX_OFFSET - LOOP_ADVANCE,
+ sum3);
+
+ carry_prod = high_2;
+ }
+
+#ifdef ADD
+ sum0.sw = vec_adde_u128 (carry_prod.sw, carry_vec0.sw, carry_vec1.sw);
+#else
+ sum0.sw = vec_add_u128 (carry_prod.sw, carry_vec1.sw);
+#endif
+
+ *(mp_ptr) (((char *)rp) + idx + 0 + IDX_OFFSET) = (mp_limb_t)sum0.dw[1];
+
+ return (mp_limb_t)sum0.dw[0];
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h b/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h
new file mode 100644
index 0000000..a59e6ee
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/common-vec.h
@@ -0,0 +1,175 @@
+/* Common vector helpers and macros for IBM z13 and later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#ifndef __S390_64_Z13_COMMON_VEC_H
+#define __S390_64_Z13_COMMON_VEC_H
+
+#include <unistd.h>
+#include <vecintrin.h>
+
+/*
+ * Vector intrinsics use vector element types that kind-of make sense for the
+ * specific operation (e.g., vec_permi permutes doublewords). To use VRs
+ * interchangeably with different intrinsics, typedef the two variants and wrap
+ * them in a union.
+ */
+#define VLEN_BYTES 16
+typedef unsigned long long v2di __attribute__ ((vector_size (VLEN_BYTES)));
+typedef unsigned char v16qi __attribute__ ((vector_size (VLEN_BYTES)));
+
+/*
+ * The Z vector intrinsics use vectors with different element types (e.g.,
+ * v16qi for the 128-bit adds and v2di for vec_permi).
+ */
+union vec
+{
+ v2di dw;
+ v16qi sw;
+};
+
+typedef union vec vec_t;
+
+/*
+ * single-instruction combine of two GPRs into a VR
+ */
+static inline v2di
+vec_load_2di_as_pair (unsigned long a, unsigned long b)
+{
+ v2di res;
+ __asm__("vlvgp\t%0,%1,%2" : "=v"(res) : "r"(a), "r"(b));
+ return res;
+}
+
+/*
+ * 64x64 mult where caller needs to care about proper register allocation:
+ * multiply xl with m1, treating both as unsigned, and place the result in
+ * xh:xl.
+ * mlgr operates on register pairs, so xh must be an even gpr followed by xl
+ */
+#define s390_umul_ppmm(xh, xl, m1) \
+ do \
+ { \
+ asm("mlgr\t%0,%3" : "=r"(xh), "=r"(xl) : "%1"(xl), "r"(m1)); \
+ } \
+ while (0);
+
+/*
+ * two 64x64 multiplications, scheduled so that they will dispatch and issue to
+ * different sides: each mlgr is dispatched alone in an instruction group and
+ * subsequent groups will issue on different execution sides.
+ * there is a variant where both products use the same multiplicand and one
+ * that uses two different multiplicands. constraints from s390_umul_ppmm apply
+ * here.
+ */
+#define s390_double_umul_ppmm(X0H, X0L, X1H, X1L, MX) \
+ do \
+ { \
+ asm("mlgr\t%[x0h],%[mx]\n\t" \
+ "mlgr\t%[x1h],%[mx]" \
+ : [x0h] "=&r"(X0H), [x0l] "=&r"(X0L), [x1h] "=r"(X1H), \
+ [x1l] "=r"(X1L) \
+ : "[x0l]"(X0L), "[x1l]"(X1L), [mx] "r"(MX)); \
+ } \
+ while (0);
+
+#define s390_double_umul_ppmm_distinct(X0H, X0L, X1H, X1L, MX0, MX1) \
+ do \
+ { \
+ asm("mlgr\t%[x0h],%[mx0]\n\t" \
+ "mlgr\t%[x1h],%[mx1]" \
+ : [x0h] "=&r"(X0H), [x0l] "=&r"(X0L), [x1h] "=r"(X1H), \
+ [x1l] "=r"(X1L) \
+ : "[x0l]"(X0L), "[x1l]"(X1L), [mx0] "r"(MX0), [mx1] "r"(MX1)); \
+ } \
+ while (0);
+
+#define ASM_LOADGPR_BASE(DST, BASE, OFFSET) \
+ asm volatile("lg\t%[r],%[off](%[b])" \
+ : [r] "=r"(DST) \
+ : [b] "a"(BASE), [off] "L"(OFFSET) \
+ : "memory");
+
+#define ASM_LOADGPR(DST, BASE, INDEX, OFFSET) \
+ asm volatile("lg\t%[r],%[off](%[b],%[x])" \
+ : [r] "=r"(DST) \
+ : [b] "a"(BASE), [x] "a"(INDEX), [off] "L"(OFFSET) \
+ : "memory");
+
+/*
+ * Load a vector register from memory and swap the two 64-bit doubleword
+ * elements.
+ */
+static inline vec_t
+vec_load_elements_reversed_idx (mp_limb_t const *base, ssize_t const index,
+ ssize_t const offset)
+{
+ vec_t res;
+ char *ptr = (char *)base;
+
+ res.sw = *(v16qi *)(ptr + index + offset);
+ res.dw = vec_permi (res.dw, res.dw, 2);
+
+ return res;
+}
+
+static inline vec_t
+vec_load_elements_reversed (mp_limb_t const *base, ssize_t const offset)
+{
+ return vec_load_elements_reversed_idx (base, 0, offset);
+}
+
+/*
+ * Store a vector register to memory and swap the two 64-bit doubleword
+ * elements.
+ */
+static inline void
+vec_store_elements_reversed_idx (mp_limb_t *base, ssize_t const index,
+ ssize_t const offset, vec_t vec)
+{
+ char *ptr = (char *)base;
+
+ vec.dw = vec_permi (vec.dw, vec.dw, 2);
+ *(v16qi *)(ptr + index + offset) = vec.sw;
+}
+
+static inline void
+vec_store_elements_reversed (mp_limb_t *base, ssize_t const offset, vec_t vec)
+{
+ vec_store_elements_reversed_idx (base, 0, offset, vec);
+}
+
+#define ASM_VZERO(VEC) \
+ do \
+ { \
+ asm("vzero\t%[vec]" : [vec] "=v"(VEC)); \
+ } \
+ while (0)
+
+#endif
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h b/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h
new file mode 100644
index 0000000..50e7f39
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/gmp-mparam.h
@@ -0,0 +1,162 @@
+/* S/390-64 for IBM z13 gmp-mparam.h -- Compiler/machine parameter header file.
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#define GMP_LIMB_BITS 64
+#define GMP_LIMB_BYTES 8
+
+#define HAVE_NATIVE_mpn_addmul_2 1
+#define HAVE_NATIVE_mpn_mul_2 1
+
+/* Generated by tuneup.c, 2021-07-30, gcc 10.2 */
+
+#define DIVREM_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIVREM_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_1P_METHOD 2
+#define MOD_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define MOD_1N_TO_MOD_1_1_THRESHOLD 17
+#define MOD_1U_TO_MOD_1_1_THRESHOLD 15
+#define MOD_1_1_TO_MOD_1_2_THRESHOLD 0 /* never mpn_mod_1_1p */
+#define MOD_1_2_TO_MOD_1_4_THRESHOLD 0 /* never mpn_mod_1s_2p */
+#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 5
+#define USE_PREINV_DIVREM_1 0
+#define DIV_QR_1N_PI1_METHOD 3
+#define DIV_QR_1_NORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */
+#define DIV_QR_2_PI2_THRESHOLD 996
+#define DIVEXACT_1_THRESHOLD 4
+#define BMOD_1_TO_MOD_1_THRESHOLD 0 /* always */
+
+#define DIV_1_VS_MUL_1_PERCENT 404
+
+#define MUL_TOOM22_THRESHOLD 23
+#define MUL_TOOM33_THRESHOLD 94
+#define MUL_TOOM44_THRESHOLD 166
+#define MUL_TOOM6H_THRESHOLD 286
+#define MUL_TOOM8H_THRESHOLD 626
+
+#define MUL_TOOM32_TO_TOOM43_THRESHOLD 113
+#define MUL_TOOM32_TO_TOOM53_THRESHOLD 138
+#define MUL_TOOM42_TO_TOOM53_THRESHOLD 143
+#define MUL_TOOM42_TO_TOOM63_THRESHOLD 145
+#define MUL_TOOM43_TO_TOOM54_THRESHOLD 130
+
+#define SQR_BASECASE_THRESHOLD 0 /* always (native) */
+#define SQR_TOOM2_THRESHOLD 12
+#define SQR_TOOM3_THRESHOLD 84
+#define SQR_TOOM4_THRESHOLD 234
+#define SQR_TOOM6_THRESHOLD 318
+#define SQR_TOOM8_THRESHOLD 478
+
+#define MULMID_TOOM42_THRESHOLD 42
+
+#define MULMOD_BNM1_THRESHOLD 13
+#define SQRMOD_BNM1_THRESHOLD 7
+
+#define MUL_FFT_MODF_THRESHOLD 332 /* k = 5 */
+#define MUL_FFT_TABLE3 \
+ { { 332, 5}, { 19, 6}, { 10, 5}, { 21, 6}, \
+ { 21, 7}, { 21, 8}, { 11, 7}, { 24, 8}, \
+ { 13, 7}, { 27, 8}, { 15, 7}, { 31, 8}, \
+ { 17, 7}, { 35, 8}, { 19, 7}, { 39, 8}, \
+ { 21, 9}, { 11, 8}, { 27, 9}, { 15, 8}, \
+ { 35, 9}, { 19, 8}, { 41, 9}, { 23, 8}, \
+ { 47, 9}, { 27,10}, { 15, 9}, { 39,10}, \
+ { 23, 9}, { 47,11}, { 15,10}, { 31, 9}, \
+ { 67,10}, { 47,11}, { 2048,12}, { 4096,13}, \
+ { 8192,14}, { 16384,15}, { 32768,16}, { 65536,17}, \
+ { 131072,18}, { 262144,19}, { 524288,20}, {1048576,21}, \
+ {2097152,22}, {4194304,23}, {8388608,24} }
+#define MUL_FFT_TABLE3_SIZE 47
+#define MUL_FFT_THRESHOLD 2752
+
+#define SQR_FFT_MODF_THRESHOLD 240 /* k = 5 */
+#define SQR_FFT_TABLE3 \
+ { { 240, 5}, { 8, 4}, { 17, 5}, { 13, 6}, \
+ { 7, 5}, { 15, 6}, { 8, 5}, { 17, 6}, \
+ { 9, 5}, { 19, 6}, { 15, 7}, { 8, 6}, \
+ { 17, 7}, { 9, 6}, { 19, 7}, { 10, 6}, \
+ { 21, 7}, { 17, 8}, { 9, 7}, { 20, 8}, \
+ { 11, 7}, { 23, 8}, { 13, 9}, { 7, 8}, \
+ { 21, 9}, { 11, 8}, { 23, 9}, { 15, 8}, \
+ { 31, 9}, { 19, 8}, { 39, 9}, { 23,10}, \
+ { 15, 9}, { 39,10}, { 23,11}, { 15,10}, \
+ { 31, 9}, { 63,10}, { 47,11}, { 2048,12}, \
+ { 4096,13}, { 8192,14}, { 16384,15}, { 32768,16}, \
+ { 65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \
+ {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} }
+#define SQR_FFT_TABLE3_SIZE 52
+#define SQR_FFT_THRESHOLD 1856
+
+#define MULLO_BASECASE_THRESHOLD 0 /* always */
+#define MULLO_DC_THRESHOLD 25
+#define MULLO_MUL_N_THRESHOLD 5397
+#define SQRLO_BASECASE_THRESHOLD 0 /* always */
+#define SQRLO_DC_THRESHOLD 396
+#define SQRLO_SQR_THRESHOLD 3704
+
+#define DC_DIV_QR_THRESHOLD 15
+#define DC_DIVAPPR_Q_THRESHOLD 50
+#define DC_BDIV_QR_THRESHOLD 66
+#define DC_BDIV_Q_THRESHOLD 202
+
+#define INV_MULMOD_BNM1_THRESHOLD 46
+#define INV_NEWTON_THRESHOLD 29
+#define INV_APPR_THRESHOLD 13
+
+#define BINV_NEWTON_THRESHOLD 312
+#define REDC_1_TO_REDC_2_THRESHOLD 79
+#define REDC_2_TO_REDC_N_THRESHOLD 0 /* always */
+
+#define MU_DIV_QR_THRESHOLD 979
+#define MU_DIVAPPR_Q_THRESHOLD 979
+#define MUPI_DIV_QR_THRESHOLD 13
+#define MU_BDIV_QR_THRESHOLD 942
+#define MU_BDIV_Q_THRESHOLD 1367
+
+#define POWM_SEC_TABLE 3,19,215,1730
+
+#define GET_STR_DC_THRESHOLD 10
+#define GET_STR_PRECOMPUTE_THRESHOLD 15
+#define SET_STR_DC_THRESHOLD 882
+#define SET_STR_PRECOMPUTE_THRESHOLD 2520
+
+#define FAC_DSC_THRESHOLD 228
+#define FAC_ODD_THRESHOLD 24
+
+#define MATRIX22_STRASSEN_THRESHOLD 19
+#define HGCD2_DIV1_METHOD 1
+#define HGCD_THRESHOLD 61
+#define HGCD_APPR_THRESHOLD 51
+#define HGCD_REDUCE_THRESHOLD 1962
+#define GCD_DC_THRESHOLD 217
+#define GCDEXT_DC_THRESHOLD 263
+#define JACOBI_BASE_METHOD 4
+
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm
new file mode 100644
index 0000000..81c5174
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/hamdist.asm
@@ -0,0 +1,76 @@
+dnl S/390-64 mpn_hamdist
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 ?
+
+define(`ap', `%r2')
+define(`bp', `%r3')
+define(`n', `%r4')
+
+ASM_START()
+PROLOGUE(mpn_hamdist)
+ vzero %v30
+ tmll n, 1
+ srlg n, n, 1
+ je L(top)
+
+L(odd): vllezg %v16, 0(ap)
+ vllezg %v17, 0(bp)
+ vx %v16, %v16, %v17
+ vpopct %v30, %v16, 3
+ la ap, 8(ap)
+ la bp, 8(bp)
+ clgije n, 0, L(end)
+
+L(top): vl %v16, 0(ap), 3
+ vl %v17, 0(bp), 3
+ vx %v16, %v16, %v17
+ vpopct %v20, %v16, 3
+ vag %v30, %v30, %v20
+ la ap, 16(ap)
+ la bp, 16(bp)
+ brctg n, L(top)
+
+L(end): vzero %v29
+ vsumqg %v30, %v30, %v29
+ vlgvg %r2, %v30, 1(%r0)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm
new file mode 100644
index 0000000..04eb718
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.asm
@@ -0,0 +1,149 @@
+dnl S/390-64 mpn_mul_1 and mpn_mul_1c.
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in innerloop instead preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.25
+
+
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4')
+define(`b0', `%r5')
+define(`cy', `%r6')
+
+define(`idx', `%r4')
+
+ASM_START()
+
+PROLOGUE(mpn_mul_1c)
+ stmg %r6, %r13, 48(%r15)
+ j L(ent)
+EPILOGUE()
+
+PROLOGUE(mpn_mul_1)
+ stmg %r6, %r13, 48(%r15)
+ lghi %r6, 0
+L(ent): vzero %v2
+ srlg %r11, an, 2
+
+ tmll an, 1
+ je L(bx0)
+L(bx1): tmll an, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ lg %r13, 0(ap)
+ mlgr %r12, b0
+ algr %r13, %r6
+ lghi %r6, 0
+ alcgr %r12, %r6
+ stg %r13, 0(rp)
+ cgije %r11, 0, L(1)
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ lg %r9, 0(ap)
+ mlgr %r8, b0
+ algr %r9, %r6
+ lghi %r6, 0
+ alcgr %r8, %r6
+ stg %r9, 0(rp)
+ j L(cj1)
+
+L(bx0): tmll an, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lgr %r12, %r6
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lgr %r8, %r6
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vacq %v1, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+L(1): vlgvg %r2, %v2, 1
+ agr %r2, %r12
+ lmg %r6, %r13, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c
new file mode 100644
index 0000000..7584dc8
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_1.c
@@ -0,0 +1,31 @@
+/* mul_1 for IBM z13 or later
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "s390_64/z13/addmul_1.c"
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm
new file mode 100644
index 0000000..ec61201
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_2.asm
@@ -0,0 +1,121 @@
+dnl S/390-64 mpn_mul_2
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 ?
+C z196 ?
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.8
+
+
+define(`rp', `%r2')
+define(`up', `%r3')
+define(`un', `%r4')
+define(`vp', `%r5')
+
+define(`idx', `%r12')
+define(`v0', `%r11')
+define(`v1', `%r5')
+
+ASM_START()
+PROLOGUE(mpn_mul_2)
+ stmg %r6, %r12, 48(%r15)
+
+ vzero %v27
+ vzero %v28
+ vzero %v29
+ vzero %v30
+ lghi %r10, 0
+ lg v0, 0(vp)
+ lg v1, 8(vp)
+ tmll un, 1
+ srlg un, un, 1
+ je L(evn)
+
+L(odd): lg %r7, 0(up)
+ mlgr %r6, v0 C W2 W1
+ lg %r1, 0(up)
+ stg %r7, 0(rp)
+ lghi idx, 8
+dnl clgije un, 0, L(end)
+ j L(top)
+
+L(evn): lghi %r6, 0
+ lghi idx, 0
+ lghi %r1, 0
+
+L(top): lg %r9, 0(idx, up)
+ mlgr %r0, v1 C W2 W1
+ mlgr %r8, v1 C W3 W2
+ vlvgp %v22, %r0, %r1 C W2 W1
+ vlvgp %v23, %r9, %r6 C W2 W1
+ lg %r1, 0(idx, up)
+ lg %r7, 8(idx, up)
+ mlgr %r0, v0 C W2 W1
+ mlgr %r6, v0 C W3 W2
+ vlvgp %v20, %r0, %r1 C W2 W1
+ vlvgp %v21, %r7, %r10 C W2 W1
+ vacq %v24, %v22, %v23, %v27 C
+ vacccq %v27, %v22, %v23, %v27 C carry critical path 1
+ vacq %v23, %v24, %v20, %v28 C
+ vacccq %v28, %v24, %v20, %v28 C carry critical path 2
+ vacq %v20, %v23, %v21, %v29 C
+ vacccq %v29, %v23, %v21, %v29 C carry critical path 3
+ vpdi %v20, %v20, %v20, 4
+ lg %r1, 8(idx, up)
+ vst %v20, 0(idx, rp), 3
+ lgr %r10, %r8
+ la idx, 16(idx)
+ brctg un, L(top)
+
+L(end): mlgr %r0, v1
+ algr %r1, %r6
+ alcgr %r0, un
+ algr %r1, %r8
+ alcgr %r0, un
+ vag %v27, %v27, %v28
+ vag %v29, %v29, %v30
+ vag %v27, %v27, %v29
+ vlgvg %r10, %v27, 1
+ algr %r1, %r10
+ stg %r1, 0(idx, rp)
+ alcgr %r0, un
+ lgr %r2, %r0
+
+ lmg %r6, %r12, 48(%r15)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm
new file mode 100644
index 0000000..0de1150
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.asm
@@ -0,0 +1,264 @@
+dnl S/390-64 mpn_mul_basecase.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+C INPUT PARAMETERS
+define(`rp', `%r2')
+define(`ap', `%r3')
+define(`an', `%r4') C 32
+define(`bp', `%r5') C 40
+define(`bn', `%r6') C 48
+
+define(`idx', `%r14')
+define(`b0', `%r10')
+
+dnl live in addmul_1:
+dnl r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14
+dnl xx xx rp ap an bp xx xx xx xx b0 i xx xx idx
+dnl stack: bn
+
+dnl TODO
+dnl * Have mul_1 start without initial (un mod 4) separation, instead handle
+dnl after loop. Then fall into 4 separate addmul_1 loops.
+dnl * Streamline handling of bn, an, %r11 to reduce the # if memops.
+
+define(`MUL_1',`
+pushdef(`L',
+defn(`L')$1`'_m1)
+ vzero %v2
+ srlg %r11, %r0, 2
+
+ tmll %r0, 1
+ je L(bx0)
+L(bx1): tmll %r0, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ lg %r13, 0(ap)
+ mlgr %r12, b0
+ stg %r13, 0(rp)
+ cgijne %r11, 0, L(cj0)
+
+L(1): stg %r12, 8(rp)
+ lmg %r6, %r14, 48(%r15)
+ br %r14
+
+L(b11): lghi idx, -8
+ lg %r9, 0(ap)
+ mlgr %r8, b0
+ stg %r9, 0(rp)
+ j L(cj1)
+
+L(bx0): tmll %r0, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lghi %r12, 0
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lghi %r8, 0
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vacq %v1, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vacq %v3, %v6, %v7, %v2
+ vacccq %v2, %v6, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vlgvg %r0, %v2, 1
+ algr %r0, %r12
+ stg %r0, 32(idx, rp)
+popdef(`L')
+')
+
+define(`ADDMUL_1',`
+pushdef(`L',
+defn(`L')$1`'_am1)
+ vzero %v0
+ vzero %v2
+ srlg %r11, %r0, 2
+
+ tmll %r0, 1
+ je L(bx0)
+L(bx1): tmll %r0, 2
+ jne L(b11)
+
+L(b01): lghi idx, -24
+ vleg %v2, 0(rp), 1
+ lg %r13, 0(ap)
+ vzero %v4
+ mlgr %r12, b0
+ vlvgg %v4, %r13, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj0)
+
+L(b11): lghi idx, -8
+ vleg %v2, 0(rp), 1
+ lg %r9, 0(ap)
+ vzero %v4
+ mlgr %r8, b0
+ vlvgg %v4, %r9, 1
+ vaq %v2, %v2, %v4
+ vsteg %v2, 0(rp), 1
+ vmrhg %v2, %v2, %v2
+ j L(cj1)
+
+L(bx0): tmll %r0, 2
+ jne L(b10)
+L(b00): lghi idx, -32
+ lghi %r12, 0
+L(cj0): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+ j L(mid)
+
+L(b10): lghi idx, -16
+ lghi %r8, 0
+L(cj1): lg %r7, 16(idx, ap)
+ lg %r13, 24(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ cgije %r11, 0, L(end)
+
+L(top): lg %r1, 32(idx, ap)
+ lg %r9, 40(idx, ap)
+ mlgr %r0, b0
+ mlgr %r8, b0
+ vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+ vlvgp %v6, %r0, %r1
+ vlvgp %v7, %r9, %r12
+L(mid): lg %r7, 48(idx, ap)
+ lg %r13, 56(idx, ap)
+ mlgr %r6, b0
+ mlgr %r12, b0
+ vl %v4, 32(idx, rp), 3
+ vpdi %v4, %v4, %v4, 4
+ vacq %v5, %v6, %v4, %v0
+ vacccq %v0, %v6, %v4, %v0
+ vacq %v1, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v1, %v1, %v1, 4
+ vst %v1, 32(idx, rp), 3
+ vlvgp %v6, %r6, %r7
+ vlvgp %v7, %r13, %r8
+ la idx, 32(idx)
+ brctg %r11, L(top)
+
+L(end): vl %v1, 16(idx, rp), 3
+ vpdi %v1, %v1, %v1, 4
+ vacq %v5, %v6, %v1, %v0
+ vacccq %v0, %v6, %v1, %v0
+ vacq %v3, %v5, %v7, %v2
+ vacccq %v2, %v5, %v7, %v2
+ vpdi %v3, %v3, %v3, 4
+ vst %v3, 16(idx, rp), 3
+
+ vag %v2, %v0, %v2
+ vlgvg %r0, %v2, 1
+ algr %r0, %r12
+ stg %r0, 32(idx, rp)
+popdef(`L')
+')
+
+
+ASM_START()
+
+PROLOGUE(mpn_mul_basecase)
+ stmg %r4, %r14, 32(%r15)
+
+ lgr %r4, bn
+
+ lg %r0, 32(%r15)
+ lg b0, 0(bp)
+ MUL_1() C implicitly pass r0 = an
+
+ aghi %r4, -1
+ je L(end)
+L(top): lg %r0, 32(%r15)
+ la bp, 8(bp)
+ la rp, 8(rp)
+ lg b0, 0(bp)
+ ADDMUL_1() C implicitly pass r0 = an
+ brctg %r4, L(top)
+
+L(end): lmg %r6, %r14, 48(%r15)
+ br %r14
+EPILOGUE()
+ .section .note.GNU-stack
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c
new file mode 100644
index 0000000..f1b7160
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/mul_basecase.c
@@ -0,0 +1,124 @@
+/* mpn_mul_basecase for IBM z13 and later -- Internal routine to multiply two
+ natural numbers of length m and n.
+
+ THIS IS AN INTERNAL FUNCTION WITH A MUTABLE INTERFACE. IT IS ONLY
+ SAFE TO REACH THIS FUNCTION THROUGH DOCUMENTED INTERFACES.
+
+Copyright 2021 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include <stdlib.h>
+
+#include "gmp-impl.h"
+
+/* Note: we explicitly inline all mul and addmul routines here to reduce the
+ * number of branches in prologues of unrolled functions. That comes at the
+ cost of duplicating common loop bodies in object code. */
+#define DO_INLINE
+
+/*
+ * tweak loop conditions in addmul subroutines to enable use of
+ * branch-relative-on-count (BRCTG) instructions, which currently results in
+ * better performance.
+ */
+#define BRCTG
+
+#include "s390_64/z13/common-vec.h"
+
+#define OPERATION_mul_1
+#include "s390_64/z13/addmul_1.c"
+#undef OPERATION_mul_1
+
+#define OPERATION_addmul_1
+#include "s390_64/z13/addmul_1.c"
+#undef OPERATION_addmul_1
+
+#define OPERATION_mul_2
+#include "s390_64/z13/aormul_2.c"
+#undef OPERATION_mul_2
+
+#define OPERATION_addmul_2
+#include "s390_64/z13/aormul_2.c"
+#undef OPERATION_addmul_2
+
+void
+mpn_mul_basecase (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp,
+ mp_size_t vn)
+{
+ ASSERT (un >= vn);
+ ASSERT (vn >= 1);
+ ASSERT (!MPN_OVERLAP_P (rp, un + vn, up, un));
+ ASSERT (!MPN_OVERLAP_P (rp, un + vn, vp, vn));
+
+ /* The implementations of (add)mul_1/2 are 4x-unrolled. Pull out the branch
+ * for un%4 and inline specific variants. */
+
+#define BRANCH_FOR_MOD(N) \
+ do \
+ { \
+ if (vn >= 2) \
+ { \
+ rp[un + 1] = inline_mul_2 (rp, up, un, vp); \
+ rp += 2, vp += 2, vn -= 2; \
+ } \
+ else \
+ { \
+ rp[un] = inline_mul_1 (rp, up, un, vp[0]); \
+ return; \
+ } \
+ \
+ while (vn >= 2) \
+ { \
+ rp[un + 2 - 1] = inline_addmul_2 (rp, up, un, vp); \
+ rp += 2, vp += 2, vn -= 2; \
+ } \
+ \
+ while (vn >= 1) \
+ { \
+ rp[un] = inline_addmul_1 (rp, up, un, vp[0]); \
+ rp += 1, vp += 1, vn -= 1; \
+ } \
+ } \
+ while (0);
+
+ switch (((size_t)un) % 4)
+ {
+ case 0:
+ BRANCH_FOR_MOD (0);
+ break;
+ case 1:
+ BRANCH_FOR_MOD (1);
+ break;
+ case 2:
+ BRANCH_FOR_MOD (2);
+ break;
+ case 3:
+ BRANCH_FOR_MOD (3);
+ break;
+ }
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm
new file mode 100644
index 0000000..35b1fc4
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/popcount.asm
@@ -0,0 +1,69 @@
+dnl S/390-64 mpn_popcount
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 ?
+
+define(`ap', `%r2')
+define(`n', `%r3')
+
+ASM_START()
+PROLOGUE(mpn_popcount)
+ vzero %v30
+ tmll n, 1
+ srlg n, n, 1
+ je L(top)
+
+L(odd): vllezg %v16, 0(ap)
+ vpopct %v30, %v16, 3
+ la ap, 8(ap)
+ clgije n, 0, L(end)
+
+L(top): vl %v16, 0(ap), 3
+ vpopct %v20, %v16, 3
+ vag %v30, %v30, %v20
+ la ap, 16(ap)
+ brctg n, L(top)
+
+L(end): vzero %v29
+ vsumqg %v30, %v30, %v29
+ vlgvg %r2, %v30, 1(%r0)
+ br %r14
+EPILOGUE()
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c b/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c
new file mode 100644
index 0000000..91dc47c
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/sqr_basecase.c
@@ -0,0 +1,82 @@
+/* mpn_sqr_basecase -- Internal routine to square a natural number of length n.
+ This is a place-holder for z13 to suppress the use of the plain z/arch code.
+ FIXME: This should really be written in assembly with outer-loop early exit.
+
+ THIS IS AN INTERNAL FUNCTION WITH A MUTABLE INTERFACE. IT IS ONLY
+ SAFE TO REACH THIS FUNCTION THROUGH DOCUMENTED INTERFACES.
+
+
+Copyright 1991-1994, 1996, 1997, 2000-2005, 2008, 2010, 2011, 2017, 2023 Free
+Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of either:
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your
+ option) any later version.
+
+or
+
+ * the GNU General Public License as published by the Free Software
+ Foundation; either version 2 of the License, or (at your option) any
+ later version.
+
+or both in parallel, as here.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received copies of the GNU General Public License and the
+GNU Lesser General Public License along with the GNU MP Library. If not,
+see https://www.gnu.org/licenses/. */
+
+#include "gmp-impl.h"
+#include "longlong.h"
+
+void
+mpn_sqr_basecase (mp_ptr rp, mp_srcptr up, mp_size_t un)
+{
+  mp_limb_t u0;   /* current multiplier limb (doubled for cross products) */
+  mp_limb_t cin;  /* carry propagated from one diagonal step to the next */
+
+  u0 = up[0];
+  umul_ppmm (cin, rp[0], u0, u0);  /* rp[0] = lo(u0^2), cin = hi(u0^2) */
+  ++rp;
+
+  if (--un) {
+    u0 = u0 << 1;  /* double: each cross product appears twice in a square */
+    up += 1;
+
+    rp[un] = mpn_mul_1c (rp, up, un, u0, cin);
+
+    for (;;) {
+      mp_limb_t ci, x0, c0, hi, lo, x1, c1;
+
+      u0 = up[0];
+      ci = -(up[-1] >> (GMP_NUMB_BITS-1)) & u0; /* correction term */
+      x0 = rp[1] + ci;
+      c0 = x0 < ci;
+      /* (dead "hi, lo;" no-op removed; both are assigned just below) */
+
+      umul_ppmm (hi, lo, u0, u0);  /* square of the current limb */
+      x1 = x0 + lo;
+      c1 = x1 < lo;
+      cin = hi + c0 + c1;  /* carry out of this diagonal step */
+      rp[1] = x1;
+      rp += 2;
+
+      if (--un == 0) break;
+      u0 = (up[-1] >> (GMP_NUMB_BITS-1)) + (u0 << 1);  /* 2*up[0] + shifted-in bit */
+      up += 1;
+
+      rp[un] = mpn_addmul_1c (rp, up, un, u0, cin);
+    }
+  }
+
+  rp[0] = cin;
+}
diff --git a/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm b/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm
new file mode 100644
index 0000000..64f0628
--- /dev/null
+++ b/vendor/gmp-6.3.0/mpn/s390_64/z13/submul_1.asm
@@ -0,0 +1,168 @@
+dnl S/390-64 mpn_submul_1
+dnl Based on C code contributed by Marius Hillenbrand.
+
+dnl Copyright 2023 Free Software Foundation, Inc.
+
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or modify
+dnl it under the terms of either:
+dnl
+dnl * the GNU Lesser General Public License as published by the Free
+dnl Software Foundation; either version 3 of the License, or (at your
+dnl option) any later version.
+dnl
+dnl or
+dnl
+dnl * the GNU General Public License as published by the Free Software
+dnl Foundation; either version 2 of the License, or (at your option) any
+dnl later version.
+dnl
+dnl or both in parallel, as here.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+dnl for more details.
+dnl
+dnl You should have received copies of the GNU General Public License and the
+dnl GNU Lesser General Public License along with the GNU MP Library. If not,
+dnl see https://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+dnl TODO
+dnl * Schedule vlvgp away from mlgr; that saves 20% of the run time.
+dnl * Perhaps use vp[0]/vp[1] in innerloop instead preloading v0/v1.
+
+C cycles/limb
+C z900 -
+C z990 -
+C z9 -
+C z10 -
+C z196 -
+C z12 ?
+C z13 ?
+C z14 ?
+C z15 2.55
+
+
+define(`rp', `%r2')			C result/minuend limb pointer (argument 1)
+define(`ap', `%r3')			C multiplicand limb pointer (argument 2)
+define(`an', `%r4')			C limb count (argument 3)
+define(`b0', `%r5')			C multiplier limb (argument 4)
+define(`cy', `%r6')			C carry-in name; not referenced in this routine
+
+define(`idx', `%r4')			C an's register is reused as running byte index
+
+ASM_START()
+
+PROLOGUE(mpn_submul_1)
+	stmg	%r6, %r13, 48(%r15)	C save callee-saved GPRs
+L(ent):	vzero	%v0			C v0 = carry state of the product accumulation
+	vone	%v2			C v2 = all ones; initial borrow state for vsbiq
+	srlg	%r11, an, 2		C r11 = an / 4 = iterations of the 4-limb loop
+
+	tmll	an, 1			C dispatch on an mod 4
+	je	L(bx0)
+L(bx1):	tmll	an, 2
+	jne	L(b11)
+
+L(b01):	lghi	idx, -24		C bias index so loop displacements line up
+	vleg	%v2, 0(rp), 1		C load rp[0] into element 1 of v2
+	lg	%r13, 0(ap)
+	vzero	%v4
+	mlgr	%r12, b0		C r12:r13 = ap[0] * b0
+	vlvgg	%v4, %r13, 1		C v4 = low product limb in element 1
+	vsq	%v2, %v2, %v4		C 128-bit subtract: rp[0] - lo(product)
+	vsteg	%v2, 0(rp), 1		C store the difference limb
+	vmrhg	%v2, %v2, %v2		C replicate high doubleword (borrow) to both halves
+	cgije	%r11, 0, L(1)		C an == 1: finish immediately
+	j	L(cj0)
+
+L(b11):	lghi	idx, -8
+	vleg	%v2, 0(rp), 1		C load rp[0] into element 1 of v2
+	lg	%r9, 0(ap)
+	vzero	%v4
+	mlgr	%r8, b0			C r8:r9 = ap[0] * b0
+	vlvgg	%v4, %r9, 1
+	vsq	%v2, %v2, %v4		C rp[0] - lo(product), keep borrow in v2
+	vsteg	%v2, 0(rp), 1
+	vmrhg	%v2, %v2, %v2
+	j	L(cj1)
+
+L(bx0):	tmll	an, 2
+	jne	L(b10)
+L(b00):	lghi	idx, -32
+	lghi	%r12, 0			C no high product limb carried in
+L(cj0):	lg	%r1, 32(idx, ap)
+	lg	%r9, 40(idx, ap)
+	mlgr	%r0, b0			C r0:r1 = ap[i] * b0
+	mlgr	%r8, b0			C r8:r9 = ap[i+1] * b0
+	vlvgp	%v6, %r0, %r1		C pack product halves for 128-bit adds
+	vlvgp	%v7, %r9, %r12
+	j	L(mid)
+
+L(b10):	lghi	idx, -16
+	lghi	%r8, 0			C no high product limb carried in
+L(cj1):	lg	%r7, 16(idx, ap)
+	lg	%r13, 24(idx, ap)
+	mlgr	%r6, b0			C r6:r7 = ap[i] * b0
+	mlgr	%r12, b0		C r12:r13 = ap[i+1] * b0
+	vlvgp	%v6, %r6, %r7
+	vlvgp	%v7, %r13, %r8
+	cgije	%r11, 0, L(end)
+
+L(top):	lg	%r1, 32(idx, ap)	C load next two multiplicand limbs
+	lg	%r9, 40(idx, ap)
+	mlgr	%r0, b0
+	mlgr	%r8, b0
+	vl	%v1, 16(idx, rp), 3	C load two result limbs
+	vpdi	%v1, %v1, %v1, 4	C swap doublewords into limb order
+	vacq	%v5, %v6, %v7, %v0	C v5 = staggered product sum + carry-in
+	vacccq	%v0, %v6, %v7, %v0	C v0 = carry out of that add
+	vsbiq	%v3, %v1, %v5, %v2	C rp pair - product sum, with borrow-in
+	vsbcbiq	%v2, %v1, %v5, %v2	C v2 = borrow out
+	vpdi	%v3, %v3, %v3, 4	C swap back to memory order
+	vst	%v3, 16(idx, rp), 3	C store two result limbs
+	vlvgp	%v6, %r0, %r1
+	vlvgp	%v7, %r9, %r12
+L(mid):	lg	%r7, 48(idx, ap)	C second half of the unrolled body
+	lg	%r13, 56(idx, ap)
+	mlgr	%r6, b0
+	mlgr	%r12, b0
+	vl	%v4, 32(idx, rp), 3
+	vpdi	%v4, %v4, %v4, 4
+	vacq	%v5, %v6, %v7, %v0
+	vacccq	%v0, %v6, %v7, %v0
+	vsbiq	%v1, %v4, %v5, %v2
+	vsbcbiq	%v2, %v4, %v5, %v2
+	vpdi	%v1, %v1, %v1, 4
+	vst	%v1, 32(idx, rp), 3
+	vlvgp	%v6, %r6, %r7
+	vlvgp	%v7, %r13, %r8
+	la	idx, 32(idx)		C advance four limbs
+	brctg	%r11, L(top)
+
+L(end):	vl	%v1, 16(idx, rp), 3	C wind-down: same pattern as first loop half
+	vpdi	%v1, %v1, %v1, 4
+	vacq	%v5, %v6, %v7, %v0
+	vacccq	%v0, %v6, %v7, %v0
+	vsbiq	%v3, %v1, %v5, %v2
+	vsbcbiq	%v2, %v1, %v5, %v2
+	vpdi	%v3, %v3, %v3, 4
+	vst	%v3, 16(idx, rp), 3
+
+	vsg	%v2, %v0, %v2		C combine product carry with subtract borrow
+	vlgvg	%r2, %v2, 1		C extract the low doubleword
+	algr	%r2, %r12		C add high limb of the last product
+	aghi	%r2, 1			C fold borrow convention into returned limb
+	lmg	%r6, %r13, 48(%r15)	C restore callee-saved GPRs
+	br	%r14
+L(1):	vsg	%v2, %v0, %v2		C single-limb epilogue (an == 1)
+	vlgvg	%r2, %v2, 1
+	algr	%r2, %r12
+	aghi	%r2, -1			C NOTE(review): opposite adjustment to the
+	lmg	%r6, %r13, 48(%r15)	C   multi-limb path; borrow seed differs there
+	br	%r14
+EPILOGUE()