/* UltraSPARC 64 mpn_modexact_1c_odd -- mpn by limb exact style remainder.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000-2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"


/*                 64-bit divisor   32-bit divisor
                    cycles/limb      cycles/limb
                     (approx)         (approx)
   Ultrasparc 2i:       ?                ?
*/


/* This implementation reduces the number of multiplies done, knowing that
   on ultrasparc 1 and 2 the mulx instruction stalls the whole chip.

   The key idea is to use the fact that the low limb of q*d equals l, that
   being the whole purpose of calculating q.  It means there's no need to
   compute the lowest 32x32->64 part of q*d; instead it can be inferred
   from l and the other three 32x32->64 parts.  See sparc64.h for details,
   and the illustrative sketch following this comment.

   When d is 32 bits, the same applies, but in this case there's only one
   other 32x32->64 part (i.e. HIGH(q)*d).

   The net effect is that for 64-bit divisor each limb is 4 mulx, or for
   32-bit divisor each is 2 mulx.

   Enhancements:

   No doubt this could be done in assembler, if that helped the scheduling,
   or perhaps guaranteed good code irrespective of the compiler.

   Alternatives:

   It might be possible to use floating point.  The loop is dominated by
   multiply latency, so it's not clear that floats would improve that.  One
   possibility would be to take two limbs at a time, with a 128 bit inverse,
   if there are enough registers, which could effectively use float
   throughput to reduce total latency across two limbs.  */
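
/* Illustrative sketch (not compiled): a portable model of how the high limb
   of q*d can be recovered without the LOW(q)*LOW(d) multiply, given that
   the low 64 bits of q*d are already known to equal l.  This only sketches
   the idea behind umul_ppmm_lowequal; the real macro lives in sparc64.h and
   may differ in detail.  Assumes 64-bit limbs (uint64_t); the function name
   is made up for illustration.  */
#if 0
#include <stdint.h>

static uint64_t
high_limb_lowequal (uint64_t q, uint64_t d, uint64_t l)
{
  uint64_t qh = q >> 32, ql = q & 0xFFFFFFFF;
  uint64_t dh = d >> 32, dl = d & 0xFFFFFFFF;

  uint64_t p_hh = qh * dh;             /* HIGH(q)*HIGH(d) */
  uint64_t p_hl = qh * dl;             /* HIGH(q)*LOW(d)  */
  uint64_t p_lh = ql * dh;             /* LOW(q)*HIGH(d)  */

  uint64_t mid = p_hl + p_lh;          /* middle sum, may wrap mod 2^64 */
  uint64_t mid_ovf = (mid < p_hl);     /* carry out of that addition */
  uint64_t mid_lo = mid & 0xFFFFFFFF;
  uint64_t mid_hi = (mid >> 32) + (mid_ovf << 32);

  /* Since the low 64 bits of q*d equal l, the carry out of
     LOW(q)*LOW(d) + (mid_lo << 32) is 1 exactly when HIGH32(l) < mid_lo.  */
  uint64_t carry = (l >> 32) < mid_lo;

  return p_hh + mid_hi + carry;        /* == HIGH64(q*d) */
}
#endif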

#define ASSERT_RETVAL(r)                \
  ASSERT (orig_c < d ? r < d : r <= d)
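
/* Illustrative sketch (not compiled): binvert_limb (from gmp-impl.h) gives
   the multiplicative inverse of the odd divisor d modulo 2^GMP_NUMB_BITS.
   On a 64-bit limb the same value can be obtained with the textbook
   Newton/Hensel iteration below; the real binvert_limb may be seeded
   differently (e.g. from a table) but computes the same inverse.  The
   function name is made up for illustration.  */
#if 0
#include <stdint.h>

static uint64_t
binary_inverse (uint64_t d)    /* d must be odd */
{
  uint64_t x = d;     /* correct to 3 bits, since d*d == 1 (mod 8) for odd d */
  x *= 2 - d * x;     /* 6 bits  */
  x *= 2 - d * x;     /* 12 bits */
  x *= 2 - d * x;     /* 24 bits */
  x *= 2 - d * x;     /* 48 bits */
  x *= 2 - d * x;     /* 96 bits >= 64, so now x*d == 1 (mod 2^64) */
  return x;
}
#endif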

mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t orig_c)
{
  mp_limb_t  c = orig_c;
  mp_limb_t  s, l, q, h, inverse;

  ASSERT (size >= 1);
  ASSERT (d & 1);
  ASSERT_MPN (src, size);
  ASSERT_LIMB (d);
  ASSERT_LIMB (c);

  /* udivx is faster than 10 or 12 mulx's for one limb via an inverse */
  if (size == 1)
    {
      s = src[0];
      if (s > c)
	{
	  l = s-c;
	  h = l % d;
	  if (h != 0)
	    h = d - h;
	}
      else
	{
	  l = c-s;
	  h = l % d;
	}
      return h;
    }

  binvert_limb (inverse, d);
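  /* inverse now satisfies inverse*d == 1 (mod 2^GMP_NUMB_BITS, i.e. 2^64
     here), so q = l * inverse in the loops below makes q*d have low limb
     exactly l, which is what the _lowequal multiplies rely on.  See also
     the illustrative binary_inverse sketch above.  */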

  if (d <= 0xFFFFFFFF)
    {
      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);
          s = *src++;
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;
          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With the high limb s <= d the final step can be a subtract and
             addback.  If c==0 then the addback will restore l>=0.  If c==d
             then we get l==d when s==0, but that's ok per the function
             definition.  */

          l = c - s;
          l += (l > c ? d : 0);

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
  else
    {
      mp_limb_t  dl = LOW32 (d);
      mp_limb_t  dh = HIGH32 (d);
      long i;

      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);
          s = *src++;
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;
          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With the high limb s <= d the final step can be a subtract and
             addback.  If c==0 then the addback will restore l>=0.  If c==d
             then we get l==d when s==0, but that's ok per the function
             definition.  */

          l = c - s;
          l += (l > c ? d : 0);

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
}
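
/* Illustrative usage sketch (not compiled): mpn_modexact_1c_odd is internal
   to GMP (declared in gmp-impl.h).  A typical use is a divisibility-style
   test: with c == 0 the return value is zero exactly when the odd divisor d
   divides {src,size}.  The helper name below is made up for illustration.  */
#if 0
static int
divisible_by_odd_limb (mp_srcptr src, mp_size_t size, mp_limb_t d)
{
  ASSERT (d & 1);
  return mpn_modexact_1c_odd (src, size, d, CNST_LIMB (0)) == 0;
}
#endif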