aboutsummaryrefslogtreecommitdiff
path: root/vendor/gmp-6.3.0/mpn/generic/mode1o.c
blob: 9ba0ae1ccf9219af763dd3b30785a47c8ddf5b94 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/* mpn_modexact_1c_odd -- mpn by limb exact division style remainder.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000-2004 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"


/* Calculate an r satisfying

           r*B^k + a - c == q*d

   where B=2^GMP_LIMB_BITS, a is {src,size}, k is either size or size-1
   (the caller won't know which), and q is the quotient (discarded).  d must
   be odd, c can be any limb value.

   If c<d then r will be in the range 0<=r<d, or if c>=d then 0<=r<=d.

   This slightly strange function suits the initial Nx1 reduction for GCDs
   or Jacobi symbols since the factors of 2 in B^k can be ignored, leaving
   -r == a mod d (by passing c=0).  For a GCD the factor of -1 on r can be
   ignored, or for the Jacobi symbol it can be accounted for.  The function
   also suits divisibility and congruence testing since if r=0 (or r=d) is
   obtained then a==c mod d.


   r is a bit like the remainder returned by mpn_divexact_by3c, and is the
   sort of remainder mpn_divexact_1 might return.  Like mpn_divexact_by3c, r
   represents a borrow, since effectively quotient limbs are chosen so that
   subtracting that multiple of d from src at each step will produce a zero
   limb.

   A long calculation can be done piece by piece from low to high by passing
   the return value from one part as the carry parameter to the next part.
   The effective final k becomes anything between size and size-n, if n
   pieces are used.


   A similar sort of routine could be constructed based on adding multiples
   of d at each limb, much like redc in mpz_powm does.  Subtracting however
   has a small advantage that when subtracting to cancel out l there's never
   a borrow into h, whereas using an addition would put a carry into h
   depending whether l==0 or l!=0.


   In terms of efficiency, this function is similar to a mul-by-inverse
   mpn_mod_1.  Both are essentially two multiplies and are best suited to
   CPUs with low latency multipliers (in comparison to a divide instruction
   at least.)  But modexact has a few fewer supplementary operations, only
   needs low part and high part multiplies, and has fewer working quantities
   (helping CPUs with few registers).


   In the main loop it will be noted that the new carry (call it r) is the
   sum of the high product h and any borrow from l=s-c.  If c<d then we will
   have r<d too, for the following reasons.  Let q=l*inverse be the quotient
   limb, so that q*d = B*h + l, where B=2^GMP_NUMB_BITS.  Now if h=d-1 then

       l = q*d - B*(d-1) <= (B-1)*d - B*(d-1) = B-d

   But if l=s-c produces a borrow when c<d, then l>=B-d+1 and hence will
   never have h=d-1 and so r=h+borrow <= d-1.

   When c>=d, on the other hand, h=d-1 can certainly occur together with a
   borrow, thereby giving only r<=d, as per the function definition above.

   As a design decision it's left to the caller to check for r=d if it might
   be passing c>=d.  Several applications have c<d initially so the extra
   test is often unnecessary, for example the GCDs or a plain divisibility
   d|a test will pass c=0.


   The special case for size==1 is so that it can be assumed c<=d in the
   high<=divisor test at the end.  c<=d is only guaranteed after at least
   one iteration of the main loop.  There's also a decent chance one % is
   faster than a binvert_limb, though that will depend on the processor.

   A CPU specific implementation might want to omit the size==1 code or the
   high<divisor test.  mpn/x86/k6/mode1o.asm for instance finds neither
   useful.  */


/* Return r satisfying r*B^k + a - c == q*d, where a is {src,size},
   B=2^GMP_NUMB_BITS, k is size or size-1, and q is a discarded quotient.
   d must be odd.  Per the contract described in the comment above: if
   orig_c<d then 0<=r<d, otherwise 0<=r<=d (caller checks for r==d).  */
mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d,
                     mp_limb_t orig_c)
{
  mp_limb_t  s, h, l, inverse, dummy, dmul, ret;
  mp_limb_t  c = orig_c;
  mp_size_t  i;

  ASSERT (size >= 1);
  ASSERT (d & 1);  /* d odd: required for the modular inverse to exist */
  ASSERT_MPN (src, size);
  ASSERT_LIMB (d);
  ASSERT_LIMB (c);

  /* size==1 uses a plain "%" so that c<=d can be assumed in the s<=d
     test below; see the "special case for size==1" comment above.  */
  if (size == 1)
    {
      s = src[0];
      if (s > c)
	{
	  /* r must satisfy r*B + s - c == q*d, i.e. -r == s-c mod d,
	     so negate the plain remainder (keeping 0 as 0).  */
	  l = s-c;
	  h = l % d;
	  if (h != 0)
	    h = d - h;
	}
      else
	{
	  /* c-s >= 0, so -(s-c) mod d is just (c-s) % d directly.  */
	  l = c-s;
	  h = l % d;
	}
      return h;
    }


  /* inverse = 1/d mod 2^GMP_NUMB_BITS, used to pick quotient limbs that
     cancel each source limb exactly.  */
  binvert_limb (inverse, d);
  dmul = d << GMP_NAIL_BITS;

  /* Main loop over limbs 0 .. size-2: subtract the carry, multiply by
     the inverse to get the quotient limb, and take the high product h
     (plus any borrow folded into c by SUBC_LIMB) as the new carry.
     See the header comment for the proof that c<d is maintained.  */
  i = 0;
  do
    {
      s = src[i];
      SUBC_LIMB (c, l, s, c);
      l = (l * inverse) & GMP_NUMB_MASK;
      umul_ppmm (h, dummy, l, dmul);  /* dummy (low product) is always 0 by construction */
      c += h;
    }
  while (++i < size-1);


  /* Top limb: when it's small (s<=d) the last multiply can be replaced
     by a subtract and conditional addback.  */
  s = src[i];
  if (s <= d)
    {
      /* With high<=d the final step can be a subtract and addback.  If c==0
	 then the addback will restore to l>=0.  If c==d then will get l==d
	 if s==0, but that's ok per the function definition.  */

      l = c - s;
      if (c < s)
	l += d;

      ret = l;
    }
  else
    {
      /* Can't skip a divide, just do the loop code once more. */

      SUBC_LIMB (c, l, s, c);
      l = (l * inverse) & GMP_NUMB_MASK;
      umul_ppmm (h, dummy, l, dmul);
      c += h;
      ret = c;
    }

  /* Enforce the documented range: r<d when the initial carry was <d,
     otherwise r<=d.  */
  ASSERT (orig_c < d ? ret < d : ret <= d);
  return ret;
}



#if 0

/* The following is an alternate form that might shave one cycle on a
   superscalar processor since it takes c+=h off the dependent chain,
   leaving just a low product, high product, and a subtract.

   This is for CPU specific implementations to consider.  A special case for
   high<divisor and/or size==1 can be added if desired.

   Notice that c is only ever 0 or 1, since if s-c produces a borrow then
   x=0xFF..FF and x-h cannot produce a borrow.  The c=(x>s) could become
   c=(x==0xFF..FF) too, if that helped.  */

/* Alternate (disabled) form of mpn_modexact_1c_odd; see the comment above
   for the rationale.  The incoming carry arrives as h, and the s-c and
   x-h subtractions are split so the high product h is off the critical
   dependency chain.  NOTE(review): reference code only — kept under
   "#if 0" for CPU-specific implementers to adapt.  */
mp_limb_t
mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t h)
{
  mp_limb_t  s, x, y, inverse, dummy, dmul, c1, c2;
  mp_limb_t  c = 0;
  mp_size_t  i;

  ASSERT (size >= 1);
  ASSERT (d & 1);  /* d odd: required for binvert_limb */

  binvert_limb (inverse, d);
  dmul = d << GMP_NAIL_BITS;

  for (i = 0; i < size; i++)
    {
      /* As argued in the comment above: a borrow from s-c gives
	 x=0xFF..FF, so x-h can't also borrow; hence c is 0 or 1.  */
      ASSERT (c==0 || c==1);

      s = src[i];
      SUBC_LIMB (c1, x, s, c);   /* subtract previous borrow */

      SUBC_LIMB (c2, y, x, h);   /* subtract previous high product */
      c = c1 + c2;               /* at most one of c1,c2 is set */

      y = (y * inverse) & GMP_NUMB_MASK;  /* quotient limb cancelling y */
      umul_ppmm (h, dummy, y, dmul);      /* new high product for next limb */
    }

  /* Fold the final borrow into the last high product for the result.  */
  h += c;
  return h;
}

#endif