aboutsummaryrefslogtreecommitdiff
path: root/vendor/gmp-6.3.0/mpn/x86/k6/mmx/dive_1.asm
blob: 1bbad3a3185ab8294f823fe4ca1f2d1289e955d3 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
dnl  AMD K6 mpn_divexact_1 -- mpn by limb exact division.

dnl  Copyright 2000-2002, 2007 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')


C         divisor
C       odd   even
C K6:   10.0  12.0  cycles/limb
C K6-2: 10.0  11.5


C void mpn_divexact_1 (mp_ptr dst, mp_srcptr src, mp_size_t size,
C                      mp_limb_t divisor);
C
C A simple divl is used for size==1.  This is about 10 cycles faster for an
C odd divisor or 20 cycles for an even divisor.
C
C The loops are quite sensitive to code alignment, speeds should be
C rechecked (odd and even divisor, pic and non-pic) if contemplating
C changing anything.

defframe(PARAM_DIVISOR,16)
defframe(PARAM_SIZE,   12)
defframe(PARAM_SRC,    8)
defframe(PARAM_DST,    4)

dnl  re-use parameter space
define(VAR_INVERSE,`PARAM_DST')

	TEXT

	ALIGN(32)
PROLOGUE(mpn_divexact_1)
deflit(`FRAME',0)

	C Exact division dst[] = src[] / divisor, valid only when the
	C division is known to leave no remainder.  Instead of real
	C divisions, each quotient limb is obtained by multiplying with the
	C inverse of the divisor mod 2^32 and propagating a borrow from low
	C limbs to high (Jebelean's exact division method).

	movl	PARAM_SIZE, %ecx

	movl	PARAM_SRC, %eax
	xorl	%edx, %edx		C high limb 0 for divl; also the twos count below

	cmpl	$1, %ecx
	jnz	L(two_or_more)

	C size==1: a single plain division beats the inverse setup cost
	movl	(%eax), %eax

	divl	PARAM_DIVISOR		C edx:eax / divisor, edx already zero

	movl	PARAM_DST, %ecx
	movl	%eax, (%ecx)

	ret


L(two_or_more):
	movl	PARAM_DIVISOR, %eax
	pushl	%ebx		FRAME_pushl()

	movl	PARAM_SRC, %ebx
	pushl	%ebp		FRAME_pushl()

L(strip_twos):
	C strip low zero bits off the divisor, counting them in edx
	shrl	%eax
	incl	%edx			C will get shift+1

	jnc	L(strip_twos)
	pushl	%esi		FRAME_pushl()

	leal	1(%eax,%eax), %esi	C d without twos (odd)
	andl	$127, %eax		C d/2, 7 bits

	C table lookup gives an inverse of odd d to 8 bits, indexed by d/2
ifdef(`PIC',`
	LEA(	binvert_limb_table, %ebp)
Zdisp(	movzbl,	0,(%eax,%ebp), %eax)
',`
	movzbl	binvert_limb_table(%eax), %eax	C inv 8 bits
')
	pushl	%edi		FRAME_pushl()

	C Two Newton iterations inv = 2*inv - inv*inv*d, each doubling the
	C number of correct low bits: 8 -> 16 -> 32.
	leal	(%eax,%eax), %ebp	C 2*inv

	imull	%eax, %eax		C inv*inv

	movl	PARAM_DST, %edi

	imull	%esi, %eax		C inv*inv*d

	subl	%eax, %ebp		C inv = 2*inv - inv*inv*d, good to 16 bits

	leal	(%ebp,%ebp), %eax	C 2*inv

	imull	%ebp, %ebp		C inv*inv

	movl	%esi, PARAM_DIVISOR	C d without twos
	leal	(%ebx,%ecx,4), %ebx	C src end

	imull	%esi, %ebp		C inv*inv*d

	leal	(%edi,%ecx,4), %edi	C dst end
	negl	%ecx			C -size

	subl	%ebp, %eax		C inv = 2*inv - inv*inv*d, good to 32 bits
	subl	$1, %edx		C shift amount, and clear carry

	ASSERT(e,`	C expect d*inv == 1 mod 2^GMP_LIMB_BITS
	pushl	%eax	FRAME_pushl()
	imull	PARAM_DIVISOR, %eax
	cmpl	$1, %eax
	popl	%eax	FRAME_popl()')

	movl	%eax, VAR_INVERSE
	jnz	L(even)			C ZF from the subl: shift!=0 means divisor was even

	movl	(%ebx,%ecx,4), %esi	C src low limb
	jmp	L(odd_entry)


	ALIGN(16)
	nop	C code alignment
L(odd_top):
	C eax	scratch
	C ebx	src end
	C ecx	counter, limbs, negative
	C edx	inverse
	C esi	next limb, adjusted for carry
	C edi	dst end
	C ebp	carry bit, 0 or -1

	imull	%edx, %esi		C quotient limb = limb * inverse mod 2^32

	movl	PARAM_DIVISOR, %eax
	movl	%esi, -4(%edi,%ecx,4)

	mull	%esi			C carry limb in edx

	subl	%ebp, %edx		C apply carry bit
	movl	(%ebx,%ecx,4), %esi

L(odd_entry):
	subl	%edx, %esi		C apply carry limb
	movl	VAR_INVERSE, %edx

	sbbl	%ebp, %ebp		C 0 or -1

	incl	%ecx
	jnz	L(odd_top)


	C last limb: multiply by the inverse only, no carry propagates out
	imull	%edx, %esi

	movl	%esi, -4(%edi,%ecx,4)

	popl	%edi
	popl	%esi

	popl	%ebp
	popl	%ebx

	ret


L(even):
	C eax
	C ebx	src end
	C ecx	-size
	C edx	twos
	C esi
	C edi	dst end
	C ebp

	C Even divisor d = d'*2^twos: divide by odd d' as in the odd loop,
	C but pre-shift each source limb right by "twos" bits, pulling in the
	C low bits of the next limb via a 64-bit MMX psrlq.

	xorl	%ebp, %ebp		C carry bit starts at 0
Zdisp(	movq,	0,(%ebx,%ecx,4), %mm0)	C src[0,1]

	movd	%edx, %mm7		C twos, the per-limb shift count
	movl	VAR_INVERSE, %edx

	addl	$2, %ecx
	psrlq	%mm7, %mm0		C shift src[0,1] down by twos

	movd	%mm0, %esi		C shifted low limb
	jz	L(even_two)		C if only two limbs


C Out-of-order execution is good enough to hide the load/rshift/movd
C latency.  Having imul at the top of the loop gives 11.5 c/l instead of 12,
C on K6-2.  In fact there's only 11 of decode, but nothing running at 11 has
C been found.  Maybe the fact every second movq is unaligned costs the extra
C 0.5.

L(even_top):
	C eax	scratch
	C ebx	src end
	C ecx	counter, limbs, negative
	C edx	inverse
	C esi	next limb, adjusted for carry
	C edi	dst end
	C ebp	carry bit, 0 or -1
	C
	C mm0	scratch, source limbs
	C mm7	twos

	imull	%edx, %esi		C quotient limb = limb * inverse mod 2^32

	movl	%esi, -8(%edi,%ecx,4)
	movl	PARAM_DIVISOR, %eax

	mull	%esi			C carry limb in edx

	movq	-4(%ebx,%ecx,4), %mm0	C next src limb pair (unaligned)
	psrlq	%mm7, %mm0

	movd	%mm0, %esi		C next shifted limb
	subl	%ebp, %edx		C apply carry bit

	subl	%edx, %esi		C apply carry limb
	movl	VAR_INVERSE, %edx

	sbbl	%ebp, %ebp		C 0 or -1

	incl	%ecx
	jnz	L(even_top)


L(even_two):
	C final two limbs: the high limb has no limb above it, so a movd
	C (32-bit) load shifted by twos supplies the last shifted value
	movd	-4(%ebx), %mm0		C src high limb
	psrlq	%mm7, %mm0

	imull	%edx, %esi

	movl	%esi, -8(%edi)
	movl	PARAM_DIVISOR, %eax

	mull	%esi			C carry limb in edx

	movd	%mm0, %esi
	subl	%ebp, %edx		C apply carry bit

	movl	VAR_INVERSE, %eax
	subl	%edx, %esi		C apply carry limb

	imull	%eax, %esi		C last quotient limb, no carry out

	movl	%esi, -4(%edi)

	popl	%edi
	popl	%esi

	popl	%ebp
	popl	%ebx

	emms_or_femms			C clear MMX state before returning to FPU code

	ret

EPILOGUE()
ASM_END()