dnl  Alpha ev6 mpn_add_n -- Add two limb vectors of the same length > 0 and
dnl  store sum in a third limb vector.

dnl  Copyright 2000, 2003, 2005 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C      cycles/limb
C EV4:     ?
C EV5:     5.4
C EV6:     2.125

C  INPUT PARAMETERS
C  rp	r16
C  up	r17
C  vp	r18
C  n	r19
C  cy	r20   (for mpn_add_nc)
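C
C  For reference only (not part of the build): a minimal C sketch of the
C  semantics this routine implements, assuming GMP's mp_limb_t/mp_size_t
C  types and 64-bit limbs as on Alpha.  The function name is illustrative.
C
C    mp_limb_t
C    ref_add_nc (mp_limb_t *rp, const mp_limb_t *up, const mp_limb_t *vp,
C                mp_size_t n, mp_limb_t cy)
C    {
C      mp_size_t i;
C      for (i = 0; i < n; i++)
C        {
C          mp_limb_t s = up[i] + vp[i];   /* add two data limbs, may wrap  */
C          mp_limb_t c = s < vp[i];       /* carry out of the data add     */
C          rp[i] = s + cy;                /* fold in the incoming carry    */
C          cy = c | (rp[i] < s);          /* combine carries (never both)  */
C        }
C      return cy;                         /* carry out of the top limb     */
C    }
C
C  mpn_add_n performs the same computation with cy starting at 0.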

C TODO
C   Finish cleaning up cy registers r22, r23 (make them use cy0/cy1).
C   Use multi-pronged feed-in.
C   Perform additional micro-tuning.

C  This code was written in cooperation with ev6 pipeline expert Steve Root.

C  Pair loads and stores where possible.
C  Store pairs octaword-aligned where possible (not needed here).
C  Stores are delayed every third cycle.
C  Loads and stores are delayed by fills.
C  U stays still; put code there where possible (note the alternation of U1 and U0).
C  L moves because of loads and stores.
C  Note the dampers in L to limit damage.

C  This odd-looking optimization assumes the data limbs contain random bits,
C  so that an exact zero limb result is unlikely; we therefore penalize that
C  unlikely case to help the common case.
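C
C  Concretely (a hedged C sketch; variable names are illustrative, not the
C  actual register allocation), each limb step amounts to
C
C    mp_limb_t s = u + v;        /* data add, may wrap mod 2^64           */
C    mp_limb_t c = s < v;        /* carry out of the data add             */
C    mp_limb_t r = s + cy;       /* fold in the incoming carry            */
C    if (r == 0)                 /* rare for random data: s was 0, or     */
C      c |= cy;                  /* s was ~0 and cy was 1 (extra carry)   */
C
C  Adding a 0/1 carry can only wrap when the result becomes exactly zero,
C  so the second cmpult is replaced by a rarely taken beq to the $fix*
C  labels below, whose bis instructions OR the two carry flags together.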

define(`u0', `r0')  define(`u1', `r3')
define(`v0', `r1')  define(`v1', `r4')

define(`cy0', `r20')  define(`cy1', `r21')

MULFUNC_PROLOGUE(mpn_add_n mpn_add_nc)

ASM_START()
PROLOGUE(mpn_add_nc)
	br	r31,	$entry
EPILOGUE()
PROLOGUE(mpn_add_n)
	bis	r31,	r31,	cy0	C clear carry in
$entry:	cmpult	r19,	5,	r22	C L1 move counter
	ldq	u1,	0(r17)		C L0 get next ones
	ldq	v1,	0(r18)		C L1
	bne	r22,	$Lsmall

	ldq	u0,	8(r17)		C L0 get next ones
	ldq	v0,	8(r18)		C L1
	addq	u1,	v1,	r5	C U0 add two data

	cmpult	r5,	v1,	r23	C U0 did it carry
	ldq	u1,	16(r17)		C L0 get next ones
	ldq	v1,	16(r18)		C L1

	addq	u0,	v0,	r8	C U1 add two data
	addq	r5,	cy0,	r5	C U0 carry in

	cmpult	r8,	v0,	r22	C U1 did it carry
	beq	r5,	$fix5f		C U0 fix exact zero
$ret5f:	ldq	u0,	24(r17)		C L0 get next ones
	ldq	v0,	24(r18)		C L1

	addq	r8,	r23,	r8	C U1 carry from last
	addq	u1,	v1,	r7	C U0 add two data

	beq	r8,	$fix6f		C U1 fix exact zero
$ret6f:	cmpult	r7,	v1,	r23	C U0 did it carry
	ldq	u1,	32(r17)		C L0 get next ones
	ldq	v1,	32(r18)		C L1

	lda	r17,	40(r17)		C L0 move pointer
	lda	r18,	40(r18)		C L1 move pointer

	lda	r16,	-8(r16)
	lda	r19,	-13(r19)	C L1 move counter
	blt	r19,	$Lend		C U1 loop control


C Main loop.  8-way unrolled.
	ALIGN(16)
$Loop:	addq	u0,	v0,	r2	C U1 add two data
	addq	r7,	r22,	r7	C U0 add in carry
	stq	r5,	8(r16)		C L0 put an answer
	stq	r8,	16(r16)		C L1 pair

	cmpult	r2,	v0,	cy1	C U1 did it carry
	beq	r7,	$fix7		C U0 fix exact 0
$ret7:	ldq	u0,	0(r17)		C L0 get next ones
	ldq	v0,	0(r18)		C L1

	bis	r31,	r31,	r31	C L  damp out
	addq	r2,	r23,	r2	C U1 carry from last
	bis	r31,	r31,	r31	C L  moves in L !
	addq	u1,	v1,	r5	C U0 add two data

	beq	r2,	$fix0		C U1 fix exact zero
$ret0:	cmpult	r5,	v1,	cy0	C U0 did it carry
	ldq	u1,	8(r17)		C L0 get next ones
	ldq	v1,	8(r18)		C L1

	addq	u0,	v0,	r8	C U1 add two data
	addq	r5,	cy1,	r5	C U0 carry from last
	stq	r7,	24(r16)		C L0 store pair
	stq	r2,	32(r16)		C L1

	cmpult	r8,	v0,	r22	C U1 did it carry
	beq	r5,	$fix1		C U0 fix exact zero
$ret1:	ldq	u0,	16(r17)		C L0 get next ones
	ldq	v0,	16(r18)		C L1

	lda	r16,	64(r16)		C L0 move pointer
	addq	r8,	cy0,	r8	C U1 carry from last
	lda	r19,	-8(r19)		C L1 move counter
	addq	u1,	v1,	r7	C U0 add two data

	beq	r8,	$fix2		C U1 fix exact zero
$ret2:	cmpult	r7,	v1,	r23	C U0 did it carry
	ldq	u1,	24(r17)		C L0 get next ones
	ldq	v1,	24(r18)		C L1

	addq	u0,	v0,	r2	C U1 add two data
	addq	r7,	r22,	r7	C U0 add in carry
	stq	r5,	-24(r16)	C L0 put an answer
	stq	r8,	-16(r16)	C L1 pair

	cmpult	r2,	v0,	cy1	C U1 did it carry
	beq	r7,	$fix3		C U0 fix exact 0
$ret3:	ldq	u0,	32(r17)		C L0 get next ones
	ldq	v0,	32(r18)		C L1

	bis	r31,	r31,	r31	C L  damp out
	addq	r2,	r23,	r2	C U1 carry from last
	bis	r31,	r31,	r31	C L  moves in L !
	addq	u1,	v1,	r5	C U0 add two data

	beq	r2,	$fix4		C U1 fix exact zero
$ret4:	cmpult	r5,	v1,	cy0	C U0 did it carry
	ldq	u1,	40(r17)		C L0 get next ones
	ldq	v1,	40(r18)		C L1

	addq	u0,	v0,	r8	C U1 add two data
	addq	r5,	cy1,	r5	C U0 carry from last
	stq	r7,	-8(r16)		C L0 store pair
	stq	r2,	0(r16)		C L1

	cmpult	r8,	v0,	r22	C U1 did it carry
	beq	r5,	$fix5		C U0 fix exact zero
$ret5:	ldq	u0,	48(r17)		C L0 get next ones
	ldq	v0,	48(r18)		C L1

	ldl	r31, 256(r17)		C L0 prefetch
	addq	r8,	cy0,	r8	C U1 carry from last
	ldl	r31, 256(r18)		C L1 prefetch
	addq	u1,	v1,	r7	C U0 add two data

	beq	r8,	$fix6		C U1 fix exact zero
$ret6:	cmpult	r7,	v1,	r23	C U0 did it carry
	ldq	u1,	56(r17)		C L0 get next ones
	ldq	v1,	56(r18)		C L1

	lda	r17,	64(r17)		C L0 move pointer
	bis	r31,	r31,	r31	C U
	lda	r18,	64(r18)		C L1 move pointer
	bge	r19,	$Loop		C U1 loop control
C ==== main loop end

$Lend:	addq	u0,	v0,	r2	C U1 add two data
	addq	r7,	r22,	r7	C U0 add in carry
	stq	r5,	8(r16)		C L0 put an answer
	stq	r8,	16(r16)		C L1 pair
	cmpult	r2,	v0,	cy1	C U1 did it carry
	beq	r7,	$fix7c		C U0 fix exact 0
$ret7c:	addq	r2,	r23,	r2	C U1 carry from last
	addq	u1,	v1,	r5	C U0 add two data
	beq	r2,	$fix0c		C U1 fix exact zero
$ret0c:	cmpult	r5,	v1,	cy0	C U0 did it carry
	addq	r5,	cy1,	r5	C U0 carry from last
	stq	r7,	24(r16)		C L0 store pair
	stq	r2,	32(r16)		C L1
	beq	r5,	$fix1c		C U0 fix exact zero
$ret1c:	stq	r5,	40(r16)		C L0 put an answer
	lda	r16,	48(r16)		C L0 move pointer

	lda	r19,	8(r19)
	beq	r19,	$Lret

	ldq	u1,	0(r17)
	ldq	v1,	0(r18)
$Lsmall:
	lda	r19,	-1(r19)
	beq	r19,	$Lend0

	ALIGN(8)
$Loop0:	addq	u1,	v1,	r2	C main add
	cmpult	r2,	v1,	r8	C compute cy from last add
	ldq	u1,	8(r17)
	ldq	v1,	8(r18)
	addq	r2,	cy0,	r5	C carry add
	lda	r17,	8(r17)
	lda	r18,	8(r18)
	stq	r5,	0(r16)
	cmpult	r5,	r2,	cy0	C compute cy from last add
	lda	r19,	-1(r19)		C decr loop cnt
	bis	r8,	cy0,	cy0	C combine cy from the two adds
	lda	r16,	8(r16)
	bne	r19,	$Loop0
$Lend0:	addq	u1,	v1,	r2	C main add
	addq	r2,	cy0,	r5	C carry add
	cmpult	r2,	v1,	r8	C compute cy from last add
	cmpult	r5,	r2,	cy0	C compute cy from last add
	stq	r5,	0(r16)
	bis	r8,	cy0,	r0	C combine cy from the two adds
	ret	r31,(r26),1

	ALIGN(8)
$Lret:	lda	r0,	0(cy0)		C copy carry into return register
	ret	r31,(r26),1

$fix5f:	bis	r23,	cy0,	r23	C bring forward carry
	br	r31,	$ret5f
$fix6f:	bis	r22,	r23,	r22	C bring forward carry
	br	r31,	$ret6f
$fix0:	bis	cy1,	r23,	cy1	C bring forward carry
	br	r31,	$ret0
$fix1:	bis	cy0,	cy1,	cy0	C bring forward carry
	br	r31,	$ret1
$fix2:	bis	r22,	cy0,	r22	C bring forward carry
	br	r31,	$ret2
$fix3:	bis	r23,	r22,	r23	C bring forward carry
	br	r31,	$ret3
$fix4:	bis	cy1,	r23,	cy1	C bring forward carry
	br	r31,	$ret4
$fix5:	bis	cy1,	cy0,	cy0	C bring forward carry
	br	r31,	$ret5
$fix6:	bis	r22,	cy0,	r22	C bring forward carry
	br	r31,	$ret6
$fix7:	bis	r23,	r22,	r23	C bring forward carry
	br	r31,	$ret7
$fix0c:	bis	cy1,	r23,	cy1	C bring forward carry
	br	r31,	$ret0c
$fix1c:	bis	cy0,	cy1,	cy0	C bring forward carry
	br	r31,	$ret1c
$fix7c:	bis	r23,	r22,	r23	C bring forward carry
	br	r31,	$ret7c

EPILOGUE()
ASM_END()