dnl  AMD64 mpn_lshiftc optimised for CPUs with fast SSE including fast movdqu.

dnl  Contributed to the GNU project by Torbjorn Granlund.

dnl  Copyright 2010-2012 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')


C	     cycles/limb     cycles/limb     cycles/limb    good
C              aligned	      unaligned	      best seen	   for cpu?
C AMD K8,K9	 3		 3		 ?	  no, use shl/shr
C AMD K10	 1.8-2.0	 1.8-2.0	 ?	  yes
C AMD bd1	 1.9		 1.9		 ?	  yes
C AMD bobcat	 3.67		 3.67		 ?	  yes, bad for n < 20
C Intel P4	 4.75		 4.75		 ?	  no, slow movdqu
C Intel core2	 2.27		 2.27		 ?	  no, use shld/shrd
C Intel NHM	 2.15		 2.15		 ?	  no, use shld/shrd
C Intel SBR	 1.45		 1.45		 ?	  yes, bad for n = 4-6
C Intel atom	12.9		12.9		 ?	  no
C VIA nano	 6.18		 6.44		 ?	  no, slow movdqu

C We try to do as many aligned 16-byte operations as possible.  The top-most
C and bottom-most writes might need 8-byte operations.
C
C This variant relies on fast movdqu loads, and uses movdqu even for aligned operands,
C in order to avoid the need for two separate loops.
C
C TODO
C  * Could 2-limb wind-down code be simplified?
C  * Improve basecase code, using shld/shrd for SBR, discrete integer shifts
C    for other affected CPUs.
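C
C  For reference, mpn_lshiftc shifts {ap,n} left by cnt bits, stores the
C  one's complement of the shifted value at {rp,n}, and returns the bits
C  shifted out of the top limb (not complemented), as in the generic code
C  mpn/generic/lshiftc.c.  A minimal C sketch of those semantics, assuming
C  64-bit limbs and 0 < cnt < 64 (lshiftc_ref is an illustrative name, not
C  a GMP entry point):
C
C	#include <gmp.h>	/* mp_limb_t, mp_ptr, mp_srcptr, mp_size_t */
C
C	mp_limb_t
C	lshiftc_ref (mp_ptr rp, mp_srcptr ap, mp_size_t n, unsigned cnt)
C	{
C	  unsigned tnc = 64 - cnt;		/* complementary shift count */
C	  mp_limb_t ret = ap[n - 1] >> tnc;	/* out-shifted high bits */
C	  for (mp_size_t i = n - 1; i > 0; i--)
C	    rp[i] = ~((ap[i] << cnt) | (ap[i - 1] >> tnc));
C	  rp[0] = ~(ap[0] << cnt);
C	  return ret;
C	}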

C INPUT PARAMETERS
define(`rp',  `%rdi')
define(`ap',  `%rsi')
define(`n',   `%rdx')
define(`cnt', `%rcx')

ASM_START()
	TEXT
	ALIGN(64)
PROLOGUE(mpn_lshiftc)
	FUNC_ENTRY(4)
	movd	R32(%rcx), %xmm4
	mov	$64, R32(%rax)
	sub	R32(%rcx), R32(%rax)
	movd	R32(%rax), %xmm5
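C xmm4 = cnt, xmm5 = 64 - cnt, the two shift counts used throughout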

	neg	R32(%rcx)
	mov	-8(ap,n,8), %rax
	shr	R8(%rcx), %rax
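C %rax = ap[n-1] >> (64-cnt), the return value (out-shifted bits, not complemented)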

	pcmpeqb	%xmm3, %xmm3		C set to 111...111

	cmp	$3, n
	jle	L(bc)

	lea	(rp,n,8), R32(%rcx)
	test	$8, R8(%rcx)
	jz	L(rp_aligned)

C Do one initial limb in order to make the destination 16-byte aligned for the
C movdqa stores in the main loop
	movq	-8(ap,n,8), %xmm0
	movq	-16(ap,n,8), %xmm1
	psllq	%xmm4, %xmm0
	psrlq	%xmm5, %xmm1
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movq	%xmm0, -8(rp,n,8)
	dec	n

L(rp_aligned):
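C Enter the 8-limbs-per-iteration loop below at the point matching n mod 8;
C the lowest one or two limbs are handled after L(end).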
	lea	1(n), %r8d

	and	$6, R32(%r8)
	jz	L(ba0)
	cmp	$4, R32(%r8)
	jz	L(ba4)
	jc	L(ba2)
L(ba6):	add	$-4, n
	jmp	L(i56)
L(ba0):	add	$-6, n
	jmp	L(i70)
L(ba4):	add	$-2, n
	jmp	L(i34)
L(ba2):	add	$-8, n
	jle	L(end)

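C Main loop: 8 limbs per iteration, processed top-down as four 2-limb chunks,
C each using two overlapping movdqu loads and one aligned movdqa store.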
	ALIGN(16)
L(top):	movdqu	40(ap,n,8), %xmm1
	movdqu	48(ap,n,8), %xmm0
	psllq	%xmm4, %xmm0
	psrlq	%xmm5, %xmm1
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movdqa	%xmm0, 48(rp,n,8)
L(i70):
	movdqu	24(ap,n,8), %xmm1
	movdqu	32(ap,n,8), %xmm0
	psllq	%xmm4, %xmm0
	psrlq	%xmm5, %xmm1
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movdqa	%xmm0, 32(rp,n,8)
L(i56):
	movdqu	8(ap,n,8), %xmm1
	movdqu	16(ap,n,8), %xmm0
	psllq	%xmm4, %xmm0
	psrlq	%xmm5, %xmm1
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movdqa	%xmm0, 16(rp,n,8)
L(i34):
	movdqu	-8(ap,n,8), %xmm1
	movdqu	(ap,n,8), %xmm0
	psllq	%xmm4, %xmm0
	psrlq	%xmm5, %xmm1
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movdqa	%xmm0, (rp,n,8)
	sub	$8, n
	jg	L(top)

L(end):	test	$1, R8(n)
	jnz	L(end8)

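C Two low limbs remain: rp[1] = ~((ap[1] << cnt) | (ap[0] >> (64-cnt))),
C rp[0] = ~(ap[0] << cnt), written with a single aligned 16-byte store.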
	movdqu	(ap), %xmm1
	pxor	%xmm0, %xmm0
	punpcklqdq  %xmm1, %xmm0
	psllq	%xmm4, %xmm1
	psrlq	%xmm5, %xmm0
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movdqa	%xmm0, (rp)
	FUNC_EXIT()
	ret

C Basecase, for n <= 3
	ALIGN(16)
L(bc):	dec	R32(n)
	jz	L(end8)

	movq	(ap,n,8), %xmm1
	movq	-8(ap,n,8), %xmm0
	psllq	%xmm4, %xmm1
	psrlq	%xmm5, %xmm0
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movq	%xmm0, (rp,n,8)
	sub	$2, R32(n)
	jl	L(end8)
	movq	8(ap), %xmm1
	movq	(ap), %xmm0
	psllq	%xmm4, %xmm1
	psrlq	%xmm5, %xmm0
	por	%xmm1, %xmm0
	pxor	%xmm3, %xmm0
	movq	%xmm0, 8(rp)

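C Lowest limb: rp[0] = ~(ap[0] << cnt); %rax already holds the return value.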
L(end8):movq	(ap), %xmm0
	psllq	%xmm4, %xmm0
	pxor	%xmm3, %xmm0
	movq	%xmm0, (rp)
	FUNC_EXIT()
	ret
EPILOGUE()