/* NG2memcpy.S: Niagara-2 optimized memcpy.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_PNF 0x82
#define ASI_BLK_P 0xf0
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
#define FPRS_FEF  0x04
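/* The MEMCPY_DEBUG flavor of the VIS entry stub below also clobbers
 * the scratch globals and the condition codes, presumably to flush out
 * code that wrongly assumes they survive.
 */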
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		     clr %g1; clr %g2; clr %g3; clr %g5; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE	%g5
#endif

#ifndef STORE_ASI
#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
#else
#define STORE_ASI	0x80		/* ASI_P */
#endif
#endif

#ifndef EX_LD
#define EX_LD(x,y)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y)	x
#endif

#ifndef EX_ST
#define EX_ST(x,y)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y)	x
#endif
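/* The EX_* hooks default to plain accesses.  A user-copy wrapper is
 * expected to redefine them before including this file so that every
 * access gains an exception-table entry pointing at one of the
 * NG2_retl_* handlers defined below; roughly (a sketch, not the
 * verbatim wrapper source):
 *
 *	#define EX_LD(x,y)		\
 *	98:	x;			\
 *		.section __ex_table,"a";\
 *		.align 4;		\
 *		.word 98b, y;		\
 *		.text;			\
 *		.align 4;
 */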

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#ifndef MEMCPY_DEBUG
#define STORE(type,src,addr)	type src, [addr]
#else
#define STORE(type,src,addr)	type##a src, [addr] 0x80
#endif
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef STORE_INIT
#define STORE_INIT(src,addr)	stxa src, [addr] STORE_ASI
#endif
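/* STORE_INIT goes out with the block-init ASI: on Niagara parts a
 * store to the first word of a line with this ASI can allocate the
 * destination cache line without fetching its old contents from
 * memory, which is why the main loops below touch each destination
 * line with STORE_INIT before filling it with STORE_BLK.
 */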

#ifndef FUNC_NAME
#define FUNC_NAME	NG2memcpy
#endif
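/* Consumers redefine FUNC_NAME and the hooks above before including
 * this file.  A copy_from_user flavor would look roughly like this
 * (a sketch, not the verbatim wrapper source):
 *
 *	#define FUNC_NAME	NG2copy_from_user
 *	#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
 *	#define EX_RETVAL(x)	0
 *	#include "NG2memcpy.S"
 */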

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif

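/* Merge nine consecutive source doubles (one 64-byte block plus the
 * first double of the next) into the eight aligned doubles %f0-%f14,
 * using the byte offset programmed into %gsr by alignaddr below.
 */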
#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
	faligndata	%x0, %x1, %f0; \
	faligndata	%x1, %x2, %f2; \
	faligndata	%x2, %x3, %f4; \
	faligndata	%x3, %x4, %f6; \
	faligndata	%x4, %x5, %f8; \
	faligndata	%x5, %x6, %f10; \
	faligndata	%x6, %x7, %f12; \
	faligndata	%x7, %x8, %f14;

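/* FREG_MOVE_N carries the N not-yet-consumed doubles of the freshly
 * loaded block down into %f0 and friends, so that the next iteration's
 * FREG_FROB sees them as its leading inputs.
 */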
#define FREG_MOVE_1(x0) \
	fsrc2		%x0, %f0;
#define FREG_MOVE_2(x0, x1) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2;
#define FREG_MOVE_3(x0, x1, x2) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4;
#define FREG_MOVE_4(x0, x1, x2, x3) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4; \
	fsrc2		%x3, %f6;
#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4; \
	fsrc2		%x3, %f6; \
	fsrc2		%x4, %f8;
#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4; \
	fsrc2		%x3, %f6; \
	fsrc2		%x4, %f8; \
	fsrc2		%x5, %f10;
#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4; \
	fsrc2		%x3, %f6; \
	fsrc2		%x4, %f8; \
	fsrc2		%x5, %f10; \
	fsrc2		%x6, %f12;
#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
	fsrc2		%x0, %f0; \
	fsrc2		%x1, %f2; \
	fsrc2		%x2, %f4; \
	fsrc2		%x3, %f6; \
	fsrc2		%x4, %f8; \
	fsrc2		%x5, %f10; \
	fsrc2		%x6, %f12; \
	fsrc2		%x7, %f14;
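/* FREG_LOAD_N preloads the N doubles of the source block that
 * straddles the starting source address.
 */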
#define FREG_LOAD_1(base, x0) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1)
#define FREG_LOAD_2(base, x0, x1) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1);
#define FREG_LOAD_3(base, x0, x1, x2) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
	EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \
	EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1);

	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
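/* Exception return stubs.  Each one computes into %o0 the number of
 * bytes that had not yet been copied when the fault hit, as encoded in
 * its name (e.g. "o2_plus_o4_plus_8" returns %o2 + %o4 + 8), then
 * restores %asi (and, for the _fp variants, the FP state).  The
 * user-copy wrappers attach these to the EX_* accesses; for faults in
 * EX_LD_FP/EX_ST_FP regions the wrapper is expected to paste an _fp
 * suffix onto the handler name, which is why the FP loops below
 * reference e.g. NG2_retl_o2_plus_g1 while only the _fp flavor is
 * defined here.
 */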
__restore_fp:
	VISExitHalf
__restore_asi:
	retl
	 wr	%g0, ASI_AIUS, %asi
ENTRY(NG2_retl_o2)
	ba,pt	%xcc, __restore_asi
	 mov	%o2, %o0
ENDPROC(NG2_retl_o2)
ENTRY(NG2_retl_o2_plus_1)
	ba,pt	%xcc, __restore_asi
	 add	%o2, 1, %o0
ENDPROC(NG2_retl_o2_plus_1)
ENTRY(NG2_retl_o2_plus_4)
	ba,pt	%xcc, __restore_asi
	 add	%o2, 4, %o0
ENDPROC(NG2_retl_o2_plus_4)
ENTRY(NG2_retl_o2_plus_8)
	ba,pt	%xcc, __restore_asi
	 add	%o2, 8, %o0
ENDPROC(NG2_retl_o2_plus_8)
ENTRY(NG2_retl_o2_plus_o4_plus_1)
	add	%o4, 1, %o4
	ba,pt	%xcc, __restore_asi
	 add	%o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_1)
ENTRY(NG2_retl_o2_plus_o4_plus_8)
	add	%o4, 8, %o4
	ba,pt	%xcc, __restore_asi
	 add	%o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_8)
ENTRY(NG2_retl_o2_plus_o4_plus_16)
	add	%o4, 16, %o4
	ba,pt	%xcc, __restore_asi
	 add	%o2, %o4, %o0
ENDPROC(NG2_retl_o2_plus_o4_plus_16)
ENTRY(NG2_retl_o2_plus_g1_fp)
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_fp)
ENTRY(NG2_retl_o2_plus_g1_plus_64_fp)
	add	%g1, 64, %g1
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp)
ENTRY(NG2_retl_o2_plus_g1_plus_1)
	add	%g1, 1, %g1
	ba,pt	%xcc, __restore_asi
	 add	%o2, %g1, %o0
ENDPROC(NG2_retl_o2_plus_g1_plus_1)
ENTRY(NG2_retl_o2_and_7_plus_o4)
	and	%o2, 7, %o2
	ba,pt	%xcc, __restore_asi
	 add	%o2, %o4, %o0
ENDPROC(NG2_retl_o2_and_7_plus_o4)
ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8)
	and	%o2, 7, %o2
	add	%o4, 8, %o4
	ba,pt	%xcc, __restore_asi
	 add	%o2, %o4, %o0
ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8)
#endif

	.align		64

	.globl	FUNC_NAME
	.type	FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
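	/* Trap (software trap 5) if any of bits 63:31 of the length
	 * are set; a sane memcpy length must fit in 31 bits.
	 */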
	srlx		%o2, 31, %g2
	cmp		%g2, 0
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o3
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, GLOBAL_SPARE
	cmp		%o2, 16
	blu,a,pn	%XCC, 80f
	 or		GLOBAL_SPARE, %o2, GLOBAL_SPARE

	/* 2 blocks (128 bytes) is the minimum we can do the block
	 * copy with.  We need to ensure that we'll iterate at least
	 * once in the block copy loop.  At worst we'll need to align
	 * the destination to a 64-byte boundary, which can consume up
	 * to (64 - 1) bytes from the length before we perform the
	 * block copy loop.
	 *
	 * However, the cut-off point, performance wise, is around
	 * 4 64-byte blocks.
	 */
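	/* For example: with len == 128 and dst one byte past a 64-byte
	 * boundary, alignment consumes 63 bytes, leaving 65; the block
	 * loop count below becomes 65 & ~63 == 64, i.e. one iteration.
	 */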
	cmp		%o2, (4 * 64)
	blu,pt		%XCC, 75f
	 andcc		GLOBAL_SPARE, 0x7, %g0

	/* %o0:	dst
	 * %o1:	src
	 * %o2:	len  (known to be >= 128)
	 *
	 * The block copy loops can use %o4, %g2, %g3 as
	 * temporaries while copying the data.  %o5 must
	 * be preserved between VISEntryHalf and VISExitHalf
	 */

	LOAD(prefetch, %o1 + 0x000, #one_read)
	LOAD(prefetch, %o1 + 0x040, #one_read)
	LOAD(prefetch, %o1 + 0x080, #one_read)

	/* Align destination on 64-byte boundary.  */
	andcc		%o0, (64 - 1), %o4
	be,pt		%XCC, 2f
	 sub		%o4, 64, %o4
	sub		%g0, %o4, %o4	! bytes to align dst
	sub		%o2, %o4, %o2
1:	subcc		%o4, 1, %o4
	EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1)
	EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1)
	add		%o1, 1, %o1
	bne,pt		%XCC, 1b
	add		%o0, 1, %o0

2:
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
	 * o5 from here until we hit VISExitHalf.
	 */
	VISEntryHalf

	membar		#Sync
	alignaddr	%o1, %g0, %g0

	add		%o1, (64 - 1), %o4
	andn		%o4, (64 - 1), %o4
	andn		%o2, (64 - 1), %g1
	sub		%o2, %g1, %o2

	and		%o1, (64 - 1), %g2
	add		%o1, %g1, %o1
	sub		%o0, %o4, %g3
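	/* At this point:
	 *	%o4	first 64-byte aligned address at or above src
	 *	%g1	number of whole 64-byte blocks to copy, in bytes
	 *	%g2	src misalignment (low 6 bits of src)
	 *	%g3	dst - aligned src, so %o4 + %g3 addresses dst
	 *	%o1	src advanced past the block-copy region
	 *	%o2	tail bytes left over after the block copy
	 *
	 * Dispatch on the misalignment: 0 -> 190 (fully aligned),
	 * 1-7 -> 110, 8-15 -> 120, 16-23 -> 130, 24-31 -> 140,
	 * 32-39 -> 150, 40-47 -> 160, 48-55 -> 170, 56-63 -> 180.
	 */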
	brz,pt		%g2, 190f
	 cmp		%g2, 32
	blu,a		5f
	 cmp		%g2, 16
	cmp		%g2, 48
	blu,a		4f
	 cmp		%g2, 40
	cmp		%g2, 56
	blu		170f
	 nop
	ba,a,pt		%xcc, 180f

4:	/* 32 <= low bits < 48 */
	blu		150f
	 nop
	ba,a,pt		%xcc, 160f
5:	/* 0 < low bits < 32 */
	blu,a		6f
	 cmp		%g2, 8
	cmp		%g2, 24
	blu		130f
	 nop
	ba,a,pt		%xcc, 140f
6:	/* 0 < low bits < 16 */
	bgeu		120f
	 nop
	/* fall through for 0 < low bits < 8 */
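	/* Each variant below runs the same software pipeline: preload
	 * the doubles of the straddling source block, then per
	 * iteration: touch the destination line with STORE_INIT, load
	 * the next 64-byte source block, merge old and new doubles
	 * with FREG_FROB, store the merged block with STORE_BLK, and
	 * carry the unconsumed new doubles down with FREG_MOVE for the
	 * next round.
	 */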
110:	sub		%o4, 64, %g2
	EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

120:	sub		%o4, 56, %g2
	FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

130:	sub		%o4, 48, %g2
	FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

140:	sub		%o4, 40, %g2
	FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_5(f22, f24, f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

150:	sub		%o4, 32, %g2
	FREG_LOAD_4(%g2, f0, f2, f4, f6)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_4(f24, f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

160:	sub		%o4, 24, %g2
	FREG_LOAD_3(%g2, f0, f2, f4)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_3(f26, f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

170:	sub		%o4, 16, %g2
	FREG_LOAD_2(%g2, f0, f2)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_2(f28, f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

180:	sub		%o4, 8, %g2
	FREG_LOAD_1(%g2, f0)
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1)
	FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1)
	FREG_MOVE_1(f30)
	subcc		%g1, 64, %g1
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)
	ba,pt		%xcc, 195f
	 nop

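	/* Source and destination are both 64-byte aligned: pure
	 * block-load/block-store loop, no faligndata merging needed.
	 */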
190:
1:	EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1)
	subcc		%g1, 64, %g1
	EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64)
	EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64)
	add		%o4, 64, %o4
	bne,pt		%xcc, 1b
	 LOAD(prefetch, %o4 + 64, #one_read)

195:
	add		%o4, %g3, %o0
	membar		#Sync

	VISExitHalf

	/* %o2 holds the count of any final bytes still to be copied
	 * over.  If anything is left, we copy it one byte at a time.
	 */
	brz,pt		%o2, 85f
	 sub		%o0, %o1, GLOBAL_SPARE
	ba,a,pt		%XCC, 90f

	.align		64
75: /* 16 <= len < 256 */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, GLOBAL_SPARE

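	/* Note: GNU as numeric labels may repeat; the "75f" above refers
	 * to the second "75:" further down (the misaligned path), and the
	 * condition codes it tests were set by the "andcc ..., 0x7" in
	 * the delay slot of the branch that brought us here.
	 *
	 * 8-byte aligned case.  GLOBAL_SPARE holds dst - src, so a store
	 * to %o1 + GLOBAL_SPARE hits the destination while only %o1 is
	 * advanced.  72 moves 16 bytes per iteration; 73 and the code
	 * after it mop up the 8-, 4- and sub-4-byte tails.
	 */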
72:
	andn		%o2, 0xf, %o4
	and		%o2, 0xf, %o2
1:	subcc		%o4, 0x10, %o4
	EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16)
	add		%o1, 0x08, %o1
	EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16)
	sub		%o1, 0x08, %o1
	EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16)
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x8, %o2
	EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8)
	EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8)
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4)
	EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop

75:
	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	subcc		%g1, 1, %g1
	EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1)
	EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1)
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, GLOBAL_SPARE, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

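	/* Misaligned case: %g1 is the source misalignment in bits.
	 * Read aligned doublewords and splice each adjacent pair
	 * together with sllx/srlx so that every iteration can issue
	 * one aligned 8-byte store.
	 */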
8:	mov		64, GLOBAL_SPARE
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2)
	sub		GLOBAL_SPARE, %g1, GLOBAL_SPARE
	andn		%o2, 0x7, %o4
	sllx		%g2, %g1, %g2
1:	add		%o1, 0x8, %o1
	EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4)
	subcc		%o4, 0x8, %o4
	srlx		%g3, GLOBAL_SPARE, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8)
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, 85f
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, GLOBAL_SPARE

	.align		64
80: /* 0 < len < 16 */
	andcc		GLOBAL_SPARE, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, GLOBAL_SPARE

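	/* GLOBAL_SPARE was dst | src | len above, so this word loop only
	 * runs when all three are multiples of 4; it now holds dst - src
	 * for the store addressing.
	 */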
1:
	subcc		%o2, 4, %o2
	EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4)
	EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

85:	retl
	 mov		EX_RETVAL(%o3), %o0

	.align		32
90:
	subcc		%o2, 1, %o2
	EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1)
	EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o3), %o0

	.size		FUNC_NAME, .-FUNC_NAME