/*
 * arch/alpha/lib/ev6-memset.S
 *
 * This is an efficient (and relatively small) implementation of the C library
 * "memset()" function for the 21264 implementation of Alpha.
 *
 * 21264 version  contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * The algorithm for the leading and trailing quadwords remains the same,
 * however the loop has been unrolled to enable better memory throughput,
 * and the code has been replicated for each of the entry points: __memset
 * and __memsetw to permit better scheduling to eliminate the stalling
 * encountered during the mask replication.
 * A future enhancement might be to put in a byte store loop for really
 * small (say < 32 bytes) memset()s.  Whether or not that change would be
 * a win in the kernel would depend upon the contextual usage.
 * WARNING: Maintaining this is going to be more work than the above version,
 * as fixes will need to be made in multiple places.  The performance gain
 * is worth it.
 */
#include <asm/export.h>
	.set noat
	.set noreorder
.text
	.globl memset
	.globl __memset
	.globl ___memset
	.globl __memsetw
	.globl __constant_c_memset

/*
 * void *___memset(void *dest, int c, size_t n)
 * In:   $16 = dest, $17 = fill character (low 8 bits used),
 *       $18 = byte count (signed; <= 0 means nothing to do)
 * Out:  $0  = dest
 * Clobbers: $1-$7 (all caller-saved on Alpha); no stack use.
 * Replicates the byte across a quadword, writes a leading partial quad
 * if misaligned, then whole quads (wh64-hinted 64-byte unrolled loop for
 * runs of more than 128 bytes), then a trailing partial quad.
 */
	.ent ___memset
.align 5
___memset:
	.frame $30,0,$26,0
	.prologue 0

	/*
	 * Serious stalling happens.  The only way to mitigate this is to
	 * undertake a major re-write to interleave the constant materialization
	 * with other parts of the fall-through code.  This is important, even
	 * though it makes maintenance tougher.
	 * Do this later.
	 */
	and $17,255,$1		# E : 00000000000000ch
	insbl $17,1,$2		# U : 000000000000ch00
	bis $16,$16,$0		# E : return value
	ble $18,end_b		# U : zero length requested?

	addq $18,$16,$6		# E : max address to write to
	bis	$1,$2,$17	# E : 000000000000chch
	insbl	$1,2,$3		# U : 0000000000ch0000
	insbl	$1,3,$4		# U : 00000000ch000000

	or	$3,$4,$3	# E : 00000000chch0000
	inswl	$17,4,$5	# U : 0000chch00000000
	xor	$16,$6,$1	# E : will complete write be within one quadword?
	inswl	$17,6,$2	# U : chch000000000000

	or	$17,$3,$17	# E : 00000000chchchch
	or	$2,$5,$2	# E : chchchch00000000
	bic	$1,7,$1		# E : fit within a single quadword?
	and	$16,7,$3	# E : Target addr misalignment

	or	$17,$2,$17	# E : chchchchchchchch
	beq	$1,within_quad_b # U :
	nop			# E :
	beq	$3,aligned_b	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword
	 */
	ldq_u $4,0($16)		# L : Fetch first partial
	bis $16,$16,$5		# E : Save the address
	insql $17,$16,$2	# U : Insert new bytes
	subq $3,8,$3		# E : Invert (for addressing uses)

	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
	mskql $4,$16,$4		# U : clear relevant parts of the quad
	subq $16,$3,$16		# E : $16 is new aligned destination
	bis $2,$4,$1		# E : Final bytes

	nop
	stq_u $1,0($5)		# L : Store result
	nop
	nop

.align 4
aligned_b:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */

	sra $18,3,$3		# U : Number of remaining quads to write
	and $18,7,$18		# E : Number of trailing bytes to write
	bis $16,$16,$5		# E : Save dest address
	beq $3,no_quad_b	# U : tail stuff only

	/*
	 * it's worth the effort to unroll this and use wh64 if possible
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 * $16	Current destination address
	 * $5	A copy of $16
	 * $6	The max quadword address to write to
	 * $18	Number trailer bytes
	 * $3	Number quads to write
	 */

	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop_b	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */

	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign_b	# U :

$alignmod64_b:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64_b # U :

$bigalign_b:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * we know that we'll be taking a minimum of one trip through
 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */

$do_wh64_b:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64_b	# U :

	nop
	nop
	nop
	beq	$3, no_quad_b	# U : Might have finished already

.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64)
	 */
loop_b:
	stq $17,0($5)		# L :
	subq $3,1,$3		# E : Decrement number quads left
	addq $5,8,$5		# E : Inc address
	bne $3,loop_b		# U : more?

no_quad_b:
	/*
	 * Write 0..7 trailing bytes.
	 */
	nop			# E :
	beq $18,end_b		# U : All done?
	ldq $7,0($5)		# L :
	mskqh $7,$6,$2		# U : Mask final quad

	insqh $17,$6,$4		# U : New bits
	bis $2,$4,$1		# E : Put it all together
	stq $1,0($5)		# L : And back to memory
	ret $31,($26),1		# L0 :

within_quad_b:
	ldq_u $1,0($16)		# L :
	insql $17,$16,$2	# U : New bits
	mskql $1,$16,$4		# U : Clear old
	bis $2,$4,$2		# E : New result

	mskql $2,$6,$4		# U :
	mskqh $1,$6,$2		# U :
	bis $2,$4,$1		# E :
	stq_u $1,0($16)		# L :

end_b:
	nop
	nop
	nop
	ret $31,($26),1		# L0 :
	.end ___memset
	EXPORT_SYMBOL(___memset)
	/*
	 * This is the original body of code, prior to replication and
	 * rescheduling.  Leave it here, as there may be calls to this
	 * entry point.
	 */
/*
 * void *__constant_c_memset(void *dest, unsigned long c, size_t n)
 * In:   $16 = dest, $17 = fill pattern, $18 = byte count
 *       (NOTE(review): $17 is stored whole via stq and never replicated
 *       here, so callers presumably pass the byte already replicated
 *       across all 64 bits — confirm against callers.)
 * Out:  $0  = dest
 * Clobbers: $1-$7; no stack use.
 * Original (pre-replication) body, retained for existing callers.
 */
.align 4
	.ent __constant_c_memset
__constant_c_memset:
	.frame $30,0,$26,0
	.prologue 0

	addq $18,$16,$6		# E : max address to write to
	bis $16,$16,$0		# E : return value
	xor $16,$6,$1		# E : will complete write be within one quadword?
	ble $18,end		# U : zero length requested?

	bic $1,7,$1		# E : fit within a single quadword
	beq $1,within_one_quad	# U :
	and $16,7,$3		# E : Target addr misalignment
	beq $3,aligned		# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword
	 */
	ldq_u $4,0($16)		# L : Fetch first partial
	bis $16,$16,$5		# E : Save the address
	insql $17,$16,$2	# U : Insert new bytes
	subq $3,8,$3		# E : Invert (for addressing uses)

	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
	mskql $4,$16,$4		# U : clear relevant parts of the quad
	subq $16,$3,$16		# E : $16 is new aligned destination
	bis $2,$4,$1		# E : Final bytes

	nop
	stq_u $1,0($5)		# L : Store result
	nop
	nop

.align 4
aligned:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */

	sra $18,3,$3		# U : Number of remaining quads to write
	and $18,7,$18		# E : Number of trailing bytes to write
	bis $16,$16,$5		# E : Save dest address
	beq $3,no_quad		# U : tail stuff only

	/*
	 * it's worth the effort to unroll this and use wh64 if possible
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 * $16	Current destination address
	 * $5	A copy of $16
	 * $6	The max quadword address to write to
	 * $18	Number trailer bytes
	 * $3	Number quads to write
	 */

	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */

	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign	# U :

$alignmod64:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64	# U :

$bigalign:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * we know that we'll be taking a minimum of one trip through
 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */

$do_wh64:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64	# U :

	nop
	nop
	nop
	beq	$3, no_quad	# U : Might have finished already

.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64)
	 */
loop:
	stq $17,0($5)		# L :
	subq $3,1,$3		# E : Decrement number quads left
	addq $5,8,$5		# E : Inc address
	bne $3,loop		# U : more?

no_quad:
	/*
	 * Write 0..7 trailing bytes.
	 */
	nop			# E :
	beq $18,end		# U : All done?
	ldq $7,0($5)		# L :
	mskqh $7,$6,$2		# U : Mask final quad

	insqh $17,$6,$4		# U : New bits
	bis $2,$4,$1		# E : Put it all together
	stq $1,0($5)		# L : And back to memory
	ret $31,($26),1		# L0 :

within_one_quad:
	ldq_u $1,0($16)		# L :
	insql $17,$16,$2	# U : New bits
	mskql $1,$16,$4		# U : Clear old
	bis $2,$4,$2		# E : New result

	mskql $2,$6,$4		# U :
	mskqh $1,$6,$2		# U :
	bis $2,$4,$1		# E :
	stq_u $1,0($16)		# L :

end:
	nop
	nop
	nop
	ret $31,($26),1		# L0 :
	.end __constant_c_memset
	EXPORT_SYMBOL(__constant_c_memset)
	/*
	 * This is a replicant of the __constant_c_memset code, rescheduled
	 * to mask stalls.  Note that entry point names also had to change
	 */
/*
 * void *__memsetw(void *dest, unsigned short c, size_t n)
 * In:   $16 = dest, $17 = 16-bit fill pattern (low 16 bits used),
 *       $18 = byte count
 * Out:  $0  = dest
 * Clobbers: $1-$7; no stack use.
 * Same structure as ___memset, but replicates a 16-bit pattern
 * (inswl at byte offsets 0/2/4/6) instead of an 8-bit one.
 */
	.align 5
	.ent __memsetw

__memsetw:
	.frame $30,0,$26,0
	.prologue 0

	inswl $17,0,$5		# U : 000000000000c1c2
	inswl $17,2,$2		# U : 00000000c1c20000
	bis $16,$16,$0		# E : return value
	addq	$18,$16,$6	# E : max address to write to

	ble $18, end_w		# U : zero length requested?
	inswl	$17,4,$3	# U : 0000c1c200000000
	inswl	$17,6,$4	# U : c1c2000000000000
	xor	$16,$6,$1	# E : will complete write be within one quadword?

	or	$2,$5,$2	# E : 00000000c1c2c1c2
	or	$3,$4,$17	# E : c1c2c1c200000000
	bic	$1,7,$1		# E : fit within a single quadword
	and	$16,7,$3	# E : Target addr misalignment

	or	$17,$2,$17	# E : c1c2c1c2c1c2c1c2
	beq $1,within_quad_w	# U :
	nop
	beq $3,aligned_w	# U : target is 0mod8

	/*
	 * Target address is misaligned, and won't fit within a quadword
	 */
	ldq_u $4,0($16)		# L : Fetch first partial
	bis $16,$16,$5		# E : Save the address
	insql $17,$16,$2	# U : Insert new bytes
	subq $3,8,$3		# E : Invert (for addressing uses)

	addq $18,$3,$18		# E : $18 is new count ($3 is negative)
	mskql $4,$16,$4		# U : clear relevant parts of the quad
	subq $16,$3,$16		# E : $16 is new aligned destination
	bis $2,$4,$1		# E : Final bytes

	nop
	stq_u $1,0($5)		# L : Store result
	nop
	nop

.align 4
aligned_w:
	/*
	 * We are now guaranteed to be quad aligned, with at least
	 * one partial quad to write.
	 */

	sra $18,3,$3		# U : Number of remaining quads to write
	and $18,7,$18		# E : Number of trailing bytes to write
	bis $16,$16,$5		# E : Save dest address
	beq $3,no_quad_w	# U : tail stuff only

	/*
	 * it's worth the effort to unroll this and use wh64 if possible
	 * Lifted a bunch of code from clear_user.S
	 * At this point, entry values are:
	 * $16	Current destination address
	 * $5	A copy of $16
	 * $6	The max quadword address to write to
	 * $18	Number trailer bytes
	 * $3	Number quads to write
	 */

	and	$16, 0x3f, $2	# E : Forward work (only useful for unrolled loop)
	subq	$3, 16, $4	# E : Only try to unroll if > 128 bytes
	subq	$2, 0x40, $1	# E : bias counter (aligning stuff 0mod64)
	blt	$4, loop_w	# U :

	/*
	 * We know we've got at least 16 quads, minimum of one trip
	 * through unrolled loop.  Do a quad at a time to get us 0mod64
	 * aligned.
	 */

	nop			# E :
	nop			# E :
	nop			# E :
	beq	$1, $bigalign_w	# U :

$alignmod64_w:
	stq	$17, 0($5)	# L :
	subq	$3, 1, $3	# E : For consistency later
	addq	$1, 8, $1	# E : Increment towards zero for alignment
	addq	$5, 8, $4	# E : Initial wh64 address (filler instruction)

	nop
	nop
	addq	$5, 8, $5	# E : Inc address
	blt	$1, $alignmod64_w	# U :

$bigalign_w:
	/*
	 * $3 - number quads left to go
	 * $5 - target address (aligned 0mod64)
	 * $17 - mask of stuff to store
	 * Scratch registers available: $7, $2, $4, $1
	 * we know that we'll be taking a minimum of one trip through
 	 * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
	 * Assumes the wh64 needs to be for 2 trips through the loop in the future
	 * The wh64 is issued on for the starting destination address for trip +2
	 * through the loop, and if there are less than two trips left, the target
	 * address will be for the current trip.
	 */

$do_wh64_w:
	wh64	($4)		# L1 : memory subsystem write hint
	subq	$3, 24, $2	# E : For determining future wh64 addresses
	stq	$17, 0($5)	# L :
	nop			# E :

	addq	$5, 128, $4	# E : speculative target of next wh64
	stq	$17, 8($5)	# L :
	stq	$17, 16($5)	# L :
	addq	$5, 64, $7	# E : Fallback address for wh64 (== next trip addr)

	stq	$17, 24($5)	# L :
	stq	$17, 32($5)	# L :
	cmovlt	$2, $7, $4	# E : Latency 2, extra mapping cycle
	nop

	stq	$17, 40($5)	# L :
	stq	$17, 48($5)	# L :
	subq	$3, 16, $2	# E : Repeat the loop at least once more?
	nop

	stq	$17, 56($5)	# L :
	addq	$5, 64, $5	# E :
	subq	$3, 8, $3	# E :
	bge	$2, $do_wh64_w	# U :

	nop
	nop
	nop
	beq	$3, no_quad_w	# U : Might have finished already

.align 4
	/*
	 * Simple loop for trailing quadwords, or for small amounts
	 * of data (where we can't use an unrolled loop and wh64)
	 */
loop_w:
	stq $17,0($5)		# L :
	subq $3,1,$3		# E : Decrement number quads left
	addq $5,8,$5		# E : Inc address
	bne $3,loop_w		# U : more?

no_quad_w:
	/*
	 * Write 0..7 trailing bytes.
	 */
	nop			# E :
	beq $18,end_w		# U : All done?
	ldq $7,0($5)		# L :
	mskqh $7,$6,$2		# U : Mask final quad

	insqh $17,$6,$4		# U : New bits
	bis $2,$4,$1		# E : Put it all together
	stq $1,0($5)		# L : And back to memory
	ret $31,($26),1		# L0 :

within_quad_w:
	ldq_u $1,0($16)		# L :
	insql $17,$16,$2	# U : New bits
	mskql $1,$16,$4		# U : Clear old
	bis $2,$4,$2		# E : New result

	mskql $2,$6,$4		# U :
	mskqh $1,$6,$2		# U :
	bis $2,$4,$1		# E :
	stq_u $1,0($16)		# L :

end_w:
	nop
	nop
	nop
	ret $31,($26),1		# L0 :

	.end __memsetw
	EXPORT_SYMBOL(__memsetw)
600
/* Public entry points are plain aliases for the replicated body. */
memset = ___memset
__memset = ___memset
	EXPORT_SYMBOL(memset)
	EXPORT_SYMBOL(__memset)