/*
 * arch/alpha/lib/copy_user.S
 *
 * Copy to/from user space, handling exceptions as we go..  This
 * isn't exactly pretty.
 *
 * This is essentially the same as "memcpy()", but with a few twists.
 * Notably, we have to make sure that $0 is always up-to-date and
 * contains the right "bytes left to copy" value (and that it is updated
 * only _after_ a successful copy). There is also some rather minor
 * exception setup stuff..
 *
 * NOTE! This is not directly C-callable, because the calling semantics are
 * different:
 *
 * Inputs:
 *	length in $0
 *	destination address in $6
 *	source address in $7
 *	return address in $28
 *
 * Outputs:
 *	bytes left to copy in $0
 *
 * Clobbers:
 *	$1,$2,$3,$4,$5,$6,$7
 */
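
/*
 * For orientation only: the C-level contract this backs is the usual
 * "return the number of bytes that could not be copied" one, with the
 * arch's uaccess wrappers loading $0/$6/$7/$28 as listed above before
 * branching here.  A hedged sketch of a typical caller (illustrative,
 * not the wrapper itself):
 *
 *	if (copy_to_user(udst, ksrc, len))	-- nonzero means a partial copy
 *		return -EFAULT;
 */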

/* Allow an exception for an insn; exit if we get one.  */
#define EXI(x,y...)			\
	99: x,##y;			\
	.section __ex_table,"a";	\
	.long 99b - .;			\
	lda $31, $exitin-99b($31);	\
	.previous

#define EXO(x,y...)			\
	99: x,##y;			\
	.section __ex_table,"a";	\
	.long 99b - .;			\
	lda $31, $exitout-99b($31);	\
	.previous
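
/*
 * How the fixups fit together: each EXI/EXO use emits an __ex_table
 * entry pairing the faulting instruction's PC-relative offset with a
 * "fixup" word laid out like the lda above; its displacement is the
 * distance to the continuation point and its register fields (both $31
 * here, i.e. "none") say where the fault handler should stuff an error
 * code or a zeroed value -- see the Alpha asm/uaccess.h for the exact
 * layout.  EXI guards accesses to the input (source) side, continuing
 * at $exitin so the untouched tail of the destination gets wiped and no
 * stale kernel data leaks out.  EXO guards the output (destination)
 * side, continuing at $exitout, which simply returns with $0 holding
 * the bytes left to copy.
 */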

	.set noat
	.align 4
	.globl __copy_user
	.ent __copy_user
__copy_user:
	.prologue 0
	and $6,7,$3
	beq $0,$35
	beq $3,$36
	subq $3,8,$3
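
/*
 * Head loop ($37): copy one byte at a time until the destination is
 * quadword aligned or the count runs out.  Each step is a read-modify-
 * write of the destination quadword so the neighbouring bytes survive.
 * A C sketch of one step (illustrative only, off = dst & 7):
 *
 *	qw = (qw & ~(0xffUL << 8*off)) | ((unsigned long)byte << 8*off);
 *
 * which is what the mskbl/insbl/bis triple below computes.  $3 counts
 * up from (dst & 7) - 8, so the loop exits exactly at alignment.
 */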
	.align 4
$37:
	EXI( ldq_u $1,0($7) )
	EXO( ldq_u $2,0($6) )
	extbl $1,$7,$1
	mskbl $2,$6,$2
	insbl $1,$6,$1
	addq $3,1,$3
	bis $1,$2,$1
	EXO( stq_u $1,0($6) )
	subq $0,1,$0
	addq $6,1,$6
	addq $7,1,$7
	beq $0,$41
	bne $3,$37
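
/*
 * $36: destination now quadword aligned.  $1 = src & 7 chooses between
 * the fully aligned path ($43) and the shift-and-merge path below;
 * $4 = count & ~7 is the whole-quadword portion, and if it is zero we
 * drop straight to the byte tail at $48.
 */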
$36:
	and $7,7,$1
	bic $0,7,$4
	beq $1,$43
	beq $4,$48
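
/*
 * Shift-and-merge loop ($50): destination aligned, source not, so every
 * output quadword is spliced from two adjacent ldq_u fetches.  Roughly,
 * with off = src & 7 (known non-zero on this path, keeping the shifts
 * in range):
 *
 *	out = (cur >> 8*off) | (next << (64 - 8*off));
 *
 * extql/extqh perform the two shifts, the ldq_u below primes "cur", and
 * each iteration recycles "next" into "cur" via the bis at the bottom.
 */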
	EXI( ldq_u $3,0($7) )
	.align 4
$50:
	EXI( ldq_u $2,8($7) )
	subq $4,8,$4
	extql $3,$7,$3
	extqh $2,$7,$1
	bis $3,$1,$1
	EXO( stq $1,0($6) )
	addq $7,8,$7
	subq $0,8,$0
	addq $6,8,$6
	bis $2,$2,$3
	bne $4,$50
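
/*
 * $48/$57: byte-at-a-time tail for the last count % 8 bytes of the
 * unaligned-source case; the same mskbl/insbl read-modify-write as the
 * head loop, just without the alignment counter.
 */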
$48:
	beq $0,$41
	.align 4
$57:
	EXI( ldq_u $1,0($7) )
	EXO( ldq_u $2,0($6) )
	extbl $1,$7,$1
	mskbl $2,$6,$2
	insbl $1,$6,$1
	bis $1,$2,$1
	EXO( stq_u $1,0($6) )
	subq $0,1,$0
	addq $6,1,$6
	addq $7,1,$7
	bne $0,$57
	br $31,$41
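
/*
 * $43: source and destination are both quadword aligned, so whole
 * quadwords move with plain ldq/stq in $66, still wrapped in EXI/EXO so
 * a fault leaves $0 with an exact count of bytes not yet stored.
 */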
	.align 4
$43:
	beq $4,$65
	.align 4
$66:
	EXI( ldq $1,0($7) )
	subq $4,8,$4
	EXO( stq $1,0($6) )
	addq $7,8,$7
	subq $0,8,$0
	addq $6,8,$6
	bne $4,$66
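
/*
 * $65: at most 7 bytes remain on the aligned path.  Rather than a byte
 * loop, splice them in with one masked quadword merge; as a C sketch
 * (n = remaining count, low_mask(n) keeps bytes 0..n-1):
 *
 *	*dst = (src_qw & low_mask(n)) | (dst_qw & ~low_mask(n));
 *
 * mskql keeps the n source bytes we still need, mskqh keeps the high
 * 8-n destination bytes untouched, and $0 is then cleared to report a
 * complete copy.
 */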
$65:
	beq $0,$41
	EXI( ldq $2,0($7) )
	EXO( ldq $1,0($6) )
	mskql $2,$0,$2
	mskqh $1,$0,$1
	bis $2,$1,$2
	EXO( stq $2,0($6) )
	bis $31,$31,$0
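
/*
 * All successful paths land on the labels below with $0 holding the
 * bytes left to copy (0 on full success).  $exitout is also the EXO
 * fixup target, so a fault on the destination returns through the same
 * instruction with the partial count still in $0.
 */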
$41:
$35:
$exitout:
	ret $31,($28),1

$exitin:
	/* A stupid byte-by-byte zeroing of the rest of the output
	   buffer.  This cures security holes by never leaving
	   random kernel data around to be copied elsewhere.  */

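	/*
	 * $0 (bytes not yet copied) is both the return value and the number
	 * of destination bytes to clear, copied into $1 as the loop counter.
	 * The ldq_u/mskbl/stq_u read-modify-write zeroes one byte at a time
	 * without touching anything outside the buffer, whatever the
	 * destination alignment, and the stores keep their EXO bracket so a
	 * fault here still bails out cleanly through $exitout.
	 */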
	mov $0,$1
$101:
	EXO ( ldq_u $2,0($6) )
	subq $1,1,$1
	mskbl $2,$6,$2
	EXO ( stq_u $2,0($6) )
	addq $6,1,$6
	bgt $1,$101
	ret $31,($28),1

	.end __copy_user