xref: /linux/arch/hexagon/mm/copy_from_user.S (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * User memory copy functions for kernel
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * The right way to do this involves valignb.
 * The easy way, taken here, is to only speed up copies where src and dest
 * have similar alignment.
 */
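/*
 * (Background note, not something this file relies on: valignb is the
 * Hexagon byte-align instruction, which can presumably extract an unaligned
 * 64-bit field from a pair of registers and so would let arbitrary src/dest
 * misalignment be handled at full width.)
 */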

/*
 * Copy to user and copy from user are the same, except that for packets
 * with both a load and a store, we do not know how to tell which kind of
 * exception we got.  Therefore we duplicate the function and handle
 * faulting addresses differently for each one.
 */
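/*
 * C-level view (a sketch, not taken from this file): the routine assembled
 * here is presumably declared elsewhere roughly as
 *
 *	unsigned long __copy_from_user_hexagon(void *dst,
 *		const void __user *src, unsigned long n);
 *
 * matching the dst/src/bytes register assignments below.  Going by the
 * fixups at the end of this file, which return the adjusted byte count in
 * r0, it follows the usual kernel convention of returning the number of
 * bytes that could NOT be copied (0 on success).
 */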

/*
 * copy from user: loads can fault
 */
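/*
 * Register aliases used by copy_user_template.S.  A sketch of their roles,
 * inferred from the fixup code below rather than stated anywhere: dst, src
 * and bytes are the incoming arguments in r0/r1/r2; src_sav/dst_sav hold
 * pointer values saved by the template for use in the fault fixups;
 * d_dbuf/w_dbuf stage loaded data (64-bit pair and 32-bit word) before it
 * is stored; loopcount is the template's loop counter.
 */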
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15

#define dst r0
#define src r1
#define bytes r2
#define loopcount r5

#define FUNCNAME __copy_from_user_hexagon
#include "copy_user_template.S"
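/*
 * copy_user_template.S provides the actual copy loops under the FUNCNAME
 * defined above; the numeric labels referenced by the exception table at
 * the bottom of this file (1000, 2000, 4000, 8080, 4080, 2080, 1080) are
 * apparently its user-space loads.  Everything below is only the fault
 * fixup code those table entries point at.
 */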

	/* LOAD FAULTS from COPY_FROM_USER */

	/* Alignment loop.  r2 has been updated. Return it. */
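	/*
	 * 1009/2009/4009 catch faulting loads in what look like the byte,
	 * halfword and word alignment loops (going by the 1/2/4 prefix used
	 * for the other labels here).  Since r2 already holds the not-copied
	 * count at that point, the fixup just returns it in r0.
	 */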
	.falign
1009:
2009:
4009:
	{
		r0 = r2
		jumpr r31
	}
	/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
	/* X - (A - B) == X + B - A */
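	/*
	 * Spelled out (a sketch of what the template apparently arranges):
	 * src_sav holds a source pointer saved before the loads that can
	 * fault, so (src - src_sav) is how far the loads got past that
	 * checkpoint.  The count left to copy is r2 - (src - src_sav),
	 * which the identity above turns into r2 + (src_sav - src), i.e.
	 * the "r2 += sub(src_sav,src)" in each fixup below.  The store in
	 * the same packet flushes the last chunk that had been loaded but
	 * not yet written out.
	 */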
	.falign
8089:
	{
		memd(dst) = d_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
4089:
	{
		memw(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
2089:
	{
		memh(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
1089:
	{
		memb(dst) = w_dbuf
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}

	/* COPY FROM USER: only loads can fail */

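	/*
	 * Exception table: each entry pairs the address of a user-space
	 * load that may fault (a label emitted by copy_user_template.S)
	 * with the fixup above that recovers from it; the kernel fault
	 * handler searches this table and resumes at the fixup address.
	 */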
	.section __ex_table,"a"
	.long 1000b,1009b
	.long 2000b,2009b
	.long 4000b,4009b
	.long 8080b,8089b
	.long 4080b,4089b
	.long 2080b,2089b
	.long 1080b,1089b
	.previous