xref: /linux/arch/riscv/kernel/ptrace.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2010 Tilera Corporation. All Rights Reserved.
4  * Copyright 2015 Regents of the University of California
5  * Copyright 2017 SiFive
6  *
7  * Copied from arch/tile/kernel/ptrace.c
8  */
9 
10 #include <asm/vector.h>
11 #include <asm/ptrace.h>
12 #include <asm/syscall.h>
13 #include <asm/thread_info.h>
14 #include <asm/switch_to.h>
15 #include <linux/audit.h>
16 #include <linux/compat.h>
17 #include <linux/ptrace.h>
18 #include <linux/elf.h>
19 #include <linux/regset.h>
20 #include <linux/sched.h>
21 #include <linux/sched/task_stack.h>
22 #include <asm/usercfi.h>
23 
/* Index of each regset exported through task_user_regset_view(). */
enum riscv_regset {
	REGSET_X,			/* general-purpose registers */
#ifdef CONFIG_FPU
	REGSET_F,			/* floating-point state (f0-f31, fcsr) */
#endif
#ifdef CONFIG_RISCV_ISA_V
	REGSET_V,			/* vector state (header + v0-v31) */
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	REGSET_TAGGED_ADDR_CTRL,	/* pointer-masking (tagged address) control */
#endif
#ifdef CONFIG_RISCV_USER_CFI
	REGSET_CFI,			/* shadow stack / landing pad state */
#endif
};
39 
40 static int riscv_gpr_get(struct task_struct *target,
41 			 const struct user_regset *regset,
42 			 struct membuf to)
43 {
44 	return membuf_write(&to, task_pt_regs(target),
45 			    sizeof(struct user_regs_struct));
46 }
47 
48 static int riscv_gpr_set(struct task_struct *target,
49 			 const struct user_regset *regset,
50 			 unsigned int pos, unsigned int count,
51 			 const void *kbuf, const void __user *ubuf)
52 {
53 	struct pt_regs *regs;
54 
55 	regs = task_pt_regs(target);
56 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
57 }
58 
59 #ifdef CONFIG_FPU
/*
 * Dump the tracee's FP state (f0-f31 followed by fcsr) into @to.
 * When the target is the current task, the live FP registers are flushed
 * to thread.fstate first so the copy reflects the current hardware state.
 */
static int riscv_fpr_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	struct __riscv_d_ext_state *fstate = &target->thread.fstate;

	if (target == current)
		fstate_save(current, task_pt_regs(current));

	/* f0-f31 (everything up to, but not including, fcsr), then fcsr. */
	membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
	membuf_store(&to, fstate->fcsr);
	return membuf_zero(&to, 4);	// explicitly pad
}
73 
/*
 * Write FP state supplied by the tracer into @target's thread.fstate.
 * Done as two copyin calls: the first covers f0-f31, the second continues
 * (pos/kbuf/ubuf have advanced) through fcsr only.
 */
static int riscv_fpr_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct __riscv_d_ext_state *fstate = &target->thread.fstate;

	/* f0-f31: everything before fcsr */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
				 offsetof(struct __riscv_d_ext_state, fcsr));
	if (!ret) {
		/* pos has advanced, so this copies just the fcsr word */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
					 offsetof(struct __riscv_d_ext_state, fcsr) +
					 sizeof(fstate->fcsr));
	}

	return ret;
}
92 #endif
93 
94 #ifdef CONFIG_RISCV_ISA_V
/*
 * Dump the tracee's vector state into @to: a __riscv_v_regset_state header
 * (vstart/vl/vtype/vcsr/vlenb) followed by the raw register data (datap,
 * riscv_v_vsize bytes).  Fails with -EINVAL if neither standard vector nor
 * xtheadvector is present, and -ENODATA if the task has no live V state.
 */
static int riscv_vr_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
	struct __riscv_v_regset_state ptrace_vstate;

	if (!(has_vector() || has_xtheadvector()))
		return -EINVAL;

	if (!riscv_v_vstate_query(task_pt_regs(target)))
		return -ENODATA;

	/*
	 * Ensure the vector registers have been saved to the memory before
	 * copying them to membuf.
	 */
	if (target == current) {
		get_cpu_vector_context();
		riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current));
		put_cpu_vector_context();
	}

	ptrace_vstate.vstart = vstate->vstart;
	ptrace_vstate.vl = vstate->vl;
	ptrace_vstate.vtype = vstate->vtype;
	ptrace_vstate.vcsr = vstate->vcsr;
	ptrace_vstate.vlenb = vstate->vlenb;

	/* Copy vector header from vstate. */
	membuf_write(&to, &ptrace_vstate, sizeof(struct __riscv_v_regset_state));

	/* Copy all the vector registers from vstate. */
	return membuf_write(&to, vstate->datap, riscv_v_vsize);
}
130 
/*
 * Validate the vector CSR values a tracer wants to install into @vstate.
 * Returns 1 if any field is invalid/reserved, 0 if the state is acceptable.
 */
static int invalid_ptrace_v_csr(struct __riscv_v_ext_state *vstate,
				struct __riscv_v_regset_state *ptrace)
{
	unsigned long vsew, vlmul, vfrac, vl;
	unsigned long elen, vlen;
	unsigned long sew, lmul;
	unsigned long reserved;

	/* VLEN in bits; the tracer may not change the task's vlenb */
	vlen = vstate->vlenb * 8;
	if (vstate->vlenb != ptrace->vlenb)
		return 1;

	/* do not allow to set vcsr/vxrm/vxsat reserved bits */
	reserved = ~(CSR_VXSAT_MASK | (CSR_VXRM_MASK << CSR_VXRM_SHIFT));
	if (ptrace->vcsr & reserved)
		return 1;

	if (has_vector()) {
		/* do not allow to set vtype reserved bits and vill bit */
		reserved = ~(VTYPE_VSEW | VTYPE_VLMUL | VTYPE_VMA | VTYPE_VTA);
		if (ptrace->vtype & reserved)
			return 1;

		/* SEW = 8 << vsew must not exceed ELEN (64 with Zve64x, else 32) */
		elen = riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE64X) ? 64 : 32;
		vsew = (ptrace->vtype & VTYPE_VSEW) >> VTYPE_VSEW_SHIFT;
		sew = 8 << vsew;

		if (sew > elen)
			return 1;

		vfrac = (ptrace->vtype & VTYPE_VLMUL_FRAC);
		vlmul = (ptrace->vtype & VTYPE_VLMUL);

		/* RVV 1.0 spec 3.4.2: VLMUL(0x4) reserved */
		if (vlmul == 4)
			return 1;

		/* RVV 1.0 spec 3.4.2: (LMUL < SEW_min / ELEN) reserved */
		if (vlmul == 5 && elen == 32)
			return 1;

		/* for zero vl verify that at least one element is possible */
		vl = ptrace->vl ? ptrace->vl : 1;

		if (vfrac) {
			/*
			 * integer 1/LMUL: VL =< VLMAX = VLEN / SEW / LMUL
			 * vlmul 5..7 encodes LMUL 1/8..1/2; here lmul holds the
			 * reciprocal denominator (8, 4 or 2 respectively).
			 */
			lmul = 2 << (3 - (vlmul - vfrac));
			if (vlen < vl * sew * lmul)
				return 1;
		} else {
			/* integer LMUL: VL =< VLMAX = LMUL * VLEN / SEW */
			lmul = 1 << vlmul;
			if (vl * sew > lmul * vlen)
				return 1;
		}
	}

	if (has_xtheadvector()) {
		/* do not allow to set vtype reserved bits and vill bit */
		reserved = ~(VTYPE_VSEW_THEAD | VTYPE_VLMUL_THEAD | VTYPE_VEDIV_THEAD);
		if (ptrace->vtype & reserved)
			return 1;

		/*
		 * THead ISA Extension spec chapter 16:
		 * divided element extension ('Zvediv') is not part of XTheadVector
		 */
		if (ptrace->vtype & VTYPE_VEDIV_THEAD)
			return 1;

		vsew = (ptrace->vtype & VTYPE_VSEW_THEAD) >> VTYPE_VSEW_THEAD_SHIFT;
		sew = 8 << vsew;

		/* xtheadvector has integer LMUL only */
		vlmul = (ptrace->vtype & VTYPE_VLMUL_THEAD);
		lmul = 1 << vlmul;

		/* for zero vl verify that at least one element is possible */
		vl = ptrace->vl ? ptrace->vl : 1;

		/* VL =< VLMAX = LMUL * VLEN / SEW */
		if (vl * sew > lmul * vlen)
			return 1;
	}

	return 0;
}
216 
/*
 * Install vector state supplied by the tracer: first the
 * __riscv_v_regset_state header (validated by invalid_ptrace_v_csr()
 * before anything is committed), then the raw register data into datap.
 */
static int riscv_vr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct __riscv_v_ext_state *vstate = &target->thread.vstate;
	struct __riscv_v_regset_state ptrace_vstate;

	if (!(has_vector() || has_xtheadvector()))
		return -EINVAL;

	if (!riscv_v_vstate_query(task_pt_regs(target)))
		return -ENODATA;

	/* Copy rest of the vstate except datap */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0,
				 sizeof(struct __riscv_v_regset_state));
	if (unlikely(ret))
		return ret;

	if (invalid_ptrace_v_csr(vstate, &ptrace_vstate))
		return -EINVAL;

	/* Header validated: commit it (vlenb is fixed and stays untouched). */
	vstate->vstart = ptrace_vstate.vstart;
	vstate->vl = ptrace_vstate.vl;
	vstate->vtype = ptrace_vstate.vtype;
	vstate->vcsr = ptrace_vstate.vcsr;

	/* Copy all the vector registers. */
	pos = 0;	/* rebase: datap is a separate buffer starting at offset 0 */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
				 0, riscv_v_vsize);
	return ret;
}
252 
253 static int riscv_vr_active(struct task_struct *target, const struct user_regset *regset)
254 {
255 	if (!(has_vector() || has_xtheadvector()))
256 		return -ENODEV;
257 
258 	if (!riscv_v_vstate_query(task_pt_regs(target)))
259 		return 0;
260 
261 	return regset->n;
262 }
263 #endif
264 
265 #ifdef CONFIG_RISCV_ISA_SUPM
266 static int tagged_addr_ctrl_get(struct task_struct *target,
267 				const struct user_regset *regset,
268 				struct membuf to)
269 {
270 	long ctrl = get_tagged_addr_ctrl(target);
271 
272 	if (IS_ERR_VALUE(ctrl))
273 		return ctrl;
274 
275 	return membuf_write(&to, &ctrl, sizeof(ctrl));
276 }
277 
278 static int tagged_addr_ctrl_set(struct task_struct *target,
279 				const struct user_regset *regset,
280 				unsigned int pos, unsigned int count,
281 				const void *kbuf, const void __user *ubuf)
282 {
283 	int ret;
284 	long ctrl;
285 
286 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
287 	if (ret)
288 		return ret;
289 
290 	return set_tagged_addr_ctrl(target, ctrl);
291 }
292 #endif
293 
294 #ifdef CONFIG_RISCV_USER_CFI
/*
 * Dump the task's control-flow integrity state into @to: landing-pad
 * enable/lock and pending-ELP bits, shadow-stack enable/lock bits, and the
 * active shadow stack pointer when shadow stack is enabled.
 */
static int riscv_cfi_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	struct user_cfi_state user_cfi;
	struct pt_regs *regs;

	memset(&user_cfi, 0, sizeof(user_cfi));
	regs = task_pt_regs(target);

	if (is_indir_lp_enabled(target)) {
		user_cfi.cfi_status.cfi_state |= PTRACE_CFI_LP_EN_STATE;
		user_cfi.cfi_status.cfi_state |= is_indir_lp_locked(target) ?
						 PTRACE_CFI_LP_LOCK_STATE : 0;
		/* SR_ELP in status means an indirect branch landing is pending */
		user_cfi.cfi_status.cfi_state |= (regs->status & SR_ELP) ?
						PTRACE_CFI_ELP_STATE : 0;
	}

	if (is_shstk_enabled(target)) {
		user_cfi.cfi_status.cfi_state |= (PTRACE_CFI_SS_EN_STATE |
						  PTRACE_CFI_SS_PTR_STATE);
		user_cfi.cfi_status.cfi_state |= is_shstk_locked(target) ?
						 PTRACE_CFI_SS_LOCK_STATE : 0;
		user_cfi.shstk_ptr = get_active_shstk(target);
	}

	return membuf_write(&to, &user_cfi, sizeof(user_cfi));
}
323 
/*
 * Does it make sense to allow enable / disable of cfi via ptrace?
 * We don't allow enable / disable / locking control via ptrace for now.
 * Setting the shadow stack pointer is allowed. GDB might use it to unwind or
 * some other fixup. Similarly gdb might want to suppress elp and may want
 * to reset elp state.
 */
static int riscv_cfi_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_cfi_state user_cfi;
	struct pt_regs *regs;

	regs = task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_cfi, 0, -1);
	if (ret)
		return ret;

	/*
	 * Not allowing enabling or locking shadow stack or landing pad
	 * There is no disabling of shadow stack or landing pad via ptrace
	 * rsvd field should be set to zero so that if those fields are needed in future
	 *
	 * NOTE(review): "PRACE_CFI_STATE_INVALID_MASK" looks like a typo of
	 * "PTRACE_..." — confirm against the uapi header that defines it
	 * before renaming anywhere.
	 */
	if ((user_cfi.cfi_status.cfi_state &
	     (PTRACE_CFI_LP_EN_STATE | PTRACE_CFI_LP_LOCK_STATE |
	      PTRACE_CFI_SS_EN_STATE | PTRACE_CFI_SS_LOCK_STATE)) ||
	     (user_cfi.cfi_status.cfi_state & PRACE_CFI_STATE_INVALID_MASK))
		return -EINVAL;

	/* If lpad is enabled on target and ptrace requests to set / clear elp, do that */
	if (is_indir_lp_enabled(target)) {
		if (user_cfi.cfi_status.cfi_state &
		    PTRACE_CFI_ELP_STATE) /* set elp state */
			regs->status |= SR_ELP;
		else
			regs->status &= ~SR_ELP; /* clear elp state */
	}

	/* If shadow stack enabled on target, set new shadow stack pointer */
	if (is_shstk_enabled(target) &&
	    (user_cfi.cfi_status.cfi_state & PTRACE_CFI_SS_PTR_STATE))
		set_active_shstk(target, user_cfi.shstk_ptr);

	return 0;
}
373 #endif
374 
375 static struct user_regset riscv_user_regset[] __ro_after_init = {
376 	[REGSET_X] = {
377 		USER_REGSET_NOTE_TYPE(PRSTATUS),
378 		.n = ELF_NGREG,
379 		.size = sizeof(elf_greg_t),
380 		.align = sizeof(elf_greg_t),
381 		.regset_get = riscv_gpr_get,
382 		.set = riscv_gpr_set,
383 	},
384 #ifdef CONFIG_FPU
385 	[REGSET_F] = {
386 		USER_REGSET_NOTE_TYPE(PRFPREG),
387 		.n = ELF_NFPREG,
388 		.size = sizeof(elf_fpreg_t),
389 		.align = sizeof(elf_fpreg_t),
390 		.regset_get = riscv_fpr_get,
391 		.set = riscv_fpr_set,
392 	},
393 #endif
394 #ifdef CONFIG_RISCV_ISA_V
395 	[REGSET_V] = {
396 		USER_REGSET_NOTE_TYPE(RISCV_VECTOR),
397 		.align = 16,
398 		.size = sizeof(__u32),
399 		.regset_get = riscv_vr_get,
400 		.set = riscv_vr_set,
401 		.active = riscv_vr_active,
402 	},
403 #endif
404 #ifdef CONFIG_RISCV_ISA_SUPM
405 	[REGSET_TAGGED_ADDR_CTRL] = {
406 		USER_REGSET_NOTE_TYPE(RISCV_TAGGED_ADDR_CTRL),
407 		.n = 1,
408 		.size = sizeof(long),
409 		.align = sizeof(long),
410 		.regset_get = tagged_addr_ctrl_get,
411 		.set = tagged_addr_ctrl_set,
412 	},
413 #endif
414 #ifdef CONFIG_RISCV_USER_CFI
415 	[REGSET_CFI] = {
416 		.core_note_type = NT_RISCV_USER_CFI,
417 		.align = sizeof(__u64),
418 		.n = sizeof(struct user_cfi_state) / sizeof(__u64),
419 		.size = sizeof(__u64),
420 		.regset_get = riscv_cfi_get,
421 		.set = riscv_cfi_set,
422 	},
423 #endif
424 };
425 
/* Native regset view returned by task_user_regset_view() for 64-bit tasks. */
static const struct user_regset_view riscv_user_native_view = {
	.name = "riscv",
	.e_machine = EM_RISCV,
	.regsets = riscv_user_regset,
	.n = ARRAY_SIZE(riscv_user_regset),
};
432 
433 #ifdef CONFIG_RISCV_ISA_V
434 void __init update_regset_vector_info(unsigned long size)
435 {
436 	riscv_user_regset[REGSET_V].n = (size + sizeof(struct __riscv_v_regset_state)) /
437 					sizeof(__u32);
438 }
439 #endif
440 
/* Maps a register name to its byte offset inside struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Name/offset pairs for every pt_regs field; NULL-name sentinel terminated. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(epc),
	REG_OFFSET_NAME(ra),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(gp),
	REG_OFFSET_NAME(tp),
	REG_OFFSET_NAME(t0),
	REG_OFFSET_NAME(t1),
	REG_OFFSET_NAME(t2),
	REG_OFFSET_NAME(s0),
	REG_OFFSET_NAME(s1),
	REG_OFFSET_NAME(a0),
	REG_OFFSET_NAME(a1),
	REG_OFFSET_NAME(a2),
	REG_OFFSET_NAME(a3),
	REG_OFFSET_NAME(a4),
	REG_OFFSET_NAME(a5),
	REG_OFFSET_NAME(a6),
	REG_OFFSET_NAME(a7),
	REG_OFFSET_NAME(s2),
	REG_OFFSET_NAME(s3),
	REG_OFFSET_NAME(s4),
	REG_OFFSET_NAME(s5),
	REG_OFFSET_NAME(s6),
	REG_OFFSET_NAME(s7),
	REG_OFFSET_NAME(s8),
	REG_OFFSET_NAME(s9),
	REG_OFFSET_NAME(s10),
	REG_OFFSET_NAME(s11),
	REG_OFFSET_NAME(t3),
	REG_OFFSET_NAME(t4),
	REG_OFFSET_NAME(t5),
	REG_OFFSET_NAME(t6),
	REG_OFFSET_NAME(status),
	REG_OFFSET_NAME(badaddr),
	REG_OFFSET_NAME(cause),
	REG_OFFSET_NAME(orig_a0),
	REG_OFFSET_END,
};
488 
489 /**
490  * regs_query_register_offset() - query register offset from its name
491  * @name:	the name of a register
492  *
493  * regs_query_register_offset() returns the offset of a register in struct
494  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
495  */
496 int regs_query_register_offset(const char *name)
497 {
498 	const struct pt_regs_offset *roff;
499 
500 	for (roff = regoffset_table; roff->name != NULL; roff++)
501 		if (!strcmp(roff->name, name))
502 			return roff->offset;
503 	return -EINVAL;
504 }
505 
506 /**
507  * regs_within_kernel_stack() - check the address in the stack
508  * @regs:      pt_regs which contains kernel stack pointer.
509  * @addr:      address which is checked.
510  *
511  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
512  * If @addr is within the kernel stack, it returns true. If not, returns false.
513  */
514 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
515 {
516 	return (addr & ~(THREAD_SIZE - 1))  ==
517 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
518 }
519 
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * Returns the @n-th word above the kernel stack pointer of @regs,
 * or 0 if that slot falls outside the kernel stack.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *slot = (unsigned long *)kernel_stack_pointer(regs) + n;

	if (!regs_within_kernel_stack(regs, (unsigned long)slot))
		return 0;

	return *slot;
}
539 
/* Called when a tracer detaches; RISC-V keeps no per-task ptrace HW state. */
void ptrace_disable(struct task_struct *child)
{
}
543 
/*
 * Arch hook for ptrace(2).  RISC-V defines no arch-specific requests, so
 * everything is handled by the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
557 
558 #ifdef CONFIG_COMPAT
559 static int compat_riscv_gpr_get(struct task_struct *target,
560 				const struct user_regset *regset,
561 				struct membuf to)
562 {
563 	struct compat_user_regs_struct cregs;
564 
565 	regs_to_cregs(&cregs, task_pt_regs(target));
566 
567 	return membuf_write(&to, &cregs,
568 			    sizeof(struct compat_user_regs_struct));
569 }
570 
571 static int compat_riscv_gpr_set(struct task_struct *target,
572 				const struct user_regset *regset,
573 				unsigned int pos, unsigned int count,
574 				const void *kbuf, const void __user *ubuf)
575 {
576 	int ret;
577 	struct compat_user_regs_struct cregs;
578 
579 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &cregs, 0, -1);
580 
581 	cregs_to_regs(&cregs, task_pt_regs(target));
582 
583 	return ret;
584 }
585 
/* Regset table for 32-bit (compat) tasks: compat GPRs plus shared FP state. */
static const struct user_regset compat_riscv_user_regset[] = {
	[REGSET_X] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n = ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_riscv_gpr_get,
		.set = compat_riscv_gpr_set,
	},
#ifdef CONFIG_FPU
	/* FP layout is identical for compat tasks; reuse the native handlers */
	[REGSET_F] = {
		USER_REGSET_NOTE_TYPE(PRFPREG),
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = riscv_fpr_get,
		.set = riscv_fpr_set,
	},
#endif
};
606 
/* Regset view returned by task_user_regset_view() for compat (32-bit) tasks. */
static const struct user_regset_view compat_riscv_user_native_view = {
	.name = "riscv",
	.e_machine = EM_RISCV,
	.regsets = compat_riscv_user_regset,
	.n = ARRAY_SIZE(compat_riscv_user_regset),
};
613 
614 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
615 			compat_ulong_t caddr, compat_ulong_t cdata)
616 {
617 	long ret = -EIO;
618 
619 	switch (request) {
620 	default:
621 		ret = compat_ptrace_request(child, request, caddr, cdata);
622 		break;
623 	}
624 
625 	return ret;
626 }
#else
/* !CONFIG_COMPAT: empty stub so task_user_regset_view() still links. */
static const struct user_regset_view compat_riscv_user_native_view = {};
#endif /* CONFIG_COMPAT */
630 
631 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
632 {
633 	if (is_compat_thread(&task->thread_info))
634 		return &compat_riscv_user_native_view;
635 	else
636 		return &riscv_user_native_view;
637 }
638