xref: /linux/arch/arm64/kernel/mte.c (revision 6fb44438a5e1897a72dd11139274735256be8069)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020 ARM Ltd.
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/cpu.h>
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/prctl.h>
11 #include <linux/sched.h>
12 #include <linux/sched/mm.h>
13 #include <linux/string.h>
14 #include <linux/swap.h>
15 #include <linux/swapops.h>
16 #include <linux/thread_info.h>
17 #include <linux/types.h>
18 #include <linux/uaccess.h>
19 #include <linux/uio.h>
20 
21 #include <asm/barrier.h>
22 #include <asm/cpufeature.h>
23 #include <asm/mte.h>
24 #include <asm/ptrace.h>
25 #include <asm/sysreg.h>
26 
27 static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
28 
29 #ifdef CONFIG_KASAN_HW_TAGS
30 /*
31  * The asynchronous and asymmetric MTE modes have the same behavior for
32  * store operations. This flag is set when either of these modes is enabled.
33  */
34 DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
35 EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
36 #endif
37 
38 void mte_sync_tags(pte_t pte, unsigned int nr_pages)
39 {
40 	struct page *page = pte_page(pte);
41 	struct folio *folio = page_folio(page);
42 	unsigned long i;
43 
44 	if (folio_test_hugetlb(folio)) {
45 		unsigned long nr = folio_nr_pages(folio);
46 
47 		/* Hugetlb MTE flags are set for head page only */
48 		if (folio_try_hugetlb_mte_tagging(folio)) {
49 			for (i = 0; i < nr; i++, page++)
50 				mte_clear_page_tags(page_address(page));
51 			folio_set_hugetlb_mte_tagged(folio);
52 		}
53 
54 		/* ensure the tags are visible before the PTE is set */
55 		smp_wmb();
56 
57 		return;
58 	}
59 
60 	/* if PG_mte_tagged is set, tags have already been initialised */
61 	for (i = 0; i < nr_pages; i++, page++) {
62 		if (try_page_mte_tagging(page)) {
63 			mte_clear_page_tags(page_address(page));
64 			set_page_mte_tagged(page);
65 		}
66 	}
67 
68 	/* ensure the tags are visible before the PTE is set */
69 	smp_wmb();
70 }
71 
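/*
 * Illustrative note (an assumption inferred from the memcmp_pages() comment
 * below, not stated explicitly in this file): mte_sync_tags() runs when a
 * tagged PTE is about to be installed via the __set_ptes() path, so the
 * smp_wmb() above orders the tag initialisation against the PTE write
 * performed by the caller.
 */
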
72 int memcmp_pages(struct page *page1, struct page *page2)
73 {
74 	char *addr1, *addr2;
75 	int ret;
76 
77 	addr1 = page_address(page1);
78 	addr2 = page_address(page2);
79 	ret = memcmp(addr1, addr2, PAGE_SIZE);
80 
81 	if (!system_supports_mte() || ret)
82 		return ret;
83 
84 	/*
85 	 * If the page content is identical but at least one of the pages is
86 	 * tagged, return non-zero to avoid KSM merging. If only one of the
87 	 * pages is tagged, __set_ptes() may zero or change the tags of the
88 	 * other page via mte_sync_tags().
89 	 */
90 	if (page_mte_tagged(page1) || page_mte_tagged(page2))
91 		return addr1 != addr2;
92 
93 	return ret;
94 }
95 
96 static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
97 {
98 	/* Enable MTE Sync Mode for EL1. */
99 	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
100 			 SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
101 	isb();
102 
103 	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
104 }
105 
106 #ifdef CONFIG_KASAN_HW_TAGS
107 void mte_enable_kernel_sync(void)
108 {
109 	/*
110 	 * Make sure we enter this function when no PE has set
111 	 * async mode previously.
112 	 */
113 	WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
114 			"MTE async mode enabled system wide!");
115 
116 	__mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
117 }
118 
119 void mte_enable_kernel_async(void)
120 {
121 	__mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);
122 
123 	/*
124 	 * MTE async mode is set system wide by the first PE that
125 	 * executes this function.
126 	 *
127 	 * Note: If in future KASAN acquires a runtime switching
128 	 * mode in between sync and async, this strategy needs
129 	 * to be reviewed.
130 	 */
131 	if (!system_uses_mte_async_or_asymm_mode())
132 		static_branch_enable(&mte_async_or_asymm_mode);
133 }
134 
135 void mte_enable_kernel_asymm(void)
136 {
137 	if (cpus_have_cap(ARM64_MTE_ASYMM)) {
138 		__mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);
139 
140 		/*
141 		 * MTE asymm mode behaves as async mode for store
142 		 * operations. The mode is set system wide by the
143 		 * first PE that executes this function.
144 		 *
145 		 * Note: If in future KASAN acquires a runtime switching
146 		 * mode in between sync and async, this strategy needs
147 		 * to be reviewed.
148 		 */
149 		if (!system_uses_mte_async_or_asymm_mode())
150 			static_branch_enable(&mte_async_or_asymm_mode);
151 	} else {
152 		/*
153 		 * If the CPU does not support MTE asymmetric mode the
154 		 * kernel falls back on synchronous mode which is the
155 		 * default for kasan=on.
156 		 */
157 		mte_enable_kernel_sync();
158 	}
159 }
160 #endif
161 
162 #ifdef CONFIG_KASAN_HW_TAGS
163 void mte_check_tfsr_el1(void)
164 {
165 	u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
166 
167 	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
168 		/*
169 		 * Note: isb() is not required after this direct write
170 		 * because there is no indirect read subsequent to it
171 		 * (per ARM DDI 0487F.c table D13-1).
172 		 */
173 		write_sysreg_s(0, SYS_TFSR_EL1);
174 
175 		kasan_report_async();
176 	}
177 }
178 #endif
179 
180 /*
181  * This is where we actually resolve the system and process MTE mode
182  * configuration into an actual value in SCTLR_EL1 that affects
183  * userspace.
184  */
185 static void mte_update_sctlr_user(struct task_struct *task)
186 {
187 	/*
188 	 * This must be called with preemption disabled and can only be called
189 	 * on the current or next task since the CPU must match where the thread
190 	 * is going to run. The caller is responsible for calling
191 	 * update_sctlr_el1() later in the same preemption disabled block.
192 	 */
193 	unsigned long sctlr = task->thread.sctlr_user;
194 	unsigned long mte_ctrl = task->thread.mte_ctrl;
195 	unsigned long pref, resolved_mte_tcf;
196 
197 	pref = __this_cpu_read(mte_tcf_preferred);
198 	/*
199 	 * If there is no overlap between the system preferred and
200 	 * program requested values go with what was requested.
201 	 */
202 	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
203 	sctlr &= ~(SCTLR_EL1_TCF0_MASK | SCTLR_EL1_TCSO0_MASK);
204 	/*
205 	 * Pick an actual setting. The order in which we check for
206 	 * set bits and map into register values determines our
207 	 * default order.
208 	 */
209 	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
210 		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
211 	else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
212 		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
213 	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
214 		sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
215 
216 	if (mte_ctrl & MTE_CTRL_STORE_ONLY)
217 		sctlr |= SYS_FIELD_PREP(SCTLR_EL1, TCSO0, 1);
218 
219 	task->thread.sctlr_user = sctlr;
220 }
221 
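/*
 * Worked example of the resolution above (illustrative): with a per-CPU
 * preference of MTE_CTRL_TCF_ASYNC and a task that requested both
 * PR_MTE_TCF_SYNC and PR_MTE_TCF_ASYNC, the overlap is non-empty, so
 * resolved_mte_tcf == pref and SCTLR_EL1.TCF0 is programmed to ASYNC.
 * If the task requested only sync, there is no overlap and the requested
 * value wins: TCF0 is programmed to SYNC.
 */
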
222 static void mte_update_gcr_excl(struct task_struct *task)
223 {
224 	/*
225 	 * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
226 	 * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
227 	 */
228 	if (kasan_hw_tags_enabled())
229 		return;
230 
231 	write_sysreg_s(
232 		((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
233 		 SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
234 		SYS_GCR_EL1);
235 }
236 
237 #ifdef CONFIG_KASAN_HW_TAGS
238 /* Only called from assembly, silence sparse */
239 void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
240 				 __le32 *updptr, int nr_inst);
241 
242 void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
243 				 __le32 *updptr, int nr_inst)
244 {
245 	BUG_ON(nr_inst != 1); /* Branch -> NOP */
246 
247 	if (kasan_hw_tags_enabled())
248 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
249 }
250 #endif
251 
252 void mte_thread_init_user(void)
253 {
254 	if (!system_supports_mte())
255 		return;
256 
257 	/* clear any pending asynchronous tag fault */
258 	dsb(ish);
259 	write_sysreg_s(0, SYS_TFSRE0_EL1);
260 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
261 	/* disable tag checking and reset tag generation mask */
262 	set_mte_ctrl(current, 0);
263 }
264 
265 void mte_thread_switch(struct task_struct *next)
266 {
267 	if (!system_supports_mte())
268 		return;
269 
270 	mte_update_sctlr_user(next);
271 	mte_update_gcr_excl(next);
272 
273 	/* TCO may not have been disabled on exception entry for the current task. */
274 	mte_disable_tco_entry(next);
275 
276 	/*
277 	 * Check if an async tag exception occurred at EL1.
278 	 *
279 	 * Note: On the context switch path we rely on the dsb() present
280 	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
281 	 * are synchronized before this point.
282 	 */
283 	isb();
284 	mte_check_tfsr_el1();
285 }
286 
287 void mte_cpu_setup(void)
288 {
289 	u64 rgsr;
290 
291 	/*
292 	 * CnP must be enabled only after the MAIR_EL1 register has been set
293 	 * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
294 	 * lead to the wrong memory type being used for a brief window during
295 	 * CPU power-up.
296 	 *
297 	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
298 	 * make sure that is the case.
299 	 */
300 	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
301 	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
302 
303 	/* Normal Tagged memory type at the corresponding MAIR index */
304 	sysreg_clear_set(mair_el1,
305 			 MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
306 			 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
307 				      MT_NORMAL_TAGGED));
308 
309 	write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
310 
311 	/*
312 	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
313 	 * RGSR_EL1.SEED must be non-zero for IRG to produce
314 	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
315 	 * must initialize it.
316 	 */
317 	rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
318 	       SYS_RGSR_EL1_SEED_SHIFT;
319 	if (rgsr == 0)
320 		rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
321 	write_sysreg_s(rgsr, SYS_RGSR_EL1);
322 
323 	/* clear any pending tag check faults in TFSR*_EL1 */
324 	write_sysreg_s(0, SYS_TFSR_EL1);
325 	write_sysreg_s(0, SYS_TFSRE0_EL1);
326 
327 	local_flush_tlb_all();
328 }
329 
330 void mte_suspend_enter(void)
331 {
332 	if (!system_supports_mte())
333 		return;
334 
335 	/*
336 	 * The barriers are required to guarantee that the indirect writes
337 	 * to TFSR_EL1 are synchronized before we report the state.
338 	 */
339 	dsb(nsh);
340 	isb();
341 
342 	/* Report SYS_TFSR_EL1 before suspend entry */
343 	mte_check_tfsr_el1();
344 }
345 
346 void mte_suspend_exit(void)
347 {
348 	if (!system_supports_mte())
349 		return;
350 
351 	mte_cpu_setup();
352 }
353 
354 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
355 {
356 	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
357 			SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
358 
359 	if (!system_supports_mte())
360 		return 0;
361 
362 	if (arg & PR_MTE_TCF_ASYNC)
363 		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
364 	if (arg & PR_MTE_TCF_SYNC)
365 		mte_ctrl |= MTE_CTRL_TCF_SYNC;
366 
367 	/*
368 	 * If the system supports it and both sync and async modes are
369 	 * specified then implicitly enable asymmetric mode.
370 	 * Userspace could see a mix of both sync and async anyway due
371 	 * to differing or changing defaults on CPUs.
372 	 */
373 	if (cpus_have_cap(ARM64_MTE_ASYMM) &&
374 	    (arg & PR_MTE_TCF_ASYNC) &&
375 	    (arg & PR_MTE_TCF_SYNC))
376 		mte_ctrl |= MTE_CTRL_TCF_ASYMM;
377 
378 	if (arg & PR_MTE_STORE_ONLY)
379 		mte_ctrl |= MTE_CTRL_STORE_ONLY;
380 
381 	task->thread.mte_ctrl = mte_ctrl;
382 	if (task == current) {
383 		preempt_disable();
384 		mte_update_sctlr_user(task);
385 		mte_update_gcr_excl(task);
386 		update_sctlr_el1(task->thread.sctlr_user);
387 		preempt_enable();
388 	}
389 
390 	return 0;
391 }
392 
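/*
 * Minimal userspace sketch for the encoding above (illustrative; it assumes
 * the PR_* constants exported through <linux/prctl.h> and a libc prctl()
 * wrapper):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Synchronous tag checking; IRG may generate tags 1..15 (tag 0
 *	// excluded). The tag bits are inverted into GCR_EL1.EXCL by
 *	// set_mte_ctrl() above.
 *	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
 *			     (0xfffeUL << PR_MTE_TAG_SHIFT);
 *	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
 *		perror("PR_SET_TAGGED_ADDR_CTRL");
 */
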
393 long get_mte_ctrl(struct task_struct *task)
394 {
395 	unsigned long ret;
396 	u64 mte_ctrl = task->thread.mte_ctrl;
397 	u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
398 		   SYS_GCR_EL1_EXCL_MASK;
399 
400 	if (!system_supports_mte())
401 		return 0;
402 
403 	ret = incl << PR_MTE_TAG_SHIFT;
404 	if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
405 		ret |= PR_MTE_TCF_ASYNC;
406 	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
407 		ret |= PR_MTE_TCF_SYNC;
408 	if (mte_ctrl & MTE_CTRL_STORE_ONLY)
409 		ret |= PR_MTE_STORE_ONLY;
410 
411 	return ret;
412 }
413 
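/*
 * Illustrative counterpart for get_mte_ctrl() (assumes the same prctl()
 * interface as the sketch above): the current setting can be read back with
 *
 *	long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 *	if (ctrl >= 0 && (ctrl & PR_MTE_TCF_SYNC))
 *		;	// synchronous tag checking was requested
 *
 * where the PR_MTE_TAG_MASK field carries the "include" mask, i.e. the
 * inverse of the GCR_EL1.EXCL bits held in mte_ctrl.
 */
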
414 /*
415  * Access MTE tags in another process' address space as given in mm. Update
416  * the number of tags copied. Return 0 if any tags copied, error otherwise.
417  * Inspired by __access_remote_vm().
418  */
419 static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
420 				struct iovec *kiov, unsigned int gup_flags)
421 {
422 	void __user *buf = kiov->iov_base;
423 	size_t len = kiov->iov_len;
424 	int err = 0;
425 	int write = gup_flags & FOLL_WRITE;
426 
427 	if (!access_ok(buf, len))
428 		return -EFAULT;
429 
430 	if (mmap_read_lock_killable(mm))
431 		return -EIO;
432 
433 	while (len) {
434 		struct vm_area_struct *vma;
435 		unsigned long tags, offset;
436 		void *maddr;
437 		struct page *page = get_user_page_vma_remote(mm, addr,
438 							     gup_flags, &vma);
439 		struct folio *folio;
440 
441 		if (IS_ERR(page)) {
442 			err = PTR_ERR(page);
443 			break;
444 		}
445 
446 		/*
447 		 * Only copy tags if the page has been mapped as PROT_MTE
448 		 * (PG_mte_tagged set). Otherwise the tags are not valid and
449 		 * not accessible to user. Moreover, an mprotect(PROT_MTE)
450 		 * would cause the existing tags to be cleared if the page
451 		 * was never mapped with PROT_MTE.
452 		 */
453 		if (!(vma->vm_flags & VM_MTE)) {
454 			err = -EOPNOTSUPP;
455 			put_page(page);
456 			break;
457 		}
458 
459 		folio = page_folio(page);
460 		if (folio_test_hugetlb(folio))
461 			WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
462 		else
463 			WARN_ON_ONCE(!page_mte_tagged(page));
464 
465 		/* limit access to the end of the page */
466 		offset = offset_in_page(addr);
467 		tags = min(len, (PAGE_SIZE - offset) / MTE_GRANULE_SIZE);
468 
469 		maddr = page_address(page);
470 		if (write) {
471 			tags = mte_copy_tags_from_user(maddr + offset, buf, tags);
472 			set_page_dirty_lock(page);
473 		} else {
474 			tags = mte_copy_tags_to_user(buf, maddr + offset, tags);
475 		}
476 		put_page(page);
477 
478 		/* error accessing the tracer's buffer */
479 		if (!tags)
480 			break;
481 
482 		len -= tags;
483 		buf += tags;
484 		addr += tags * MTE_GRANULE_SIZE;
485 	}
486 	mmap_read_unlock(mm);
487 
488 	/* return an error if no tags copied */
489 	kiov->iov_len = buf - kiov->iov_base;
490 	if (!kiov->iov_len) {
491 		/* check for error accessing the tracee's address space */
492 		if (err)
493 			return -EIO;
494 		else
495 			return -EFAULT;
496 	}
497 
498 	return 0;
499 }
500 
501 /*
502  * Copy MTE tags in another process' address space at 'addr' to/from tracer's
503  * iovec buffer. Return 0 on success. Inspired by ptrace_access_vm().
504  */
505 static int access_remote_tags(struct task_struct *tsk, unsigned long addr,
506 			      struct iovec *kiov, unsigned int gup_flags)
507 {
508 	struct mm_struct *mm;
509 	int ret;
510 
511 	mm = get_task_mm(tsk);
512 	if (!mm)
513 		return -EPERM;
514 
515 	if (!tsk->ptrace || (current != tsk->parent) ||
516 	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
517 	     !ptracer_capable(tsk, mm->user_ns))) {
518 		mmput(mm);
519 		return -EPERM;
520 	}
521 
522 	ret = __access_remote_tags(mm, addr, kiov, gup_flags);
523 	mmput(mm);
524 
525 	return ret;
526 }
527 
528 int mte_ptrace_copy_tags(struct task_struct *child, long request,
529 			 unsigned long addr, unsigned long data)
530 {
531 	int ret;
532 	struct iovec kiov;
533 	struct iovec __user *uiov = (void __user *)data;
534 	unsigned int gup_flags = FOLL_FORCE;
535 
536 	if (!system_supports_mte())
537 		return -EIO;
538 
539 	if (get_user(kiov.iov_base, &uiov->iov_base) ||
540 	    get_user(kiov.iov_len, &uiov->iov_len))
541 		return -EFAULT;
542 
543 	if (request == PTRACE_POKEMTETAGS)
544 		gup_flags |= FOLL_WRITE;
545 
546 	/* align addr to the MTE tag granule */
547 	addr &= MTE_GRANULE_MASK;
548 
549 	ret = access_remote_tags(child, addr, &kiov, gup_flags);
550 	if (!ret)
551 		ret = put_user(kiov.iov_len, &uiov->iov_len);
552 
553 	return ret;
554 }
555 
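/*
 * Illustrative tracer usage (a sketch; it assumes the PTRACE_PEEKMTETAGS /
 * PTRACE_POKEMTETAGS requests from the arm64 uapi ptrace.h and a stopped
 * tracee):
 *
 *	struct iovec iov = { .iov_base = tagbuf, .iov_len = ntags };
 *	if (ptrace(PTRACE_PEEKMTETAGS, pid, (void *)addr, &iov) == 0)
 *		;	// iov.iov_len holds the number of tags copied
 *
 * Each tag occupies one byte per MTE_GRANULE_SIZE bytes of tracee memory,
 * and a request may copy fewer tags than asked for, in which case the
 * tracer is expected to retry with the remaining range.
 */
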
556 static ssize_t mte_tcf_preferred_show(struct device *dev,
557 				      struct device_attribute *attr, char *buf)
558 {
559 	switch (per_cpu(mte_tcf_preferred, dev->id)) {
560 	case MTE_CTRL_TCF_ASYNC:
561 		return sysfs_emit(buf, "async\n");
562 	case MTE_CTRL_TCF_SYNC:
563 		return sysfs_emit(buf, "sync\n");
564 	case MTE_CTRL_TCF_ASYMM:
565 		return sysfs_emit(buf, "asymm\n");
566 	default:
567 		return sysfs_emit(buf, "???\n");
568 	}
569 }
570 
571 static ssize_t mte_tcf_preferred_store(struct device *dev,
572 				       struct device_attribute *attr,
573 				       const char *buf, size_t count)
574 {
575 	u64 tcf;
576 
577 	if (sysfs_streq(buf, "async"))
578 		tcf = MTE_CTRL_TCF_ASYNC;
579 	else if (sysfs_streq(buf, "sync"))
580 		tcf = MTE_CTRL_TCF_SYNC;
581 	else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
582 		tcf = MTE_CTRL_TCF_ASYMM;
583 	else
584 		return -EINVAL;
585 
586 	device_lock(dev);
587 	per_cpu(mte_tcf_preferred, dev->id) = tcf;
588 	device_unlock(dev);
589 
590 	return count;
591 }
592 static DEVICE_ATTR_RW(mte_tcf_preferred);
593 
594 static int register_mte_tcf_preferred_sysctl(void)
595 {
596 	unsigned int cpu;
597 
598 	if (!system_supports_mte())
599 		return 0;
600 
601 	for_each_possible_cpu(cpu) {
602 		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
603 		device_create_file(get_cpu_device(cpu),
604 				   &dev_attr_mte_tcf_preferred);
605 	}
606 
607 	return 0;
608 }
609 subsys_initcall(register_mte_tcf_preferred_sysctl);
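
/*
 * Illustrative sysfs usage (a sketch of the interface registered above):
 * each possible CPU gets a mte_tcf_preferred file, typically visible as
 * /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred, e.g.
 *
 *	# cat /sys/devices/system/cpu/cpu0/mte_tcf_preferred
 *	async
 *	# echo sync > /sys/devices/system/cpu/cpu0/mte_tcf_preferred
 *
 * The preference only takes effect for tasks whose prctl() request
 * overlaps with it; see mte_update_sctlr_user() above.
 */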
610 
611 /*
612  * Return 0 on success, the number of bytes not probed otherwise.
613  */
614 size_t mte_probe_user_range(const char __user *uaddr, size_t size)
615 {
616 	const char __user *end = uaddr + size;
617 	char val;
618 
619 	__raw_get_user(val, uaddr, efault);
620 
621 	uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
622 	while (uaddr < end) {
623 		/*
624 		 * A read is sufficient for mte, the caller should have probed
625 		 * for the pte write permission if required.
626 		 */
627 		__raw_get_user(val, uaddr, efault);
628 		uaddr += MTE_GRANULE_SIZE;
629 	}
630 	(void)val;
631 
632 	return 0;
633 
634 efault:
635 	return end - uaddr;
636 }
637