// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

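/*
 * The colour-indexed scratch mappings used below only cover
 * SHMLBA/PAGE_SIZE cache colours, hence the bound on SHMLBA.
 */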
#if SHMLBA > 16384
#error FIX ME
#endif

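/*
 * Serialises use of the shared COPYPAGE_V6_FROM/TO scratch mappings
 * set up by the aliasing copy/clear routines below.
 */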
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

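	/* kmap_atomic() mappings nest; release them in reverse order. */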
	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: requires the MCRR instruction to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
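	/*
	 * MCRR p15, 0, <end>, <start>, c6 invalidates the data cache
	 * over [kto, kto + PAGE_SIZE - 1], dropping any stale lines
	 * for the kernel mapping of this page.
	 */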
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}

/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

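	/*
	 * If the source folio's D-cache state is unknown, flush it now
	 * so the copy below reads up-to-date data through the kernel
	 * mapping.
	 */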
	if (!test_and_set_bit(PG_dcache_clean, &src->flags))
		__flush_dcache_folio(folio_flush_mapping(src), src);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

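	/*
	 * set_top_pte() installs the temporary kernel mapping and
	 * flushes any stale TLB entry for that address.
	 */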
	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}

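/*
 * Default to the non-aliasing implementations; v6_userpage_init()
 * switches to the aliasing versions when the cache is VIPT aliasing.
 */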
struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);