xref: /linux/mm/highmem.c (revision 955cc774f286adddeef85bd2e69edb0f2dab3214)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * High memory handling common code and variables.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
61da177e4SLinus Torvalds  *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  *
91da177e4SLinus Torvalds  * Redesigned the x86 32-bit VM architecture to deal with
101da177e4SLinus Torvalds  * 64-bit physical space. With current x86 CPUs this
111da177e4SLinus Torvalds  * means up to 64 Gigabytes physical RAM.
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * Rewrote high memory support to move the page cache into
141da177e4SLinus Torvalds  * high memory. Implemented permanent (schedulable) kmaps
151da177e4SLinus Torvalds  * based on Linus' idea.
161da177e4SLinus Torvalds  *
171da177e4SLinus Torvalds  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
181da177e4SLinus Torvalds  */
191da177e4SLinus Torvalds 
201da177e4SLinus Torvalds #include <linux/mm.h>
21b95f1b31SPaul Gortmaker #include <linux/export.h>
221da177e4SLinus Torvalds #include <linux/swap.h>
231da177e4SLinus Torvalds #include <linux/bio.h>
241da177e4SLinus Torvalds #include <linux/pagemap.h>
251da177e4SLinus Torvalds #include <linux/mempool.h>
261da177e4SLinus Torvalds #include <linux/blkdev.h>
271da177e4SLinus Torvalds #include <linux/init.h>
281da177e4SLinus Torvalds #include <linux/hash.h>
291da177e4SLinus Torvalds #include <linux/highmem.h>
30eac79005SJason Wessel #include <linux/kgdb.h>
311da177e4SLinus Torvalds #include <asm/tlbflush.h>
32186525bdSIngo Molnar #include <linux/vmalloc.h>
33a8e23a29SPeter Zijlstra 
34a8e23a29SPeter Zijlstra #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
/*
 * Per-CPU nesting index for kmap_atomic() fixmap slots.  It is only
 * defined and exported here; consumers live in arch highmem code.
 * NOTE(review): semantics inferred from the name — confirm in arch code.
 */
35a8e23a29SPeter Zijlstra DEFINE_PER_CPU(int, __kmap_atomic_idx);
36a8e23a29SPeter Zijlstra #endif
37a8e23a29SPeter Zijlstra 
381da177e4SLinus Torvalds /*
391da177e4SLinus Torvalds  * An entry in pkmap_count[] is not a pure reference "count".
401da177e4SLinus Torvalds  *  0 means that it is not mapped, and has not been mapped
411da177e4SLinus Torvalds  *    since a TLB flush - it is usable.
421da177e4SLinus Torvalds  *  1 means that there are no users, but it has been mapped
431da177e4SLinus Torvalds  *    since the last TLB flush - so we can't use it.
441da177e4SLinus Torvalds  *  n means that there are (n-1) current users of it.
451da177e4SLinus Torvalds  */
461da177e4SLinus Torvalds #ifdef CONFIG_HIGHMEM
47260b2367SAl Viro 
4815de36a4SMax Filippov /*
4915de36a4SMax Filippov  * Architecture with aliasing data cache may define the following family of
5015de36a4SMax Filippov  * helper functions in its asm/highmem.h to control cache color of virtual
5115de36a4SMax Filippov  * addresses where physical memory pages are mapped by kmap.
5215de36a4SMax Filippov  */
5315de36a4SMax Filippov #ifndef get_pkmap_color
5415de36a4SMax Filippov 
5515de36a4SMax Filippov /*
5615de36a4SMax Filippov  * Determine color of virtual address where the page should be mapped.
5715de36a4SMax Filippov  */
5815de36a4SMax Filippov static inline unsigned int get_pkmap_color(struct page *page)
5915de36a4SMax Filippov {
	/* Default: no cache aliasing, so every page maps to color 0. */
6015de36a4SMax Filippov 	return 0;
6115de36a4SMax Filippov }
6215de36a4SMax Filippov #define get_pkmap_color get_pkmap_color
6315de36a4SMax Filippov 
6415de36a4SMax Filippov /*
6515de36a4SMax Filippov  * Get next index for mapping inside PKMAP region for page with given color.
6615de36a4SMax Filippov  */
6715de36a4SMax Filippov static inline unsigned int get_next_pkmap_nr(unsigned int color)
6815de36a4SMax Filippov {
	/*
	 * Single shared cursor that walks the PKMAP region circularly.
	 * Safe as a non-atomic static because callers hold kmap_lock
	 * (see map_new_virtual(), called under lock_kmap()).
	 */
6915de36a4SMax Filippov 	static unsigned int last_pkmap_nr;
7015de36a4SMax Filippov 
7115de36a4SMax Filippov 	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
7215de36a4SMax Filippov 	return last_pkmap_nr;
7315de36a4SMax Filippov }
7415de36a4SMax Filippov 
7515de36a4SMax Filippov /*
7615de36a4SMax Filippov  * Determine if page index inside PKMAP region (pkmap_nr) of given color
7715de36a4SMax Filippov  * has wrapped around PKMAP region end. When this happens an attempt to
7815de36a4SMax Filippov  * flush all unused PKMAP slots is made.
7915de36a4SMax Filippov  */
8015de36a4SMax Filippov static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
8115de36a4SMax Filippov {
	/* Wrapping back to slot 0 means every slot has been visited once. */
8215de36a4SMax Filippov 	return pkmap_nr == 0;
8315de36a4SMax Filippov }
8415de36a4SMax Filippov 
8515de36a4SMax Filippov /*
8615de36a4SMax Filippov  * Get the number of PKMAP entries of the given color. If no free slot is
8715de36a4SMax Filippov  * found after checking that many entries, kmap will sleep waiting for
8815de36a4SMax Filippov  * someone to call kunmap and free PKMAP slot.
8915de36a4SMax Filippov  */
9015de36a4SMax Filippov static inline int get_pkmap_entries_count(unsigned int color)
9115de36a4SMax Filippov {
	/* One color: the entire PKMAP region is usable for any page. */
9215de36a4SMax Filippov 	return LAST_PKMAP;
9315de36a4SMax Filippov }
9415de36a4SMax Filippov 
9515de36a4SMax Filippov /*
9615de36a4SMax Filippov  * Get head of a wait queue for PKMAP entries of the given color.
9715de36a4SMax Filippov  * Wait queues for different mapping colors should be independent to avoid
9815de36a4SMax Filippov  * unnecessary wakeups caused by freeing of slots of other colors.
9915de36a4SMax Filippov  */
10015de36a4SMax Filippov static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
10115de36a4SMax Filippov {
	/* One color, hence one shared wait queue for all kmap waiters. */
10215de36a4SMax Filippov 	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
10315de36a4SMax Filippov 
10415de36a4SMax Filippov 	return &pkmap_map_wait;
10515de36a4SMax Filippov }
10615de36a4SMax Filippov #endif
10715de36a4SMax Filippov 
/* Running total of highmem pages; exported for modules and sysinfo users. */
108ca79b0c2SArun KS atomic_long_t _totalhigh_pages __read_mostly;
109ca79b0c2SArun KS EXPORT_SYMBOL(_totalhigh_pages);
1103e4d3af5SPeter Zijlstra 
/* Export the per-CPU kmap_atomic() index defined near the top of the file. */
1113e4d3af5SPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
1123e4d3af5SPeter Zijlstra 
/*
 * nr_free_highpages - sum NR_FREE_PAGES over every populated highmem zone.
 */
113c1f60a5aSChristoph Lameter unsigned int nr_free_highpages (void)
114c1f60a5aSChristoph Lameter {
11533499bfeSJoonsoo Kim 	struct zone *zone;
116c1f60a5aSChristoph Lameter 	unsigned int pages = 0;
117c1f60a5aSChristoph Lameter 
11833499bfeSJoonsoo Kim 	for_each_populated_zone(zone) {
11933499bfeSJoonsoo Kim 		if (is_highmem(zone))
12033499bfeSJoonsoo Kim 			pages += zone_page_state(zone, NR_FREE_PAGES);
1212a1e274aSMel Gorman 	}
122c1f60a5aSChristoph Lameter 
123c1f60a5aSChristoph Lameter 	return pages;
124c1f60a5aSChristoph Lameter }
125c1f60a5aSChristoph Lameter 
/* Per-slot usage counts; semantics described in the comment above. */
1261da177e4SLinus Torvalds static int pkmap_count[LAST_PKMAP];
/* Protects pkmap_count[], pkmap_page_table and the PKMAP wait queues. */
1271da177e4SLinus Torvalds static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
1281da177e4SLinus Torvalds 
/* Page table backing the PKMAP virtual window; set up by arch init code. */
1291da177e4SLinus Torvalds pte_t * pkmap_page_table;
1301da177e4SLinus Torvalds 
1313297e760SNicolas Pitre /*
1323297e760SNicolas Pitre  * Most architectures have no use for kmap_high_get(), so let's abstract
1333297e760SNicolas Pitre  * the disabling of IRQ out of the locking in that case to save on a
1343297e760SNicolas Pitre  * potential useless overhead.
1353297e760SNicolas Pitre  */
1363297e760SNicolas Pitre #ifdef ARCH_NEEDS_KMAP_HIGH_GET
/* kmap_high_get() can run in any context, so IRQs must be excluded. */
1373297e760SNicolas Pitre #define lock_kmap()             spin_lock_irq(&kmap_lock)
1383297e760SNicolas Pitre #define unlock_kmap()           spin_unlock_irq(&kmap_lock)
1393297e760SNicolas Pitre #define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
1403297e760SNicolas Pitre #define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
1413297e760SNicolas Pitre #else
/* No IRQ-context users: plain spinlock; "flags" is deliberately unused. */
1423297e760SNicolas Pitre #define lock_kmap()             spin_lock(&kmap_lock)
1433297e760SNicolas Pitre #define unlock_kmap()           spin_unlock(&kmap_lock)
1443297e760SNicolas Pitre #define lock_kmap_any(flags)    \
1453297e760SNicolas Pitre 		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
1463297e760SNicolas Pitre #define unlock_kmap_any(flags)  \
1473297e760SNicolas Pitre 		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
1483297e760SNicolas Pitre #endif
1493297e760SNicolas Pitre 
/*
 * kmap_to_page - translate a kernel virtual address back to its struct page.
 */
1505a178119SMel Gorman struct page *kmap_to_page(void *vaddr)
1515a178119SMel Gorman {
1525a178119SMel Gorman 	unsigned long addr = (unsigned long)vaddr;
1535a178119SMel Gorman 
	/* Inside the PKMAP window: look the page up in the kmap PTEs. */
154498c2280SWill Deacon 	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
1554de22c05SJoonsoo Kim 		int i = PKMAP_NR(addr);
1565a178119SMel Gorman 		return pte_page(pkmap_page_table[i]);
1575a178119SMel Gorman 	}
1585a178119SMel Gorman 
	/*
	 * NOTE(review): everything else is assumed to be a lowmem
	 * linear-map address; a vmalloc/fixmap address passed here would
	 * make virt_to_page() return garbage — callers must not do that.
	 */
1595a178119SMel Gorman 	return virt_to_page(addr);
1605a178119SMel Gorman }
161f0263d2dSWill Deacon EXPORT_SYMBOL(kmap_to_page);
1625a178119SMel Gorman 
/*
 * Tear down every PKMAP slot whose count is exactly 1 (mapped but no
 * current users) and, if anything was unmapped, flush the TLB once for
 * the whole PKMAP range.  Caller must hold kmap_lock (see the comment
 * at the pte_clear() below).
 */
1631da177e4SLinus Torvalds static void flush_all_zero_pkmaps(void)
1641da177e4SLinus Torvalds {
1651da177e4SLinus Torvalds 	int i;
1665843d9a4SNick Piggin 	int need_flush = 0;
1671da177e4SLinus Torvalds 
1681da177e4SLinus Torvalds 	flush_cache_kmaps();
1691da177e4SLinus Torvalds 
1701da177e4SLinus Torvalds 	for (i = 0; i < LAST_PKMAP; i++) {
1711da177e4SLinus Torvalds 		struct page *page;
1721da177e4SLinus Torvalds 
1731da177e4SLinus Torvalds 		/*
1741da177e4SLinus Torvalds 		 * zero means we don't have anything to do,
1751da177e4SLinus Torvalds 		 * >1 means that it is still in use. Only
1761da177e4SLinus Torvalds 		 * a count of 1 means that it is free but
1771da177e4SLinus Torvalds 		 * needs to be unmapped
1781da177e4SLinus Torvalds 		 */
1791da177e4SLinus Torvalds 		if (pkmap_count[i] != 1)
1801da177e4SLinus Torvalds 			continue;
1811da177e4SLinus Torvalds 		pkmap_count[i] = 0;
1821da177e4SLinus Torvalds 
1831da177e4SLinus Torvalds 		/* sanity check */
18475babcacSEric Sesterhenn 		BUG_ON(pte_none(pkmap_page_table[i]));
1851da177e4SLinus Torvalds 
1861da177e4SLinus Torvalds 		/*
1871da177e4SLinus Torvalds 		 * Don't need an atomic fetch-and-clear op here;
1881da177e4SLinus Torvalds 		 * no-one has the page mapped, and cannot get at
1891da177e4SLinus Torvalds 		 * its virtual address (and hence PTE) without first
1901da177e4SLinus Torvalds 		 * getting the kmap_lock (which is held here).
1911da177e4SLinus Torvalds 		 * So no dangers, even with speculative execution.
1921da177e4SLinus Torvalds 		 */
1931da177e4SLinus Torvalds 		page = pte_page(pkmap_page_table[i]);
194eb2db439SJoonsoo Kim 		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
1951da177e4SLinus Torvalds 
1961da177e4SLinus Torvalds 		set_page_address(page, NULL);
1975843d9a4SNick Piggin 		need_flush = 1;
1981da177e4SLinus Torvalds 	}
1995843d9a4SNick Piggin 	if (need_flush)
2001da177e4SLinus Torvalds 		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
2011da177e4SLinus Torvalds }
2021da177e4SLinus Torvalds 
20377f6078aSRandy Dunlap /**
20477f6078aSRandy Dunlap  * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
20577f6078aSRandy Dunlap  */
206ce6234b5SJeremy Fitzhardinge void kmap_flush_unused(void)
207ce6234b5SJeremy Fitzhardinge {
	/* Take kmap_lock to satisfy flush_all_zero_pkmaps()'s locking rule. */
2083297e760SNicolas Pitre 	lock_kmap();
209ce6234b5SJeremy Fitzhardinge 	flush_all_zero_pkmaps();
2103297e760SNicolas Pitre 	unlock_kmap();
211ce6234b5SJeremy Fitzhardinge }
212ce6234b5SJeremy Fitzhardinge 
/*
 * Find (or sleep waiting for) a free PKMAP slot, install a PTE mapping
 * @page there, and record the mapping via set_page_address().
 *
 * Called with kmap_lock held.  May drop the lock and schedule() while
 * waiting for someone to kunmap a slot, so this must never be reached
 * from atomic context.  Returns the new virtual address; the slot's
 * count is initialised to 1 (mapped, no users yet — the caller bumps it).
 */
2131da177e4SLinus Torvalds static inline unsigned long map_new_virtual(struct page *page)
2141da177e4SLinus Torvalds {
2151da177e4SLinus Torvalds 	unsigned long vaddr;
2161da177e4SLinus Torvalds 	int count;
21715de36a4SMax Filippov 	unsigned int last_pkmap_nr;
21815de36a4SMax Filippov 	unsigned int color = get_pkmap_color(page);
2191da177e4SLinus Torvalds 
2201da177e4SLinus Torvalds start:
22115de36a4SMax Filippov 	count = get_pkmap_entries_count(color);
2221da177e4SLinus Torvalds 	/* Find an empty entry */
2231da177e4SLinus Torvalds 	for (;;) {
22415de36a4SMax Filippov 		last_pkmap_nr = get_next_pkmap_nr(color);
22515de36a4SMax Filippov 		if (no_more_pkmaps(last_pkmap_nr, color)) {
2261da177e4SLinus Torvalds 			flush_all_zero_pkmaps();
22715de36a4SMax Filippov 			count = get_pkmap_entries_count(color);
2281da177e4SLinus Torvalds 		}
2291da177e4SLinus Torvalds 		if (!pkmap_count[last_pkmap_nr])
2301da177e4SLinus Torvalds 			break;	/* Found a usable entry */
2311da177e4SLinus Torvalds 		if (--count)
2321da177e4SLinus Torvalds 			continue;
2331da177e4SLinus Torvalds 
2341da177e4SLinus Torvalds 		/*
2351da177e4SLinus Torvalds 		 * Sleep for somebody else to unmap their entries
2361da177e4SLinus Torvalds 		 */
2371da177e4SLinus Torvalds 		{
2381da177e4SLinus Torvalds 			DECLARE_WAITQUEUE(wait, current);
23915de36a4SMax Filippov 			wait_queue_head_t *pkmap_map_wait =
24015de36a4SMax Filippov 				get_pkmap_wait_queue_head(color);
2411da177e4SLinus Torvalds 
2421da177e4SLinus Torvalds 			__set_current_state(TASK_UNINTERRUPTIBLE);
24315de36a4SMax Filippov 			add_wait_queue(pkmap_map_wait, &wait);
2443297e760SNicolas Pitre 			unlock_kmap();
2451da177e4SLinus Torvalds 			schedule();
24615de36a4SMax Filippov 			remove_wait_queue(pkmap_map_wait, &wait);
2473297e760SNicolas Pitre 			lock_kmap();
2481da177e4SLinus Torvalds 
2491da177e4SLinus Torvalds 			/* Somebody else might have mapped it while we slept */
2501da177e4SLinus Torvalds 			if (page_address(page))
2511da177e4SLinus Torvalds 				return (unsigned long)page_address(page);
2521da177e4SLinus Torvalds 
2531da177e4SLinus Torvalds 			/* Re-start */
2541da177e4SLinus Torvalds 			goto start;
2551da177e4SLinus Torvalds 		}
2561da177e4SLinus Torvalds 	}
2571da177e4SLinus Torvalds 	vaddr = PKMAP_ADDR(last_pkmap_nr);
2581da177e4SLinus Torvalds 	set_pte_at(&init_mm, vaddr,
2591da177e4SLinus Torvalds 		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
2601da177e4SLinus Torvalds 
2611da177e4SLinus Torvalds 	pkmap_count[last_pkmap_nr] = 1;
2621da177e4SLinus Torvalds 	set_page_address(page, (void *)vaddr);
2631da177e4SLinus Torvalds 
2641da177e4SLinus Torvalds 	return vaddr;
2651da177e4SLinus Torvalds }
2661da177e4SLinus Torvalds 
26777f6078aSRandy Dunlap /**
26877f6078aSRandy Dunlap  * kmap_high - map a highmem page into memory
26977f6078aSRandy Dunlap  * @page: &struct page to map
27077f6078aSRandy Dunlap  *
27177f6078aSRandy Dunlap  * Returns the page's virtual memory address.
27277f6078aSRandy Dunlap  *
27377f6078aSRandy Dunlap  * We cannot call this from interrupts, as it may block.
27477f6078aSRandy Dunlap  */
275920c7a5dSHarvey Harrison void *kmap_high(struct page *page)
2761da177e4SLinus Torvalds {
2771da177e4SLinus Torvalds 	unsigned long vaddr;
2781da177e4SLinus Torvalds 
2791da177e4SLinus Torvalds 	/*
2801da177e4SLinus Torvalds 	 * For highmem pages, we can't trust "virtual" until
2811da177e4SLinus Torvalds 	 * after we have the lock.
2821da177e4SLinus Torvalds 	 */
2833297e760SNicolas Pitre 	lock_kmap();
2841da177e4SLinus Torvalds 	vaddr = (unsigned long)page_address(page);
2851da177e4SLinus Torvalds 	if (!vaddr)
2861da177e4SLinus Torvalds 		vaddr = map_new_virtual(page);
	/* A slot at 1 is "mapped, unused"; with our reference it must be >= 2. */
2871da177e4SLinus Torvalds 	pkmap_count[PKMAP_NR(vaddr)]++;
28875babcacSEric Sesterhenn 	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
2893297e760SNicolas Pitre 	unlock_kmap();
2901da177e4SLinus Torvalds 	return (void*) vaddr;
2911da177e4SLinus Torvalds }
2921da177e4SLinus Torvalds 
2931da177e4SLinus Torvalds EXPORT_SYMBOL(kmap_high);
2941da177e4SLinus Torvalds 
2953297e760SNicolas Pitre #ifdef ARCH_NEEDS_KMAP_HIGH_GET
2963297e760SNicolas Pitre /**
2973297e760SNicolas Pitre  * kmap_high_get - pin a highmem page into memory
2983297e760SNicolas Pitre  * @page: &struct page to pin
2993297e760SNicolas Pitre  *
3003297e760SNicolas Pitre  * Returns the page's current virtual memory address, or NULL if no mapping
3015e39df56SUwe Kleine-König  * exists.  If and only if a non null address is returned then a
3023297e760SNicolas Pitre  * matching call to kunmap_high() is necessary.
3033297e760SNicolas Pitre  *
3043297e760SNicolas Pitre  * This can be called from any context.
3053297e760SNicolas Pitre  */
3063297e760SNicolas Pitre void *kmap_high_get(struct page *page)
3073297e760SNicolas Pitre {
3083297e760SNicolas Pitre 	unsigned long vaddr, flags;
3093297e760SNicolas Pitre 
	/* IRQ-safe lock variant: this can be called from any context. */
3103297e760SNicolas Pitre 	lock_kmap_any(flags);
3113297e760SNicolas Pitre 	vaddr = (unsigned long)page_address(page);
3123297e760SNicolas Pitre 	if (vaddr) {
		/* Existing mapping: pin it by taking an extra reference. */
3133297e760SNicolas Pitre 		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
3143297e760SNicolas Pitre 		pkmap_count[PKMAP_NR(vaddr)]++;
3153297e760SNicolas Pitre 	}
3163297e760SNicolas Pitre 	unlock_kmap_any(flags);
3173297e760SNicolas Pitre 	return (void*) vaddr;
3183297e760SNicolas Pitre }
3193297e760SNicolas Pitre #endif
3203297e760SNicolas Pitre 
32177f6078aSRandy Dunlap /**
3224e9dc5dfSLi Haifeng  * kunmap_high - unmap a highmem page
32377f6078aSRandy Dunlap  * @page: &struct page to unmap
3243297e760SNicolas Pitre  *
3253297e760SNicolas Pitre  * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
3263297e760SNicolas Pitre  * only from user context.
32777f6078aSRandy Dunlap  */
328920c7a5dSHarvey Harrison void kunmap_high(struct page *page)
3291da177e4SLinus Torvalds {
3301da177e4SLinus Torvalds 	unsigned long vaddr;
3311da177e4SLinus Torvalds 	unsigned long nr;
3323297e760SNicolas Pitre 	unsigned long flags;
3331da177e4SLinus Torvalds 	int need_wakeup;
33415de36a4SMax Filippov 	unsigned int color = get_pkmap_color(page);
33515de36a4SMax Filippov 	wait_queue_head_t *pkmap_map_wait;
3361da177e4SLinus Torvalds 
3373297e760SNicolas Pitre 	lock_kmap_any(flags);
3381da177e4SLinus Torvalds 	vaddr = (unsigned long)page_address(page);
33975babcacSEric Sesterhenn 	BUG_ON(!vaddr);
3401da177e4SLinus Torvalds 	nr = PKMAP_NR(vaddr);
3411da177e4SLinus Torvalds 
3421da177e4SLinus Torvalds 	/*
3431da177e4SLinus Torvalds 	 * A count must never go down to zero
3441da177e4SLinus Torvalds 	 * without a TLB flush!
3451da177e4SLinus Torvalds 	 */
3461da177e4SLinus Torvalds 	need_wakeup = 0;
	/*
	 * Drop one reference.  A slot left at 1 stays mapped but unused;
	 * flush_all_zero_pkmaps() reclaims such slots later.
	 */
3471da177e4SLinus Torvalds 	switch (--pkmap_count[nr]) {
3481da177e4SLinus Torvalds 	case 0:
3491da177e4SLinus Torvalds 		BUG();
3501da177e4SLinus Torvalds 	case 1:
3511da177e4SLinus Torvalds 		/*
3521da177e4SLinus Torvalds 		 * Avoid an unnecessary wake_up() function call.
3531da177e4SLinus Torvalds 		 * The common case is pkmap_count[] == 1, but
3541da177e4SLinus Torvalds 		 * no waiters.
3551da177e4SLinus Torvalds 		 * The tasks queued in the wait-queue are guarded
3561da177e4SLinus Torvalds 		 * by both the lock in the wait-queue-head and by
3571da177e4SLinus Torvalds 		 * the kmap_lock.  As the kmap_lock is held here,
3581da177e4SLinus Torvalds 		 * no need for the wait-queue-head's lock.  Simply
3591da177e4SLinus Torvalds 		 * test if the queue is empty.
3601da177e4SLinus Torvalds 		 */
36115de36a4SMax Filippov 		pkmap_map_wait = get_pkmap_wait_queue_head(color);
36215de36a4SMax Filippov 		need_wakeup = waitqueue_active(pkmap_map_wait);
3631da177e4SLinus Torvalds 	}
3643297e760SNicolas Pitre 	unlock_kmap_any(flags);
3651da177e4SLinus Torvalds 
3661da177e4SLinus Torvalds 	/* do wake-up, if needed, race-free outside of the spin lock */
3671da177e4SLinus Torvalds 	if (need_wakeup)
36815de36a4SMax Filippov 		wake_up(pkmap_map_wait);
3691da177e4SLinus Torvalds }
3701da177e4SLinus Torvalds 
3711da177e4SLinus Torvalds EXPORT_SYMBOL(kunmap_high);
372*955cc774SIra Weiny #endif	/* CONFIG_HIGHMEM */
3731da177e4SLinus Torvalds 
3741da177e4SLinus Torvalds #if defined(HASHED_PAGE_VIRTUAL)
3751da177e4SLinus Torvalds 
3761da177e4SLinus Torvalds #define PA_HASH_ORDER	7
3771da177e4SLinus Torvalds 
3781da177e4SLinus Torvalds /*
3791da177e4SLinus Torvalds  * Describes one page->virtual association
3801da177e4SLinus Torvalds  */
3811da177e4SLinus Torvalds struct page_address_map {
3821da177e4SLinus Torvalds 	struct page *page;	/* highmem page being described */
3831da177e4SLinus Torvalds 	void *virtual;		/* its current kmap virtual address */
3841da177e4SLinus Torvalds 	struct list_head list;	/* link in the hash bucket's chain */
3851da177e4SLinus Torvalds };
3861da177e4SLinus Torvalds 
/*
 * One statically allocated entry per PKMAP slot; set_page_address()
 * indexes this array by PKMAP_NR(virtual), so no dynamic allocation
 * is ever needed for the hash entries.
 */
387a354e2c8SJoonsoo Kim static struct page_address_map page_address_maps[LAST_PKMAP];
3881da177e4SLinus Torvalds 
3891da177e4SLinus Torvalds /*
3901da177e4SLinus Torvalds  * Hash table bucket
3911da177e4SLinus Torvalds  */
/* 1 << PA_HASH_ORDER == 128 buckets, each with its own list and lock. */
3921da177e4SLinus Torvalds static struct page_address_slot {
3931da177e4SLinus Torvalds 	struct list_head lh;			/* List of page_address_maps */
3941da177e4SLinus Torvalds 	spinlock_t lock;			/* Protect this bucket's list */
3951da177e4SLinus Torvalds } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
3961da177e4SLinus Torvalds 
397f9918794SIan Campbell static struct page_address_slot *page_slot(const struct page *page)
3981da177e4SLinus Torvalds {
	/* Hash the struct page pointer itself to pick the bucket. */
3991da177e4SLinus Torvalds 	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
4001da177e4SLinus Torvalds }
4011da177e4SLinus Torvalds 
40277f6078aSRandy Dunlap /**
40377f6078aSRandy Dunlap  * page_address - get the mapped virtual address of a page
40477f6078aSRandy Dunlap  * @page: &struct page to get the virtual address of
40577f6078aSRandy Dunlap  *
40677f6078aSRandy Dunlap  * Returns the page's virtual address.
40777f6078aSRandy Dunlap  */
408f9918794SIan Campbell void *page_address(const struct page *page)
4091da177e4SLinus Torvalds {
4101da177e4SLinus Torvalds 	unsigned long flags;
4111da177e4SLinus Torvalds 	void *ret;
4121da177e4SLinus Torvalds 	struct page_address_slot *pas;
4131da177e4SLinus Torvalds 
	/* Lowmem pages are permanently mapped: compute the address directly. */
4141da177e4SLinus Torvalds 	if (!PageHighMem(page))
4151da177e4SLinus Torvalds 		return lowmem_page_address(page);
4161da177e4SLinus Torvalds 
	/* Highmem: scan the page's hash bucket under the bucket lock. */
4171da177e4SLinus Torvalds 	pas = page_slot(page);
4181da177e4SLinus Torvalds 	ret = NULL;
4191da177e4SLinus Torvalds 	spin_lock_irqsave(&pas->lock, flags);
4201da177e4SLinus Torvalds 	if (!list_empty(&pas->lh)) {
4211da177e4SLinus Torvalds 		struct page_address_map *pam;
4221da177e4SLinus Torvalds 
4231da177e4SLinus Torvalds 		list_for_each_entry(pam, &pas->lh, list) {
4241da177e4SLinus Torvalds 			if (pam->page == page) {
4251da177e4SLinus Torvalds 				ret = pam->virtual;
4261da177e4SLinus Torvalds 				goto done;
4271da177e4SLinus Torvalds 			}
4281da177e4SLinus Torvalds 		}
4291da177e4SLinus Torvalds 	}
4301da177e4SLinus Torvalds done:
4311da177e4SLinus Torvalds 	spin_unlock_irqrestore(&pas->lock, flags);
4321da177e4SLinus Torvalds 	return ret;
4331da177e4SLinus Torvalds }
4341da177e4SLinus Torvalds 
4351da177e4SLinus Torvalds EXPORT_SYMBOL(page_address);
4361da177e4SLinus Torvalds 
43777f6078aSRandy Dunlap /**
43877f6078aSRandy Dunlap  * set_page_address - set a page's virtual address
43977f6078aSRandy Dunlap  * @page: &struct page to set
44077f6078aSRandy Dunlap  * @virtual: virtual address to use
44177f6078aSRandy Dunlap  */
4421da177e4SLinus Torvalds void set_page_address(struct page *page, void *virtual)
4431da177e4SLinus Torvalds {
4441da177e4SLinus Torvalds 	unsigned long flags;
4451da177e4SLinus Torvalds 	struct page_address_slot *pas;
4461da177e4SLinus Torvalds 	struct page_address_map *pam;
4471da177e4SLinus Torvalds 
4481da177e4SLinus Torvalds 	BUG_ON(!PageHighMem(page));
4491da177e4SLinus Torvalds 
4501da177e4SLinus Torvalds 	pas = page_slot(page);
4511da177e4SLinus Torvalds 	if (virtual) {		/* Add */
		/* Reuse the static entry dedicated to this PKMAP slot. */
452a354e2c8SJoonsoo Kim 		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
4531da177e4SLinus Torvalds 		pam->page = page;
4541da177e4SLinus Torvalds 		pam->virtual = virtual;
4551da177e4SLinus Torvalds 
4561da177e4SLinus Torvalds 		spin_lock_irqsave(&pas->lock, flags);
4571da177e4SLinus Torvalds 		list_add_tail(&pam->list, &pas->lh);
4581da177e4SLinus Torvalds 		spin_unlock_irqrestore(&pas->lock, flags);
4591da177e4SLinus Torvalds 	} else {		/* Remove */
		/* Find the page's entry in its bucket and unlink it. */
4601da177e4SLinus Torvalds 		spin_lock_irqsave(&pas->lock, flags);
4611da177e4SLinus Torvalds 		list_for_each_entry(pam, &pas->lh, list) {
4621da177e4SLinus Torvalds 			if (pam->page == page) {
4631da177e4SLinus Torvalds 				list_del(&pam->list);
4641da177e4SLinus Torvalds 				spin_unlock_irqrestore(&pas->lock, flags);
4651da177e4SLinus Torvalds 				goto done;
4661da177e4SLinus Torvalds 			}
4671da177e4SLinus Torvalds 		}
4681da177e4SLinus Torvalds 		spin_unlock_irqrestore(&pas->lock, flags);
4691da177e4SLinus Torvalds 	}
4701da177e4SLinus Torvalds done:
4711da177e4SLinus Torvalds 	return;
4721da177e4SLinus Torvalds }
4731da177e4SLinus Torvalds 
/* Boot-time init: empty every hash bucket and ready its spinlock. */
4741da177e4SLinus Torvalds void __init page_address_init(void)
4751da177e4SLinus Torvalds {
4761da177e4SLinus Torvalds 	int i;
4771da177e4SLinus Torvalds 
4781da177e4SLinus Torvalds 	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
4791da177e4SLinus Torvalds 		INIT_LIST_HEAD(&page_address_htable[i].lh);
4801da177e4SLinus Torvalds 		spin_lock_init(&page_address_htable[i].lock);
4811da177e4SLinus Torvalds 	}
4821da177e4SLinus Torvalds }
4831da177e4SLinus Torvalds 
484*955cc774SIra Weiny #endif	/* defined(HASHED_PAGE_VIRTUAL) */
485