xref: /freebsd/sys/compat/linuxkpi/common/include/linux/highmem.h (revision ade8a27ea4c28d12fabc2d5f8e44386a3add23d1)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2010 Isilon Systems, Inc.
5  * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
6  * Copyright (c) 2017 Mellanox Technologies, Ltd.
7  * Copyright (c) 2021 Vladimir Kondratyev <wulf@FreeBSD.org>
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are
11  * met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in
16  *    the documentation and/or other materials provided with the
17  *    distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 #ifndef _LINUXKPI_LINUX_HIGHMEM_H_
33 #define _LINUXKPI_LINUX_HIGHMEM_H_
34 
35 #include <sys/types.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/proc.h>
39 #include <sys/sched.h>
40 #include <sys/sf_buf.h>
41 
42 #include <vm/vm.h>
43 #include <vm/vm_page.h>
44 #include <vm/pmap.h>
45 
46 #include <linux/mm.h>
47 #include <linux/page.h>
48 #include <linux/hardirq.h>
49 
/* FreeBSD's LinuxKPI never treats a page as highmem; all pages map directly. */
#define	PageHighMem(p)		(0)
51 
/*
 * Resolve a virtual address handed out by kmap() back to its backing
 * struct page.
 */
static inline struct page *
kmap_to_page(void *addr)
{
	struct page *page;

	page = virt_to_page(addr);
	return (page);
}
58 
59 static inline void *
60 kmap(struct page *page)
61 {
62 	struct sf_buf *sf;
63 
64 	if (PMAP_HAS_DMAP) {
65 		return ((void *)PHYS_TO_DMAP(page_to_phys(page)));
66 	} else {
67 		sched_pin();
68 		sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);
69 		if (sf == NULL) {
70 			sched_unpin();
71 			return (NULL);
72 		}
73 		return ((void *)sf_buf_kva(sf));
74 	}
75 }
76 
77 static inline void *
78 kmap_atomic_prot(struct page *page, pgprot_t prot)
79 {
80 	vm_memattr_t attr = pgprot2cachemode(prot);
81 
82 	if (attr != VM_MEMATTR_DEFAULT) {
83 		page->flags |= PG_FICTITIOUS;
84 		pmap_page_set_memattr(page, attr);
85 	}
86 	return (kmap(page));
87 }
88 
89 static inline void *
90 kmap_atomic(struct page *page)
91 {
92 
93 	return (kmap_atomic_prot(page, VM_PROT_ALL));
94 }
95 
/* "Local" mappings are serviced by the regular kmap() path here. */
static inline void *
kmap_local_page(struct page *page)
{
	void *vaddr;

	vaddr = kmap(page);
	return (vaddr);
}
101 
102 static inline void *
103 kmap_local_folio(struct folio *folio, size_t offset)
104 {
105 	struct page *page;
106 	char *vaddr;
107 
108 	page = &folio->page;
109 	vaddr = kmap_local_page(page);
110 	vaddr += offset;
111 
112 	return (vaddr);
113 }
114 
115 static inline void *
116 kmap_local_page_prot(struct page *page, pgprot_t prot)
117 {
118 
119 	return (kmap_atomic_prot(page, prot));
120 }
121 
/*
 * Tear down a mapping created by kmap().  With a direct map this is a
 * no-op: kmap() handed out a permanent DMAP address.  Otherwise the
 * page's existing sf_buf is looked up and released, and the pin taken
 * by kmap() is dropped.
 */
static inline void
kunmap(struct page *page)
{
	struct sf_buf *sf;

	if (!PMAP_HAS_DMAP) {
		/*
		 * lookup SF buffer in list: sf_buf_alloc() on an
		 * already-mapped page returns the existing buffer and
		 * takes an additional reference on it.
		 */
		sf = sf_buf_alloc(page, SFB_NOWAIT | SFB_CPUPRIVATE);

		/*
		 * "double-free" is deliberate: one release for the
		 * lookup reference above and one for the reference held
		 * since kmap().  Do NOT "fix" this into a single call.
		 */
		sf_buf_free(sf);
		sf_buf_free(sf);

		sched_unpin();
	}
}
138 
139 static inline void
140 kunmap_atomic(void *vaddr)
141 {
142 
143 	if (!PMAP_HAS_DMAP)
144 		kunmap(virt_to_page(vaddr));
145 }
146 
/* Local unmaps share the kunmap_atomic() teardown path. */
static inline void
kunmap_local(void *addr)
{
	kunmap_atomic(addr);
}
153 
154 static inline void
155 memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
156 {
157 	char *from;
158 
159 	KASSERT(offset + len <= PAGE_SIZE,
160 	    ("%s: memcpy from page %p to address %p: "
161 	     "offset+len (%zu+%zu) would go beyond page end",
162 	     __func__, page, to, offset, len));
163 
164 	from = kmap_local_page(page);
165 	memcpy(to, from + offset, len);
166 	kunmap_local(from);
167 }
168 
169 static inline void
170 memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
171 {
172 	char *to;
173 
174 	KASSERT(offset + len <= PAGE_SIZE,
175 	    ("%s: memcpy from address %p to page %p: "
176 	     "offset+len (%zu+%zu) would go beyond page end",
177 	     __func__, from, page, offset, len));
178 
179 	to = kmap_local_page(page);
180 	memcpy(to + offset, from, len);
181 	kunmap_local(to);
182 }
183 
184 static inline void
185 memcpy_to_folio(struct folio *folio, size_t offset, const char *from, size_t len)
186 {
187 	struct page *page;
188 
189 	page = &folio->page;
190 	memcpy_to_page(page, offset, from, len);
191 }
192 
193 #endif	/* _LINUXKPI_LINUX_HIGHMEM_H_ */
194