/*
 *  linux/drivers/video/fbdev/core/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}
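
/*
 * Example (not part of this file): a driver whose framebuffer lives
 * neither in vmalloc space nor behind fix.smem_start can supply its own
 * lookup via the optional get_page hook checked above. A minimal sketch,
 * assuming a hypothetical driver that backs its shadow buffer with a
 * page array; the hook must return the page with a reference held, as
 * the generic path above does via get_page():
 *
 *	static struct page *foo_get_page(struct fb_info *info,
 *					 unsigned long offs)
 *	{
 *		struct foo_par *par = info->par;
 *		struct page *page = par->pages[offs >> PAGE_SHIFT];
 *
 *		get_page(page);
 *		return page;
 *	}
 */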

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
								    unsigned long offset,
								    struct page *page)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */
	return pageref;
}

static void fb_deferred_io_pageref_clear(struct fb_deferred_io_pageref *pageref)
{
	struct page *page = pageref->page;

	if (page)
		page->mapping = NULL;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(info, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}
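
/*
 * Example (not part of this file): most drivers leave sort_pagereflist
 * unset and receive pagerefs in the order the pages were first written.
 * A hypothetical driver whose flush path requires strictly increasing
 * offsets would opt in before fb_deferred_io_init() and accept the
 * O(n^2) insertion cost documented above:
 *
 *	fbdefio->sort_pagereflist = true;
 */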

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* find and return the page that backs the faulting framebuffer offset */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_get_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for folio_mkclean() */

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);

	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
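
/*
 * Note: drivers do not usually call fb_deferred_io_fsync() themselves;
 * the fbdev core is expected to wire it into the framebuffer character
 * device, roughly along these lines (a sketch, not the exact core code):
 *
 *	static const struct file_operations fb_fops = {
 *		...
 *	#ifdef CONFIG_FB_DEFERRED_IO
 *		.fsync = fb_deferred_io_fsync,
 *	#endif
 *		...
 *	};
 *
 * so that fsync(2) on /dev/fbN flushes any pending deferred I/O.
 */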

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid folio_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to write to a
 * page. We schedule the delayed worker; it will eventually mkclean the
 * touched pages and execute the driver's deferred framebuffer I/O. If
 * userspace then touches a page again, the same scheme repeats.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
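
/*
 * Example (not part of this file): a driver routes its mmap through the
 * helper above from its fb_ops. A minimal sketch for a hypothetical
 * driver; foo_fb_write and friends stand in for the driver's other ops:
 *
 *	static const struct fb_ops foo_fb_ops = {
 *		.owner		= THIS_MODULE,
 *		.fb_write	= foo_fb_write,
 *		.fb_mmap	= fb_deferred_io_mmap,
 *		...
 *	};
 */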

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct folio *folio = page_folio(pageref->page);

		folio_lock(folio);
		folio_mkclean(folio);
		folio_unlock(folio);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
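
/*
 * Example (not part of this file): the driver-side half of the work
 * function above is the deferred_io callback. A minimal sketch, assuming
 * a hypothetical foo_flush_range() that copies one page of the shadow
 * buffer out to the device:
 *
 *	static void foo_deferred_io(struct fb_info *info,
 *				    struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			foo_flush_range(info, pageref->offset, PAGE_SIZE);
 *	}
 */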

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
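
/*
 * Example (not part of this file): typical probe-time setup in a
 * hypothetical driver, done after info->fix.smem_len is known and
 * before the framebuffer is registered:
 *
 *	static struct fb_deferred_io foo_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= foo_deferred_io,
 *	};
 *
 *	info->fbdefio = &foo_defio;
 *	ret = fb_deferred_io_init(info);
 *	if (ret)
 *		goto err_release;
 *
 * A delay of HZ / 20 batches writes and flushes at most 20 times per
 * second; leaving delay at 0 falls back to the 1 s default above.
 */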

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	unsigned long i;

	flush_delayed_work(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->npagerefs; ++i)
		fb_deferred_io_pageref_clear(&info->pagerefs[i]);
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
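
/*
 * Example (not part of this file): teardown in a hypothetical driver's
 * remove path mirrors the setup order. fb_deferred_io_cleanup() must run
 * before the screen buffer is freed, since the delayed work it flushes
 * may still be writing the buffer out:
 *
 *	fb_deferred_io_cleanup(info);
 *	foo_free_shadow_buffer(info);
 */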