/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

struct address_space;

/*
 * struct fb_deferred_io_state
 */

struct fb_deferred_io_state {
	struct kref ref;

	int open_count; /* number of opened files; protected by fb_info lock */
	struct address_space *mapping; /* page cache object for fb device */

	struct mutex lock; /* mutex that protects the pageref list */
	/* fields protected by lock */
	struct fb_info *info;
	struct list_head pagereflist; /* list of pagerefs for touched pages */
	unsigned long npagerefs;
	struct fb_deferred_io_pageref *pagerefs;
};
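
/*
 * Lifetime (summary): the state is created with an initial kref in
 * fb_deferred_io_init(), and every mapping takes an extra reference
 * (see fb_deferred_io_mmap() and the vm_ops below). The state can thus
 * outlive the fb_info: fb_deferred_io_cleanup() clears ->info under the
 * lock and drops the initial reference, while remaining VMAs keep the
 * allocation alive until their ->close() drops the last reference.
 */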

static struct fb_deferred_io_state *fb_deferred_io_state_alloc(unsigned long len)
{
	struct fb_deferred_io_state *fbdefio_state;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;

	fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
	if (!fbdefio_state)
		return NULL;

	npagerefs = DIV_ROUND_UP(len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs)
		goto err_kfree;
	fbdefio_state->npagerefs = npagerefs;
	fbdefio_state->pagerefs = pagerefs;

	kref_init(&fbdefio_state->ref);
	mutex_init(&fbdefio_state->lock);

	INIT_LIST_HEAD(&fbdefio_state->pagereflist);

	return fbdefio_state;

err_kfree:
	kfree(fbdefio_state);
	return NULL;
}

static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
{
	WARN_ON(!list_empty(&fbdefio_state->pagereflist));
	mutex_destroy(&fbdefio_state->lock);
	kvfree(fbdefio_state->pagerefs);

	kfree(fbdefio_state);
}

static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
{
	kref_get(&fbdefio_state->ref);
}

static void __fb_deferred_io_state_release(struct kref *ref)
{
	struct fb_deferred_io_state *fbdefio_state =
		container_of(ref, struct fb_deferred_io_state, ref);

	fb_deferred_io_state_release(fbdefio_state);
}

static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
{
	kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
}

/*
 * struct vm_operations_struct
 */

static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	WARN_ON_ONCE(!try_module_get(THIS_MODULE));
	fb_deferred_io_state_get(fbdefio_state);
}

static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	fb_deferred_io_state_put(fbdefio_state);
	module_put(THIS_MODULE);
}

static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}
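
/*
 * A driver whose framebuffer lives in memory that is neither vmalloc'ed
 * nor physically contiguous can provide its own page lookup through
 * fbdefio->get_page. Note that the helper above returns a referenced
 * page on both built-in paths, so a custom callback is expected to do
 * the same. A minimal sketch (my_fb_par and its pages[] array are
 * hypothetical, not part of this file):
 *
 *	static struct page *my_fb_get_page(struct fb_info *info,
 *					   unsigned long offs)
 *	{
 *		struct my_fb_par *par = info->par;
 *		struct page *page = par->pages[offs >> PAGE_SHIFT];
 *
 *		get_page(page);
 *		return page;
 *	}
 */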

static struct fb_deferred_io_pageref *
fb_deferred_io_pageref_lookup(struct fb_deferred_io_state *fbdefio_state, unsigned long offset,
			      struct page *page)
{
	struct fb_info *info = fbdefio_state->info;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= fbdefio_state->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &fbdefio_state->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */
	return pageref;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
	struct list_head *pos = &fbdefio_state->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(fbdefio_state, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio_state->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	struct fb_info *info;
	unsigned long offset;
	struct page *page;
	vm_fault_t ret;
	struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;

	mutex_lock(&fbdefio_state->lock);

	info = fbdefio_state->info;
	if (!info) {
		ret = VM_FAULT_SIGBUS; /* our device is gone */
		goto err_mutex_unlock;
	}

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len) {
		ret = VM_FAULT_SIGBUS;
		goto err_mutex_unlock;
	}

	page = fb_deferred_io_get_page(info, offset);
	if (!page) {
		ret = VM_FAULT_SIGBUS;
		goto err_mutex_unlock;
	}

	if (!vmf->vma->vm_file)
		fb_err(info, "no mapping available\n");

	fb_WARN_ON_ONCE(info, !fbdefio_state->mapping);

	mutex_unlock(&fbdefio_state->lock);

	vmf->page = page;

	return 0;

err_mutex_unlock:
	mutex_unlock(&fbdefio_state->lock);
	return ret;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
					    unsigned long offset, struct page *page)
{
	struct fb_info *info;
	struct fb_deferred_io *fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio_state->lock);

	info = fbdefio_state->info;
	if (!info) {
		ret = VM_FAULT_SIGBUS; /* our device is gone */
		goto err_mutex_unlock;
	}

	fbdefio = info->fbdefio;

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid mapping_wrprotect_range()
	 * being called before the PTE is updated, which would leave
	 * the page ignored by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio_state->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio_state->lock);
	return ret;
}

static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
					      struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(fbdefio_state, offset, page);
}

static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.open		= fb_deferred_io_vm_open,
	.close		= fb_deferred_io_vm_close,
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

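/*
 * Dirty state is tracked through ->page_mkwrite and the pageref list
 * above; the device file's page cache is never written back, so marking
 * folios dirty at the address_space level can be a no-op.
 */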
static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info->fbdefio_state;

	fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
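
/*
 * A driver without special mapping requirements can point its fb_ops
 * directly at the helper above. A sketch (my_fb_ops is hypothetical):
 *
 *	static const struct fb_ops my_fb_ops = {
 *		.owner		= THIS_MODULE,
 *		...
 *		.fb_mmap	= fb_deferred_io_mmap,
 *	};
 */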

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	/* here we wrprotect the page's mappings, then do all deferred IO. */
	mutex_lock(&fbdefio_state->lock);
#ifdef CONFIG_MMU
	list_for_each_entry(pageref, &fbdefio_state->pagereflist, list) {
		struct page *page = pageref->page;
		pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;

		mapping_wrprotect_range(fbdefio_state->mapping, pgoff,
					page_to_pfn(page), 1);
	}
#endif

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio_state->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio_state->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio_state->lock);
}
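
/*
 * The driver's deferred_io callback receives the list of touched pages
 * under fbdefio_state->lock. A minimal sketch of such a callback (the
 * my_fb_flush_page helper is hypothetical):
 *
 *	static void my_fb_deferred_io(struct fb_info *info,
 *				      struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			my_fb_flush_page(info, pageref->offset);
 *	}
 */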

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	fbdefio_state = fb_deferred_io_state_alloc(info->fix.smem_len);
	if (!fbdefio_state)
		return -ENOMEM;
	fbdefio_state->info = info;

	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	info->fbdefio_state = fbdefio_state;

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
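
/*
 * Typical driver setup, as suggested by the functions in this file: fill
 * in a struct fb_deferred_io, attach it to the fb_info and call
 * fb_deferred_io_init() during probe. A sketch (my_fb_defio and
 * my_fb_deferred_io are hypothetical):
 *
 *	static struct fb_deferred_io my_fb_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= my_fb_deferred_io,
 *	};
 *
 *	...
 *	info->fbdefio = &my_fb_defio;
 *	ret = fb_deferred_io_init(info);
 *
 * fb_deferred_io_cleanup() undoes this and should run before the screen
 * buffer is freed.
 */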

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	fbdefio_state->mapping = file->f_mapping;
	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio_state->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	flush_delayed_work(&info->deferred_work);
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	if (!--fbdefio_state->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	fb_deferred_io_lastclose(info);

	info->fbdefio_state = NULL;

	mutex_lock(&fbdefio_state->lock);
	fbdefio_state->info = NULL;
	mutex_unlock(&fbdefio_state->lock);

	fb_deferred_io_state_put(fbdefio_state);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
481