xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_pages.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

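	/*
	 * On platforms with bit17 memory swizzling
	 * (QUIRK_PIN_SWIZZLED_PAGES) the swizzle depends on the
	 * physical address of each page, so keep tiled objects
	 * pinned to prevent their pages being swapped out and
	 * relocated.
	 */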
	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
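	/*
	 * Concretely, assuming supported = 4K | 64K | 2M and
	 * phys = 2M | 4K: "phys & ~0u << i" is non-zero for each
	 * supported bit i, since some chunk of at least that size
	 * exists, giving sg = 4K | 64K | 2M.
	 */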
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	if (i915_gem_object_is_shrinkable(obj)) {
		struct list_head *list;
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

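/* Lower-level get_pages: callers are expected to hold obj->mm.lock. */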
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

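		/*
		 * Publish the pages before the pin count becomes
		 * non-zero: lockless users of the pin fast path
		 * (atomic_inc_not_zero) must observe the new pages.
		 */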
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
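
/*
 * Usage sketch (hypothetical caller):
 *
 *	if (i915_gem_object_pin_pages(obj) == 0) {
 *		... access obj->mm.pages, i915_gem_object_get_page(), etc ...
 *		i915_gem_object_unpin_pages(obj);
 *	}
 */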

/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

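/*
 * Undo i915_gem_object_map(): lmem objects were mapped through the
 * io_mapping, multi-page objects through vmap(), and single pages
 * through kmap().
 */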
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (i915_gem_object_is_lmem(obj))
		io_mapping_unmap((void __force __iomem *)ptr);
	else if (is_vmalloc_addr(ptr))
		vunmap(ptr);
	else
		kunmap(kmap_to_page(ptr));
}

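/*
 * Detach the pages from the object, returning them so that the caller
 * (typically a ->put_pages() backend) can release them. The kernel
 * mapping, the cached page iterator and the page-size tracking are all
 * torn down; the result may be an ERR_PTR, e.g. from a cancelled
 * asynchronous get_pages.
 */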
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass)
{
	struct sg_table *pages;
	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	GEM_BUG_ON(atomic_read(&obj->bind_count));

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
		err = -EBUSY;
		goto unlock;
	}

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!pages && !i915_gem_object_needs_async_cancel(obj))
		pages = ERR_PTR(-EINVAL);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	err = 0;
unlock:
	mutex_unlock(&obj->mm.lock);

	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	if (i915_gem_object_is_lmem(obj)) {
		void __iomem *io;

		if (type != I915_MAP_WC)
			return NULL;

		io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
		return (void __force *)io;
	}

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough - to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	unsigned int flags;
	bool pinned;
	void *ptr;
	int err;

	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
	if (!i915_gem_object_type_has(obj, flags))
		return ERR_PTR(-ENXIO);

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return ERR_PTR(err);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			err = -EBUSY;
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			err = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(err);
	goto out_unlock;
}
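
/*
 * Typical pattern (hypothetical caller; data/size assumed, error
 * handling elided):
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */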
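/*
 * Flush a range of a pinned kernel mapping back to memory: a no-op for
 * objects that are cache-coherent for writes and for WC mappings,
 * otherwise a clflush of the affected range.
 */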
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
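	/*
	 * E.g. for a 16-page sg entry starting at index 8, the tree
	 * holds radix[8] = sg and radix[9..23] = xa_mk_value(8): a
	 * lookup of n = 12 yields the value 8, from which we return
	 * radix[8] with *offset = 12 - 8 = 4.
	 */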
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
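	/*
	 * Once obj->mm.dirty is set, every page will be written back
	 * when the pages are released, so the per-page set_page_dirty()
	 * is only needed while the object-level flag is still clear.
	 */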
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

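/*
 * Return the DMA address of page n within the object; if @len is
 * supplied, it receives the remaining byte length of the DMA segment
 * measured from that page.
 */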
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}