/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"

#include "huge_gem_object.h"

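/*
 * A "huge" object for the selftests reports a GTT size (obj->base.size)
 * far larger than the physical memory backing it. Only the first
 * obj->scratch bytes are backed by real pages; the rest of the sg_table
 * simply aliases those pages.
 */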
static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct sgt_iter sgt_iter;
	struct page *page;

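	/*
	 * Only the first nreal entries reference distinct pages; the tail of
	 * the table reuses them, so free each real page exactly once.
	 */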
	for_each_sgt_page(page, sgt_iter, pages) {
		__free_page(page);
		if (!--nreal)
			break;
	}

	sg_free_table(pages);
	kfree(pages);
}

static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	unsigned int npages; /* restricted by sg_alloc_table */
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	if (overflows_type(obj->base.size / PAGE_SIZE, npages))
		return -E2BIG;

	npages = obj->base.size / PAGE_SIZE;
	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

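	/* Allocate the real backing pages at the head of the table. */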
	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
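	/*
	 * Fill the remainder of the table by cycling back through the real
	 * pages, so that every entry maps a valid (if shared) page.
	 */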
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	__i915_gem_object_set_pages(obj, pages);

	return 0;

err:
	huge_free_pages(obj, pages);
	return -ENOMEM;
#undef GFP
}

static void huge_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_free_pages(obj, pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_ops = {
	.name = "huge-gem",
	.get_pages = huge_get_pages,
	.put_pages = huge_put_pages,
};

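/*
 * huge_gem_object - create an object with a GTT footprint of dma_size bytes
 * backed by only phys_size bytes of real pages. phys_size is stashed in
 * obj->scratch for the get/put_pages hooks above.
 */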
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->scratch = phys_size;

	return obj;
}
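
/*
 * Usage sketch (illustrative only, not part of this file): a caller can back
 * a large GTT object with a single real page, e.g. to exercise GTT paths
 * without consuming an equivalent amount of memory. Error handling beyond
 * the allocation check is elided:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = huge_gem_object(i915, PAGE_SIZE, SZ_2M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	... map or bind the object and run the test ...
 *
 *	i915_gem_object_put(obj);
 */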