/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "i915_wait_util.h"
#include "igt_spinner.h"

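/*
 * igt_spinner builds a batch buffer that spins forever on the GPU by
 * branching back to its own start, until igt_spinner_end() rewrites the
 * first instruction to terminate it. Before entering the loop, the batch
 * writes its request's seqno into a dedicated status page (spin->hws),
 * which igt_wait_for_spinner() polls to confirm that the spinner is
 * actually executing on hardware.
 *
 * A minimal usage sketch (error handling elided; assumes a pinned
 * struct intel_context *ce on some struct intel_gt *gt):
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *
 *	igt_spinner_init(&spin, gt);
 *	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		;	/* spinner never started: flag an error */
 *	/* ... exercise preemption, resets, etc. against the busy engine ... */
 *	igt_spinner_end(&spin);
 *	i915_request_put(rq);
 *	igt_spinner_fini(&spin);
 *
 * The arbitration command emitted into the loop selects whether the
 * spinner may be preempted mid-batch (e.g. MI_ARB_CHECK) or contains no
 * preemption point (e.g. MI_NOOP).
 */

/*
 * Create the two backing objects for a spinner: one page for the spin
 * batch itself and one page used as the status page for the
 * "spinner started" seqno.
 */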
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	return 0;

err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

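/*
 * Bind @obj into the context's VM and return a CPU mapping of it, taking
 * the object lock through @ww if the caller supplied one.
 */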
static void *igt_spinner_pin_obj(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww,
				 struct drm_i915_gem_object *obj,
				 unsigned int mode, struct i915_vma **vma)
{
	void *vaddr;
	int ret;

	*vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(*vma))
		return ERR_CAST(*vma);

	ret = i915_gem_object_lock(obj, ww);
	if (ret)
		return ERR_PTR(ret);

	vaddr = i915_gem_object_pin_map(obj, mode);

	/* Without a ww context, the object lock is only held for the map. */
	if (!ww)
		i915_gem_object_unlock(obj);

	if (IS_ERR(vaddr))
		return vaddr;

	if (ww)
		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
	else
		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);

	if (ret) {
		i915_gem_object_unpin_map(obj);
		return ERR_PTR(ret);
	}

	return vaddr;
}

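/*
 * Lazily pin and map both spinner objects for use with @ce. The status
 * page is mapped write-back and poisoned with 0xff so that a written
 * seqno is distinguishable from the initial state.
 */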
int igt_spinner_pin(struct igt_spinner *spin,
		    struct intel_context *ce,
		    struct i915_gem_ww_ctx *ww)
{
	void *vaddr;

	/* Each spinner instance is tied to a single context at a time. */
	if (spin->ce && WARN_ON(spin->ce != ce))
		return -ENODEV;
	spin->ce = ce;

	if (!spin->seqno) {
		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
	}

	if (!spin->batch) {
		unsigned int mode;

		mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->batch = vaddr;
	}

	return 0;
}

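/* Each fence context gets its own u32 seqno slot within the status page. */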
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

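/* GPU address of this request's seqno slot in the status page. */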
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
}

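/*
 * Build a request whose batch spins forever: it first reports that it is
 * running by writing rq->fence.seqno to the status page, then branches
 * back to the start of the batch until igt_spinner_end() rewrites the
 * first instruction. The caller is expected to follow up with
 * i915_request_add().
 */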
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	if (!spin->batch) {
		err = igt_spinner_pin(spin, ce, NULL);
		if (err)
			return ERR_PTR(err);
	}

	hws = spin->hws_vma;
	vma = spin->batch_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Announce that the spinner is running: store the seqno to the HWS. */
	if (GRAPHICS_VER(rq->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (GRAPHICS_VER(rq->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (GRAPHICS_VER(rq->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* Pad the loop with a stretch of NOOPs before branching back. */
	memset32(batch, MI_NOOP, 128);
	batch += 128;

	/* Branch back to the start of the batch, i.e. spin until rewritten. */
	if (GRAPHICS_VER(rq->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (GRAPHICS_VER(rq->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(i915_vma_offset(vma));
	*batch++ = upper_32_bits(i915_vma_offset(vma));

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (GRAPHICS_VER(rq->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
	return err ? ERR_PTR(err) : rq;
}

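/* Read back the seqno the spinner wrote for this request, if any. */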
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

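/*
 * Terminate the spin loop: overwrite the first instruction of the batch
 * with MI_BATCH_BUFFER_END so the next pass through the loop completes.
 */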
void igt_spinner_end(struct igt_spinner *spin)
{
	if (!spin->batch)
		return;

	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

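/*
 * Stop the spinner (if still running) and release the pins, maps and
 * references taken in igt_spinner_init() and igt_spinner_pin().
 */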
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	if (spin->batch) {
		i915_vma_unpin(spin->batch_vma);
		i915_gem_object_unpin_map(spin->obj);
	}
	i915_gem_object_put(spin->obj);

	if (spin->seqno) {
		i915_vma_unpin(spin->hws_vma);
		i915_gem_object_unpin_map(spin->hws);
	}
	i915_gem_object_put(spin->hws);
}

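/*
 * Wait for the spinner to report itself as running on the GPU: first a
 * short busy-wait (100us), then a longer sleeping wait (50ms). Returns
 * true if the spinner's seqno appeared in the status page in time.
 */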
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (i915_request_is_ready(rq))
		intel_engine_flush_submission(rq->engine);

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}
266