/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
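
/*
 * A self-referencing batch that loops until explicitly terminated, used
 * by selftests to keep an engine busy on demand. An illustrative (not
 * exhaustive) calling sequence:
 *
 *	igt_spinner_init(&spin, gt);
 *	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		the spinner never began executing; bail out
 *	igt_spinner_end(&spin);
 *	igt_spinner_fini(&spin);
 */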
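
/*
 * igt_spinner_init - allocate and map the spinner's backing storage.
 *
 * Two single-page internal objects are created: spin->hws acts as a
 * private status page into which the batch reports that it is running,
 * and spin->obj holds the batch itself. Both are pinned with a CPU
 * mapping so the test can poll and rewrite them while the GPU spins.
 */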
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

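/*
 * Each fence context gets its own u32 slot in the status page, so
 * concurrent spinners on different timelines do not clobber each
 * other's breadcrumbs; offset_in_page() wraps the slot back into the
 * single page allocated above.
 */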
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

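/*
 * Serialise the request against any prior work on the object, then mark
 * the vma as active on this request, holding the vma lock across both.
 */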
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

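/*
 * igt_spinner_create_request - build a request whose batch spins forever.
 *
 * The batch writes the request's breadcrumb into its slot in the status
 * page (so igt_wait_for_spinner() can tell the batch has started),
 * executes @arbitration_command, then branches back to its own first
 * dword with MI_BATCH_BUFFER_START. The GPU loops there until
 * igt_spinner_end() rewrites that first dword to MI_BATCH_BUFFER_END.
 */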
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

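	/*
	 * The encoding of MI_STORE_DWORD_IMM varies per generation:
	 * write rq->fence.seqno into this context's status-page slot.
	 */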
	if (INTEL_GEN(rq->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

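	/* Branch back to the start of this batch, creating the spin. */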
	if (INTEL_GEN(rq->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

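	/* Make our CPU writes to the batch visible to the GPU. */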
	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb &&
	    i915_request_timeline(rq)->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (INTEL_GEN(rq->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

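/* Sample the breadcrumb the spinner wrote into its status-page slot. */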
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

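/*
 * Stop the spinner: rewrite the first dword of the batch to
 * MI_BATCH_BUFFER_END and flush, so the GPU exits the loop on its
 * next pass.
 */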
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

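/*
 * Tear down: end any in-flight spin, then release the CPU mappings and
 * drop the object references taken in igt_spinner_init().
 */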
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

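/*
 * Wait for the spinner to begin executing: first a short 10us busy-wait
 * for the common case, then a sleeping wait of up to 1s. Returns true
 * once the spinner's breadcrumb is visible in the status page.
 */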
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}