Lines Matching +full:- +full:gt
1 // SPDX-License-Identifier: MIT
18 __igt_reset_stolen(struct intel_gt *gt, in __igt_reset_stolen() argument
22 struct i915_ggtt *ggtt = gt->ggtt; in __igt_reset_stolen()
23 const struct resource *dsm = &gt->i915->dsm.stolen; in __igt_reset_stolen()
34 if (!drm_mm_node_allocated(&ggtt->error_capture)) in __igt_reset_stolen()
43 return -ENOMEM; in __igt_reset_stolen()
47 err = -ENOMEM; in __igt_reset_stolen()
51 igt_global_reset_lock(gt); in __igt_reset_stolen()
52 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in __igt_reset_stolen()
54 err = igt_spinner_init(&spin, gt); in __igt_reset_stolen()
58 for_each_engine(engine, gt, id) { in __igt_reset_stolen()
62 if (!(mask & engine->mask)) in __igt_reset_stolen()
83 dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT); in __igt_reset_stolen()
87 ggtt->vm.insert_page(&ggtt->vm, dma, in __igt_reset_stolen()
88 ggtt->error_capture.start, in __igt_reset_stolen()
89 i915_gem_get_pat_index(gt->i915, in __igt_reset_stolen()
94 s = io_mapping_map_wc(&ggtt->iomap, in __igt_reset_stolen()
95 ggtt->error_capture.start, in __igt_reset_stolen()
98 if (!__drm_mm_interval_first(&gt->i915->mm.stolen, in __igt_reset_stolen()
100 ((page + 1) << PAGE_SHIFT) - 1)) in __igt_reset_stolen()
111 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); in __igt_reset_stolen()
114 intel_gt_reset(gt, mask, NULL); in __igt_reset_stolen()
116 for_each_engine(engine, gt, id) { in __igt_reset_stolen()
117 if (mask & engine->mask) in __igt_reset_stolen()
122 max = -1; in __igt_reset_stolen()
125 dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT); in __igt_reset_stolen()
130 ggtt->vm.insert_page(&ggtt->vm, dma, in __igt_reset_stolen()
131 ggtt->error_capture.start, in __igt_reset_stolen()
132 i915_gem_get_pat_index(gt->i915, in __igt_reset_stolen()
137 s = io_mapping_map_wc(&ggtt->iomap, in __igt_reset_stolen()
138 ggtt->error_capture.start, in __igt_reset_stolen()
147 !__drm_mm_interval_first(&gt->i915->mm.stolen, in __igt_reset_stolen()
149 ((page + 1) << PAGE_SHIFT) - 1)) { in __igt_reset_stolen()
160 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); in __igt_reset_stolen()
169 err = -EINVAL; in __igt_reset_stolen()
176 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in __igt_reset_stolen()
177 igt_global_reset_unlock(gt); in __igt_reset_stolen()
192 struct intel_gt *gt = arg; in igt_reset_engines_stolen() local
197 if (!intel_has_reset_engine(gt)) in igt_reset_engines_stolen()
200 for_each_engine(engine, gt, id) { in igt_reset_engines_stolen()
201 err = __igt_reset_stolen(gt, engine->mask, engine->name); in igt_reset_engines_stolen()
211 struct intel_gt *gt = arg; in igt_global_reset() local
218 igt_global_reset_lock(gt); in igt_global_reset()
219 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in igt_global_reset()
221 reset_count = i915_reset_count(&gt->i915->gpu_error); in igt_global_reset()
223 intel_gt_reset(gt, ALL_ENGINES, NULL); in igt_global_reset()
225 if (i915_reset_count(&gt->i915->gpu_error) == reset_count) { in igt_global_reset()
227 err = -EINVAL; in igt_global_reset()
230 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in igt_global_reset()
231 igt_global_reset_unlock(gt); in igt_global_reset()
233 if (intel_gt_is_wedged(gt)) in igt_global_reset()
234 err = -EIO; in igt_global_reset()
241 struct intel_gt *gt = arg; in igt_wedged_reset() local
246 igt_global_reset_lock(gt); in igt_wedged_reset()
247 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in igt_wedged_reset()
249 intel_gt_set_wedged(gt); in igt_wedged_reset()
251 GEM_BUG_ON(!intel_gt_is_wedged(gt)); in igt_wedged_reset()
252 intel_gt_reset(gt, ALL_ENGINES, NULL); in igt_wedged_reset()
254 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in igt_wedged_reset()
255 igt_global_reset_unlock(gt); in igt_wedged_reset()
257 return intel_gt_is_wedged(gt) ? -EIO : 0; in igt_wedged_reset()
262 struct intel_gt *gt = arg; in igt_atomic_reset() local
269 wakeref = intel_gt_pm_get(gt); in igt_atomic_reset()
270 igt_global_reset_lock(gt); in igt_atomic_reset()
273 if (!igt_force_reset(gt)) in igt_atomic_reset()
276 for (p = igt_atomic_phases; p->name; p++) { in igt_atomic_reset()
279 GEM_TRACE("__intel_gt_reset under %s\n", p->name); in igt_atomic_reset()
281 awake = reset_prepare(gt); in igt_atomic_reset()
282 p->critical_section_begin(); in igt_atomic_reset()
284 err = intel_gt_reset_all_engines(gt); in igt_atomic_reset()
286 p->critical_section_end(); in igt_atomic_reset()
287 reset_finish(gt, awake); in igt_atomic_reset()
290 pr_err("__intel_gt_reset failed under %s\n", p->name); in igt_atomic_reset()
296 igt_force_reset(gt); in igt_atomic_reset()
299 igt_global_reset_unlock(gt); in igt_atomic_reset()
300 intel_gt_pm_put(gt, wakeref); in igt_atomic_reset()
307 struct intel_gt *gt = arg; in igt_atomic_engine_reset() local
316 if (!intel_has_reset_engine(gt)) in igt_atomic_engine_reset()
319 if (intel_uc_uses_guc_submission(&gt->uc)) in igt_atomic_engine_reset()
322 wakeref = intel_gt_pm_get(gt); in igt_atomic_engine_reset()
323 igt_global_reset_lock(gt); in igt_atomic_engine_reset()
326 if (!igt_force_reset(gt)) in igt_atomic_engine_reset()
329 for_each_engine(engine, gt, id) { in igt_atomic_engine_reset()
330 struct tasklet_struct *t = &engine->sched_engine->tasklet; in igt_atomic_engine_reset()
332 if (t->func) in igt_atomic_engine_reset()
336 for (p = igt_atomic_phases; p->name; p++) { in igt_atomic_engine_reset()
338 engine->name, p->name); in igt_atomic_engine_reset()
339 if (strcmp(p->name, "softirq")) in igt_atomic_engine_reset()
342 p->critical_section_begin(); in igt_atomic_engine_reset()
344 p->critical_section_end(); in igt_atomic_engine_reset()
346 if (strcmp(p->name, "softirq")) in igt_atomic_engine_reset()
351 engine->name, p->name); in igt_atomic_engine_reset()
357 if (t->func) { in igt_atomic_engine_reset()
366 igt_force_reset(gt); in igt_atomic_engine_reset()
369 igt_global_reset_unlock(gt); in igt_atomic_engine_reset()
370 intel_gt_pm_put(gt, wakeref); in igt_atomic_engine_reset()
385 struct intel_gt *gt = to_gt(i915); in intel_reset_live_selftests() local
387 if (!intel_has_gpu_reset(gt)) in intel_reset_live_selftests()
390 if (intel_gt_is_wedged(gt)) in intel_reset_live_selftests()
391 return -EIO; /* we're long past hope of a successful reset */ in intel_reset_live_selftests()
393 return intel_gt_live_subtests(tests, gt); in intel_reset_live_selftests()