/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_migrate.h"
#include "i915_reg.h"
#include "i915_ttm_buddy_manager.h"

#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

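/*
 * Extract the chosen address bit from @offset and shift it down to bit 6
 * (the cacheline boundary), ready to be XORed into an address whose bit 6
 * the hardware swizzles based on higher address bits.
 */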
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

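/*
 * Map a linear offset through the fenced (tiled) view back to the byte in
 * the object's backing store: split the offset into an (x, y) position
 * within a row of tiles, apply the X- or Y-tile layout, then undo the
 * platform's bit-6 swizzling.
 */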
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

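/*
 * Write a sentinel through a single randomly chosen partial GGTT view of
 * the tiled object and verify, using the manual swizzle above, that it
 * landed in the expected page of the backing store.
 */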
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_gtt_view view;
	struct i915_vma *vma;
	unsigned long offset;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	i915_vma_destroy(vma);
	i915_gem_object_unlock(obj);
	return err;
}

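/*
 * As check_partial_mapping(), but iterate over every prime-numbered page
 * of the object, exercising many differently placed partial views until
 * the timeout expires.
 */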
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_gtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		unsigned long offset;
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		i915_vma_destroy(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

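/*
 * Fill in the tile geometry (width in bytes, height in rows, and log2 of
 * the tile size) for the given platform generation, and return the maximum
 * pitch in tiles supported by the fence registers.
 */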
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);
	unsigned long count;
	IGT_TIMEOUT(end);
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many runs of 1s with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		struct tile tile;

		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.height = 1;
			tile.width = 1;
			tile.size = 0;
			tile.stride = 0;
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;

		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (GRAPHICS_VER(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

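/*
 * Keep the object busy by emitting a kernel-context write to it from every
 * uabi engine, then drop our reference so the object stays alive only via
 * its active reference until those requests retire.
 */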
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;
		struct i915_gem_ww_ctx ww;
		int err;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		i915_gem_ww_ctx_init(&ww, false);
retry:
		err = i915_gem_object_lock(obj, &ww);
		if (!err)
			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
		if (err)
			goto err;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = i915_vma_move_to_active(vma, rq,
					      EXEC_OBJECT_WRITE);

		i915_request_add(rq);
err_unpin:
		i915_vma_unpin(vma);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		if (err)
			return err;
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}

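/*
 * Devices with local memory cannot service GTT mmaps, so exercise those
 * through fixed (TTM) mappings; everything else defaults to a GTT mmap.
 */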
static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
{
	if (HAS_LMEM(i915))
		return I915_MMAP_TYPE_FIXED;

	return I915_MMAP_TYPE_GTT;
}

static struct drm_i915_gem_object *
create_sys_or_internal(struct drm_i915_private *i915,
		       unsigned long size)
{
	if (HAS_LMEM(i915)) {
		struct intel_memory_region *sys_region =
			i915->mm.regions[INTEL_REGION_SMEM];

		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
	}

	return i915_gem_object_create_internal(i915, size);
}

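/*
 * Check that creating an object of @size and assigning it an mmap offset
 * completes with the @expected error code (0 on success).
 */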
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	u64 offset;
	int ret;

	obj = create_sys_or_internal(i915, size);
	if (IS_ERR(obj))
		return expected && expected == PTR_ERR(obj);

	ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	i915_gem_object_put(obj);

	return ret == expected;
}

static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get_untracked(to_gt(i915));
	cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put_untracked(to_gt(i915));
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}

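/*
 * Shrink the vma offset manager down to a single remaining page and check
 * that offset allocation succeeds when the hole fits, fails when it does
 * not, and that dead-but-busy objects are reaped to reclaim their offsets.
 */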
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	int loop, err = 0;
	u64 offset;
	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!to_gt(i915)->awake);
	intel_gt_retire_requests(to_gt(i915));
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
		if (!resv) {
			err = -ENOMEM;
			goto out_park;
		}

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;
		loop = 0;

		if (!resv->size) {
			kfree(resv);
			continue;
		}

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			kfree(resv);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	/* Just fits! */
	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Too large */
	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = create_sys_or_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("Unable to create object for reclaimed hole\n");
		goto out;
	}

	err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
	if (err) {
		pr_err("Unable to insert object into reclaimed hole\n");
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(to_gt(i915)))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto out;
		}

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}

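/*
 * The mmap tests use a simple poison protocol: gtt_set()/wc_set() fill the
 * object with POISON_INUSE through a kernel mapping, the test then rewrites
 * it with POISON_FREE through the user mmap, and gtt_check()/wc_check()
 * verify that the user's writes reached the backing store.
 */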
static int gtt_set(struct drm_i915_gem_object *obj)
{
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	wakeref = intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt, wakeref);
	return err;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *map;
	int err = 0;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	wakeref = intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out;
	}

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

out:
	intel_gt_pm_put(vma->vm->gt, wakeref);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	void *vaddr;
	int err = 0;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_gem_object_unpin_map(obj);

	return err;
}

static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	bool no_map;

	if (obj->ops->mmap_offset)
		return type == I915_MMAP_TYPE_FIXED;
	else if (type == I915_MMAP_TYPE_FIXED)
		return false;

	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return false;

	i915_gem_object_lock(obj, NULL);
	no_map = (type != I915_MMAP_TYPE_GTT &&
		  !i915_gem_object_has_struct_page(obj) &&
		  !i915_gem_object_has_iomem(obj));
	i915_gem_object_unlock(obj);

	return !no_map;
}

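/*
 * mmap the object into userspace with the requested mapping type, then
 * confirm we can read back the POISON_INUSE pattern and overwrite it with
 * POISON_FREE through the user mapping, faulting each page as we go.
 */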
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct vm_area_struct *area;
	unsigned long addr;
	int err, i;
	u64 offset;

	if (!can_mmap(obj, type))
		return 0;

	err = wc_set(obj);
	if (err == -ENXIO)
		err = gtt_set(obj);
	if (err)
		return err;

	err = __assign_mmap_offset(obj, type, &offset, NULL);
	if (err)
		return err;

	addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	mmap_read_lock(current->mm);
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
		u32 x;

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(to_gt(i915));

	err = wc_check(obj);
	if (err == -ENXIO)
		err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};
		int i;

		if (mr->private)
			continue;

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;
			int err;

			obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}

static void igt_close_objects(struct drm_i915_private *i915,
			      struct list_head *objects)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

static void igt_make_evictable(struct list_head *objects)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		i915_gem_object_unlock(obj);
	}

	cond_resched();
}

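/*
 * Exhaust the CPU-visible (mappable) portion of the region: allocate and
 * pin progressively smaller objects until even a minimum-page-size
 * allocation fails, so that a later fault must evict or migrate something.
 */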
static int igt_fill_mappable(struct intel_memory_region *mr,
			     struct list_head *objects)
{
	u64 size, total;
	int err;

	total = 0;
	size = resource_size(&mr->io);
	do {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close;
		}

		list_add(&obj->st_link, objects);

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err != -ENXIO && err != -ENOMEM)
				goto err_close;

			if (size == mr->min_page_size) {
				err = 0;
				break;
			}

			size >>= 1;
			continue;
		}

		total += obj->base.size;
	} while (1);

	pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
	return 0;

err_close:
	igt_close_objects(mr->i915, objects);
	return err;
}

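/*
 * Fault a user mapping prepared by the caller: for a mappable object run
 * the poison read/write protocol above, while for an object that can never
 * be CPU-mapped confirm that every access faults with -EFAULT.
 */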
___igt_mmap_migrate(struct drm_i915_private * i915,struct drm_i915_gem_object * obj,unsigned long addr,bool unfaultable)1094fb87550dSMatthew Auld static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1095fb87550dSMatthew Auld struct drm_i915_gem_object *obj,
1096fb87550dSMatthew Auld unsigned long addr,
1097fb87550dSMatthew Auld bool unfaultable)
1098fb87550dSMatthew Auld {
1099fb87550dSMatthew Auld struct vm_area_struct *area;
1100fb87550dSMatthew Auld int err = 0, i;
1101fb87550dSMatthew Auld
1102fb87550dSMatthew Auld pr_info("igt_mmap(%s, %d) @ %lx\n",
1103fb87550dSMatthew Auld obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1104fb87550dSMatthew Auld
1105fb87550dSMatthew Auld mmap_read_lock(current->mm);
1106fb87550dSMatthew Auld area = vma_lookup(current->mm, addr);
1107fb87550dSMatthew Auld mmap_read_unlock(current->mm);
1108fb87550dSMatthew Auld if (!area) {
1109fb87550dSMatthew Auld pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1110fb87550dSMatthew Auld obj->mm.region->name);
1111fb87550dSMatthew Auld err = -EINVAL;
1112fb87550dSMatthew Auld goto out_unmap;
1113fb87550dSMatthew Auld }
1114fb87550dSMatthew Auld
1115fb87550dSMatthew Auld for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1116fb87550dSMatthew Auld u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1117fb87550dSMatthew Auld u32 x;
1118fb87550dSMatthew Auld
1119fb87550dSMatthew Auld if (get_user(x, ux)) {
1120fb87550dSMatthew Auld err = -EFAULT;
1121fb87550dSMatthew Auld if (!unfaultable) {
1122fb87550dSMatthew Auld pr_err("%s: Unable to read from mmap, offset:%zd\n",
1123fb87550dSMatthew Auld obj->mm.region->name, i * sizeof(x));
1124fb87550dSMatthew Auld goto out_unmap;
1125fb87550dSMatthew Auld }
1126fb87550dSMatthew Auld
1127fb87550dSMatthew Auld continue;
1128fb87550dSMatthew Auld }
1129fb87550dSMatthew Auld
1130fb87550dSMatthew Auld if (unfaultable) {
1131fb87550dSMatthew Auld pr_err("%s: Faulted unmappable memory\n",
1132fb87550dSMatthew Auld obj->mm.region->name);
1133fb87550dSMatthew Auld err = -EINVAL;
1134fb87550dSMatthew Auld goto out_unmap;
1135fb87550dSMatthew Auld }
1136fb87550dSMatthew Auld
1137fb87550dSMatthew Auld if (x != expand32(POISON_INUSE)) {
1138fb87550dSMatthew Auld pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1139fb87550dSMatthew Auld obj->mm.region->name,
1140fb87550dSMatthew Auld i * sizeof(x), x, expand32(POISON_INUSE));
1141fb87550dSMatthew Auld err = -EINVAL;
1142fb87550dSMatthew Auld goto out_unmap;
1143fb87550dSMatthew Auld }
1144fb87550dSMatthew Auld
1145fb87550dSMatthew Auld x = expand32(POISON_FREE);
1146fb87550dSMatthew Auld if (put_user(x, ux)) {
1147fb87550dSMatthew Auld pr_err("%s: Unable to write to mmap, offset:%zd\n",
1148fb87550dSMatthew Auld obj->mm.region->name, i * sizeof(x));
1149fb87550dSMatthew Auld err = -EFAULT;
1150fb87550dSMatthew Auld goto out_unmap;
1151fb87550dSMatthew Auld }
1152fb87550dSMatthew Auld }
1153fb87550dSMatthew Auld
1154fb87550dSMatthew Auld if (unfaultable) {
1155fb87550dSMatthew Auld if (err == -EFAULT)
1156fb87550dSMatthew Auld err = 0;
1157fb87550dSMatthew Auld } else {
1158fb87550dSMatthew Auld obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1159fb87550dSMatthew Auld err = wc_check(obj);
1160fb87550dSMatthew Auld }
1161fb87550dSMatthew Auld out_unmap:
1162fb87550dSMatthew Auld vm_munmap(addr, obj->base.size);
1163fb87550dSMatthew Auld return err;
1164fb87550dSMatthew Auld }
1165fb87550dSMatthew Auld
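/*
 * Modifiers for __igt_mmap_migrate(): TOPDOWN allocates GPU-only (i.e.
 * non-mappable) memory, FILL packs the mappable portion with filler
 * objects beforehand, EVICTABLE allows those fillers to be evicted,
 * UNFAULTABLE means the CPU fault itself is expected to fail, and
 * FAIL_GPU simulates a GPU migration failure when faulting the pages.
 */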
1166fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
1167fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_FILL (1 << 1)
1168fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2)
1169fb87550dSMatthew Auld #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1170bfe53be2SMatthew Auld #define IGT_MMAP_MIGRATE_FAIL_GPU (1 << 4)
1171fb87550dSMatthew Auld static int __igt_mmap_migrate(struct intel_memory_region **placements,
1172fb87550dSMatthew Auld int n_placements,
1173fb87550dSMatthew Auld struct intel_memory_region *expected_mr,
1174fb87550dSMatthew Auld unsigned int flags)
1175fb87550dSMatthew Auld {
1176fb87550dSMatthew Auld struct drm_i915_private *i915 = placements[0]->i915;
1177fb87550dSMatthew Auld struct drm_i915_gem_object *obj;
1178fb87550dSMatthew Auld struct i915_request *rq = NULL;
1179fb87550dSMatthew Auld unsigned long addr;
1180fb87550dSMatthew Auld LIST_HEAD(objects);
1181fb87550dSMatthew Auld u64 offset;
1182fb87550dSMatthew Auld int err;
1183fb87550dSMatthew Auld
1184fb87550dSMatthew Auld obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1185fb87550dSMatthew Auld placements,
1186fb87550dSMatthew Auld n_placements);
1187fb87550dSMatthew Auld if (IS_ERR(obj))
1188fb87550dSMatthew Auld return PTR_ERR(obj);
1189fb87550dSMatthew Auld
1190fb87550dSMatthew Auld if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1191fb87550dSMatthew Auld obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1192fb87550dSMatthew Auld
1193fb87550dSMatthew Auld err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1194fb87550dSMatthew Auld if (err)
1195fb87550dSMatthew Auld goto out_put;
1196fb87550dSMatthew Auld
1197fb87550dSMatthew Auld /*
1198fb87550dSMatthew Auld * This will eventually create a GEM context, due to opening a dummy drm
1199fb87550dSMatthew Auld * file, which needs a tiny amount of mappable device memory for the
1200fb87550dSMatthew Auld * top-level paging structures (and perhaps scratch), so make sure we
1201fb87550dSMatthew Auld * allocate early, to avoid tears.
1202fb87550dSMatthew Auld */
1203fb87550dSMatthew Auld addr = igt_mmap_offset(i915, offset, obj->base.size,
1204fb87550dSMatthew Auld PROT_WRITE, MAP_SHARED);
1205fb87550dSMatthew Auld if (IS_ERR_VALUE(addr)) {
1206fb87550dSMatthew Auld err = addr;
1207fb87550dSMatthew Auld goto out_put;
1208fb87550dSMatthew Auld }
1209fb87550dSMatthew Auld
1210fb87550dSMatthew Auld if (flags & IGT_MMAP_MIGRATE_FILL) {
1211fb87550dSMatthew Auld err = igt_fill_mappable(placements[0], &objects);
1212fb87550dSMatthew Auld if (err)
1213fb87550dSMatthew Auld goto out_put;
1214fb87550dSMatthew Auld }
1215fb87550dSMatthew Auld
1216fb87550dSMatthew Auld err = i915_gem_object_lock(obj, NULL);
1217fb87550dSMatthew Auld if (err)
1218fb87550dSMatthew Auld goto out_put;
1219fb87550dSMatthew Auld
1220fb87550dSMatthew Auld err = i915_gem_object_pin_pages(obj);
1221fb87550dSMatthew Auld if (err) {
1222fb87550dSMatthew Auld i915_gem_object_unlock(obj);
1223fb87550dSMatthew Auld goto out_put;
1224fb87550dSMatthew Auld }
1225fb87550dSMatthew Auld
1226fb87550dSMatthew Auld err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
12279275277dSFei Yang obj->mm.pages->sgl, obj->pat_index,
1228fb87550dSMatthew Auld i915_gem_object_is_lmem(obj),
1229fb87550dSMatthew Auld expand32(POISON_INUSE), &rq);
1230fb87550dSMatthew Auld i915_gem_object_unpin_pages(obj);
1231fb87550dSMatthew Auld if (rq) {
123211f01dcfSMatthew Auld err = dma_resv_reserve_fences(obj->base.resv, 1);
123311f01dcfSMatthew Auld if (!err)
123473511edfSChristian König dma_resv_add_fence(obj->base.resv, &rq->fence,
12351d7f5e6cSChristian König DMA_RESV_USAGE_KERNEL);
1236fb87550dSMatthew Auld i915_request_put(rq);
1237fb87550dSMatthew Auld }
1238fb87550dSMatthew Auld i915_gem_object_unlock(obj);
1239fb87550dSMatthew Auld if (err)
1240fb87550dSMatthew Auld goto out_put;
1241fb87550dSMatthew Auld
1242fb87550dSMatthew Auld if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1243fb87550dSMatthew Auld igt_make_evictable(&objects);
1244fb87550dSMatthew Auld
1245bfe53be2SMatthew Auld if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1246bfe53be2SMatthew Auld err = i915_gem_object_lock(obj, NULL);
1247bfe53be2SMatthew Auld if (err)
1248bfe53be2SMatthew Auld goto out_put;
1249bfe53be2SMatthew Auld
1250bfe53be2SMatthew Auld /*
1251bfe53be2SMatthew Auld * Ensure we only simulate the gpu failure when faulting the
1252bfe53be2SMatthew Auld * pages.
1253bfe53be2SMatthew Auld */
1254bfe53be2SMatthew Auld err = i915_gem_object_wait_moving_fence(obj, true);
1255bfe53be2SMatthew Auld i915_gem_object_unlock(obj);
1256bfe53be2SMatthew Auld if (err)
1257bfe53be2SMatthew Auld goto out_put;
1258bfe53be2SMatthew Auld i915_ttm_migrate_set_failure_modes(true, false);
1259bfe53be2SMatthew Auld }
1260bfe53be2SMatthew Auld
1261fb87550dSMatthew Auld err = ___igt_mmap_migrate(i915, obj, addr,
1262fb87550dSMatthew Auld flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1263bfe53be2SMatthew Auld
1264fb87550dSMatthew Auld if (!err && obj->mm.region != expected_mr) {
1265fb87550dSMatthew Auld pr_err("%s region mismatch: expected %s, got %s\n", __func__, expected_mr->name, obj->mm.region->name);
1266fb87550dSMatthew Auld err = -EINVAL;
1267fb87550dSMatthew Auld }
1268fb87550dSMatthew Auld
1269bfe53be2SMatthew Auld if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1270bfe53be2SMatthew Auld struct intel_gt *gt;
1271bfe53be2SMatthew Auld unsigned int id;
1272bfe53be2SMatthew Auld
1273bfe53be2SMatthew Auld i915_ttm_migrate_set_failure_modes(false, false);
1274bfe53be2SMatthew Auld
1275bfe53be2SMatthew Auld for_each_gt(gt, i915, id) {
1276bfe53be2SMatthew Auld intel_wakeref_t wakeref;
1277bfe53be2SMatthew Auld bool wedged;
1278bfe53be2SMatthew Auld
1279bfe53be2SMatthew Auld mutex_lock(>->reset.mutex);
1280bfe53be2SMatthew Auld wedged = test_bit(I915_WEDGED, >->reset.flags);
1281bfe53be2SMatthew Auld mutex_unlock(>->reset.mutex);
1282bfe53be2SMatthew Auld if (!wedged) {
1283bfe53be2SMatthew Auld pr_err("gt(%u) not wedged\n", id);
1284bfe53be2SMatthew Auld err = -EINVAL;
1285bfe53be2SMatthew Auld continue;
1286bfe53be2SMatthew Auld }
1287bfe53be2SMatthew Auld
1288bfe53be2SMatthew Auld wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1289bfe53be2SMatthew Auld igt_global_reset_lock(gt);
1290bfe53be2SMatthew Auld intel_gt_reset(gt, ALL_ENGINES, NULL);
1291bfe53be2SMatthew Auld igt_global_reset_unlock(gt);
1292bfe53be2SMatthew Auld intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1293bfe53be2SMatthew Auld }
1294bfe53be2SMatthew Auld
1295bfe53be2SMatthew Auld if (!i915_gem_object_has_unknown_state(obj)) {
1296bfe53be2SMatthew Auld pr_err("object missing unknown_state\n");
1297bfe53be2SMatthew Auld err = -EINVAL;
1298bfe53be2SMatthew Auld }
1299bfe53be2SMatthew Auld }
1300bfe53be2SMatthew Auld
1301fb87550dSMatthew Auld out_put:
1302fb87550dSMatthew Auld i915_gem_object_put(obj);
1303fb87550dSMatthew Auld igt_close_objects(i915, &objects);
1304fb87550dSMatthew Auld return err;
1305fb87550dSMatthew Auld }
1306fb87550dSMatthew Auld
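/*
 * For context, the userspace-visible equivalent of the FIXED mmap flow
 * exercised above is roughly the following (illustrative sketch only,
 * error handling omitted):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_FIXED,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * after which CPU faults place or migrate the object according to its
 * placement list.
 */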
1307fb87550dSMatthew Auld static int igt_mmap_migrate(void *arg)
1308fb87550dSMatthew Auld {
1309fb87550dSMatthew Auld struct drm_i915_private *i915 = arg;
1310fb87550dSMatthew Auld struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1311fb87550dSMatthew Auld struct intel_memory_region *mr;
1312fb87550dSMatthew Auld enum intel_region_id id;
1313fb87550dSMatthew Auld
1314fb87550dSMatthew Auld for_each_memory_region(mr, i915, id) {
1315fb87550dSMatthew Auld struct intel_memory_region *mixed[] = { mr, system };
1316fb87550dSMatthew Auld struct intel_memory_region *single[] = { mr };
1317fb87550dSMatthew Auld struct ttm_resource_manager *man = mr->region_private;
1318*3c0fa9f4SVille Syrjälä struct resource saved_io;
1319fb87550dSMatthew Auld int err;
1320fb87550dSMatthew Auld
1321fb87550dSMatthew Auld if (mr->private)
1322fb87550dSMatthew Auld continue;
1323fb87550dSMatthew Auld
1324*3c0fa9f4SVille Syrjälä if (!resource_size(&mr->io))
1325fb87550dSMatthew Auld continue;
1326fb87550dSMatthew Auld
1327fb87550dSMatthew Auld /*
1328fb87550dSMatthew Auld * For testing purposes let's force small BAR, if not already
1329fb87550dSMatthew Auld * present.
1330fb87550dSMatthew Auld */
1331*3c0fa9f4SVille Syrjälä saved_io = mr->io;
1332*3c0fa9f4SVille Syrjälä if (resource_size(&mr->io) == mr->total) {
1333*3c0fa9f4SVille Syrjälä resource_size_t io_size = resource_size(&mr->io);
1334fb87550dSMatthew Auld
1335fb87550dSMatthew Auld io_size = rounddown_pow_of_two(io_size >> 1);
1336fb87550dSMatthew Auld if (io_size < PAGE_SIZE)
1337fb87550dSMatthew Auld continue;
1338fb87550dSMatthew Auld
1339*3c0fa9f4SVille Syrjälä mr->io = DEFINE_RES_MEM(mr->io.start, io_size);
1340fb87550dSMatthew Auld i915_ttm_buddy_man_force_visible_size(man,
1341fb87550dSMatthew Auld io_size >> PAGE_SHIFT);
1342fb87550dSMatthew Auld }
1343fb87550dSMatthew Auld
1344fb87550dSMatthew Auld /*
1345fb87550dSMatthew Auld * Allocate in the mappable portion; there should be no surprises here.
1346fb87550dSMatthew Auld */
1347fb87550dSMatthew Auld err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1348fb87550dSMatthew Auld if (err)
1349fb87550dSMatthew Auld goto out_io_size;
1350fb87550dSMatthew Auld
1351fb87550dSMatthew Auld /*
1352fb87550dSMatthew Auld * Allocate in the non-mappable portion, but force migrating to
1353fb87550dSMatthew Auld * the mappable portion on fault (LMEM -> LMEM)
1354fb87550dSMatthew Auld */
1355fb87550dSMatthew Auld err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1356fb87550dSMatthew Auld IGT_MMAP_MIGRATE_TOPDOWN |
1357fb87550dSMatthew Auld IGT_MMAP_MIGRATE_FILL |
1358fb87550dSMatthew Auld IGT_MMAP_MIGRATE_EVICTABLE);
1359fb87550dSMatthew Auld if (err)
1360fb87550dSMatthew Auld goto out_io_size;
1361fb87550dSMatthew Auld
1362fb87550dSMatthew Auld /*
1363fb87550dSMatthew Auld * Allocate in the non-mappable portion, but force spilling into
1364fb87550dSMatthew Auld * system memory on fault (LMEM -> SMEM)
1365fb87550dSMatthew Auld */
1366fb87550dSMatthew Auld err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1367fb87550dSMatthew Auld IGT_MMAP_MIGRATE_TOPDOWN |
1368fb87550dSMatthew Auld IGT_MMAP_MIGRATE_FILL);
1369fb87550dSMatthew Auld if (err)
1370fb87550dSMatthew Auld goto out_io_size;
1371fb87550dSMatthew Auld
1372fb87550dSMatthew Auld /*
1373fb87550dSMatthew Auld * Allocate in the non-mappable portion, but since the mappable
1374fb87550dSMatthew Auld * portion is already full, and we can't spill to system memory,
1375fb87550dSMatthew Auld * we should expect the fault to fail.
1376fb87550dSMatthew Auld */
1377fb87550dSMatthew Auld err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1378fb87550dSMatthew Auld IGT_MMAP_MIGRATE_TOPDOWN |
1379fb87550dSMatthew Auld IGT_MMAP_MIGRATE_FILL |
1380fb87550dSMatthew Auld IGT_MMAP_MIGRATE_UNFAULTABLE);
1381bfe53be2SMatthew Auld if (err)
1382bfe53be2SMatthew Auld goto out_io_size;
1383bfe53be2SMatthew Auld
1384bfe53be2SMatthew Auld /*
1385bfe53be2SMatthew Auld * Allocate in the non-mappable portion, but force migrating to
1386bfe53be2SMatthew Auld * the mappable portion on fault (LMEM -> LMEM). We then also
1387bfe53be2SMatthew Auld * simulate a gpu error when moving the pages when faulting the
1388bfe53be2SMatthew Auld * pages, which should result in wedging the gpu and returning
1389bfe53be2SMatthew Auld * SIGBUS in the fault handler, since we can't fall back to
1390bfe53be2SMatthew Auld * memcpy.
1391bfe53be2SMatthew Auld */
1392bfe53be2SMatthew Auld err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1393bfe53be2SMatthew Auld IGT_MMAP_MIGRATE_TOPDOWN |
1394bfe53be2SMatthew Auld IGT_MMAP_MIGRATE_FILL |
1395bfe53be2SMatthew Auld IGT_MMAP_MIGRATE_EVICTABLE |
1396bfe53be2SMatthew Auld IGT_MMAP_MIGRATE_FAIL_GPU |
1397bfe53be2SMatthew Auld IGT_MMAP_MIGRATE_UNFAULTABLE);
1398fb87550dSMatthew Auld out_io_size:
1399*3c0fa9f4SVille Syrjälä mr->io = saved_io;
1400fb87550dSMatthew Auld i915_ttm_buddy_man_force_visible_size(man,
1401*3c0fa9f4SVille Syrjälä resource_size(&mr->io) >> PAGE_SHIFT);
1402fb87550dSMatthew Auld if (err)
1403fb87550dSMatthew Auld return err;
1404fb87550dSMatthew Auld }
1405fb87550dSMatthew Auld
1406fb87550dSMatthew Auld return 0;
1407fb87550dSMatthew Auld }
1408fb87550dSMatthew Auld
14099f909e21SChris Wilson static const char *repr_mmap_type(enum i915_mmap_type type)
14109f909e21SChris Wilson {
14119f909e21SChris Wilson switch (type) {
14129f909e21SChris Wilson case I915_MMAP_TYPE_GTT: return "gtt";
14139f909e21SChris Wilson case I915_MMAP_TYPE_WB: return "wb";
14149f909e21SChris Wilson case I915_MMAP_TYPE_WC: return "wc";
14159f909e21SChris Wilson case I915_MMAP_TYPE_UC: return "uc";
14167961c5b6SMaarten Lankhorst case I915_MMAP_TYPE_FIXED: return "fixed";
14179f909e21SChris Wilson default: return "unknown";
14189f909e21SChris Wilson }
14199f909e21SChris Wilson }
14209f909e21SChris Wilson
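/*
 * access_process_vm() can only reach objects whose backing store is
 * exposed as struct pages or iomem; skip anything else.
 */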
14210ff37575SThomas Hellström static bool can_access(struct drm_i915_gem_object *obj)
14229f909e21SChris Wilson {
14230ff37575SThomas Hellström bool access;
14240ff37575SThomas Hellström
14250ff37575SThomas Hellström i915_gem_object_lock(obj, NULL);
14260ff37575SThomas Hellström access = i915_gem_object_has_struct_page(obj) ||
14270ff37575SThomas Hellström i915_gem_object_has_iomem(obj);
14280ff37575SThomas Hellström i915_gem_object_unlock(obj);
14290ff37575SThomas Hellström
14300ff37575SThomas Hellström return access;
14319f909e21SChris Wilson }
14329f909e21SChris Wilson
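/*
 * Check that the ptrace-style access_process_vm() path is coherent with
 * plain user access: write A through the user pointer and read it back
 * as x via access_process_vm(), then write B via access_process_vm()
 * and read it back as y through the user pointer.
 */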
14339f909e21SChris Wilson static int __igt_mmap_access(struct drm_i915_private *i915,
14349f909e21SChris Wilson struct drm_i915_gem_object *obj,
14359f909e21SChris Wilson enum i915_mmap_type type)
14369f909e21SChris Wilson {
14379f909e21SChris Wilson unsigned long __user *ptr;
14389f909e21SChris Wilson unsigned long A, B;
14399f909e21SChris Wilson unsigned long x, y;
14409f909e21SChris Wilson unsigned long addr;
14419f909e21SChris Wilson int err;
1442cf3e3e86SMaarten Lankhorst u64 offset;
14439f909e21SChris Wilson
14449f909e21SChris Wilson memset(&A, 0xAA, sizeof(A));
14459f909e21SChris Wilson memset(&B, 0xBB, sizeof(B));
14469f909e21SChris Wilson
14479f909e21SChris Wilson if (!can_mmap(obj, type) || !can_access(obj))
14489f909e21SChris Wilson return 0;
14499f909e21SChris Wilson
1450cf3e3e86SMaarten Lankhorst err = __assign_mmap_offset(obj, type, &offset, NULL);
1451cf3e3e86SMaarten Lankhorst if (err)
1452cf3e3e86SMaarten Lankhorst return err;
14539f909e21SChris Wilson
1454cf3e3e86SMaarten Lankhorst addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
14559f909e21SChris Wilson if (IS_ERR_VALUE(addr))
14569f909e21SChris Wilson return addr;
14579f909e21SChris Wilson ptr = (unsigned long __user *)addr;
14589f909e21SChris Wilson
14599f909e21SChris Wilson err = __put_user(A, ptr);
14609f909e21SChris Wilson if (err) {
14619f909e21SChris Wilson pr_err("%s(%s): failed to write into user mmap\n",
14629f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type));
14639f909e21SChris Wilson goto out_unmap;
14649f909e21SChris Wilson }
14659f909e21SChris Wilson
14661a9c4db4SMichał Winiarski intel_gt_flush_ggtt_writes(to_gt(i915));
14679f909e21SChris Wilson
14689f909e21SChris Wilson err = access_process_vm(current, addr, &x, sizeof(x), 0);
14699f909e21SChris Wilson if (err != sizeof(x)) {
14709f909e21SChris Wilson pr_err("%s(%s): access_process_vm() read failed\n",
14719f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type));
14729f909e21SChris Wilson goto out_unmap;
14739f909e21SChris Wilson }
14749f909e21SChris Wilson
14759f909e21SChris Wilson err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
14769f909e21SChris Wilson if (err != sizeof(B)) {
14779f909e21SChris Wilson pr_err("%s(%s): access_process_vm() write failed\n",
14789f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type));
14799f909e21SChris Wilson goto out_unmap;
14809f909e21SChris Wilson }
14819f909e21SChris Wilson
14821a9c4db4SMichał Winiarski intel_gt_flush_ggtt_writes(to_gt(i915));
14839f909e21SChris Wilson
14849f909e21SChris Wilson err = __get_user(y, ptr);
14859f909e21SChris Wilson if (err) {
14869f909e21SChris Wilson pr_err("%s(%s): failed to read from user mmap\n",
14879f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type));
14889f909e21SChris Wilson goto out_unmap;
14899f909e21SChris Wilson }
14909f909e21SChris Wilson
14919f909e21SChris Wilson if (x != A || y != B) {
14929f909e21SChris Wilson pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
14939f909e21SChris Wilson obj->mm.region->name, repr_mmap_type(type),
14949f909e21SChris Wilson x, y);
14959f909e21SChris Wilson err = -EINVAL;
14969f909e21SChris Wilson goto out_unmap;
14979f909e21SChris Wilson }
14989f909e21SChris Wilson
14999f909e21SChris Wilson out_unmap:
15009f909e21SChris Wilson vm_munmap(addr, obj->base.size);
15019f909e21SChris Wilson return err;
15029f909e21SChris Wilson }
15039f909e21SChris Wilson
15049f909e21SChris Wilson static int igt_mmap_access(void *arg)
15059f909e21SChris Wilson {
15069f909e21SChris Wilson struct drm_i915_private *i915 = arg;
15079f909e21SChris Wilson struct intel_memory_region *mr;
15089f909e21SChris Wilson enum intel_region_id id;
15099f909e21SChris Wilson
15109f909e21SChris Wilson for_each_memory_region(mr, i915, id) {
15119f909e21SChris Wilson struct drm_i915_gem_object *obj;
15129f909e21SChris Wilson int err;
15139f909e21SChris Wilson
1514938d2fd1SMatthew Auld if (mr->private)
1515938d2fd1SMatthew Auld continue;
1516938d2fd1SMatthew Auld
15176d0e4f07SMatthew Auld obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
15189f909e21SChris Wilson if (obj == ERR_PTR(-ENODEV))
15199f909e21SChris Wilson continue;
15209f909e21SChris Wilson
15219f909e21SChris Wilson if (IS_ERR(obj))
15229f909e21SChris Wilson return PTR_ERR(obj);
15239f909e21SChris Wilson
15249f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
15259f909e21SChris Wilson if (err == 0)
15269f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
15279f909e21SChris Wilson if (err == 0)
15289f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
15299f909e21SChris Wilson if (err == 0)
15309f909e21SChris Wilson err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
15317961c5b6SMaarten Lankhorst if (err == 0)
15327961c5b6SMaarten Lankhorst err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
15339f909e21SChris Wilson
15349f909e21SChris Wilson i915_gem_object_put(obj);
15359f909e21SChris Wilson if (err)
15369f909e21SChris Wilson return err;
15379f909e21SChris Wilson }
15389f909e21SChris Wilson
15399f909e21SChris Wilson return 0;
15409f909e21SChris Wilson }
15419f909e21SChris Wilson
154206581862SChris Wilson static int __igt_mmap_gpu(struct drm_i915_private *i915,
154306581862SChris Wilson struct drm_i915_gem_object *obj,
154406581862SChris Wilson enum i915_mmap_type type)
154506581862SChris Wilson {
154606581862SChris Wilson struct intel_engine_cs *engine;
154706581862SChris Wilson unsigned long addr;
1548a5799832SChris Wilson u32 __user *ux;
1549a5799832SChris Wilson u32 bbe;
155006581862SChris Wilson int err;
1551cf3e3e86SMaarten Lankhorst u64 offset;
155206581862SChris Wilson
155306581862SChris Wilson /*
155406581862SChris Wilson * Verify that the mmap access into the backing store aligns with
155506581862SChris Wilson * that of the GPU, i.e. that mmap is indeed writing into the same
155606581862SChris Wilson * page as being read by the GPU.
155706581862SChris Wilson */
155806581862SChris Wilson
155906581862SChris Wilson if (!can_mmap(obj, type))
156006581862SChris Wilson return 0;
156106581862SChris Wilson
156206581862SChris Wilson err = wc_set(obj);
156306581862SChris Wilson if (err == -ENXIO)
156406581862SChris Wilson err = gtt_set(obj);
156506581862SChris Wilson if (err)
156606581862SChris Wilson return err;
156706581862SChris Wilson
1568cf3e3e86SMaarten Lankhorst err = __assign_mmap_offset(obj, type, &offset, NULL);
1569cf3e3e86SMaarten Lankhorst if (err)
1570cf3e3e86SMaarten Lankhorst return err;
157106581862SChris Wilson
1572cf3e3e86SMaarten Lankhorst addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
157306581862SChris Wilson if (IS_ERR_VALUE(addr))
157406581862SChris Wilson return addr;
157506581862SChris Wilson
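/*
 * The whole "batch" is a single MI_BATCH_BUFFER_END written through
 * the CPU mmap; if the write really landed in the object's backing
 * store, execution on each engine completes immediately.
 */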
157606581862SChris Wilson ux = u64_to_user_ptr((u64)addr);
157706581862SChris Wilson bbe = MI_BATCH_BUFFER_END;
157806581862SChris Wilson if (put_user(bbe, ux)) {
157906581862SChris Wilson pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
158006581862SChris Wilson err = -EFAULT;
158106581862SChris Wilson goto out_unmap;
158206581862SChris Wilson }
158306581862SChris Wilson
158406581862SChris Wilson if (type == I915_MMAP_TYPE_GTT)
15851a9c4db4SMichał Winiarski intel_gt_flush_ggtt_writes(to_gt(i915));
158606581862SChris Wilson
158706581862SChris Wilson for_each_uabi_engine(engine, i915) {
158806581862SChris Wilson struct i915_request *rq;
158906581862SChris Wilson struct i915_vma *vma;
159015b6c924SMaarten Lankhorst struct i915_gem_ww_ctx ww;
159106581862SChris Wilson
159206581862SChris Wilson vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
159306581862SChris Wilson if (IS_ERR(vma)) {
159406581862SChris Wilson err = PTR_ERR(vma);
159506581862SChris Wilson goto out_unmap;
159606581862SChris Wilson }
159706581862SChris Wilson
159815b6c924SMaarten Lankhorst i915_gem_ww_ctx_init(&ww, false);
159915b6c924SMaarten Lankhorst retry:
160015b6c924SMaarten Lankhorst err = i915_gem_object_lock(obj, &ww);
160115b6c924SMaarten Lankhorst if (!err)
160215b6c924SMaarten Lankhorst err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
160306581862SChris Wilson if (err)
160415b6c924SMaarten Lankhorst goto out_ww;
160506581862SChris Wilson
160606581862SChris Wilson rq = i915_request_create(engine->kernel_context);
160706581862SChris Wilson if (IS_ERR(rq)) {
160806581862SChris Wilson err = PTR_ERR(rq);
160906581862SChris Wilson goto out_unpin;
161006581862SChris Wilson }
161106581862SChris Wilson
161206581862SChris Wilson err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
16148e4ee5e8SChris Wilson err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
161506581862SChris Wilson i915_request_get(rq);
161606581862SChris Wilson i915_request_add(rq);
161706581862SChris Wilson
161806581862SChris Wilson if (i915_request_wait(rq, 0, HZ / 5) < 0) {
161906581862SChris Wilson struct drm_printer p =
162006581862SChris Wilson drm_info_printer(engine->i915->drm.dev);
162106581862SChris Wilson
162206581862SChris Wilson pr_err("%s(%s, %s): Failed to execute batch\n",
162306581862SChris Wilson __func__, engine->name, obj->mm.region->name);
162406581862SChris Wilson intel_engine_dump(engine, &p,
162506581862SChris Wilson "%s\n", engine->name);
162606581862SChris Wilson
162706581862SChris Wilson intel_gt_set_wedged(engine->gt);
162806581862SChris Wilson err = -EIO;
162906581862SChris Wilson }
163006581862SChris Wilson i915_request_put(rq);
163106581862SChris Wilson
163206581862SChris Wilson out_unpin:
163306581862SChris Wilson i915_vma_unpin(vma);
163415b6c924SMaarten Lankhorst out_ww:
163515b6c924SMaarten Lankhorst if (err == -EDEADLK) {
163615b6c924SMaarten Lankhorst err = i915_gem_ww_ctx_backoff(&ww);
163715b6c924SMaarten Lankhorst if (!err)
163815b6c924SMaarten Lankhorst goto retry;
163915b6c924SMaarten Lankhorst }
164015b6c924SMaarten Lankhorst i915_gem_ww_ctx_fini(&ww);
164106581862SChris Wilson if (err)
164206581862SChris Wilson goto out_unmap;
164306581862SChris Wilson }
164406581862SChris Wilson
164506581862SChris Wilson out_unmap:
164606581862SChris Wilson vm_munmap(addr, obj->base.size);
164706581862SChris Wilson return err;
164806581862SChris Wilson }
164906581862SChris Wilson
165006581862SChris Wilson static int igt_mmap_gpu(void *arg)
165106581862SChris Wilson {
165206581862SChris Wilson struct drm_i915_private *i915 = arg;
165306581862SChris Wilson struct intel_memory_region *mr;
165406581862SChris Wilson enum intel_region_id id;
165506581862SChris Wilson
165606581862SChris Wilson for_each_memory_region(mr, i915, id) {
165706581862SChris Wilson struct drm_i915_gem_object *obj;
165806581862SChris Wilson int err;
165906581862SChris Wilson
1660938d2fd1SMatthew Auld if (mr->private)
1661938d2fd1SMatthew Auld continue;
1662938d2fd1SMatthew Auld
16636d0e4f07SMatthew Auld obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
166406581862SChris Wilson if (obj == ERR_PTR(-ENODEV))
166506581862SChris Wilson continue;
166606581862SChris Wilson
166706581862SChris Wilson if (IS_ERR(obj))
166806581862SChris Wilson return PTR_ERR(obj);
166906581862SChris Wilson
167006581862SChris Wilson err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
167106581862SChris Wilson if (err == 0)
167206581862SChris Wilson err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
16737961c5b6SMaarten Lankhorst if (err == 0)
16747961c5b6SMaarten Lankhorst err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
167506581862SChris Wilson
167606581862SChris Wilson i915_gem_object_put(obj);
167706581862SChris Wilson if (err)
167806581862SChris Wilson return err;
167906581862SChris Wilson }
168006581862SChris Wilson
168106581862SChris Wilson return 0;
168206581862SChris Wilson }
168306581862SChris Wilson
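/*
 * PTE walkers for apply_to_page_range(): after prefaulting, every PTE
 * in the range must be present; after revocation, none may remain.
 */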
16841d1d0af6SChris Wilson static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
16851d1d0af6SChris Wilson {
1686c33c7948SRyan Roberts pte_t ptent = ptep_get(pte);
1687c33c7948SRyan Roberts
1688c33c7948SRyan Roberts if (!pte_present(ptent) || pte_none(ptent)) {
16891d1d0af6SChris Wilson pr_err("missing PTE:%lx\n",
16901d1d0af6SChris Wilson (addr - (unsigned long)data) >> PAGE_SHIFT);
16911d1d0af6SChris Wilson return -EINVAL;
16921d1d0af6SChris Wilson }
16931d1d0af6SChris Wilson
16941d1d0af6SChris Wilson return 0;
16951d1d0af6SChris Wilson }
16961d1d0af6SChris Wilson
16971d1d0af6SChris Wilson static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
16981d1d0af6SChris Wilson {
1699c33c7948SRyan Roberts pte_t ptent = ptep_get(pte);
1700c33c7948SRyan Roberts
1701c33c7948SRyan Roberts if (pte_present(ptent) && !pte_none(ptent)) {
17021d1d0af6SChris Wilson pr_err("present PTE:%lx; expected to be revoked\n",
17031d1d0af6SChris Wilson (addr - (unsigned long)data) >> PAGE_SHIFT);
17041d1d0af6SChris Wilson return -EINVAL;
17051d1d0af6SChris Wilson }
17061d1d0af6SChris Wilson
17071d1d0af6SChris Wilson return 0;
17081d1d0af6SChris Wilson }
17091d1d0af6SChris Wilson
17101d1d0af6SChris Wilson static int check_present(unsigned long addr, unsigned long len)
17111d1d0af6SChris Wilson {
17121d1d0af6SChris Wilson return apply_to_page_range(current->mm, addr, len,
17131d1d0af6SChris Wilson check_present_pte, (void *)addr);
17141d1d0af6SChris Wilson }
17151d1d0af6SChris Wilson
17161d1d0af6SChris Wilson static int check_absent(unsigned long addr, unsigned long len)
17171d1d0af6SChris Wilson {
17181d1d0af6SChris Wilson return apply_to_page_range(current->mm, addr, len,
17191d1d0af6SChris Wilson check_absent_pte, (void *)addr);
17201d1d0af6SChris Wilson }
17211d1d0af6SChris Wilson
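/*
 * Touch one byte in each page (and the final byte) so the fault handler
 * populates the entire range before the PTEs are inspected.
 */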
17221d1d0af6SChris Wilson static int prefault_range(u64 start, u64 len)
17231d1d0af6SChris Wilson {
17241d1d0af6SChris Wilson const char __user *addr, *end;
17251d1d0af6SChris Wilson char __maybe_unused c;
17261d1d0af6SChris Wilson int err;
17271d1d0af6SChris Wilson
17281d1d0af6SChris Wilson addr = u64_to_user_ptr(start);
17291d1d0af6SChris Wilson end = addr + len;
17301d1d0af6SChris Wilson
17311d1d0af6SChris Wilson for (; addr < end; addr += PAGE_SIZE) {
17321d1d0af6SChris Wilson err = __get_user(c, addr);
17331d1d0af6SChris Wilson if (err)
17341d1d0af6SChris Wilson return err;
17351d1d0af6SChris Wilson }
17361d1d0af6SChris Wilson
17371d1d0af6SChris Wilson return __get_user(c, end - 1);
17381d1d0af6SChris Wilson }
17391d1d0af6SChris Wilson
17409771d5f7SAbdiel Janulgue static int __igt_mmap_revoke(struct drm_i915_private *i915,
17419771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj,
17429771d5f7SAbdiel Janulgue enum i915_mmap_type type)
17431d1d0af6SChris Wilson {
17441d1d0af6SChris Wilson unsigned long addr;
17451d1d0af6SChris Wilson int err;
1746cf3e3e86SMaarten Lankhorst u64 offset;
17471d1d0af6SChris Wilson
17489771d5f7SAbdiel Janulgue if (!can_mmap(obj, type))
17491d1d0af6SChris Wilson return 0;
17501d1d0af6SChris Wilson
1751cf3e3e86SMaarten Lankhorst err = __assign_mmap_offset(obj, type, &offset, NULL);
1752cf3e3e86SMaarten Lankhorst if (err)
1753cf3e3e86SMaarten Lankhorst return err;
17541d1d0af6SChris Wilson
1755cf3e3e86SMaarten Lankhorst addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
17569771d5f7SAbdiel Janulgue if (IS_ERR_VALUE(addr))
17579771d5f7SAbdiel Janulgue return addr;
17581d1d0af6SChris Wilson
17591d1d0af6SChris Wilson err = prefault_range(addr, obj->base.size);
17601d1d0af6SChris Wilson if (err)
17611d1d0af6SChris Wilson goto out_unmap;
17621d1d0af6SChris Wilson
17631d1d0af6SChris Wilson err = check_present(addr, obj->base.size);
17649771d5f7SAbdiel Janulgue if (err) {
17659771d5f7SAbdiel Janulgue pr_err("%s: was not present\n", obj->mm.region->name);
17661d1d0af6SChris Wilson goto out_unmap;
17679771d5f7SAbdiel Janulgue }
17681d1d0af6SChris Wilson
17691d1d0af6SChris Wilson /*
17701d1d0af6SChris Wilson * After unbinding the object from the GGTT, its address may be reused
17711d1d0af6SChris Wilson * for other objects. Ergo we have to revoke the previous mmap PTE
17721d1d0af6SChris Wilson * access as it no longer points to the same object.
17731d1d0af6SChris Wilson */
17740f341974SMaarten Lankhorst i915_gem_object_lock(obj, NULL);
17751d1d0af6SChris Wilson err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
17760f341974SMaarten Lankhorst i915_gem_object_unlock(obj);
17771d1d0af6SChris Wilson if (err) {
17781d1d0af6SChris Wilson pr_err("Failed to unbind object!\n");
17791d1d0af6SChris Wilson goto out_unmap;
17801d1d0af6SChris Wilson }
17811d1d0af6SChris Wilson
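/*
 * For non-GTT mmaps the PTEs point at the backing store itself, so
 * dropping the pages must also revoke the CPU mapping; the GTT case
 * was already revoked by the unbind above.
 */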
1782cc662126SAbdiel Janulgue if (type != I915_MMAP_TYPE_GTT) {
17836f791ffeSMaarten Lankhorst i915_gem_object_lock(obj, NULL);
1784cc662126SAbdiel Janulgue __i915_gem_object_put_pages(obj);
17856f791ffeSMaarten Lankhorst i915_gem_object_unlock(obj);
1786cc662126SAbdiel Janulgue if (i915_gem_object_has_pages(obj)) {
1787cc662126SAbdiel Janulgue pr_err("Failed to put pages on object!\n");
1788cc662126SAbdiel Janulgue err = -EINVAL;
1789cc662126SAbdiel Janulgue goto out_unmap;
1790cc662126SAbdiel Janulgue }
1791cc662126SAbdiel Janulgue }
1792cc662126SAbdiel Janulgue
17931d1d0af6SChris Wilson err = check_absent(addr, obj->base.size);
17949771d5f7SAbdiel Janulgue if (err) {
17959771d5f7SAbdiel Janulgue pr_err("%s: was not absent\n", obj->mm.region->name);
17961d1d0af6SChris Wilson goto out_unmap;
17979771d5f7SAbdiel Janulgue }
17981d1d0af6SChris Wilson
17991d1d0af6SChris Wilson out_unmap:
18001d1d0af6SChris Wilson vm_munmap(addr, obj->base.size);
18011d1d0af6SChris Wilson return err;
18021d1d0af6SChris Wilson }
18031d1d0af6SChris Wilson
18049771d5f7SAbdiel Janulgue static int igt_mmap_revoke(void *arg)
1805cc662126SAbdiel Janulgue {
18069771d5f7SAbdiel Janulgue struct drm_i915_private *i915 = arg;
18079771d5f7SAbdiel Janulgue struct intel_memory_region *mr;
18089771d5f7SAbdiel Janulgue enum intel_region_id id;
18099771d5f7SAbdiel Janulgue
18109771d5f7SAbdiel Janulgue for_each_memory_region(mr, i915, id) {
18119771d5f7SAbdiel Janulgue struct drm_i915_gem_object *obj;
18129771d5f7SAbdiel Janulgue int err;
18139771d5f7SAbdiel Janulgue
1814938d2fd1SMatthew Auld if (mr->private)
1815938d2fd1SMatthew Auld continue;
1816938d2fd1SMatthew Auld
18176d0e4f07SMatthew Auld obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
18189771d5f7SAbdiel Janulgue if (obj == ERR_PTR(-ENODEV))
18199771d5f7SAbdiel Janulgue continue;
18209771d5f7SAbdiel Janulgue
18219771d5f7SAbdiel Janulgue if (IS_ERR(obj))
18229771d5f7SAbdiel Janulgue return PTR_ERR(obj);
18239771d5f7SAbdiel Janulgue
18249771d5f7SAbdiel Janulgue err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
18259771d5f7SAbdiel Janulgue if (err == 0)
18269771d5f7SAbdiel Janulgue err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
18277961c5b6SMaarten Lankhorst if (err == 0)
18287961c5b6SMaarten Lankhorst err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
18299771d5f7SAbdiel Janulgue
18309771d5f7SAbdiel Janulgue i915_gem_object_put(obj);
18319771d5f7SAbdiel Janulgue if (err)
18329771d5f7SAbdiel Janulgue return err;
1833cc662126SAbdiel Janulgue }
1834cc662126SAbdiel Janulgue
18359771d5f7SAbdiel Janulgue return 0;
1836cc662126SAbdiel Janulgue }
1837cc662126SAbdiel Janulgue
1838b414fcd5SChris Wilson int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1839b414fcd5SChris Wilson {
1840b414fcd5SChris Wilson static const struct i915_subtest tests[] = {
1841b414fcd5SChris Wilson SUBTEST(igt_partial_tiling),
184207e98eb0SChris Wilson SUBTEST(igt_smoke_tiling),
1843b414fcd5SChris Wilson SUBTEST(igt_mmap_offset_exhaustion),
18449771d5f7SAbdiel Janulgue SUBTEST(igt_mmap),
1845fb87550dSMatthew Auld SUBTEST(igt_mmap_migrate),
18469f909e21SChris Wilson SUBTEST(igt_mmap_access),
18479771d5f7SAbdiel Janulgue SUBTEST(igt_mmap_revoke),
184806581862SChris Wilson SUBTEST(igt_mmap_gpu),
1849b414fcd5SChris Wilson };
1850b414fcd5SChris Wilson
185161faec5fSMatthew Brost return i915_live_subtests(tests, i915);
1852b414fcd5SChris Wilson }
1853