// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "intel_ggtt_gmch.h"

#include <drm/intel-gtt.h>

#include <linux/agp_backend.h>

#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_gtt.h"
#include "intel_gt_regs.h"
#include "intel_gt.h"

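/*
 * On GMCH-based platforms (graphics version 5 and earlier) i915 does not
 * write GGTT PTEs directly; every update is routed through the intel-gtt
 * bridge driver sitting on top of the old AGP backend. The callbacks below
 * adapt the GGTT vfunc interface onto that layer.
 *
 * Note that on these legacy platforms the PAT index is defined to be
 * identical to the cache-level value, which is why pat_index can be
 * compared against I915_CACHE_NONE directly.
 */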
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  unsigned int pat_index,
				  u32 unused)
{
	unsigned int flags = (pat_index == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     unsigned int pat_index,
				     u32 unused)
{
	unsigned int flags = (pat_index == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
					 flags);
}

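/*
 * Installed as ggtt->invalidate below: ask the intel-gtt layer to flush
 * its write buffers so that preceding PTE updates reach the chipset.
 */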
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gmch_gtt_flush();
}

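/*
 * The intel-gtt layer rewrites the cleared range with scratch-page PTEs,
 * which is why the probe below installs this hook for both clear_range
 * and scratch_range.
 */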
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static void gmch_ggtt_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

/*
 * Certain Gen5 chipsets require idling the GPU before unmapping anything from
 * the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!i915_vtd_active(i915))
		return false;

	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
		return true;

	return false;
}

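/*
 * Take over the GTT through the intel-gtt bridge driver and plug the
 * GMCH-specific operations into the GGTT vfuncs. Returns -EIO if the
 * intel-gtt layer fails to set up the chipset.
 */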
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = gmch_ggtt_insert_page;
	ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
	ggtt->vm.clear_range = gmch_ggtt_clear_range;
	ggtt->vm.scratch_range = gmch_ggtt_clear_range;
	ggtt->vm.cleanup = gmch_ggtt_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

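/*
 * Enable the GTT hardware through the intel-gtt layer;
 * intel_gmch_enable_gtt() returning false is reported as -EIO.
 */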
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
{
	if (!intel_gmch_enable_gtt())
		return -EIO;

	return 0;
}

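/*
 * Plain wrapper so callers elsewhere in the driver can trigger a GMCH
 * GTT flush without going through a ggtt pointer.
 */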
void intel_ggtt_gmch_flush(void)
{
	intel_gmch_gtt_flush();
}