xref: /linux/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c (revision 4359a011e259a4608afc7fb3635370c9d4ba5943)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "intel_ggtt_gmch.h"
7 
8 #include <drm/intel-gtt.h>
9 #include <drm/i915_drm.h>
10 
11 #include <linux/agp_backend.h>
12 
13 #include "i915_drv.h"
14 #include "i915_utils.h"
15 #include "intel_gtt.h"
16 #include "intel_gt_regs.h"
17 #include "intel_gt.h"
18 
19 static void gmch_ggtt_insert_page(struct i915_address_space *vm,
20 				  dma_addr_t addr,
21 				  u64 offset,
22 				  enum i915_cache_level cache_level,
23 				  u32 unused)
24 {
25 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
26 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
27 
28 	intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
29 }
30 
31 static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
32 				     struct i915_vma_resource *vma_res,
33 				     enum i915_cache_level cache_level,
34 				     u32 unused)
35 {
36 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
37 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
38 
39 	intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
40 					 flags);
41 }
42 
/*
 * Make freshly written GTT entries visible to the hardware by flushing the
 * GMCH GTT via the intel-gtt helper. Installed as ggtt->invalidate and
 * called after PTE updates.
 */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gmch_gtt_flush();
}
47 
/*
 * Clear a byte range of global GTT entries. @start and @length are in
 * bytes and converted to page-granular PTE indices/counts for the
 * intel-gtt helper.
 */
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
53 
/*
 * Tear down the GMCH GTT mapping set up by intel_gmch_probe(). Installed
 * as the vm cleanup hook (ggtt->vm.cleanup).
 */
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
58 
59 /*
60  * Certain Gen5 chipsets require idling the GPU before unmapping anything from
61  * the GTT when VT-d is enabled.
62  */
63 static bool needs_idle_maps(struct drm_i915_private *i915)
64 {
65 	/*
66 	 * Query intel_iommu to see if we need the workaround. Presumably that
67 	 * was loaded first.
68 	 */
69 	if (!i915_vtd_active(i915))
70 		return false;
71 
72 	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
73 		return true;
74 
75 	return false;
76 }
77 
78 int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
79 {
80 	struct drm_i915_private *i915 = ggtt->vm.i915;
81 	phys_addr_t gmadr_base;
82 	int ret;
83 
84 	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
85 	if (!ret) {
86 		drm_err(&i915->drm, "failed to set up gmch\n");
87 		return -EIO;
88 	}
89 
90 	intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
91 
92 	ggtt->gmadr =
93 		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
94 
95 	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
96 	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
97 
98 	if (needs_idle_maps(i915)) {
99 		drm_notice(&i915->drm,
100 			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
101 		ggtt->do_idle_maps = true;
102 	}
103 
104 	ggtt->vm.insert_page = gmch_ggtt_insert_page;
105 	ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
106 	ggtt->vm.clear_range = gmch_ggtt_clear_range;
107 	ggtt->vm.cleanup = gmch_ggtt_remove;
108 
109 	ggtt->invalidate = gmch_ggtt_invalidate;
110 
111 	ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
112 	ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;
113 
114 	if (unlikely(ggtt->do_idle_maps))
115 		drm_notice(&i915->drm,
116 			   "Applying Ironlake quirks for intel_iommu\n");
117 
118 	return 0;
119 }
120 
121 int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
122 {
123 	if (!intel_gmch_enable_gtt())
124 		return -EIO;
125 
126 	return 0;
127 }
128 
/*
 * Flush the GMCH GTT. Exported wrapper around the intel-gtt helper for
 * callers outside this file.
 */
void intel_ggtt_gmch_flush(void)
{
	intel_gmch_gtt_flush();
}
132 }
133