/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/iommu.h>

#include <drm/drm_managed.h>

#include "display/intel_display_device.h"

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
#include "mock_region.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"

void mock_device_flush(struct drm_i915_private *i915)
{
        struct intel_gt *gt = to_gt(i915);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        do {
                for_each_engine(engine, gt, id)
                        mock_engine_flush(engine);
        } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
                                                  NULL));
}

static void mock_device_release(struct drm_device *dev)
{
        struct drm_i915_private *i915 = to_i915(dev);

        if (!i915->do_release)
                goto out;

        mock_device_flush(i915);
        intel_gt_driver_remove(to_gt(i915));

        i915_gem_drain_workqueue(i915);

        mock_fini_ggtt(to_gt(i915)->ggtt);
        destroy_workqueue(i915->unordered_wq);
        destroy_workqueue(i915->wq);

        intel_region_ttm_device_fini(i915);
        intel_gt_driver_late_release_all(i915);
        intel_memory_regions_driver_release(i915);

        drm_mode_config_cleanup(&i915->drm);

out:
        i915_params_free(&i915->params);
}

static const struct drm_driver mock_driver = {
        .name = "mock",
        .driver_features = DRIVER_GEM,
        .release = mock_device_release,
};

static void release_dev(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        kfree(pdev);
}

static int pm_domain_resume(struct device *dev)
{
        return pm_generic_runtime_resume(dev);
}

static int pm_domain_suspend(struct device *dev)
{
        return pm_generic_runtime_suspend(dev);
}

static struct dev_pm_domain pm_domain = {
        .ops = {
                .runtime_suspend = pm_domain_suspend,
                .runtime_resume = pm_domain_resume,
        },
};

static void mock_gt_probe(struct drm_i915_private *i915)
{
        i915->gt[0]->name = "Mock GT";
}

static const struct intel_device_info mock_info = {
        .__runtime.graphics.ip.ver = -1,
        .__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K |
                                 I915_GTT_PAGE_SIZE_64K |
                                 I915_GTT_PAGE_SIZE_2M),
        .memory_regions = BIT(INTEL_REGION_SMEM),
        .platform_engine_mask = BIT(0),

        /* simply use legacy cache level for mock device */
        .max_pat_index = 3,
        .cachelevel_to_pat = {
                [I915_CACHE_NONE]   = 0,
                [I915_CACHE_LLC]    = 1,
                [I915_CACHE_L3_LLC] = 2,
                [I915_CACHE_WT]     = 3,
        },
};

struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU)
        static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
        struct drm_i915_private *i915;
        struct intel_display *display;
        struct pci_dev *pdev;
        int ret;

        pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return NULL;
        device_initialize(&pdev->dev);
        pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
        pdev->dev.release = release_dev;
        dev_set_name(&pdev->dev, "mock");
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && IS_ENABLED(CONFIG_INTEL_IOMMU)
        /* HACK to disable iommu for the fake device; force identity mapping */
        pdev->dev.iommu = &fake_iommu;
#endif
        if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
                put_device(&pdev->dev);
                return NULL;
        }

        i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
                                  struct drm_i915_private, drm);
        if (IS_ERR(i915)) {
                pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
                devres_release_group(&pdev->dev, NULL);
                put_device(&pdev->dev);

                return NULL;
        }

        pci_set_drvdata(pdev, &i915->drm);

        /* Device parameters start as a copy of module parameters. */
        i915_params_copy(&i915->params, &i915_modparams);

        /* Set up device info and initial runtime info. */
        intel_device_info_driver_create(i915, pdev->device, &mock_info);

        display = intel_display_device_probe(pdev);
        if (IS_ERR(display))
                goto err_device;

        i915->display = display;

        dev_pm_domain_set(&pdev->dev, &pm_domain);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        if (pm_runtime_enabled(&pdev->dev))
                WARN_ON(pm_runtime_get_sync(&pdev->dev));

        intel_runtime_pm_init_early(&i915->runtime_pm);
        /* wakeref tracking has significant overhead */
        i915->runtime_pm.no_wakeref_tracking = true;

        /* Using the global GTT may ask questions about KMS users, so prepare */
        drm_mode_config_init(&i915->drm);

        intel_memory_regions_hw_probe(i915);

        spin_lock_init(&i915->gpu_error.lock);

        i915_gem_init__mm(i915);
        intel_root_gt_init_early(i915);
        mock_uncore_init(&i915->uncore, i915);
        atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
        to_gt(i915)->awake = INTEL_WAKEREF_MOCK_GT;
        mock_gt_probe(i915);

        ret = intel_region_ttm_device_init(i915);
        if (ret)
                goto err_ttm;

        i915->wq = alloc_ordered_workqueue("mock", 0);
        if (!i915->wq)
                goto err_drv;

        i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
        if (!i915->unordered_wq)
                goto err_wq;

        mock_init_contexts(i915);

        /* allocate the ggtt */
        ret = intel_gt_assign_ggtt(to_gt(i915));
        if (ret)
                goto err_unlock;

        mock_init_ggtt(to_gt(i915));
        to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);

        to_gt(i915)->info.engine_mask = BIT(0);

        to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
        if (!to_gt(i915)->engine[RCS0])
                goto err_unlock;

        if (mock_engine_init(to_gt(i915)->engine[RCS0]))
                goto err_context;

        __clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
        intel_engines_driver_register(i915);

        i915->do_release = true;
        ida_init(&i915->selftest.mock_region_instances);

        return i915;

err_context:
        intel_gt_driver_remove(to_gt(i915));
err_unlock:
        destroy_workqueue(i915->unordered_wq);
err_wq:
        destroy_workqueue(i915->wq);
err_drv:
        intel_region_ttm_device_fini(i915);
err_ttm:
        intel_gt_driver_late_release_all(i915);
        intel_memory_regions_driver_release(i915);
        drm_mode_config_cleanup(&i915->drm);
err_device:
        mock_destroy_device(i915);

        return NULL;
}

void mock_destroy_device(struct drm_i915_private *i915)
{
        struct device *dev = i915->drm.dev;

        intel_display_device_remove(i915->display);

        devres_release_group(dev, NULL);
        put_device(dev);
}
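
/*
 * Example usage (illustrative sketch only, not part of this file): mock
 * selftest entry points generally create the fake device with
 * mock_gem_device(), run their subtests against it, and tear it down with
 * mock_destroy_device(). The subtest igt_example and the entry point
 * example_mock_selftests below are hypothetical placeholders; SUBTEST(),
 * struct i915_subtest and i915_subtests() come from i915_selftest.h.
 *
 *	static int igt_example(void *arg)
 *	{
 *		struct drm_i915_private *i915 = arg;
 *
 *		// The mock device behaves like a real i915 for GEM paths.
 *		return i915 ? 0 : -EINVAL;
 *	}
 *
 *	int example_mock_selftests(void)
 *	{
 *		static const struct i915_subtest tests[] = {
 *			SUBTEST(igt_example),
 *		};
 *		struct drm_i915_private *i915;
 *		int err;
 *
 *		i915 = mock_gem_device();
 *		if (!i915)
 *			return -ENOMEM;
 *
 *		err = i915_subtests(tests, i915);
 *
 *		mock_destroy_device(i915);
 *		return err;
 *	}
 */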