// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"

#define GSC_BAR_LENGTH	0x00000FFC

static void gsc_irq_mask(struct irq_data *d)
{
	/* generic irq handling */
}

static void gsc_irq_unmask(struct irq_data *d)
{
	/* generic irq handling */
}

static struct irq_chip gsc_irq_chip = {
	.name = "gsc_irq_chip",
	.irq_mask = gsc_irq_mask,
	.irq_unmask = gsc_irq_unmask,
};

static int gsc_irq_init(int irq)
{
	irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
				      handle_simple_irq, "gsc_irq_handler");

	return irq_set_chip_data(irq, NULL);
}

/*
 * Allocate a contiguous, CPU-cleared chunk of local memory for the GSC
 * extended operational memory and keep its pages pinned for the lifetime
 * of the interface.
 */
static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_lmem(gt->i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj)) {
		drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
		goto out_put;
	}

	intf->gem_obj = obj;

	return 0;

out_put:
	i915_gem_object_put(obj);
	return err;
}

static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;

	if (i915_gem_object_has_pinned_pages(obj))
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
}

struct gsc_def {
	const char *name;
	unsigned long bar;
	size_t bar_size;
	bool use_polling;
	bool slow_firmware;
	size_t lmem_size;
};

/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
	{
		/* HECI1 not yet implemented. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static const struct gsc_def gsc_def_xehpsdv[] = {
	{
		/* HECI1 not enabled on the device. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.use_polling = true,
		.slow_firmware = true,
	}
};

static const struct gsc_def gsc_def_dg2[] = {
	{
		.name = "mei-gsc",
		.bar = DG2_GSC_HECI1_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.lmem_size = SZ_4M,
	},
	{
		.name = "mei-gscfi",
		.bar = DG2_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static void gsc_release_dev(struct device *dev)
{
	struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	kfree(adev);
}

/*
 * Tear down a single interface: remove the auxiliary device (dropping the
 * HuC notifier for HECI1), free the irq descriptor and release the extended
 * operational memory, if any.
 */
static void gsc_destroy_one(struct drm_i915_private *i915,
			    struct intel_gsc *gsc, unsigned int intf_id)
{
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

	if (intf->adev) {
		struct auxiliary_device *aux_dev = &intf->adev->aux_dev;

		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);

		auxiliary_device_delete(aux_dev);
		auxiliary_device_uninit(aux_dev);
		intf->adev = NULL;
	}

	if (intf->irq >= 0)
		irq_free_desc(intf->irq);
	intf->irq = -1;

	gsc_ext_om_destroy(intf);
}

/*
 * Set up a single interface: allocate an irq descriptor (unless the
 * interface uses polling), optionally allocate extended operational memory
 * and register an MEI auxiliary device on top of the parent PCI device.
 */
static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
			 unsigned int intf_id)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct mei_aux_device *adev;
	struct auxiliary_device *aux_dev;
	const struct gsc_def *def;
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];
	int ret;

	intf->irq = -1;
	intf->id = intf_id;

	if (intf_id == 0 && !HAS_HECI_PXP(i915))
		return;

	if (IS_DG1(i915)) {
		def = &gsc_def_dg1[intf_id];
	} else if (IS_XEHPSDV(i915)) {
		def = &gsc_def_xehpsdv[intf_id];
	} else if (IS_DG2(i915)) {
		def = &gsc_def_dg2[intf_id];
	} else {
		drm_warn_once(&i915->drm, "Unknown platform\n");
		return;
	}

	if (!def->name) {
		drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
		return;
	}

	/* skip irq initialization */
	if (def->use_polling)
		goto add_device;

	intf->irq = irq_alloc_desc(0);
	if (intf->irq < 0) {
		drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
		goto fail;
	}

	ret = gsc_irq_init(intf->irq);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
		goto fail;
	}

add_device:
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		goto fail;

	if (def->lmem_size) {
		drm_dbg(&i915->drm, "setting up GSC lmem\n");

		if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
			drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
			kfree(adev);
			goto fail;
		}

		adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
		adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
	}

	adev->irq = intf->irq;
	adev->bar.parent = &pdev->resource[0];
	adev->bar.start = def->bar + pdev->resource[0].start;
	adev->bar.end = adev->bar.start + def->bar_size - 1;
	adev->bar.flags = IORESOURCE_MEM;
	adev->bar.desc = IORES_DESC_NONE;
	adev->slow_firmware = def->slow_firmware;

	aux_dev = &adev->aux_dev;
	aux_dev->name = def->name;
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
		      PCI_DEVID(pdev->bus->number, pdev->devfn);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = gsc_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
		kfree(adev);
		goto fail;
	}

	intf->adev = adev; /* needed by the notifier */

	if (intf_id == 0)
		intel_huc_register_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
						aux_dev->dev.bus);

	ret = auxiliary_device_add(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);
		intf->adev = NULL;

		/* adev will be freed with the put_device() and .release sequence */
		auxiliary_device_uninit(aux_dev);
		goto fail;
	}

	return;
fail:
	gsc_destroy_one(i915, gsc, intf->id);
}

static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
	int ret;

	if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
		drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
		return;
	}

	if (!HAS_HECI_GSC(gt->i915)) {
		drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
		return;
	}

	if (gt->gsc.intf[intf_id].irq < 0)
		return;

	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
	if (ret)
		drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}

void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (iir & GSC_IRQ_INTF(0))
		gsc_irq_handler(gt, 0);
	if (iir & GSC_IRQ_INTF(1))
		gsc_irq_handler(gt, 1);
}

void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
	unsigned int i;

	if (!HAS_HECI_GSC(i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_init_one(i915, gsc, i);
}

void intel_gsc_fini(struct intel_gsc *gsc)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	unsigned int i;

	if (!HAS_HECI_GSC(gt->i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_destroy_one(gt->i915, gsc, i);
}
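
/*
 * Usage sketch (an assumption about the surrounding driver code, not part of
 * this file): the GT code is expected to create the auxiliary devices once at
 * driver load, forward GSC interrupts from its irq path and tear everything
 * down on unload, roughly:
 *
 *	intel_gsc_init(&gt->gsc, gt->i915);
 *	...
 *	intel_gsc_irq_handler(gt, iir);	// from the GT interrupt handler
 *	...
 *	intel_gsc_fini(&gt->gsc);
 */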