// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
 */

#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"

#define GSC_BAR_LENGTH	0x00000FFC

static void gsc_irq_mask(struct irq_data *d)
{
	/* generic irq handling */
}

static void gsc_irq_unmask(struct irq_data *d)
{
	/* generic irq handling */
}

static struct irq_chip gsc_irq_chip = {
	.name = "gsc_irq_chip",
	.irq_mask = gsc_irq_mask,
	.irq_unmask = gsc_irq_unmask,
};

static int gsc_irq_init(int irq)
{
	irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
				      handle_simple_irq, "gsc_irq_handler");

	return irq_set_chip_data(irq, NULL);
}

static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_lmem(gt->i915, size,
					  I915_BO_ALLOC_CONTIGUOUS |
					  I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj)) {
		gt_err(gt, "Failed to allocate gsc memory\n");
		return PTR_ERR(obj);
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		gt_err(gt, "Failed to pin pages for gsc memory\n");
		goto out_put;
	}

	intf->gem_obj = obj;

	return 0;

out_put:
	i915_gem_object_put(obj);
	return err;
}

static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
	struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;

	if (i915_gem_object_has_pinned_pages(obj))
		i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);
}
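
/*
 * Static description of each HECI interface: the device name exposed on the
 * auxiliary bus for the matching mei driver to bind to, the HECI register
 * base within the device's BAR0, and, where required, the amount of local
 * memory to reserve as the GSC extended operational memory.
 */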
struct gsc_def {
	const char *name;
	unsigned long bar;
	size_t bar_size;
	bool use_polling;
	bool slow_firmware;
	size_t lmem_size;
};

/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
	{
		/* HECI1 not yet implemented. */
	},
	{
		.name = "mei-gscfi",
		.bar = DG1_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static const struct gsc_def gsc_def_dg2[] = {
	{
		.name = "mei-gsc",
		.bar = DG2_GSC_HECI1_BASE,
		.bar_size = GSC_BAR_LENGTH,
		.lmem_size = SZ_4M,
	},
	{
		.name = "mei-gscfi",
		.bar = DG2_GSC_HECI2_BASE,
		.bar_size = GSC_BAR_LENGTH,
	}
};

static void gsc_release_dev(struct device *dev)
{
	struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
	struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

	kfree(adev);
}

static void gsc_destroy_one(struct drm_i915_private *i915,
			    struct intel_gsc *gsc, unsigned int intf_id)
{
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

	if (intf->adev) {
		struct auxiliary_device *aux_dev = &intf->adev->aux_dev;

		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);

		auxiliary_device_delete(aux_dev);
		auxiliary_device_uninit(aux_dev);
		intf->adev = NULL;
	}

	if (intf->irq >= 0)
		irq_free_desc(intf->irq);
	intf->irq = -1;

	gsc_ext_om_destroy(intf);
}

static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
			 unsigned int intf_id)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct mei_aux_device *adev;
	struct auxiliary_device *aux_dev;
	const struct gsc_def *def;
	struct intel_gsc_intf *intf = &gsc->intf[intf_id];
	int ret;

	intf->irq = -1;
	intf->id = intf_id;

	/*
	 * On the multi-tile setups the GSC is functional on the first tile only
	 */
	if (gsc_to_gt(gsc)->info.id != 0) {
		drm_dbg(&i915->drm, "Not initializing gsc for remote tiles\n");
		return;
	}

	if (intf_id == 0 && !HAS_HECI_PXP(i915))
		return;

	if (IS_DG1(i915)) {
		def = &gsc_def_dg1[intf_id];
	} else if (IS_DG2(i915)) {
		def = &gsc_def_dg2[intf_id];
	} else {
		drm_warn_once(&i915->drm, "Unknown platform\n");
		return;
	}

	if (!def->name) {
		drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
		return;
	}

	/* skip irq initialization */
	if (def->use_polling)
		goto add_device;

	intf->irq = irq_alloc_desc(0);
	if (intf->irq < 0) {
		drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
		goto fail;
	}

	ret = gsc_irq_init(intf->irq);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
		goto fail;
	}

add_device:
	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		goto fail;

	if (def->lmem_size) {
		drm_dbg(&i915->drm, "setting up GSC lmem\n");

		if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
			drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
			kfree(adev);
			goto fail;
		}

		adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
		adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
	}

	adev->irq = intf->irq;
	adev->bar.parent = &pdev->resource[0];
	adev->bar.start = def->bar + pdev->resource[0].start;
	adev->bar.end = adev->bar.start + def->bar_size - 1;
	adev->bar.flags = IORESOURCE_MEM;
	adev->bar.desc = IORES_DESC_NONE;
	adev->slow_firmware = def->slow_firmware;
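
	/*
	 * Encode the PCI domain and device/function in the auxiliary device
	 * id so that each card exposes uniquely named mei devices on the
	 * auxiliary bus.
	 */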
	aux_dev = &adev->aux_dev;
	aux_dev->name = def->name;
	aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
		      PCI_DEVID(pdev->bus->number, pdev->devfn);
	aux_dev->dev.parent = &pdev->dev;
	aux_dev->dev.release = gsc_release_dev;

	ret = auxiliary_device_init(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
		kfree(adev);
		goto fail;
	}

	intf->adev = adev; /* needed by the notifier */

	if (intf_id == 0)
		intel_huc_register_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
						aux_dev->dev.bus);

	ret = auxiliary_device_add(aux_dev);
	if (ret < 0) {
		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
		if (intf_id == 0)
			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
							  aux_dev->dev.bus);
		intf->adev = NULL;

		/* adev will be freed with the put_device() and .release sequence */
		auxiliary_device_uninit(aux_dev);
		goto fail;
	}

	return;
fail:
	gsc_destroy_one(i915, gsc, intf->id);
}

static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
	int ret;

	if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
		gt_warn_once(gt, "GSC irq: intf_id %d is out of range", intf_id);
		return;
	}

	if (!HAS_HECI_GSC(gt->i915)) {
		gt_warn_once(gt, "GSC irq: not supported");
		return;
	}

	if (gt->gsc.intf[intf_id].irq < 0)
		return;

	ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
	if (ret)
		gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret);
}

void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (iir & GSC_IRQ_INTF(0))
		gsc_irq_handler(gt, 0);
	if (iir & GSC_IRQ_INTF(1))
		gsc_irq_handler(gt, 1);
}

void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
	unsigned int i;

	if (!HAS_HECI_GSC(i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_init_one(i915, gsc, i);
}

void intel_gsc_fini(struct intel_gsc *gsc)
{
	struct intel_gt *gt = gsc_to_gt(gsc);
	unsigned int i;

	if (!HAS_HECI_GSC(gt->i915))
		return;

	for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
		gsc_destroy_one(gt->i915, gsc, i);
}