// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_MAX_NUM_VFS	32
static struct workqueue_struct *adf_misc_wq;

static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
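	/*
	 * One MSI-X vector per ring bank, plus one for the AE cluster
	 * interrupt (VF2PF, power management and RAS events).
	 */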
	u32 msix_num_entries = hw_data->num_banks + 1;
	int ret;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
				    msix_num_entries, PCI_IRQ_MSIX);
	if (unlikely(ret < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate %d MSI-X vectors\n",
			msix_num_entries);
		return ret;
	}
	return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_free_irq_vectors(pci_dev_info->pci_dev);
}

static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
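
	/*
	 * Clear the bank's interrupt flag and coalescing controls so the
	 * line does not retrigger, then defer response processing to the
	 * bank's tasklet.
	 */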
	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

#ifdef CONFIG_PCI_IOV
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 pending;

	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);

	return pending;
}

static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
	bool irq_handled = false;
	unsigned long vf_mask;

	/* Get the interrupt sources triggered by VFs, except for those already disabled */
	vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
	if (vf_mask) {
		struct adf_accel_vf_info *vf_info;
		int i;

		/*
		 * Handle VF2PF interrupt unless the VF is malicious and
		 * is attempting to flood the host OS with VF2PF interrupts.
		 */
		for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
			vf_info = accel_dev->pf.vf_info + i;

			if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
				dev_info(&GET_DEV(accel_dev),
					 "Too many ints from VF%d\n",
					 vf_info->vf_nr);
				continue;
			}

			adf_schedule_vf2pf_handler(vf_info);
			irq_handled = true;
		}
	}
	return irq_handled;
}
#endif /* CONFIG_PCI_IOV */

static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (hw_data->handle_pm_interrupt &&
	    hw_data->handle_pm_interrupt(accel_dev))
		return true;

	return false;
}

static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
{
	struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops;
	bool reset_required;

	if (ras_ops->handle_interrupt &&
	    ras_ops->handle_interrupt(accel_dev, &reset_required)) {
		if (reset_required) {
			dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
			if (adf_notify_fatal_error(accel_dev))
				dev_err(&GET_DEV(accel_dev),
					"Failed to notify fatal error\n");
		}

		return true;
	}

	return false;
}

static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
		return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */

	if (adf_handle_pm_int(accel_dev))
		return IRQ_HANDLED;

	if (adf_handle_ras_int(accel_dev))
		return IRQ_HANDLED;

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
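	/* The AE cluster interrupt always uses the last MSI-X vector */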
	int clust_irq = hw_data->num_banks;
	int irq, i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			if (irqs[i].enabled) {
				irq = pci_irq_vector(pci_dev_info->pci_dev, i);
				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, &etr_data->banks[i]);
			}
		}
	}

	if (irqs[i].enabled) {
		irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
		free_irq(irq, accel_dev);
	}
}

static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int ret, irq, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = irqs[i].name;
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			irq = pci_irq_vector(pci_dev_info->pci_dev, i);
			if (unlikely(irq < 0)) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to get IRQ number of device vector %d - %s\n",
					i, name);
				ret = irq;
				goto err;
			}
			ret = request_irq(irq, adf_msix_isr_bundle, 0,
					  &name[0], bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to allocate IRQ %d for %s\n",
					irq, name);
				goto err;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(irq, get_cpu_mask(cpu));
			irqs[i].enabled = true;
		}
	}

	/* Request msix irq for AE */
	name = irqs[i].name;
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
	if (unlikely(irq < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to get IRQ number of device vector %d - %s\n",
			i, name);
		ret = irq;
		goto err;
	}
	ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate IRQ %d for %s\n", irq, name);
		goto err;
	}
	irqs[i].enabled = true;
	return ret;
err:
	adf_free_irqs(accel_dev);
	return ret;
}

static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;
	struct adf_irq *irqs;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	irqs = kcalloc_node(msix_num_entries, sizeof(*irqs),
			    GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!irqs)
		return -ENOMEM;

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
	return 0;
}

static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
	accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_vectors_data(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);

err_free_msix_table:
	adf_isr_free_msix_vectors_data(accel_dev);

err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

/**
 * adf_init_misc_wq() - Init misc workqueue
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_misc_wq(void)
{
	adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);

	return !adf_misc_wq ? -ENOMEM : 0;
}

void adf_exit_misc_wq(void)
{
	if (adf_misc_wq)
		destroy_workqueue(adf_misc_wq);

	adf_misc_wq = NULL;
}

bool adf_misc_wq_queue_work(struct work_struct *work)
{
	return queue_work(adf_misc_wq, work);
}

bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay)
{
	return queue_delayed_work(adf_misc_wq, work, delay);
}