// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_MAX_NUM_VFS	32
static struct workqueue_struct *adf_misc_wq;

static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = hw_data->num_banks + 1;
	int ret;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
				    msix_num_entries, PCI_IRQ_MSIX);
	if (unlikely(ret < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate %d MSI-X vectors\n",
			msix_num_entries);
		return ret;
	}
	return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_free_irq_vectors(pci_dev_info->pci_dev);
}

static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}

#ifdef CONFIG_PCI_IOV
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long flags;

	spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
	GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr);
	spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 pending;

	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);

	return pending;
}

static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
	bool irq_handled = false;
	unsigned long vf_mask;

	/* Get the interrupt sources triggered by VFs, except for those already disabled */
	vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev);
	if (vf_mask) {
		struct adf_accel_vf_info *vf_info;
		int i;

		/*
		 * Handle VF2PF interrupt unless the VF is malicious and
		 * is attempting to flood the host OS with VF2PF interrupts.
		 */
		for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
			vf_info = accel_dev->pf.vf_info + i;

			if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
				dev_info(&GET_DEV(accel_dev),
					 "Too many ints from VF%d\n",
					 vf_info->vf_nr);
				continue;
			}

			adf_schedule_vf2pf_handler(vf_info);
			irq_handled = true;
		}
	}
	return irq_handled;
}
#endif /* CONFIG_PCI_IOV */

static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	if (hw_data->handle_pm_interrupt &&
	    hw_data->handle_pm_interrupt(accel_dev))
		return true;

	return false;
}

static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
{
	struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops;
	bool reset_required;

	if (ras_ops->handle_interrupt &&
	    ras_ops->handle_interrupt(accel_dev, &reset_required)) {
		if (reset_required)
			dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
		return true;
	}

	return false;
}

static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info && adf_handle_vf2pf_int(accel_dev))
		return IRQ_HANDLED;
#endif /* CONFIG_PCI_IOV */

	if (adf_handle_pm_int(accel_dev))
		return IRQ_HANDLED;

	if (adf_handle_ras_int(accel_dev))
		return IRQ_HANDLED;

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}

static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int irq, i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			if (irqs[i].enabled) {
				irq = pci_irq_vector(pci_dev_info->pci_dev, i);
				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, &etr_data->banks[i]);
			}
		}
	}

	if (irqs[i].enabled) {
		irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
		free_irq(irq, accel_dev);
	}
}

static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int clust_irq = hw_data->num_banks;
	int ret, irq, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = irqs[i].name;
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			irq = pci_irq_vector(pci_dev_info->pci_dev, i);
			if (unlikely(irq < 0)) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to get IRQ number of device vector %d - %s\n",
					i, name);
				ret = irq;
				goto err;
			}
			ret = request_irq(irq, adf_msix_isr_bundle, 0,
					  &name[0], bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to allocate IRQ %d for %s\n",
					irq, name);
				goto err;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(irq, get_cpu_mask(cpu));
			irqs[i].enabled = true;
		}
	}

	/* Request msix irq for AE */
	name = irqs[i].name;
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
	if (unlikely(irq < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to get IRQ number of device vector %d - %s\n",
			i, name);
		ret = irq;
		goto err;
	}
	ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to allocate IRQ %d for %s\n", irq, name);
		goto err;
	}
	irqs[i].enabled = true;
	return ret;
err:
	adf_free_irqs(accel_dev);
	return ret;
}

static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;
	struct adf_irq *irqs;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
			    GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!irqs)
		return -ENOMEM;

	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
	return 0;
}

static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
	kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
	accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_vectors_data(accel_dev);
	if (ret)
		goto err_out;

	ret = adf_enable_msix(accel_dev);
	if (ret)
		goto err_free_msix_table;

	ret = adf_setup_bh(accel_dev);
	if (ret)
		goto err_disable_msix;

	ret = adf_request_irqs(accel_dev);
	if (ret)
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_disable_msix:
	adf_disable_msix(&accel_dev->accel_pci_dev);

err_free_msix_table:
	adf_isr_free_msix_vectors_data(accel_dev);

err_out:
	return ret;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);

/**
 * adf_init_misc_wq() - Init misc workqueue
 *
 * Function initializes the workqueue 'qat_misc_wq' for general-purpose work.
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_misc_wq(void)
{
	adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);

	return !adf_misc_wq ? -ENOMEM : 0;
}

void adf_exit_misc_wq(void)
{
	if (adf_misc_wq)
		destroy_workqueue(adf_misc_wq);

	adf_misc_wq = NULL;
}

bool adf_misc_wq_queue_work(struct work_struct *work)
{
	return queue_work(adf_misc_wq, work);
}

bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
				    unsigned long delay)
{
	return queue_delayed_work(adf_misc_wq, work, delay);
}
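
/*
 * Usage sketch (not part of the upstream driver, intentionally compiled
 * out): a minimal, hypothetical caller showing the ordering the exported
 * helpers above expect.  qat_example_dev_start()/qat_example_dev_stop()
 * are names made up for illustration only; the real call sites live
 * elsewhere in the common driver code.  adf_isr_resource_alloc() assumes
 * the ETR banks in accel_dev->transport already exist, and
 * adf_isr_resource_free() unwinds everything in the reverse order.
 */
#if 0
static int qat_example_dev_start(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Allocates MSI-X vector data, vectors, tasklets and IRQ handlers. */
	ret = adf_isr_resource_alloc(accel_dev);
	if (ret)
		return ret;

	return 0;
}

static void qat_example_dev_stop(struct adf_accel_dev *accel_dev)
{
	/* Frees IRQs, kills tasklets, releases MSI-X vectors and their data. */
	adf_isr_resource_free(accel_dev);
}
#endif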