// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_VINTSOU_OFFSET	0x204
#define ADF_VINTMSK_OFFSET	0x208
#define ADF_VINTSOU_BUN		BIT(0)
#define ADF_VINTSOU_PF2VF	BIT(1)

static struct workqueue_struct *adf_vf_stop_wq;

struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}

void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);

static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
					 PCI_IRQ_MSI);
	if (unlikely(stat < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to enable MSI interrupt: %d\n", stat);
		return stat;
	}

	return 0;
}

static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	pci_free_irq_vectors(pdev);
}

static void adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
		container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_down(accel_dev, false);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	kfree(stop_data);
}

int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't schedule stop for vf_%d\n",
			accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}

static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	bool ret;

	ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
	if (ret)
		/* Re-enable PF2VF interrupts */
		adf_enable_pf2vf_interrupts(accel_dev);
}

static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);

	mutex_init(&accel_dev->vf.vf2pf_lock);
	return 0;
}

static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->vf.vf2pf_lock);
}

static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	bool handled = false;
	u32 v_int, v_mask;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Read VF INT mask CSR to determine which sources are masked */
	v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);

	/*
	 * Recompute v_int ignoring sources that are masked. This is to
	 * avoid rescheduling the tasklet for interrupts already handled
	 */
	v_int &= ~v_mask;

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		handled = true;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
						    bank->bank_number, 0);
		tasklet_hi_schedule(&bank->resp_handler);
		handled = true;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	unsigned int cpu;
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
		return ret;
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
	accel_dev->vf.irq_enabled = true;

	return ret;
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
		     (unsigned long)priv_data->banks);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_disable(&priv_data->banks[0].resp_handler);
	tasklet_kill(&priv_data->banks[0].resp_handler);
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->vf.irq_enabled) {
		irq_set_affinity_hint(pdev->irq, NULL);
		free_irq(pdev->irq, accel_dev);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_cleanup_pf2vf_bh;

	if (adf_request_msi_irq(accel_dev))
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_cleanup_pf2vf_bh:
	adf_cleanup_pf2vf_bh(accel_dev);

err_disable_msi:
	adf_disable_msi(accel_dev);

err_out:
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev: Pointer to acceleration device.
 *
 * Function disables the PF/VF interrupts on the VF so that no new messages
 * are received and flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	adf_disable_pf2vf_interrupts(accel_dev);

	flush_workqueue(adf_vf_stop_wq);
}
EXPORT_SYMBOL_GPL(adf_flush_vf_wq);

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Function initializes the workqueue 'adf_vf_stop_wq' for VF.
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_vf_wq(void)
{
	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	return !adf_vf_stop_wq ? -EFAULT : 0;
}

void adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}