// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_VINTSOU_OFFSET 0x204
#define ADF_VINTMSK_OFFSET 0x208
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)

static struct workqueue_struct *adf_vf_stop_wq;

struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}

void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);

static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
					 PCI_IRQ_MSI);
	if (unlikely(stat < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to enable MSI interrupt: %d\n", stat);
		return stat;
	}

	return 0;
}

static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	pci_free_irq_vectors(pdev);
}

static void adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
		container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_down(accel_dev);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	adf_vf2pf_notify_restart_complete(accel_dev);
	kfree(stop_data);
}

int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't schedule stop for vf_%d\n",
			accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}

static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	bool ret;

	ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
	if (ret)
		/* Re-enable PF2VF interrupts */
		adf_enable_pf2vf_interrupts(accel_dev);
}

static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);

	mutex_init(&accel_dev->vf.vf2pf_lock);
	return 0;
}
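
/*
 * Note on the PF2VF flow wired up above: adf_isr() masks the PF2VF
 * interrupt source and schedules pf2vf_bh_tasklet; adf_pf2vf_bh_handler()
 * then receives and dispatches the message, re-enabling the interrupt
 * only once the message has been handled. A "PF restarting" message is
 * deferred a second time, onto adf_vf_stop_wq, because adf_dev_down()
 * can sleep and therefore must not run in tasklet (softirq) context.
 */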

static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->vf.vf2pf_lock);
}

static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	struct adf_bar *pmisc =
		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	bool handled = false;
	u32 v_int, v_mask;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Read VF INT mask CSR to determine which sources are masked */
	v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);

	/*
	 * Recompute v_int ignoring sources that are masked. This is to
	 * avoid rescheduling the tasklet for interrupts already handled
	 */
	v_int &= ~v_mask;

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		handled = true;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
						    bank->bank_number, 0);
		tasklet_hi_schedule(&bank->resp_handler);
		handled = true;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	unsigned int cpu;
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
		return ret;
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
	accel_dev->vf.irq_enabled = true;

	return ret;
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
		     (unsigned long)priv_data->banks);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_disable(&priv_data->banks[0].resp_handler);
	tasklet_kill(&priv_data->banks[0].resp_handler);
}
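
/*
 * Worked example for the adf_isr() mask logic above: if VINTSOU reads
 * 0x3 (bundle and PF2VF both pending) while VINTMSK reads 0x2 (PF2VF
 * masked because an earlier PF2VF interrupt is still in its bottom
 * half), then v_int &= ~v_mask leaves 0x1, so only the bundle
 * interrupt is serviced and the PF2VF tasklet is not rescheduled.
 */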

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->vf.irq_enabled) {
		irq_set_affinity_hint(pdev->irq, NULL);
		free_irq(pdev->irq, accel_dev);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_cleanup_pf2vf_bh;

	if (adf_request_msi_irq(accel_dev))
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_cleanup_pf2vf_bh:
	adf_cleanup_pf2vf_bh(accel_dev);

err_disable_msi:
	adf_disable_msi(accel_dev);

err_out:
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev: Pointer to acceleration device.
 *
 * Function disables the PF/VF interrupts on the VF so that no new messages
 * are received and flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	adf_disable_pf2vf_interrupts(accel_dev);

	flush_workqueue(adf_vf_stop_wq);
}
EXPORT_SYMBOL_GPL(adf_flush_vf_wq);

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_vf_wq(void)
{
	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	return !adf_vf_stop_wq ? -EFAULT : 0;
}

void adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}
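
/*
 * Usage sketch (illustrative only, not part of this driver): the
 * exported helpers above are expected to pair up across the VF
 * lifecycle roughly as below; my_vf_probe()/my_vf_remove() are
 * hypothetical names.
 *
 *	static int my_vf_probe(struct adf_accel_dev *accel_dev)
 *	{
 *		return adf_vf_isr_resource_alloc(accel_dev);
 *	}
 *
 *	static void my_vf_remove(struct adf_accel_dev *accel_dev)
 *	{
 *		adf_flush_vf_wq(accel_dev);
 *		adf_vf_isr_resource_free(accel_dev);
 *	}
 *
 * adf_init_vf_wq() and adf_exit_vf_wq() bracket these at module
 * init/exit time, since all VF devices share the single static
 * adf_vf_stop_wq workqueue.
 */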