// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"

#define ADF_VF2PF_RATELIMIT_INTERVAL	8
#define ADF_VF2PF_RATELIMIT_BURST	130

static struct workqueue_struct *pf2vf_resp_wq;

struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};

static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	u32 vf_nr = vf_info->vf_nr;
	bool ret;

	/* Serialize VF2PF message handling with migration of this VF */
	mutex_lock(&vf_info->pfvf_mig_lock);
	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
	if (ret)
		/* re-enable interrupt on PF from this VF */
		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
	mutex_unlock(&vf_info->pfvf_mig_lock);

	kfree(pf2vf_resp);
}

void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}

static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* Set up this VF's info now; it is used once the VFs are created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;

		mutex_init(&vf_info->pf2vf_lock);
		mutex_init(&vf_info->pfvf_mig_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     ADF_VF2PF_RATELIMIT_INTERVAL,
				     ADF_VF2PF_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

	/* Enable VF to PF interrupts for all VFs */
	adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);

	/*
	 * Due to the hardware design, when SR-IOV and the ring arbiter
	 * are enabled all the VFs supported in hardware must be enabled in
	 * order for all the hardware resources (i.e. bundles) to be usable.
	 * When SR-IOV is enabled, each of the VFs will own one bundle.
	 */
	return pci_enable_sriov(pdev, totalvfs);
}

static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev)
{
	unsigned long val = 0;
	int ret;

	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
	if (ret)
		return ret;

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
					  &val, ADF_DEC);
	if (ret)
		return ret;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	return ret;
}

static int adf_do_disable_sriov(struct adf_accel_dev *accel_dev)
{
	int ret;

	if (adf_dev_in_use(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Cannot disable SR-IOV, device in use\n");
		return -EBUSY;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev)) {
			dev_err(&GET_DEV(accel_dev),
				"Cannot disable SR-IOV, device in reset\n");
			return -EBUSY;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			goto err_del_cfg;
	}

	adf_disable_sriov(accel_dev);

	ret = adf_dev_up(accel_dev, true);
	if (ret)
		goto err_del_cfg;

	return 0;

err_del_cfg:
	adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
	return ret;
}

static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
		dev_warn(&GET_DEV(accel_dev),
			 "IOMMU should be enabled for SR-IOV to work correctly\n");
		return -EINVAL;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			return ret;
	}

	ret = adf_add_sriov_configuration(accel_dev);
	if (ret)
		goto err_del_cfg;

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	ret = -ENOMEM;
	if (!accel_dev->pf.vf_info)
		goto err_del_cfg;

	ret = adf_dev_up(accel_dev, false);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		goto err_free_vf_info;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		goto err_free_vf_info;

	val = 1;
	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
					  &val, ADF_DEC);
	if (ret)
		goto err_free_vf_info;

	return totalvfs;

err_free_vf_info:
	adf_dev_down(accel_dev);
	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
	return ret;
err_del_cfg:
	adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
	return ret;
}

void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};

	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				    ADF_SRIOV_ENABLED, cfg))
		return;

	if (!accel_dev->pf.vf_info)
		return;

	if (adf_add_sriov_configuration(accel_dev))
		return;

	dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
	adf_enable_sriov(accel_dev);
}

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev:  Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	adf_pf2vf_notify_restarting(accel_dev);
	adf_pf2vf_wait_for_restarting_complete(accel_dev);
	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	adf_disable_all_vf2pf_interrupts(accel_dev);

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
		mutex_destroy(&vf->pf2vf_lock);
		mutex_destroy(&vf->pfvf_mig_lock);
	}

	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
		kfree(accel_dev->pf.vf_info);
		accel_dev->pf.vf_info = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);

/**
 * adf_sriov_configure() - Enable or disable SRIOV for the device
 * @pdev: Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Note that when enabling, the value of @numvfs is ignored and all VFs
 * supported by the device are enabled due to the design of the hardware;
 * passing 0 disables SR-IOV.
 *
 * Function enables or disables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, 0 after disabling,
 * error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (numvfs)
		return adf_do_enable_sriov(accel_dev);
	else
		return adf_do_disable_sriov(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);

int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}
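
/*
 * Usage sketch: PF device drivers layered on this module typically hand
 * adf_sriov_configure() to the PCI core as their .sriov_configure callback,
 * roughly as below. The identifiers adf_example_driver, adf_example_pci_tbl,
 * adf_example_probe and adf_example_remove are illustrative placeholders,
 * not symbols provided by this module:
 *
 *	static struct pci_driver adf_example_driver = {
 *		.id_table = adf_example_pci_tbl,
 *		.name = "example_pf",
 *		.probe = adf_example_probe,
 *		.remove = adf_example_remove,
 *		.sriov_configure = adf_sriov_configure,
 *	};
 *
 * With that in place, writing a non-zero VF count to the device's
 * sriov_numvfs sysfs attribute reaches adf_do_enable_sriov(), and
 * writing 0 reaches adf_do_disable_sriov().
 */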