// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"

static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int n;

	for (n = 1; n <= num_vfs; n++)
		if (!xe_gt_sriov_pf_config_is_empty(gt, n))
			return false;

	return true;
}

static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0, err;

	for_each_gt(gt, xe, id) {
		if (!pf_needs_provisioning(gt, num_vfs))
			continue;
		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
		result = result ?: err;
	}

	return result;
}

static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_config_release(gt, n, true);
}

static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_control_trigger_flr(gt, n);
}

static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* caller must use pci_dev_put() */
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   pci_iov_virtfn_devfn(pdev, vf_id));
}
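/*
 * Reference-counting sketch (illustrative, not part of the driver): the
 * helper above returns a referenced VF device, so every caller must pair
 * it with pci_dev_put(), e.g.:
 *
 *	struct pci_dev *pdev_vf = xe_pci_pf_get_vf_dev(xe, vf_id);
 *
 *	if (pdev_vf) {
 *		... use pdev_vf ...
 *		pci_dev_put(pdev_vf);
 *	}
 *
 * pf_link_vfs() below follows this pattern.
 */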
static void pf_link_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
	struct device_link *link;
	struct pci_dev *pdev_vf;
	unsigned int n;

	/*
	 * When both PF and VF devices are enabled on the host, during system
	 * resume they are resuming in parallel.
	 *
	 * But the PF has to complete the provisioning of the VFs first to
	 * allow any VFs to successfully resume.
	 *
	 * Create a parent-child device link between the PF and VF devices
	 * that will enforce the correct resume order.
	 */
	for (n = 1; n <= num_vfs; n++) {
		pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);

		/* unlikely, something weird is happening, abort */
		if (!pdev_vf) {
			xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
				     n, str_plural(num_vfs));
			break;
		}

		link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
				       DL_FLAG_AUTOREMOVE_CONSUMER);
		/* unlikely and harmless, continue with other VFs */
		if (!link)
			xe_sriov_notice(xe, "Failed linking VF%u\n", n);

		pci_dev_put(pdev_vf);
	}
}

static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
{
	struct xe_gt *gt;
	unsigned int id;
	int ret = 0;

	for_each_gt(gt, xe, id) {
		ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
		if (ret)
			xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
					 str_enable_disable(enable), ERR_PTR(ret));
	}
}

static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int total_vfs = xe_sriov_pf_get_totalvfs(xe);
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, num_vfs > 0);
	xe_assert(xe, num_vfs <= total_vfs);
	xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	err = xe_sriov_pf_wait_ready(xe);
	if (err)
		goto out;

	/*
	 * We must hold an additional runtime PM reference to keep the PF in
	 * D0 during the VFs' lifetime, as our VFs do not implement the PM
	 * capability.
	 *
	 * With the PF being in the D0 state, all VFs will also behave as in
	 * the D0 state. This will also keep the GuC alive with all of the
	 * VFs' configurations.
	 *
	 * We will release this additional PM reference in pf_disable_vfs().
	 */
	xe_pm_runtime_get_noresume(xe);

	err = pf_provision_vfs(xe, num_vfs);
	if (err < 0)
		goto failed;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err < 0)
		goto failed;

	pf_link_vfs(xe, num_vfs);

	xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
		      num_vfs, total_vfs, str_plural(total_vfs));

	pf_engine_activity_stats(xe, num_vfs, true);

	return num_vfs;

failed:
	pf_unprovision_vfs(xe, num_vfs);
	xe_pm_runtime_put(xe);
out:
	xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
			num_vfs, str_plural(num_vfs), ERR_PTR(err));
	return err;
}

static int pf_disable_vfs(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs = pci_num_vf(pdev);

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_sriov_dbg(xe, "disabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	if (!num_vfs)
		return 0;

	pf_engine_activity_stats(xe, num_vfs, false);

	pci_disable_sriov(pdev);

	pf_reset_vfs(xe, num_vfs);

	pf_unprovision_vfs(xe, num_vfs);

	/* not needed anymore - see pf_enable_vfs() */
	xe_pm_runtime_put(xe);

	xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
	return 0;
}
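/*
 * Usage sketch (illustrative, not part of this file; the PCI address below
 * is hypothetical): the callback below is reached when an administrator
 * writes to the standard PCI sysfs attribute, e.g.:
 *
 *	# enable 4 VFs
 *	echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *
 *	# disable all VFs
 *	echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 */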
/**
 * xe_pci_sriov_configure - Configure SR-IOV (enable/disable VFs).
 * @pdev: the &pci_dev
 * @num_vfs: number of VFs to enable or zero to disable all VFs
 *
 * This is the Xe implementation of the struct pci_driver.sriov_configure
 * callback.
 *
 * This callback will be called by the PCI subsystem to enable or disable
 * SR-IOV Virtual Functions (VFs) as requested by the user via the PCI sysfs
 * interface.
 *
 * Return: number of configured VFs or a negative error code on failure.
 */
int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int ret;

	if (!IS_SRIOV_PF(xe))
		return -ENODEV;

	if (num_vfs < 0)
		return -EINVAL;

	if (num_vfs > xe_sriov_pf_get_totalvfs(xe))
		return -ERANGE;

	if (num_vfs && pci_num_vf(pdev))
		return -EBUSY;

	xe_pm_runtime_get(xe);
	if (num_vfs > 0)
		ret = pf_enable_vfs(xe, num_vfs);
	else
		ret = pf_disable_vfs(xe);
	xe_pm_runtime_put(xe);

	return ret;
}
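/*
 * Wiring sketch (illustrative; the actual hookup lives elsewhere in the
 * driver, assumed to be xe_pci.c): the PCI core invokes the callback above
 * through the driver's sriov_configure hook, roughly:
 *
 *	static struct pci_driver xe_pci_driver = {
 *		.name = "xe",
 *		...
 *		.sriov_configure = xe_pci_sriov_configure,
 *	};
 */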