// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/string_choices.h>

#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_provision.h"
#include "xe_sriov_printk.h"

static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	unsigned int n;

	for (n = 1; n <= num_vfs; n++)
		xe_sriov_pf_control_reset_vf(xe, n);
}

static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* caller must use pci_dev_put() */
	return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
					   pdev->bus->number,
					   pci_iov_virtfn_devfn(pdev, vf_id));
}

static void pf_link_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
	struct device_link *link;
	struct pci_dev *pdev_vf;
	unsigned int n;

	/*
	 * When both PF and VF devices are enabled on the host, they resume
	 * in parallel during system resume.
	 *
	 * But the PF has to complete the provisioning of the VFs first to
	 * allow any VF to resume successfully.
	 *
	 * Create a parent-child device link between the PF and each VF device
	 * to enforce the correct resume order.
	 */
	for (n = 1; n <= num_vfs; n++) {
		pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);

		/* unlikely, something weird is happening, abort */
		if (!pdev_vf) {
			xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
				     n, str_plural(num_vfs));
			break;
		}

		link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
				       DL_FLAG_AUTOREMOVE_CONSUMER);
		/* unlikely and harmless, continue with other VFs */
		if (!link)
			xe_sriov_notice(xe, "Failed linking VF%u\n", n);

		pci_dev_put(pdev_vf);
	}
}

static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
{
	struct xe_gt *gt;
	unsigned int id;
	int ret = 0;

	for_each_gt(gt, xe, id) {
		ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
		if (ret)
			xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
					 str_enable_disable(enable), ERR_PTR(ret));
	}
}

static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	u32 sizes;

	sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
	if (!sizes)
		return 0;

	return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
}
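
/*
 * Illustrative note (editorial sketch, not functional driver code):
 * resize_vf_vram_bar() above assumes that pci_iov_vf_bar_get_sizes()
 * returns a bitmask of the VF BAR sizes that can be supported with
 * num_vfs VFs enabled, and that __fls() yields the index of the highest
 * set bit. Under that assumption, a hypothetical mask of 0x6 (bits 1 and
 * 2 set) would make __fls(sizes) return 2, so the largest commonly
 * supported size index is what gets passed to pci_iov_vf_bar_set_size().
 */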
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int total_vfs = xe_sriov_pf_get_totalvfs(xe);
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_assert(xe, num_vfs > 0);
	xe_assert(xe, num_vfs <= total_vfs);
	xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	err = xe_sriov_pf_wait_ready(xe);
	if (err)
		goto out;

	/*
	 * We must hold an additional runtime PM reference to keep the PF in
	 * D0 for the VFs' lifetime, as our VFs do not implement the PM
	 * capability.
	 *
	 * With the PF in the D0 state, all VFs will also behave as if in D0.
	 * This will also keep the GuC alive with all VFs' configurations.
	 *
	 * We will release this additional PM reference in pf_disable_vfs().
	 */
	xe_pm_runtime_get_noresume(xe);

	err = xe_sriov_pf_provision_vfs(xe, num_vfs);
	if (err < 0)
		goto failed;

	if (IS_DGFX(xe)) {
		err = resize_vf_vram_bar(xe, num_vfs);
		if (err)
			xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err < 0)
		goto failed;

	pf_link_vfs(xe, num_vfs);

	xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
		      num_vfs, total_vfs, str_plural(total_vfs));

	pf_engine_activity_stats(xe, num_vfs, true);

	return num_vfs;

failed:
	xe_sriov_pf_unprovision_vfs(xe, num_vfs);
	xe_pm_runtime_put(xe);
out:
	xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
			num_vfs, str_plural(num_vfs), ERR_PTR(err));
	return err;
}

static int pf_disable_vfs(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 num_vfs = pci_num_vf(pdev);

	xe_assert(xe, IS_SRIOV_PF(xe));
	xe_sriov_dbg(xe, "disabling %u VF%s\n", num_vfs, str_plural(num_vfs));

	if (!num_vfs)
		return 0;

	pf_engine_activity_stats(xe, num_vfs, false);

	pci_disable_sriov(pdev);

	pf_reset_vfs(xe, num_vfs);

	xe_sriov_pf_unprovision_vfs(xe, num_vfs);

	/* not needed anymore - see pf_enable_vfs() */
	xe_pm_runtime_put(xe);

	xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
	return 0;
}

/**
 * xe_pci_sriov_configure - Configure SR-IOV (enable/disable VFs).
 * @pdev: the &pci_dev
 * @num_vfs: number of VFs to enable or zero to disable all VFs
 *
 * This is the Xe implementation of the struct pci_driver.sriov_configure
 * callback.
 *
 * This callback will be called by the PCI subsystem to enable or disable
 * SR-IOV Virtual Functions (VFs) as requested by the user via the PCI sysfs
 * interface.
 *
 * Return: number of configured VFs or a negative error code on failure.
 */
int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct xe_device *xe = pdev_to_xe_device(pdev);
	int ret;

	if (!IS_SRIOV_PF(xe))
		return -ENODEV;

	if (num_vfs < 0)
		return -EINVAL;

	if (num_vfs > xe_sriov_pf_get_totalvfs(xe))
		return -ERANGE;

	if (num_vfs && pci_num_vf(pdev))
		return -EBUSY;

	xe_pm_runtime_get(xe);
	if (num_vfs > 0)
		ret = pf_enable_vfs(xe, num_vfs);
	else
		ret = pf_disable_vfs(xe);
	xe_pm_runtime_put(xe);

	return ret;
}
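
/*
 * Illustrative usage (editorial sketch, not part of the driver): the PCI
 * core invokes the sriov_configure callback above when the sriov_numvfs
 * sysfs attribute of the PF is written. Assuming a hypothetical PF at
 * 0000:03:00.0:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs	# enable 4 VFs
 *	echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs	# disable all VFs
 */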