// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
#include "xe_sriov_pf_sysfs.h"
#include "xe_sriov_printk.h"

/*
 * Admin-requested VF limit: taken from configfs when CONFIG_CONFIGFS_FS is
 * enabled, otherwise from the max_vfs module parameter.
 */
static unsigned int wanted_max_vfs(struct xe_device *xe)
{
	if (IS_ENABLED(CONFIG_CONFIGFS_FS))
		return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev));
	return xe_modparam.max_vfs;
}

/*
 * Cap the number of VFs the PCI layer will allow to be enabled to @limit.
 * A failure is logged here; acting on it is left to the caller.
 */
static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = pci_sriov_set_totalvfs(pdev, limit);
	if (err)
		xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
				limit, ERR_PTR(err));
	return err;
}

/*
 * Give up on PF mode: log @why, forbid VF enabling entirely (totalvfs = 0)
 * and return false so the probe can continue in native (non-SR-IOV) mode.
 */
static bool pf_continue_as_native(struct xe_device *xe, const char *why)
{
	xe_sriov_dbg(xe, "%s, continuing as native\n", why);
	pf_reduce_totalvfs(xe, 0);
	return false;
}

/**
 * xe_sriov_pf_readiness - Check if PF functionality can be enabled.
 * @xe: the &xe_device to check
 *
 * This function is called as part of the SR-IOV probe to validate if all
 * PF prerequisites are satisfied and we can continue with enabling PF mode.
 *
 * Return: true if the PF mode can be turned on.
58 */ 59 bool xe_sriov_pf_readiness(struct xe_device *xe) 60 { 61 struct device *dev = xe->drm.dev; 62 struct pci_dev *pdev = to_pci_dev(dev); 63 int totalvfs = pci_sriov_get_totalvfs(pdev); 64 int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs); 65 66 xe_assert(xe, totalvfs <= U16_MAX); 67 68 if (!dev_is_pf(dev)) 69 return false; 70 71 if (!xe_device_uc_enabled(xe)) 72 return pf_continue_as_native(xe, "Guc submission disabled"); 73 74 if (!newlimit) 75 return pf_continue_as_native(xe, "all VFs disabled"); 76 77 pf_reduce_totalvfs(xe, newlimit); 78 79 xe->sriov.pf.device_total_vfs = totalvfs; 80 xe->sriov.pf.driver_max_vfs = newlimit; 81 82 return true; 83 } 84 85 /** 86 * xe_sriov_pf_init_early - Initialize SR-IOV PF specific data. 87 * @xe: the &xe_device to initialize 88 * 89 * Return: 0 on success or a negative error code on failure. 90 */ 91 int xe_sriov_pf_init_early(struct xe_device *xe) 92 { 93 int err; 94 95 xe_assert(xe, IS_SRIOV_PF(xe)); 96 97 xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe), 98 sizeof(*xe->sriov.pf.vfs), GFP_KERNEL); 99 if (!xe->sriov.pf.vfs) 100 return -ENOMEM; 101 102 err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock); 103 if (err) 104 return err; 105 106 err = xe_sriov_pf_migration_init(xe); 107 if (err) 108 return err; 109 110 xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling"); 111 112 xe_sriov_pf_service_init(xe); 113 114 return 0; 115 } 116 117 /** 118 * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF. 119 * @xe: the &xe_device to initialize 120 * 121 * This function can only be called on PF. 122 * 123 * Return: 0 on success or a negative error code on failure. 
124 */ 125 int xe_sriov_pf_init_late(struct xe_device *xe) 126 { 127 struct xe_gt *gt; 128 unsigned int id; 129 int err; 130 131 xe_assert(xe, IS_SRIOV_PF(xe)); 132 133 for_each_gt(gt, xe, id) { 134 err = xe_gt_sriov_pf_init(gt); 135 if (err) 136 return err; 137 } 138 139 err = xe_sriov_pf_sysfs_init(xe); 140 if (err) 141 return err; 142 143 return 0; 144 } 145 146 /** 147 * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate. 148 * @xe: the &xe_device to test 149 * 150 * This function can only be called on PF. 151 * 152 * Return: 0 on success or a negative error code on failure. 153 */ 154 int xe_sriov_pf_wait_ready(struct xe_device *xe) 155 { 156 struct xe_gt *gt; 157 unsigned int id; 158 int err; 159 160 if (xe_device_wedged(xe)) 161 return -ECANCELED; 162 163 for_each_gt(gt, xe, id) { 164 err = xe_gt_sriov_pf_wait_ready(gt); 165 if (err) 166 return err; 167 } 168 169 return 0; 170 } 171 172 /** 173 * xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode. 174 * @xe: the PF &xe_device 175 * @guard: the &xe_guard to arm 176 * @lockdown: arm for lockdown(true) or exclusive(false) mode 177 * @who: the address of the new owner, or NULL if it's a caller 178 * 179 * This function can only be called on PF. 180 * 181 * It is a simple wrapper for xe_guard_arm() with additional debug 182 * messages. 183 * 184 * Return: 0 on success or a negative error code on failure. 
185 */ 186 int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard, 187 bool lockdown, void *who) 188 { 189 void *new_owner = who ?: __builtin_return_address(0); 190 int err; 191 192 err = xe_guard_arm(guard, lockdown, new_owner); 193 if (err) { 194 xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n", 195 guard->name, xe_guard_mode_str(lockdown), 196 ERR_PTR(err), guard->owner); 197 return err; 198 } 199 200 xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n", 201 guard->name, xe_guard_mode_str(lockdown), 202 new_owner); 203 return 0; 204 } 205 206 /** 207 * xe_sriov_pf_disarm_guard() - Disarm the guard. 208 * @xe: the PF &xe_device 209 * @guard: the &xe_guard to disarm 210 * @lockdown: disarm from lockdown(true) or exclusive(false) mode 211 * @who: the address of the indirect owner, or NULL if it's a caller 212 * 213 * This function can only be called on PF. 214 * 215 * It is a simple wrapper for xe_guard_disarm() with additional debug 216 * messages and xe_assert() to easily catch any illegal calls. 217 */ 218 void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard, 219 bool lockdown, void *who) 220 { 221 bool disarmed; 222 223 xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n", 224 guard->name, xe_guard_mode_str(lockdown), 225 who ?: __builtin_return_address(0)); 226 227 disarmed = xe_guard_disarm(guard, lockdown); 228 xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps", 229 guard->name, xe_guard_mode_str(lockdown), guard->owner); 230 } 231 232 /** 233 * xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling. 234 * @xe: the PF &xe_device 235 * 236 * This function can only be called on PF. 237 * 238 * Once the PF is locked down, it will not enable VFs. 239 * If VFs are already enabled, the -EBUSY will be returned. 240 * To allow the PF enable VFs again call xe_sriov_pf_end_lockdown(). 241 * 242 * Return: 0 on success or a negative error code on failure. 
243 */ 244 int xe_sriov_pf_lockdown(struct xe_device *xe) 245 { 246 xe_assert(xe, IS_SRIOV_PF(xe)); 247 248 return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, 249 __builtin_return_address(0)); 250 } 251 252 /** 253 * xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again. 254 * @xe: the PF &xe_device 255 * 256 * This function can only be called on PF. 257 * See xe_sriov_pf_lockdown() for details. 258 */ 259 void xe_sriov_pf_end_lockdown(struct xe_device *xe) 260 { 261 xe_assert(xe, IS_SRIOV_PF(xe)); 262 263 xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true, 264 __builtin_return_address(0)); 265 } 266 267 /** 268 * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information. 269 * @xe: the &xe_device to print info from 270 * @p: the &drm_printer 271 * 272 * Print SR-IOV PF related information into provided DRM printer. 273 */ 274 void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p) 275 { 276 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 277 278 xe_assert(xe, IS_SRIOV_PF(xe)); 279 280 drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs); 281 drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs); 282 drm_printf(p, "enabled: %u\n", pci_num_vf(pdev)); 283 } 284