// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
#include "xe_sriov_pf_sysfs.h"
#include "xe_sriov_printk.h"

/*
 * Admin-requested VF limit: the configfs attribute (when CONFIG_CONFIGFS_FS
 * is built in) takes precedence over the max_vfs module parameter.
 */
static unsigned int wanted_max_vfs(struct xe_device *xe)
{
	if (IS_ENABLED(CONFIG_CONFIGFS_FS))
		return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev));
	return xe_modparam.max_vfs;
}

/*
 * Lower the PCI TotalVFs advertised for this PF to @limit (0 disables VF
 * enabling entirely).  Failure is logged but otherwise non-fatal; the
 * error code is returned for callers that care.
 */
static int pf_reduce_totalvfs(struct xe_device *xe, int limit)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int err;

	err = pci_sriov_set_totalvfs(pdev, limit);
	if (err)
		xe_sriov_notice(xe, "Failed to set number of VFs to %d (%pe)\n",
				limit, ERR_PTR(err));
	return err;
}

/*
 * Abandon PF mode: log @why, advertise zero VFs so userspace cannot try
 * to enable any, and return false so the probe continues in native mode.
 */
static bool pf_continue_as_native(struct xe_device *xe, const char *why)
{
	xe_sriov_dbg(xe, "%s, continuing as native\n", why);
	pf_reduce_totalvfs(xe, 0);
	return false;
}

/**
 * xe_sriov_pf_readiness - Check if PF functionality can be enabled.
 * @xe: the &xe_device to check
 *
 * This function is called as part of the SR-IOV probe to validate if all
 * PF prerequisites are satisfied and we can continue with enabling PF mode.
 *
 * On success the driver-supported VF limit (min of the device TotalVFs and
 * the admin-requested limit) is cached in &xe->sriov.pf.
 *
 * Return: true if the PF mode can be turned on.
 */
bool xe_sriov_pf_readiness(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	/* clamp the admin request to what the device actually supports */
	int newlimit = min_t(u16, wanted_max_vfs(xe), totalvfs);

	/* NOTE(review): assert runs before the dev_is_pf() bail-out below;
	 * presumably totalvfs is 0 for non-PF devices so this still holds —
	 * confirm against pci_sriov_get_totalvfs() semantics. */
	xe_assert(xe, totalvfs <= U16_MAX);

	if (!dev_is_pf(dev))
		return false;

	if (!xe_device_uc_enabled(xe))
		return pf_continue_as_native(xe, "Guc submission disabled");

	if (!newlimit)
		return pf_continue_as_native(xe, "all VFs disabled");

	pf_reduce_totalvfs(xe, newlimit);

	xe->sriov.pf.device_total_vfs = totalvfs;
	xe->sriov.pf.driver_max_vfs = newlimit;

	return true;
}

/**
 * xe_sriov_pf_init_early - Initialize SR-IOV PF specific data.
 * @xe: the &xe_device to initialize
 *
 * Allocates the per-VF metadata array (entry 0 is the PF itself, hence
 * the 1 + totalvfs sizing), initializes the PF master lock, the migration
 * support and the VF-enabling guard, and registers the PF services.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_init_early(struct xe_device *xe)
{
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));

	/* index 0 tracks the PF; VFs use indices 1..totalvfs */
	xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe),
					sizeof(*xe->sriov.pf.vfs), GFP_KERNEL);
	if (!xe->sriov.pf.vfs)
		return -ENOMEM;

	err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock);
	if (err)
		return err;

	err = xe_sriov_pf_migration_init(xe);
	if (err)
		return err;

	xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling");

	xe_sriov_pf_service_init(xe);

	/* NOTE(review): xe_mert_init_early() is opaque from this file and
	 * its return value is ignored — confirm it cannot fail. */
	xe_mert_init_early(xe);

	return 0;
}

/**
 * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF.
 * @xe: the &xe_device to initialize
 *
 * Performs per-GT PF initialization and registers the PF sysfs interface.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_init_late(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err;

	xe_assert(xe, IS_SRIOV_PF(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_init(gt);
		if (err)
			return err;
	}

	err = xe_sriov_pf_sysfs_init(xe);
	if (err)
		return err;

	return 0;
}

/**
 * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
 * @xe: the &xe_device to test
 *
 * Waits for every GT to report PF readiness.  Bails out immediately with
 * -ECANCELED if the device is already wedged.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_wait_ready(struct xe_device *xe)
{
	struct xe_gt *gt;
	unsigned int id;
	int err;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_wait_ready(gt);
		if (err)
			return err;
	}

	return 0;
}

/**
 * xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode.
 * @xe: the PF &xe_device
 * @guard: the &xe_guard to arm
 * @lockdown: arm for lockdown(true) or exclusive(false) mode
 * @who: the address of the new owner, or NULL if it's a caller
 *
 * This function can only be called on PF.
 *
 * It is a simple wrapper for xe_guard_arm() with additional debug
 * messages.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
			  bool lockdown, void *who)
{
	/* record our caller as the owner when no explicit owner was given */
	void *new_owner = who ?: __builtin_return_address(0);
	int err;

	err = xe_guard_arm(guard, lockdown, new_owner);
	if (err) {
		xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     ERR_PTR(err), guard->owner);
		return err;
	}

	xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     new_owner);
	return 0;
}

/**
 * xe_sriov_pf_disarm_guard() - Disarm the guard.
 * @xe: the PF &xe_device
 * @guard: the &xe_guard to disarm
 * @lockdown: disarm from lockdown(true) or exclusive(false) mode
 * @who: the address of the indirect owner, or NULL if it's a caller
 *
 * This function can only be called on PF.
 *
 * It is a simple wrapper for xe_guard_disarm() with additional debug
 * messages and xe_assert() to easily catch any illegal calls.
 */
void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
			      bool lockdown, void *who)
{
	bool disarmed;

	xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
			     guard->name, xe_guard_mode_str(lockdown),
			     who ?: __builtin_return_address(0));

	/* disarming an unarmed guard indicates an unbalanced caller */
	disarmed = xe_guard_disarm(guard, lockdown);
	xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps",
		      guard->name, xe_guard_mode_str(lockdown), guard->owner);
}

/**
 * xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling.
 * @xe: the PF &xe_device
 *
 * This function can only be called on PF.
 *
 * Once the PF is locked down, it will not enable VFs.
 * If VFs are already enabled, the -EBUSY will be returned.
 * To allow the PF enable VFs again call xe_sriov_pf_end_lockdown().
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_lockdown(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	/* pass our caller explicitly so the guard owner is meaningful */
	return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
				     __builtin_return_address(0));
}

/**
 * xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again.
 * @xe: the PF &xe_device
 *
 * This function can only be called on PF.
 * See xe_sriov_pf_lockdown() for details.
 */
void xe_sriov_pf_end_lockdown(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
				 __builtin_return_address(0));
}

/**
 * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
 * @xe: the &xe_device to print info from
 * @p: the &drm_printer
 *
 * Print SR-IOV PF related information (device TotalVFs, driver-supported
 * limit, and currently enabled VF count) into provided DRM printer.
 */
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);

	xe_assert(xe, IS_SRIOV_PF(xe));

	drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
	drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
	drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}