// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_provision.h"
#include "xe_sriov_pf_provision_types.h"
#include "xe_sriov_printk.h"

static const char *mode_to_string(enum xe_sriov_provisioning_mode mode)
{
	switch (mode) {
	case XE_SRIOV_PROVISIONING_MODE_AUTO:
		return "auto";
	case XE_SRIOV_PROVISIONING_MODE_CUSTOM:
		return "custom";
	default:
		return "<invalid>";
	}
}

static bool pf_auto_provisioning_mode(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO;
}

static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int n;

	for (n = 1; n <= num_vfs; n++)
		if (!xe_gt_sriov_pf_config_is_empty(gt, n))
			return false;

	return true;
}

static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	for_each_gt(gt, xe, id) {
		/* don't overwrite any existing (custom) VF provisioning */
		if (!pf_needs_provisioning(gt, num_vfs))
			return -EUCLEAN;
		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
		/* keep the first error, but still try the remaining GTs */
		result = result ?: err;
	}

	return result;
}

static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	struct xe_gt *gt;
	unsigned int id;
	unsigned int n;

	for_each_gt(gt, xe, id)
		for (n = 1; n <= num_vfs; n++)
			xe_gt_sriov_pf_config_release(gt, n, true);
}

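/* Release the provisioning of all potential VFs, not just those currently enabled. */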
static void pf_unprovision_all_vfs(struct xe_device *xe)
{
	pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe));
}

/**
 * xe_sriov_pf_provision_vfs() - Provision VFs in auto-mode.
 * @xe: the PF &xe_device
 * @num_vfs: the number of VFs to auto-provision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (!pf_auto_provisioning_mode(xe))
		return 0;

	return pf_provision_vfs(xe, num_vfs);
}

/**
 * xe_sriov_pf_unprovision_vfs() - Unprovision VFs in auto-mode.
 * @xe: the PF &xe_device
 * @num_vfs: the number of VFs to unprovision
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (!pf_auto_provisioning_mode(xe))
		return 0;

	pf_unprovision_vfs(xe, num_vfs);
	return 0;
}

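/*
 * Illustrative sketch only (hypothetical caller, not code from this file):
 * in AUTO mode the pair above could bracket VF enabling/disabling, e.g.:
 *
 *	err = xe_sriov_pf_provision_vfs(xe, num_vfs);
 *	if (err)
 *		return err;
 *	...
 *	xe_sriov_pf_unprovision_vfs(xe, num_vfs);
 *
 * In CUSTOM mode both helpers return 0 without touching any VF config,
 * leaving the provisioning entirely to the administrator.
 */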

/**
 * xe_sriov_pf_provision_set_mode() - Change the VFs provisioning mode.
 * @xe: the PF &xe_device
 * @mode: the new VFs provisioning mode
 *
 * When changing from AUTO to CUSTOM mode, any already allocated VF resources
 * will remain allocated and will not be released when the VFs are disabled.
 *
 * When changing back to AUTO mode, if VFs are not enabled, already allocated
 * VF resources will be released immediately. If VFs are still enabled, such
 * a mode change is rejected.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (mode == xe->sriov.pf.provision.mode)
		return 0;

	if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) {
		if (xe_sriov_pf_num_vfs(xe)) {
			xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n",
				     mode_to_string(mode));
			return -EBUSY;
		}
		pf_unprovision_all_vfs(xe);
	}

	xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n",
		     mode_to_string(xe->sriov.pf.provision.mode),
		     mode_to_string(mode), __builtin_return_address(0));
	xe->sriov.pf.provision.mode = mode;
	return 0;
}

/**
 * xe_sriov_pf_provision_bulk_apply_eq() - Change execution quantum for all VFs and PF.
 * @xe: the PF &xe_device
 * @eq: execution quantum in [ms] to set
 *
 * Change execution quantum (EQ) provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(gt, eq);
		result = result ?: err;
	}

	return result;
}

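/*
 * Illustrative sketch only (hypothetical caller): a 20 ms execution quantum
 * applied to all functions, then overridden for VF1 with
 * xe_sriov_pf_provision_apply_vf_eq() defined below:
 *
 *	err = xe_sriov_pf_provision_bulk_apply_eq(xe, 20);
 *	if (!err)
 *		err = xe_sriov_pf_provision_apply_vf_eq(xe, 1, 5);
 *
 * Both helpers grab the PF master mutex internally, so callers must not
 * already hold it.
 */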

/**
 * xe_sriov_pf_provision_apply_vf_eq() - Change VF's execution quantum.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @eq: execution quantum in [ms] to set
 *
 * Change VF's execution quantum (EQ) provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq);
		result = result ?: err;
	}

	return result;
}

static int pf_report_unclean(struct xe_gt *gt, unsigned int vfid,
			     const char *what, u32 found, u32 expected)
{
	char name[8];

	xe_sriov_dbg(gt_to_xe(gt), "%s on GT%u has %s=%u (expected %u)\n",
		     xe_sriov_function_name(vfid, name, sizeof(name)),
		     gt->info.id, what, found, expected);
	return -EUCLEAN;
}

/**
 * xe_sriov_pf_provision_query_vf_eq() - Query VF's execution quantum.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @eq: placeholder for the returned execution quantum in [ms]
 *
 * Query VF's execution quantum (EQ) provisioning from all tiles/GTs.
 * If the values across tiles/GTs are inconsistent, then the -EUCLEAN error
 * will be returned.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq)
{
	struct xe_gt *gt;
	unsigned int id;
	int count = 0;
	u32 value;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		value = xe_gt_sriov_pf_config_get_exec_quantum_locked(gt, vfid);
		if (!count++)
			*eq = value;
		else if (value != *eq)
			return pf_report_unclean(gt, vfid, "EQ", value, *eq);
	}

	return !count ? -ENODATA : 0;
}

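/*
 * Illustrative sketch only (hypothetical caller): reading a VF's execution
 * quantum back:
 *
 *	u32 eq;
 *	int err = xe_sriov_pf_provision_query_vf_eq(xe, vfid, &eq);
 *
 * Here -ENODATA means there was no GT to query, -EUCLEAN means the tiles/GTs
 * reported different values, and on success eq holds the common value.
 */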

/**
 * xe_sriov_pf_provision_bulk_apply_pt() - Change preemption timeout for all VFs and PF.
 * @xe: the PF &xe_device
 * @pt: preemption timeout in [us] to set
 *
 * Change preemption timeout (PT) provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, pt);
		result = result ?: err;
	}

	return result;
}

/**
 * xe_sriov_pf_provision_apply_vf_pt() - Change VF's preemption timeout.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @pt: preemption timeout in [us] to set
 *
 * Change VF's preemption timeout (PT) provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt);
		result = result ?: err;
	}

	return result;
}

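/*
 * Illustrative sketch only (hypothetical caller): the preemption timeout
 * helpers mirror the EQ helpers above, but take the value in [us], e.g. a
 * 100 us timeout for all functions with a larger override for VF1:
 *
 *	err = xe_sriov_pf_provision_bulk_apply_pt(xe, 100);
 *	if (!err)
 *		err = xe_sriov_pf_provision_apply_vf_pt(xe, 1, 500);
 */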

/**
 * xe_sriov_pf_provision_query_vf_pt() - Query VF's preemption timeout.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @pt: placeholder for the returned preemption timeout in [us]
 *
 * Query VF's preemption timeout (PT) provisioning from all tiles/GTs.
 * If the values across tiles/GTs are inconsistent, then the -EUCLEAN error
 * will be returned.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt)
{
	struct xe_gt *gt;
	unsigned int id;
	int count = 0;
	u32 value;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		value = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, vfid);
		if (!count++)
			*pt = value;
		else if (value != *pt)
			return pf_report_unclean(gt, vfid, "PT", value, *pt);
	}

	return !count ? -ENODATA : 0;
}

/**
 * xe_sriov_pf_provision_bulk_apply_priority() - Change scheduling priority of all VFs and PF.
 * @xe: the PF &xe_device
 * @prio: scheduling priority to set
 *
 * Change the scheduling priority provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio)
{
	bool sched_if_idle;
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	/*
	 * Currently, priority changes that involve VFs are only allowed using
	 * the 'sched_if_idle' policy KLV, so only LOW and NORMAL are supported.
	 */
	xe_assert(xe, prio < GUC_SCHED_PRIORITY_HIGH);
	sched_if_idle = prio == GUC_SCHED_PRIORITY_NORMAL;

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, sched_if_idle);
		result = result ?: err;
	}

	return result;
}

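/*
 * Illustrative sketch only (hypothetical caller): the bulk priority change is
 * expressed through the 'sched_if_idle' policy, so only LOW (policy disabled)
 * and NORMAL (policy enabled) can be requested here:
 *
 *	err = xe_sriov_pf_provision_bulk_apply_priority(xe, GUC_SCHED_PRIORITY_NORMAL);
 */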

/**
 * xe_sriov_pf_provision_apply_vf_priority() - Change VF's scheduling priority.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @prio: scheduling priority to set
 *
 * Change VF's scheduling priority provisioning on all tiles/GTs.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio)
{
	struct xe_gt *gt;
	unsigned int id;
	int result = 0;
	int err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, prio);
		result = result ?: err;
	}

	return result;
}

/**
 * xe_sriov_pf_provision_query_vf_priority() - Query VF's scheduling priority.
 * @xe: the PF &xe_device
 * @vfid: the VF identifier
 * @prio: placeholder for the returned scheduling priority
 *
 * Query VF's scheduling priority provisioning from all tiles/GTs.
 * If the values across tiles/GTs are inconsistent, then the -EUCLEAN error
 * will be returned.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio)
{
	struct xe_gt *gt;
	unsigned int id;
	int count = 0;
	u32 value;

	for_each_gt(gt, xe, id) {
		value = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid);
		if (!count++)
			*prio = value;
		else if (value != *prio)
			return pf_report_unclean(gt, vfid, "priority", value, *prio);
	}

	return !count ? -ENODATA : 0;
}

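/*
 * Illustrative sketch only (hypothetical caller): setting and reading back a
 * single VF's scheduling priority:
 *
 *	u32 prio;
 *	int err = xe_sriov_pf_provision_apply_vf_priority(xe, vfid, GUC_SCHED_PRIORITY_NORMAL);
 *
 *	if (!err)
 *		err = xe_sriov_pf_provision_query_vf_priority(xe, vfid, &prio);
 */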