// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"

/*
 * VFs' metadata is maintained in a flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}
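
/*
 * Illustrative sketch, not part of the original file: with the layout
 * described above, per-function metadata is reached by plain indexing,
 * where index 0 refers to the PF itself and index n to VFn.  The helper
 * name is hypothetical and the struct xe_gt_sriov_metadata element type
 * is an assumption based on the PF types used by this driver.
 */
static __maybe_unused struct xe_gt_sriov_metadata *
pf_pick_metadata_example(struct xe_gt *gt, unsigned int vfid)
{
	/* VFID 0 is the PF's own entry; VFIDs 1..totalvfs are the VFs */
	xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));

	return &gt->sriov.pf.vfs[vfid];
}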

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	return 0;
}
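
/*
 * Minimal usage sketch, not part of the original file: the caller shown
 * here is hypothetical.  Early PF initialization is expected to run once
 * per GT on the PF, before any VF provisioning, with any failure simply
 * propagated to abort the probe path.
 */
static __maybe_unused int pf_probe_time_init_example(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	if (!IS_SRIOV_PF(xe))
		return 0;

	err = xe_gt_sriov_pf_init_early(gt);
	if (err)
		return err;

	/* further PF-only setup would follow here */
	return 0;
}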

static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
	xe_gt_sriov_pf_migration_init(gt);
}

static u32 pf_get_vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}
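
/*
 * Worked example, not part of the original file: the 0x190010 offset and
 * the helper name are made up for illustration.  The PF reaches VFn's copy
 * of a VF-accessible register by adding n strides to the VF-relative
 * offset, so with the legacy 0x1000 stride VF2's copy of a register at
 * 0x190010 appears to the PF at 0x190010 + 2 * 0x1000 = 0x192010, while
 * the newer 0x400 stride yields 0x190010 + 2 * 0x400 = 0x190810.
 */
static __maybe_unused u32 pf_reg_translation_example(unsigned int vfid, u32 stride)
{
	struct xe_reg vf_view = XE_REG(0x190010);	/* hypothetical offset */

	return xe_reg_vf_to_pf(vf_view, vfid, stride).addr;
}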

static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
	struct xe_reg scratch;
	int n, count;

	if (xe_gt_is_media_type(gt)) {
		count = MED_VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	} else {
		count = VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	}
}

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
}
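
/*
 * Usage sketch, not part of the original file: the caller shown here is
 * hypothetical.  The restart is only meaningful on the PF and is expected
 * to be invoked from the GT reset recovery path, once the GT is again
 * ready to accept the PF's configuration.
 */
static __maybe_unused void pf_gt_reset_done_example(struct xe_gt *gt)
{
	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);
}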