// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"

static void pf_worker_restart_func(struct work_struct *w);

/*
 * VFs' metadata is maintained in the flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn:
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
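/*
 * For illustration only: a lookup helper for this array would simply
 * index it by the VF identifier, with vfid == 0 denoting the PF, e.g.:
 *
 *	return &gt->sriov.pf.vfs[vfid];
 */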
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

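	/* entry [0] is reserved for the PF itself, hence "1 + num_vfs" below */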
	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}

static void pf_init_workers(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}

static void pf_fini_workers(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

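	/* disable_work_sync() returns true if the work item was still pending */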
	if (disable_work_sync(&gt->sriov.pf.workers.restart)) {
		xe_gt_sriov_dbg_verbose(gt, "pending restart disabled!\n");
		/* release an rpm reference taken on the worker's behalf */
		xe_pm_runtime_put(gt_to_xe(gt));
	}
}

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	pf_init_workers(gt);

	return 0;
}

static void pf_fini_action(void *arg)
{
	struct xe_gt *gt = arg;

	pf_fini_workers(gt);
}

static int pf_init_late(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));
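	/*
	 * Run pf_fini_action() automatically on driver unbind; on failure,
	 * devm_add_action_or_reset() invokes the action immediately and
	 * returns an error.
	 */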
	return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
}

/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_migration_init(gt);
	if (err)
		return err;

	err = pf_init_late(gt);
	if (err)
		return err;

	return 0;
}

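/* only graphics IP version 12.00 needs the explicit GGTT guest update enable */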
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VFs' access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}

static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_mmio mmio;
	int n;

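	/* access the scratch registers through this VF's view of the MMIO space */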
	xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);

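	/* the media GT exposes its own set of software-flag scratch registers */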
	if (xe_gt_is_media_type(gt)) {
		for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
			xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), 0);
	} else {
		for (n = 0; n < VF_SW_FLAG_COUNT; n++)
			xe_mmio_write32(&mmio, VF_SW_FLAG(n), 0);
	}
}

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

static void pf_cancel_restart(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	if (cancel_work_sync(&gt->sriov.pf.workers.restart)) {
		xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
		/* release an rpm reference taken on the worker's behalf */
		xe_pm_runtime_put(gt_to_xe(gt));
	}
}

/**
 * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
{
	pf_cancel_restart(gt);
}

static void pf_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

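	/* the queued worker holds an rpm reference, so the device must be awake */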
	xe_gt_assert(gt, !xe_pm_runtime_suspended(xe));

	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);

	/* release an rpm reference taken on our behalf */
	xe_pm_runtime_put(xe);

	xe_gt_sriov_dbg(gt, "restart completed\n");
}

static void pf_worker_restart_func(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);

	pf_restart(gt);
}

static void pf_queue_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	/* take an rpm reference on behalf of the worker */
	xe_pm_runtime_get_noresume(xe);

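	/* queue_work() returns false if the work item was already queued */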
	if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart)) {
		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
		xe_pm_runtime_put(xe);
	}
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	pf_queue_restart(gt);
}

static void pf_flush_restart(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	flush_work(&gt->sriov.pf.workers.restart);
}

/**
 * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
 * @gt: the &xe_gt
 *
 * This function can only be called on the PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
{
	/* don't wait if there is another ongoing reset */
	if (xe_guc_read_stopped(&gt->uc.guc))
		return -EBUSY;

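	/* wait for a queued restart worker, if any, to finish */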
	pf_flush_restart(gt);
	return 0;
}