// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "abi/guc_actions_sriov_abi.h"
#include "xe_bo.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"

/* Return: number of dwords saved/restored/required or a negative error code on failure */
static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
				      u64 addr, u32 ndwords)
{
	u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) |
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)),
		FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords),
	};

	return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
}

/* Return: size of the state in dwords or a negative error code on failure */
static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
{
	int ret;

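	/*
	 * A SAVE request without a buffer (addr = 0, ndwords = 0) only queries
	 * the size of the VF state; a zero reply is mapped to -ENODATA.
	 */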
	ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);
	return ret ?: -ENODATA;
}

/* Return: number of state dwords saved or a negative error code on failure */
static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
				     void *buff, size_t size)
{
	const int ndwords = size / sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	xe_gt_assert(gt, size % sizeof(u32) == 0);
	xe_gt_assert(gt, size == ndwords * sizeof(u32));

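	/* bounce buffer in system memory, mapped in GGTT so that GuC can write the state to it */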
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

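	/*
	 * GuC replies with the number of dwords it actually wrote: an empty
	 * reply means there is nothing to save (-ENODATA), while a reply
	 * larger than the buffer indicates a protocol error (-EPROTO).
	 */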
	ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
					 xe_bo_ggtt_addr(bo), ndwords);
	if (!ret)
		ret = -ENODATA;
	else if (ret > ndwords)
		ret = -EPROTO;
	else if (ret > 0)
		xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));

	xe_bo_unpin_map_no_vm(bo);
	return ret;
}

/* Return: number of state dwords restored or a negative error code on failure */
static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
					const void *buff, size_t size)
{
	const int ndwords = size / sizeof(u32);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_guc *guc = &gt->uc.guc;
	struct xe_bo *bo;
	int ret;

	xe_gt_assert(gt, size % sizeof(u32) == 0);
	xe_gt_assert(gt, size == ndwords * sizeof(u32));

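	/* stage the saved state in a GGTT-mapped system memory buffer that GuC can read from */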
	bo = xe_bo_create_pin_map(xe, tile, NULL,
				  ALIGN(size, PAGE_SIZE),
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT |
				  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);

	ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
					 xe_bo_ggtt_addr(bo), ndwords);
	if (!ret)
		ret = -ENODATA;
	else if (ret > ndwords)
		ret = -EPROTO;

	xe_bo_unpin_map_no_vm(bo);
	return ret;
}

static bool pf_migration_supported(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	return gt->sriov.pf.migration.supported;
}

static struct mutex *pf_migration_mutex(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	return &gt->sriov.pf.migration.snapshot_lock;
}

static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
							      unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
	lockdep_assert_held(pf_migration_mutex(gt));

	return &gt->sriov.pf.vfs[vfid].snapshot;
}

static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
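	/* the snapshot is embedded in the per-VF metadata, so pointer arithmetic yields the VF number */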
	return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
}

static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
	struct xe_device *xe = gt_to_xe(gt);

	drmm_kfree(&xe->drm, snapshot->guc.buff);
	snapshot->guc.buff = NULL;
	snapshot->guc.size = 0;
}

static int pf_alloc_guc_state(struct xe_gt *gt,
			      struct xe_gt_sriov_state_snapshot *snapshot,
			      size_t size)
{
	struct xe_device *xe = gt_to_xe(gt);
	void *p;

	pf_free_guc_state(gt, snapshot);

	if (!size)
		return -ENODATA;

	if (size % sizeof(u32))
		return -EINVAL;

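	/* arbitrary sanity cap on the snapshot size */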
	if (size > SZ_2M)
		return -EFBIG;

	p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	snapshot->guc.buff = p;
	snapshot->guc.size = size;
	return 0;
}

static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
{
	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
		unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);

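		/* print a short header and hex-dump at most the first 64 bytes */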
		xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
					vfid, snapshot->guc.size / sizeof(u32));
		print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
				     snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
	}
}

static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
	size_t size;
	int ret;

	ret = pf_send_guc_query_vf_state_size(gt, vfid);
	if (ret < 0)
		goto fail;
	size = ret * sizeof(u32);
	xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);

	ret = pf_alloc_guc_state(gt, snapshot, size);
	if (ret < 0)
		goto fail;

	ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
	if (ret < 0)
		goto fail;
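	/* GuC may have written fewer dwords than queried; shrink the snapshot to the actual size */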
	size = ret * sizeof(u32);
	xe_gt_assert(gt, size);
	xe_gt_assert(gt, size <= snapshot->guc.size);
	snapshot->guc.size = size;

	pf_dump_guc_state(gt, snapshot);
	return 0;

fail:
	xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
	pf_free_guc_state(gt, snapshot);
	return ret;
}

/**
 * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	err = pf_save_vf_guc_state(gt, vfid);
	mutex_unlock(pf_migration_mutex(gt));

	return err;
}

static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
	int ret;

	if (!snapshot->guc.size)
		return -ENODATA;

	xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
				snapshot->guc.size / sizeof(u32), vfid);
	ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
	if (ret < 0)
		goto fail;

	xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
	return 0;

fail:
	xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
	return ret;
}

/**
 * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
{
	int ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	ret = pf_restore_vf_guc_state(gt, vfid);
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}

#ifdef CONFIG_DEBUG_FS
/**
 * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @buf: the user space buffer to read into
 * @count: the maximum number of bytes to read
 * @pos: the current position in the buffer
 *
 * This function is for PF only.
 *
 * This function reads up to @count bytes from the saved VF GuC state buffer
 * at offset @pos into the user space address starting at @buf.
 *
 * Return: the number of bytes read or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
						char __user *buf, size_t count, loff_t *pos)
{
	struct xe_gt_sriov_state_snapshot *snapshot;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	snapshot = pf_pick_vf_snapshot(gt, vfid);
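	/* expose the snapshot buffer directly; simple_read_from_buffer() handles @pos and short reads */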
	if (snapshot->guc.size)
		ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
					      snapshot->guc.size);
	else
		ret = -ENODATA;
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}

/**
 * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 * @buf: the user space buffer with GuC VF state
 * @size: the size of GuC VF state (in bytes)
 *
 * This function is for PF only.
 *
 * This function reads @size bytes of the VF GuC state stored at user space
 * address @buf and writes it into an internal VF state buffer.
 *
 * Return: the number of bytes used or a negative error code on failure.
 */
ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
						 const char __user *buf, size_t size)
{
	struct xe_gt_sriov_state_snapshot *snapshot;
	loff_t pos = 0;
	ssize_t ret;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	xe_gt_assert(gt, vfid != PFID);
	xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));

	if (!pf_migration_supported(gt))
		return -ENOPKG;

	mutex_lock(pf_migration_mutex(gt));
	snapshot = pf_pick_vf_snapshot(gt, vfid);
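	/* (re)allocate the snapshot buffer to match the incoming state before copying from user space */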
	ret = pf_alloc_guc_state(gt, snapshot, size);
	if (!ret) {
		ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
		if (ret < 0)
			pf_free_guc_state(gt, snapshot);
		else
			pf_dump_guc_state(gt, snapshot);
	}
	mutex_unlock(pf_migration_mutex(gt));

	return ret;
}
#endif /* CONFIG_DEBUG_FS */

static bool pf_check_migration_support(struct xe_gt *gt)
{
	/* GuC 70.25 with save/restore v2 is required */
	xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));

	/* XXX: for now this is for feature enabling only */
	return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
}

/**
 * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
 * @gt: the &xe_gt
 *
 * This function is for PF only.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	gt->sriov.pf.migration.supported = pf_check_migration_support(gt);

	if (!pf_migration_supported(gt))
		return 0;

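	/* managed mutex: released automatically together with the DRM device */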
	err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
	if (err)
		return err;

	return 0;
}