// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"

/*
 * VF's metadata is maintained in the flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
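/* Allocate the drmm-managed metadata array shown above (1 + total_vfs entries). */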
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	return 0;
}

/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	return xe_gt_sriov_pf_migration_init(gt);
}

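/*
 * Only platforms with graphics version 12.00 require the PF to explicitly
 * enable GGTT updates coming from the guest (VF).
 */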
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}

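/*
 * Stride between consecutive VFs' copies of the same register, as seen
 * from the PF (see xe_reg_vf_to_pf() below).
 */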
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

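/* Translate a VF register into the PF's view of that VF's copy of the register. */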
static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}

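/* Clear any stale values the VF may have left in its software scratch registers. */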
static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
	struct xe_reg scratch;
	int n, count;

	if (xe_gt_is_media_type(gt)) {
		count = MED_VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	} else {
		count = VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	}
}

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
}
169