// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023-2024 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"

#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_mmio.h"
#include "xe_pm.h"

static void pf_worker_restart_func(struct work_struct *w);

/*
 * VF's metadata is maintained in the flexible array where:
 *   - entry [0] contains metadata for the PF (only if applicable),
 *   - entries [1..n] contain metadata for VF1..VFn::
 *
 *       <--------------------------- 1 + total_vfs ----------->
 *      +-------+-------+-------+-----------------------+-------+
 *      |   0   |   1   |   2   |                       |   n   |
 *      +-------+-------+-------+-----------------------+-------+
 *      |  PF   |  VF1  |  VF2  |      ...     ...      |  VFn  |
 *      +-------+-------+-------+-----------------------+-------+
 */
static int pf_alloc_metadata(struct xe_gt *gt)
{
	unsigned int num_vfs = xe_gt_sriov_pf_get_totalvfs(gt);

	gt->sriov.pf.vfs = drmm_kcalloc(&gt_to_xe(gt)->drm, 1 + num_vfs,
					sizeof(*gt->sriov.pf.vfs), GFP_KERNEL);
	if (!gt->sriov.pf.vfs)
		return -ENOMEM;

	return 0;
}

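/* Set up the workers used for deferred PF actions. */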
static void pf_init_workers(struct xe_gt *gt)
{
	INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}

/**
 * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Early initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
{
	int err;

	err = pf_alloc_metadata(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_service_init(gt);
	if (err)
		return err;

	err = xe_gt_sriov_pf_control_init(gt);
	if (err)
		return err;

	pf_init_workers(gt);

	return 0;
}

/**
 * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
 * @gt: the &xe_gt to initialize
 *
 * Late one-time initialization of the PF data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_init(struct xe_gt *gt)
{
	int err;

	err = xe_gt_sriov_pf_config_init(gt);
	if (err)
		return err;

	return xe_gt_sriov_pf_migration_init(gt);
}

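/*
 * Only platforms with graphics IP version 12.00 need the PF to explicitly
 * allow GGTT updates coming from the guests.
 */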
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) == 1200;
}

static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
	xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}

/**
 * xe_gt_sriov_pf_init_hw - Initialize SR-IOV hardware support.
 * @gt: the &xe_gt to initialize
 *
 * On some platforms the PF must explicitly enable VF's access to the GGTT.
 */
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
	if (pf_needs_enable_ggtt_guest_update(gt_to_xe(gt)))
		pf_enable_ggtt_guest_update(gt);

	xe_gt_sriov_pf_service_update(gt);
}

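/*
 * Each VF has its own copy of some registers; the stride between
 * consecutive per-VF copies depends on the platform generation.
 */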
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
}

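/*
 * Translate a VF register into the PF-accessible instance for @vfid:
 * drop the VF flag and offset the address by @vfid register strides.
 */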
static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
{
	struct xe_reg pf_reg = vf_reg;

	pf_reg.vf = 0;
	pf_reg.addr += stride * vfid;

	return pf_reg;
}

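/* Clear the VF's software-flag scratch registers using the PF's view of them. */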
static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
	u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
	struct xe_reg scratch;
	int n, count;

	if (xe_gt_is_media_type(gt)) {
		count = MED_VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	} else {
		count = VF_SW_FLAG_COUNT;
		for (n = 0; n < count; n++) {
			scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
			xe_mmio_write32(&gt->mmio, scratch, 0);
		}
	}
}

/**
 * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
 * @gt: the &xe_gt
 * @vfid: the VF identifier
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));

	pf_clear_vf_scratch_regs(gt, vfid);
}

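/*
 * Refresh the VFs' configurations and resume pending control actions after
 * a GT reset, holding a runtime PM reference for the duration.
 */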
static void pf_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_pm_runtime_get(xe);
	xe_gt_sriov_pf_config_restart(gt);
	xe_gt_sriov_pf_control_restart(gt);
	xe_pm_runtime_put(xe);

	xe_gt_sriov_dbg(gt, "restart completed\n");
}

static void pf_worker_restart_func(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);

	pf_restart(gt);
}

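/* Schedule the restart worker; queue_work() returns false if it is already pending. */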
static void pf_queue_restart(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	xe_gt_assert(gt, IS_SRIOV_PF(xe));

	if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
		xe_gt_sriov_dbg(gt, "restart already in queue!\n");
}

/**
 * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
 * @gt: the &xe_gt
 *
 * This function can only be called on PF.
 */
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
	pf_queue_restart(gt);
}