xref: /linux/drivers/gpu/drm/xe/xe_tile_sriov_vf.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <drm/drm_managed.h>

#include "regs/xe_gtt_defs.h"

#include "xe_assert.h"
#include "xe_ggtt.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_tile_sriov_vf.h"
#include "xe_wopcm.h"

/**
 * DOC: GGTT nodes shifting during VF post-migration recovery
 *
 * The first fixup applied to the VF KMD structures as part of post-migration
 * recovery is shifting the nodes within the &xe_ggtt instance. The nodes are
 * moved from the range previously assigned to this VF into the newly
 * provisioned area.
 *
 * Below is the GGTT layout of an example VF, with a certain address range
 * assigned to that VF, and inaccessible areas above and below:
 *
 *  0                                                                        4GiB
 *  |<--------------------------- Total GGTT size ----------------------------->|
 *      WOPCM                                                         GUC_TOP
 *      |<-------------- Area mappable by xe_ggtt instance ---------------->|
 *
 *  +---+---------------------------------+----------+----------------------+---+
 *  |\\\|/////////////////////////////////|  VF mem  |//////////////////////|\\\|
 *  +---+---------------------------------+----------+----------------------+---+
 *
 * Hardware enforced access rules before migration:
 *
 *  |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
 *
 * After the migration, the GGTT area assigned to the VF might have shifted,
 * either to a lower or to a higher address. However, we expect the total GGTT
 * size and the unmappable areas at both ends to be identical, as migration can
 * only happen between matching platforms. Below is an example GGTT layout of
 * the VF after migration. The VF's GGTT content has been moved to a new area,
 * and we receive its address from the GuC:
 *
 *  +---+----------------------+----------+---------------------------------+---+
 *  |\\\|//////////////////////|  VF mem  |/////////////////////////////////|\\\|
 *  +---+----------------------+----------+---------------------------------+---+
 *
 * Hardware enforced access rules after migration:
 *
 *  |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
 *
 * So the VF has a new slice of the GGTT assigned, and during the migration
 * process the memory content was copied to that new area. But the &xe_ggtt
 * nodes are still tracking allocations using the old addresses. The nodes
 * within the VF owned area have to be shifted (see the illustrative sketch
 * below), and the GGTT start offset adjusted.
 *
 * Due to the use of GPU profiles, we do not expect the old and new GGTT areas
 * to overlap, but the node shifting fixes the addresses correctly regardless.
 */

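/*
 * Illustrative sketch only, not part of the driver: a node that started at
 * @old_start inside the previously provisioned VF range is rebased by the
 * difference between the new and old VF base addresses. The helper name and
 * its parameters are hypothetical, added purely for illustration.
 */
static inline u64 vf_ggtt_node_new_start_sketch(u64 old_start, u64 old_base,
						u64 new_base)
{
	/* Unsigned modular arithmetic handles moves in both directions */
	return old_start + new_base - old_base;
}
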
/**
 * xe_tile_sriov_vf_lmem - VF LMEM configuration.
 * @tile: the &xe_tile
 *
 * This function is for VF use only.
 *
 * Return: size of the LMEM assigned to the VF.
 */
u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	return config->lmem_size;
}

/**
 * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration
 * @tile: the &xe_tile
 * @lmem_size: VF LMEM size to store
 *
 * This function is for VF use only.
 */
void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	config->lmem_size = lmem_size;
}

/**
 * xe_tile_sriov_vf_ggtt - VF GGTT configuration.
 * @tile: the &xe_tile
 *
 * This function is for VF use only.
 *
 * Return: size of the GGTT assigned to the VF.
 */
u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	return config->ggtt_size;
}

/**
 * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration
 * @tile: the &xe_tile
 * @ggtt_size: VF GGTT size to store
 *
 * This function is for VF use only.
 */
void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	config->ggtt_size = ggtt_size;
}

/**
 * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration.
 * @tile: the &xe_tile
 *
 * This function is for VF use only.
 *
 * Return: base of the GGTT assigned to the VF.
 */
u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	return READ_ONCE(config->ggtt_base);
}

/**
 * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration
 * @tile: the &xe_tile
 * @ggtt_base: VF GGTT base to store
 *
 * This function is for VF use only.
 */
void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base)
{
	struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;

	xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));

	WRITE_ONCE(config->ggtt_base, ggtt_base);
}
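
/*
 * Illustrative sketch only, not part of the driver: a post-migration recovery
 * step would read the stale base with xe_tile_sriov_vf_ggtt_base(), record the
 * address newly provisioned by the GuC with xe_tile_sriov_vf_ggtt_base_store(),
 * and then shift the GGTT nodes by the resulting signed delta. The function
 * name and its @new_base parameter are hypothetical, added purely for
 * illustration.
 */
static inline s64 vf_ggtt_rebase_sketch(struct xe_tile *tile, u64 new_base)
{
	u64 old_base = xe_tile_sriov_vf_ggtt_base(tile);

	/* Record the new base so later readers observe the post-migration value */
	xe_tile_sriov_vf_ggtt_base_store(tile, new_base);

	/* Nodes in the VF owned range then need to move by this signed delta */
	return (s64)new_base - (s64)old_base;
}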