/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */

#ifndef _AIE2_PCI_H_
#define _AIE2_PCI_H_

#include <drm/amdxdna_accel.h>
#include <linux/semaphore.h>

#include "amdxdna_mailbox.h"

#define AIE2_INTERVAL	20000	/* us */
#define AIE2_TIMEOUT	1000000	/* us */

/* Firmware determines device memory base address and size */
#define AIE2_DEVM_BASE	0x4000000
#define AIE2_DEVM_SIZE	SZ_64M

#define NDEV2PDEV(ndev) (to_pci_dev((ndev)->xdna->ddev.dev))

#define AIE2_SRAM_OFF(ndev, addr) ((addr) - (ndev)->priv->sram_dev_addr)
#define AIE2_MBOX_OFF(ndev, addr) ((addr) - (ndev)->priv->mbox_dev_addr)
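
/*
 * AIE2_SRAM_OFF()/AIE2_MBOX_OFF() translate a device-visible SRAM/mailbox
 * address into an offset from the base address recorded in the per-device
 * amdxdna_dev_priv table. Illustrative sketch only (the 0x1000 delta is a
 * made-up value, not taken from any device table):
 *
 *	u32 off = AIE2_MBOX_OFF(ndev, ndev->priv->mbox_dev_addr + 0x1000);
 *	off is 0x1000 here and can be applied to ndev->mbox_base
 */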

#define PSP_REG_BAR(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].bar_idx)
#define PSP_REG_OFF(ndev, idx) ((ndev)->priv->psp_regs_off[(idx)].offset)
#define SRAM_REG_OFF(ndev, idx) ((ndev)->priv->sram_offs[(idx)].offset)

#define SMU_REG(ndev, idx) \
({ \
	typeof(ndev) _ndev = ndev; \
	((_ndev)->smu_base + (_ndev)->priv->smu_regs_off[(idx)].offset); \
})
#define SRAM_GET_ADDR(ndev, idx) \
({ \
	typeof(ndev) _ndev = ndev; \
	((_ndev)->sram_base + SRAM_REG_OFF((_ndev), (idx))); \
})
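
/*
 * SMU_REG() and SRAM_GET_ADDR() resolve a register index from the per-device
 * tables into a pointer within the already ioremapped SMU/SRAM regions; the
 * local _ndev copy avoids evaluating the macro argument more than once.
 * Hedged usage sketch (register indices come from the enums below):
 *
 *	writel(val, SMU_REG(ndev, SMU_ARG_REG));
 *	void __iomem *fw_alive = SRAM_GET_ADDR(ndev, FW_ALIVE_OFF);
 */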

#define CHAN_SLOT_SZ SZ_8K
#define MBOX_SIZE(ndev) \
({ \
	typeof(ndev) _ndev = (ndev); \
	((_ndev)->priv->mbox_size) ? (_ndev)->priv->mbox_size : \
	pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
})
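
/*
 * MBOX_SIZE() prefers the size from the device table and falls back to the
 * full length of the mailbox BAR when mbox_size is zero. Sketch, assuming a
 * device table that leaves mbox_size at 0:
 *
 *	resource_size_t sz = MBOX_SIZE(ndev);
 *	equivalent to pci_resource_len(NDEV2PDEV(ndev), mbox_bar) in that case
 */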

enum aie2_smu_reg_idx {
	SMU_CMD_REG = 0,
	SMU_ARG_REG,
	SMU_INTR_REG,
	SMU_RESP_REG,
	SMU_OUT_REG,
	SMU_MAX_REGS /* Keep this at the end */
};

enum aie2_sram_reg_idx {
	MBOX_CHANN_OFF = 0,
	FW_ALIVE_OFF,
	SRAM_MAX_INDEX /* Keep this at the end */
};

enum psp_reg_idx {
	PSP_CMD_REG = 0,
	PSP_ARG0_REG,
	PSP_ARG1_REG,
	PSP_ARG2_REG,
	PSP_NUM_IN_REGS, /* number of input registers */
	PSP_INTR_REG = PSP_NUM_IN_REGS,
	PSP_STATUS_REG,
	PSP_RESP_REG,
	PSP_MAX_REGS /* Keep this at the end */
};
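
/*
 * The PSP register indices are laid out as PSP_NUM_IN_REGS input registers
 * (command plus three arguments) followed by the interrupt, status and
 * response registers, so PSP_NUM_IN_REGS doubles as both the input count and
 * the index of the first non-input register.
 */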

struct amdxdna_client;
struct amdxdna_fw_ver;
struct amdxdna_hwctx;
struct amdxdna_sched_job;

struct psp_config {
	const void	*fw_buf;
	u32		fw_size;
	void __iomem	*psp_regs[PSP_MAX_REGS];
};

struct aie_version {
	u16 major;
	u16 minor;
};

struct aie_tile_metadata {
	u16 row_count;
	u16 row_start;
	u16 dma_channel_count;
	u16 lock_count;
	u16 event_reg_count;
};

struct aie_metadata {
	u32 size;
	u16 cols;
	u16 rows;
	struct aie_version version;
	struct aie_tile_metadata core;
	struct aie_tile_metadata mem;
	struct aie_tile_metadata shim;
};

enum rt_config_category {
	AIE2_RT_CFG_INIT,
	AIE2_RT_CFG_CLK_GATING,
};

struct rt_config {
	u32	type;
	u32	value;
	u32	category;
};

struct dpm_clk_freq {
	u32	npuclk;
	u32	hclk;
};

/*
 * Define the maximum number of pending commands in a hardware context.
 * Must be a power of 2!
 */
#define HWCTX_MAX_CMDS		4
#define get_job_idx(seq) ((seq) & (HWCTX_MAX_CMDS - 1))
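
/*
 * Because HWCTX_MAX_CMDS is a power of two, masking with HWCTX_MAX_CMDS - 1
 * maps the ever-increasing sequence number onto a small ring of command
 * slots. For example, with HWCTX_MAX_CMDS == 4, seq values 0, 1, 2, 3, 4, 5
 * select slots 0, 1, 2, 3, 0, 1.
 */
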
struct amdxdna_hwctx_priv {
	struct amdxdna_gem_obj		*heap;
	void				*mbox_chann;

	struct drm_gpu_scheduler	sched;
	struct drm_sched_entity		entity;

	struct mutex			io_lock; /* protect seq and cmd order */
	struct wait_queue_head		job_free_wq;
	u32				num_pending;
	u64				seq;
	struct semaphore		job_sem;
	bool				job_done;

	/* Completed job counter */
	u64				completed;

	struct amdxdna_gem_obj		*cmd_buf[HWCTX_MAX_CMDS];
	struct drm_syncobj		*syncobj;
};
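
/*
 * Submission bookkeeping sketch (aie2_hwctx.c holds the authoritative flow):
 * io_lock orders submissions and protects seq, the low bits of seq select a
 * cmd_buf slot via get_job_idx(), and job_sem is presumably what bounds the
 * number of commands in flight to HWCTX_MAX_CMDS.
 */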

enum aie2_dev_status {
	AIE2_DEV_UNINIT,
	AIE2_DEV_INIT,
	AIE2_DEV_START,
};

struct amdxdna_dev_hdl {
	struct amdxdna_dev		*xdna;
	const struct amdxdna_dev_priv	*priv;
	void			__iomem *sram_base;
	void			__iomem *smu_base;
	void			__iomem *mbox_base;
	struct psp_device		*psp_hdl;

	struct xdna_mailbox_chann_res	mgmt_x2i;
	struct xdna_mailbox_chann_res	mgmt_i2x;
	u32				mgmt_chan_idx;
	u32				mgmt_prot_major;
	u32				mgmt_prot_minor;

	u32				total_col;
	struct aie_version		version;
	struct aie_metadata		metadata;

	/* Power management and clock */
	enum amdxdna_power_mode_type	pw_mode;
	u32				dpm_level;
	u32				dft_dpm_level;
	u32				max_dpm_level;
	u32				clk_gating;
	u32				npuclk_freq;
	u32				hclk_freq;

	/* Mailbox and the management channel */
	struct mailbox			*mbox;
	struct mailbox_channel		*mgmt_chann;
	struct async_events		*async_events;

	enum aie2_dev_status		dev_status;
	u32				hwctx_num;
};

#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
	[reg_name] = {bar##_BAR_INDEX, (reg_addr) - bar##_BAR_BASE}
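
/*
 * DEFINE_BAR_OFFSET() builds an aie2_bar_off_pair initializer by pasting the
 * bar token into <bar>_BAR_INDEX / <bar>_BAR_BASE, which the including NPU
 * source is expected to define. Hypothetical expansion (the 0x1234 register
 * address is made up for illustration):
 *
 *	DEFINE_BAR_OFFSET(PSP_CMD_REG, PSP, 0x1234)
 *	becomes
 *	[PSP_CMD_REG] = {PSP_BAR_INDEX, (0x1234) - PSP_BAR_BASE}
 */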

struct aie2_bar_off_pair {
	int	bar_idx;
	u32	offset;
};

struct aie2_hw_ops {
	int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
};

struct amdxdna_dev_priv {
	const char			*fw_path;
	u64				protocol_major;
	u64				protocol_minor;
	const struct rt_config		*rt_config;
	const struct dpm_clk_freq	*dpm_clk_tbl;

#define COL_ALIGN_NONE   0
#define COL_ALIGN_NATURE 1
	u32				col_align;
	u32				mbox_dev_addr;
	/* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
	u32				mbox_size;
	u32				sram_dev_addr;
	struct aie2_bar_off_pair	sram_offs[SRAM_MAX_INDEX];
	struct aie2_bar_off_pair	psp_regs_off[PSP_MAX_REGS];
	struct aie2_bar_off_pair	smu_regs_off[SMU_MAX_REGS];
	struct aie2_hw_ops		hw_ops;
};
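
/*
 * Each NPU variant supplies one amdxdna_dev_priv instance. A heavily
 * abbreviated, hypothetical sketch of such a table (field values are
 * illustrative, not copied from any real NPU table):
 *
 *	const struct amdxdna_dev_priv npuX_dev_priv = {
 *		.fw_path	= "<firmware image path>",
 *		.col_align	= COL_ALIGN_NONE,
 *		.rt_config	= npu1_default_rt_cfg,
 *		.dpm_clk_tbl	= npu1_dpm_clk_table,
 *		.hw_ops		= { .set_dpm = npu1_set_dpm },
 *	};
 */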

extern const struct amdxdna_dev_ops aie2_ops;

int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val);

/* aie2 npu hw config */
extern const struct dpm_clk_freq npu1_dpm_clk_table[];
extern const struct dpm_clk_freq npu4_dpm_clk_table[];
extern const struct rt_config npu1_default_rt_cfg[];
extern const struct rt_config npu4_default_rt_cfg[];

/* aie2_smu.c */
int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);

/* aie2_pm.c */
int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);

/* aie2_psp.c */
struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
int aie2_psp_start(struct psp_device *psp);
void aie2_psp_stop(struct psp_device *psp);

/* aie2_error.c */
int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
int aie2_error_async_msg_thread(void *data);

/* aie2_message.c */
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);
int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
				struct amdxdna_fw_ver *fw_ver);
int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
				 void *handle, int (*cb)(void*, const u32 *, size_t));
int aie2_config_cu(struct amdxdna_hwctx *hwctx);
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
		 int (*notify_cb)(void *, const u32 *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
				struct amdxdna_sched_job *job,
				int (*notify_cb)(void *, const u32 *, size_t));
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
			       struct amdxdna_sched_job *job,
			       int (*notify_cb)(void *, const u32 *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
		 int (*notify_cb)(void *, const u32 *, size_t));

/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
void aie2_restart_ctx(struct amdxdna_client *client);

#endif /* _AIE2_PCI_H_ */