xref: /linux/drivers/accel/amdxdna/amdxdna_ctx.h (revision face6a3615a649456eb4549f6d474221d877d604)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4  */
5 
6 #ifndef _AMDXDNA_CTX_H_
7 #define _AMDXDNA_CTX_H_
8 
9 #include <linux/bitfield.h>
10 
11 #include "amdxdna_gem.h"
12 
13 struct amdxdna_hwctx_priv;
14 
/*
 * Exec buffer command opcodes, packed into the AMDXDNA_CMD_OPCODE field of
 * the command header. Values are explicitly fixed (they mirror the userspace
 * ERT command protocol) — do not renumber.
 */
enum ert_cmd_opcode {
	ERT_START_CU      = 0,	/* start a compute unit */
	ERT_CMD_CHAIN     = 19,	/* payload is struct amdxdna_cmd_chain (cmd BO handles) */
	ERT_START_NPU     = 20,	/* payload is struct amdxdna_cmd_start_npu */
};
20 
/*
 * Lifecycle states of an exec buffer command, stored in the AMDXDNA_CMD_STATE
 * field (4 bits) of the command header. Ordering/implicit values are part of
 * the ERT protocol — do not reorder.
 */
enum ert_cmd_state {
	ERT_CMD_STATE_INVALID,		/* not a valid command */
	ERT_CMD_STATE_NEW,		/* created, not yet queued */
	ERT_CMD_STATE_QUEUED,		/* queued for submission */
	ERT_CMD_STATE_RUNNING,		/* accepted by hardware, executing */
	ERT_CMD_STATE_COMPLETED,	/* finished successfully */
	ERT_CMD_STATE_ERROR,		/* finished with an error */
	ERT_CMD_STATE_ABORT,		/* aborted before completion */
	ERT_CMD_STATE_SUBMITTED,	/* handed to firmware/hardware */
	ERT_CMD_STATE_TIMEOUT,		/* did not complete in time */
	ERT_CMD_STATE_NORESPONSE,	/* hardware gave no response */
};
33 
/*
 * Interpretation of the beginning of data payload for ERT_START_NPU in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
 *
 * Layout is shared with userspace/firmware — do not reorder or resize fields.
 */
struct amdxdna_cmd_start_npu {
	u64 buffer;       /* instruction buffer address (presumably a device address — confirm) */
	u32 buffer_size;  /* size of buffer in bytes */
	u32 prop_count;	  /* properties count */
	u32 prop_args[];  /* properties and regular kernel arguments */
};
44 
/*
 * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
 *
 * Layout is shared with userspace/firmware — do not reorder or resize fields.
 */
struct amdxdna_cmd_chain {
	u32 command_count;	/* number of entries in data[] */
	u32 submit_index;	/* NOTE(review): presumably index of next command to run — confirm */
	u32 error_index;	/* NOTE(review): presumably index of the failed command — confirm */
	u32 reserved[3];
	u64 data[] __counted_by(command_count);	/* command BO handles */
};
56 
/*
 * Exec buffer command header format. The 32-bit header packs the command
 * state, opcode and a count field; bits not named below are not used by
 * this driver (presumably reserved by the ERT protocol — confirm).
 */
#define AMDXDNA_CMD_STATE		GENMASK(3, 0)
#define AMDXDNA_CMD_EXTRA_CU_MASK	GENMASK(11, 10)
#define AMDXDNA_CMD_COUNT		GENMASK(22, 12)
#define AMDXDNA_CMD_OPCODE		GENMASK(27, 23)
/* Generic exec buffer command: packed header followed by opcode-specific payload */
struct amdxdna_cmd {
	u32 header;	/* AMDXDNA_CMD_* fields */
	u32 data[];	/* opcode-specific payload, see structs above */
};
66 
/*
 * A hardware context: a client's slice of the NPU (a range of columns) plus
 * the state needed to configure it and run commands on it.
 */
struct amdxdna_hwctx {
	struct amdxdna_client		*client;	/* owning client */
	struct amdxdna_hwctx_priv	*priv;		/* implementation-private state */
	char				*name;

	u32				id;		/* driver-side context handle */
	u32				max_opc;	/* NOTE(review): presumably max outstanding commands — confirm */
	u32				num_tiles;	/* tiles requested for this context */
	u32				mem_size;
	u32				fw_ctx_id;	/* context id used by firmware */
	u32				col_list_len;	/* number of entries in col_list */
	u32				*col_list;	/* NOTE(review): presumably candidate start columns — confirm */
	u32				start_col;	/* first column assigned */
	u32				num_col;	/* number of columns assigned */
/* Values for @status below */
#define HWCTX_STAT_INIT  0
#define HWCTX_STAT_READY 1
#define HWCTX_STAT_STOP  2
	u32				status;		/* HWCTX_STAT_* */
	u32				old_status;	/* status saved while stopped — TODO confirm */

	struct amdxdna_qos_info		     qos;	/* QoS parameters for this context */
	struct amdxdna_hwctx_param_config_cu *cus;	/* configured compute units */
	u32				syncobj_hdl;	/* DRM syncobj handle */

	atomic64_t			job_submit_cnt;
	/* On its own cacheline: freed from a different path than submit, avoids false sharing */
	atomic64_t			job_free_cnt ____cacheline_aligned_in_smp;
};
94 
/* Convert an embedded &struct drm_sched_job back to its containing &struct amdxdna_sched_job */
#define drm_job_to_xdna_job(j) \
	container_of(j, struct amdxdna_sched_job, base)
97 
/* Driver-internal job opcodes, carried in struct amdxdna_drv_cmd */
enum amdxdna_job_opcode {
	SYNC_DEBUG_BO,		/* sync a debug BO, see amdxdna_hwctx_sync_debug_bo() */
	ATTACH_DEBUG_BO,	/* attach a debug BO to a context */
	DETACH_DEBUG_BO,	/* detach a debug BO from a context */
};
103 
/* A driver-internal command attached to a scheduled job (debug BO operations) */
struct amdxdna_drv_cmd {
	enum amdxdna_job_opcode	opcode;
	u32			result;	/* NOTE(review): presumably outcome written on completion — confirm */
};
108 
/*
 * One submitted job: wraps a DRM scheduler job together with the command BO
 * and the argument BOs it references.
 */
struct amdxdna_sched_job {
	struct drm_sched_job	base;		/* must be first for drm_job_to_xdna_job() */
	struct kref		refcnt;		/* job lifetime reference count */
	struct amdxdna_hwctx	*hwctx;		/* context the job runs on */
	struct mm_struct	*mm;		/* submitter's address space */
	/* The fence to notice DRM scheduler that job is done by hardware */
	struct dma_fence	*fence;
	/* user can wait on this fence */
	struct dma_fence	*out_fence;
	bool			job_done;	/* set once hardware completed the job */
	u64			seq;		/* submission sequence number, see amdxdna_cmd_wait() */
	struct amdxdna_drv_cmd	*drv_cmd;	/* optional driver-internal command, or NULL */
	struct amdxdna_gem_obj	*cmd_bo;	/* BO holding the struct amdxdna_cmd */
	size_t			bo_cnt;		/* number of entries in bos[] */
	struct drm_gem_object	*bos[] __counted_by(bo_cnt);	/* argument BOs */
};
125 
126 static inline u32
127 amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
128 {
129 	struct amdxdna_cmd *cmd = abo->mem.kva;
130 
131 	return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
132 }
133 
134 static inline void
135 amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
136 {
137 	struct amdxdna_cmd *cmd = abo->mem.kva;
138 
139 	cmd->header &= ~AMDXDNA_CMD_STATE;
140 	cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
141 }
142 
143 static inline enum ert_cmd_state
144 amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
145 {
146 	struct amdxdna_cmd *cmd = abo->mem.kva;
147 
148 	return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
149 }
150 
/* Return a pointer past the command header; *size receives the payload size — units TODO confirm */
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
/* Return the compute-unit index the command targets (negative errno on failure — confirm) */
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);

void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
/* Tear down every hardware context owned by @client */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
/* Invoke @walk for each of @client's contexts; a nonzero return presumably stops the walk — confirm */
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);

/* Submit a command BO with its argument BOs to context @hwctx_hdl; *seq receives the job sequence number */
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
		       u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq);

/* Wait up to @timeout for job @seq on context @hwctx_hdl — timeout units TODO confirm */
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
		     u64 seq, u32 timeout);

/* DRM ioctl entry points */
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
172 
173 #endif /* _AMDXDNA_CTX_H_ */
174