xref: /linux/drivers/bus/mhi/host/internal.h (revision acbf6de674ef7b1b5870b25e7b3c695bf84273d0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6 
7 #ifndef _MHI_INT_H
8 #define _MHI_INT_H
9 
10 #include "../common.h"
11 
12 extern struct bus_type mhi_bus_type;
13 
/* Host request register: write MHI_SOC_RESET_REQ to request a SoC reset */
#define MHI_SOC_RESET_REQ_OFFSET			0xb0
#define MHI_SOC_RESET_REQ				BIT(0)

/* SoC hardware version register and its bit fields */
#define SOC_HW_VERSION_OFFS				0x224
#define SOC_HW_VERSION_FAM_NUM_BMSK			GENMASK(31, 28)
#define SOC_HW_VERSION_DEV_NUM_BMSK			GENMASK(27, 16)
#define SOC_HW_VERSION_MAJOR_VER_BMSK			GENMASK(15, 8)
#define SOC_HW_VERSION_MINOR_VER_BMSK			GENMASK(7, 0)
23 
/**
 * struct mhi_ctxt - DMA-mapped context tables and their bus addresses
 * @er_ctxt: event ring context array
 * @chan_ctxt: channel context array
 * @cmd_ctxt: command ring context array
 * @er_ctxt_addr: DMA address of @er_ctxt
 * @chan_ctxt_addr: DMA address of @chan_ctxt
 * @cmd_ctxt_addr: DMA address of @cmd_ctxt
 */
struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t er_ctxt_addr;
	dma_addr_t chan_ctxt_addr;
	dma_addr_t cmd_ctxt_addr;
};
32 
/**
 * struct bhi_vec_entry - BHI vector table entry describing one DMA segment
 * @dma_addr: bus address of the segment
 * @size: segment size in bytes
 *
 * Fixed-width u64 fields: this layout is consumed by the device firmware.
 */
struct bhi_vec_entry {
	u64 dma_addr;
	u64 size;
};
37 
/* Channel state change commands issued to the device */
enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};

/* Names indexed by enum mhi_ch_state_type; out-of-range values are guarded */
extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
				     "INVALID_STATE" : \
				     mhi_ch_state_type_str[(state)])
49 
/*
 * A doorbell burst mode must be explicitly enabled or disabled; anything
 * else is rejected. Argument parenthesized so expression arguments expand
 * safely.
 */
#define MHI_INVALID_BRSTMODE(mode) ((mode) != MHI_DB_BRST_DISABLE && \
				    (mode) != MHI_DB_BRST_ENABLE)
52 
53 extern const char * const mhi_ee_str[MHI_EE_MAX];
54 #define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
55 			     "INVALID_EE" : mhi_ee_str[ee])
56 
57 #define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
58 			ee == MHI_EE_EDL)
59 #define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
60 #define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
61 #define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
62 				 ee == MHI_EE_FP)
63 
/* Device state transitions queued to and executed by mhi_pm_st_worker() */
enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_MAX,
};

/* Names indexed by enum dev_st_transition; out-of-range values are guarded */
extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
/* Index parenthesized for consistency with TO_CH_STATE_TYPE_STR() */
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				"INVALID_STATE" : dev_state_tran_str[(state)])
78 
/*
 * Internal power states. The order is significant: the MHI_PM_* bitmask
 * defines that follow are BIT() of these enum positions, and the error
 * states are placed last so they occupy the highest bit positions (see
 * MHI_PM_IN_ERROR_STATE()).
 */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};
95 
/*
 * Bitmask form of the internal power states; each define must stay BIT() of
 * the corresponding enum mhi_pm_state position above. Macro arguments are
 * parenthesized so expression arguments expand safely.
 */
#define MHI_PM_DISABLE					BIT(0)
#define MHI_PM_POR					BIT(1)
#define MHI_PM_M0					BIT(2)
#define MHI_PM_M2					BIT(3)
#define MHI_PM_M3_ENTER					BIT(4)
#define MHI_PM_M3					BIT(5)
#define MHI_PM_M3_EXIT					BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR				BIT(7)
#define MHI_PM_SYS_ERR_DETECT				BIT(8)
#define MHI_PM_SYS_ERR_PROCESS				BIT(9)
#define MHI_PM_SHUTDOWN_PROCESS				BIT(10)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT			BIT(11)

#define MHI_REG_ACCESS_VALID(pm_state)			((pm_state) & (MHI_PM_POR | MHI_PM_M0 | \
						MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
						MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
						MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))
/* Error states occupy the highest bits, so a numeric compare suffices */
#define MHI_PM_IN_ERROR_STATE(pm_state)			((pm_state) >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state)			((pm_state) == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl)			((mhi_cntrl)->pm_state & (mhi_cntrl)->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state)		((pm_state) & (MHI_PM_M0 | \
							MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state)			((pm_state) & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state)		MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state)		((pm_state) == MHI_PM_DISABLE || \
							MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state)		((pm_state) & \
							(MHI_PM_M3_ENTER | MHI_PM_M3))
126 
/* Only one command ring (the primary) is used by this host */
#define NR_OF_CMD_RINGS					1
#define CMD_EL_PER_RING					128
#define PRIMARY_CMD_RING				0
/* Doorbell index reserved for device wake */
#define MHI_DEV_WAKE_DB					127
#define MHI_MAX_MTU					0xffff
/* Random u32 in [1, bmsk] - guaranteed nonzero */
#define MHI_RANDOM_U32_NONZERO(bmsk)			(get_random_u32_inclusive(1, bmsk))
133 
/* Event ring type field as written to the shared event ring context */
enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};
138 
/**
 * struct db_cfg - Doorbell configuration and ring method
 * @reset_req: db_mode needs re-arming on reset (NOTE(review): confirm)
 * @db_mode: current doorbell mode flag
 * @pollcfg: poll configuration, presumably used with burst mode - confirm
 * @brstmode: burst mode setting (enable/disable)
 * @db_val: last doorbell value passed to @process_db
 * @process_db: method used to ring the doorbell (burst vs non-burst variant)
 */
struct db_cfg {
	bool reset_req;
	bool db_mode;
	u32 pollcfg;
	enum mhi_db_brst_mode brstmode;
	dma_addr_t db_val;
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};
149 
/**
 * struct mhi_pm_transitions - One row of the PM state transition table
 * @from_state: current internal PM state
 * @to_states: bitmask (MHI_PM_*) of states reachable from @from_state
 */
struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;
};
154 
/**
 * struct state_transition - Queued device state transition work item
 * @node: membership in the transition work list
 * @state: transition to execute
 */
struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};
159 
/**
 * struct mhi_ring - Host-side bookkeeping for one MHI ring
 * @dma_handle: DMA address of the allocated ring buffer
 * @iommu_base: ring base address as programmed for the device
 * @ctxt_wp: pointer into the shared context where the write pointer lives
 * @pre_aligned: original allocation pointer backing @base (pre-alignment)
 * @base: ring base used for element access
 * @rp: host read pointer (virtual address)
 * @wp: host write pointer (virtual address)
 * @el_size: size of one ring element in bytes
 * @len: ring length in bytes
 * @elements: number of elements in the ring
 * @alloc_size: size of the @pre_aligned allocation
 * @db_addr: doorbell register address for this ring
 */
struct mhi_ring {
	dma_addr_t dma_handle;
	dma_addr_t iommu_base;
	__le64 *ctxt_wp; /* point to ctxt wp */
	void *pre_aligned;
	void *base;
	void *rp;
	void *wp;
	size_t el_size;
	size_t len;
	size_t elements;
	size_t alloc_size;
	void __iomem *db_addr;
};
174 
/**
 * struct mhi_cmd - Command ring plus the lock serializing access to it
 * @ring: command ring storage
 * @lock: protects @ring updates
 */
struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};
179 
/**
 * struct mhi_buf_info - Per-buffer bookkeeping for a queued transfer
 * @v_addr: virtual address of the client buffer
 * @bb_addr: bounce buffer address, used by the *_use_bb map/unmap paths
 * @wp: ring position associated with this buffer
 * @cb_buf: cookie handed back to the client on completion
 * @p_addr: DMA address of the mapped buffer
 * @len: buffer length in bytes
 * @dir: DMA direction of the transfer
 * @used: whether the buffer is used or not
 * @pre_mapped: already pre-mapped by client, skip mapping here
 */
struct mhi_buf_info {
	void *v_addr;
	void *bb_addr;
	void *wp;
	void *cb_buf;
	dma_addr_t p_addr;
	size_t len;
	enum dma_data_direction dir;
	bool used; /* Indicates whether the buffer is used or not */
	bool pre_mapped; /* Already pre-mapped by client */
};
191 
/**
 * struct mhi_event - One event ring and its processing context
 * @mhi_cntrl: controller this event ring belongs to
 * @mhi_chan: set when the ring is dedicated to a single channel
 * @er_index: event ring index
 * @intmod: interrupt moderation value
 * @irq: IRQ vector servicing this ring
 * @chan: channel number the ring is dedicated to (optional)
 * @priority: event ring priority (NOTE(review): scheduling semantics not
 *            visible here - confirm against users)
 * @data_type: ring payload kind, selects ctrl vs data processing path
 * @ring: backing ring storage
 * @db_cfg: doorbell configuration for this ring
 * @task: tasklet that drains the ring
 * @lock: protects ring processing
 * @process_event: handler that consumes up to @event_quota events
 * @hw_ring: presumably marks rings backing hardware channels - confirm
 * @cl_manage: presumably the client manages buffers on this ring - confirm
 * @offload_ev: ring managed by a device driver, not this core
 */
struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;
	u32 intmod;
	u32 irq;
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;
	spinlock_t lock;
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
};
212 
/**
 * struct mhi_chan - Host-side state for one MHI channel
 * @name: channel name
 * @buf_ring: ring tracking the client buffers queued on the channel
 * @tre_ring: ring of transfer ring elements shared with the device
 * @chan: channel number
 * @er_index: index of the event ring servicing this channel
 * @intmod: interrupt moderation value
 * @type: channel type
 * @dir: DMA direction of the channel
 * @db_cfg: doorbell configuration
 * @ee_mask: execution environments in which the channel is usable
 * @ch_state: current channel state
 * @ccs: last completion code seen for this channel
 * @mhi_dev: MHI device bound to this channel
 * @xfer_cb: client callback invoked with transfer results
 * @mutex: serializes channel configuration operations
 * @completion: signalled when a channel command completes
 * @lock: protects ring accesses
 * @node: list membership
 * @lpm_notify: notify the client on low-power mode transitions
 * @configured: channel has been configured
 * @offload_ch: channel is managed outside this core
 * @pre_alloc: inbound buffers are allocated and queued automatically
 * @wake_capable: channel is wake-capable
 */
struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;
	struct mhi_ring tre_ring;
	u32 chan;
	u32 er_index;
	u32 intmod;
	enum mhi_ch_type type;
	enum dma_data_direction dir;
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;
	enum mhi_ch_state ch_state;
	enum mhi_ev_ccs ccs;
	struct mhi_device *mhi_dev;
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;
	struct completion completion;
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;
	bool configured;
	bool offload_ch;
	bool pre_alloc;
	bool wake_capable;
};
243 
244 /* Default MHI timeout */
245 #define MHI_TIMEOUT_MS (1000)
246 
247 /* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
/* No-op stubs so callers need no CONFIG_MHI_BUS_DEBUG conditionals */
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif
270 
/* Device allocation and enumeration */
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);

int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

/* BHI vector table (image_info) allocation for firmware/RDDM images */
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info);
280 
/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(u32 state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
/* Command ring and firmware download helpers */
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
298 static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
299 {
300 	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
301 		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
302 }
303 
/*
 * Report a wakeup event and bounce the controller's runtime PM count
 * (get then put) to trigger a resume cycle.
 */
static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}
310 
/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
/* _field variants operate on the register bits selected by @mask */
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 val, u32 delayus);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val);
/* Doorbell helpers for event, command and channel rings */
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);
336 
/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
/* RDDM (crash dump) preparation and firmware loading */
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
346 
/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags);

/* Channel context setup/teardown and channel reset */
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);
358 
/* Event processing methods (tasklet entry points and ring processors) */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);
366 
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

/* TRE generation and buffer DMA mapping (no_bb = direct, use_bb = bounce) */
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);
382 
383 #endif /* _MHI_INT_H */
384