/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#ifndef _MHI_INT_H
#define _MHI_INT_H

#include "../common.h"

extern struct bus_type mhi_bus_type;

/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET			0xb0
#define MHI_SOC_RESET_REQ				BIT(0)

struct mhi_ctxt {
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	dma_addr_t er_ctxt_addr;
	dma_addr_t chan_ctxt_addr;
	dma_addr_t cmd_ctxt_addr;
};

struct bhi_vec_entry {
	u64 dma_addr;
	u64 size;
};

enum mhi_ch_state_type {
	MHI_CH_STATE_TYPE_RESET,
	MHI_CH_STATE_TYPE_STOP,
	MHI_CH_STATE_TYPE_START,
	MHI_CH_STATE_TYPE_MAX,
};

#define MHI_CH_STATE_TYPE_LIST				\
	ch_state_type(RESET,		"RESET")	\
	ch_state_type(STOP,		"STOP")		\
	ch_state_type_end(START,	"START")

extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
				     "INVALID_STATE" : \
				     mhi_ch_state_type_str[(state)])
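
/*
 * MHI_CH_STATE_TYPE_LIST above is an X-macro list: a consumer defines
 * ch_state_type() and ch_state_type_end() before expanding it. As a sketch
 * (the actual consumer lives in the MHI core sources and may differ in
 * detail), the string table declared above could be generated like this:
 *
 *	#define ch_state_type(a, b)	[MHI_CH_STATE_TYPE_##a] = b,
 *	#define ch_state_type_end(a, b)	[MHI_CH_STATE_TYPE_##a] = b
 *
 *	const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
 *		MHI_CH_STATE_TYPE_LIST
 *	};
 */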

#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
				    mode != MHI_DB_BRST_ENABLE)

#define MHI_EE_LIST						\
	mhi_ee(PBL,			"PRIMARY BOOTLOADER")	\
	mhi_ee(SBL,			"SECONDARY BOOTLOADER")	\
	mhi_ee(AMSS,			"MISSION MODE")		\
	mhi_ee(RDDM,			"RAMDUMP DOWNLOAD MODE")\
	mhi_ee(WFW,			"WLAN FIRMWARE")	\
	mhi_ee(PTHRU,			"PASS THROUGH")		\
	mhi_ee(EDL,			"EMERGENCY DOWNLOAD")	\
	mhi_ee(FP,			"FLASH PROGRAMMER")	\
	mhi_ee(DISABLE_TRANSITION,	"DISABLE")		\
	mhi_ee_end(NOT_SUPPORTED,	"NOT SUPPORTED")

extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
			     "INVALID_EE" : mhi_ee_str[ee])

#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
			ee == MHI_EE_EDL)
#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
				 ee == MHI_EE_FP)

enum dev_st_transition {
	DEV_ST_TRANSITION_PBL,
	DEV_ST_TRANSITION_READY,
	DEV_ST_TRANSITION_SBL,
	DEV_ST_TRANSITION_MISSION_MODE,
	DEV_ST_TRANSITION_FP,
	DEV_ST_TRANSITION_SYS_ERR,
	DEV_ST_TRANSITION_DISABLE,
	DEV_ST_TRANSITION_MAX,
};

#define DEV_ST_TRANSITION_LIST					\
	dev_st_trans(PBL,		"PBL")			\
	dev_st_trans(READY,		"READY")		\
	dev_st_trans(SBL,		"SBL")			\
	dev_st_trans(MISSION_MODE,	"MISSION MODE")		\
	dev_st_trans(FP,		"FLASH PROGRAMMER")	\
	dev_st_trans(SYS_ERR,		"SYS ERROR")		\
	dev_st_trans_end(DISABLE,	"DISABLE")

extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
				"INVALID_STATE" : dev_state_tran_str[state])

/* internal power states */
enum mhi_pm_state {
	MHI_PM_STATE_DISABLE,
	MHI_PM_STATE_POR,
	MHI_PM_STATE_M0,
	MHI_PM_STATE_M2,
	MHI_PM_STATE_M3_ENTER,
	MHI_PM_STATE_M3,
	MHI_PM_STATE_M3_EXIT,
	MHI_PM_STATE_FW_DL_ERR,
	MHI_PM_STATE_SYS_ERR_DETECT,
	MHI_PM_STATE_SYS_ERR_PROCESS,
	MHI_PM_STATE_SYS_ERR_FAIL,
	MHI_PM_STATE_SHUTDOWN_PROCESS,
	MHI_PM_STATE_LD_ERR_FATAL_DETECT,
	MHI_PM_STATE_MAX
};

#define MHI_PM_STATE_LIST							\
	mhi_pm_state(DISABLE,			"DISABLE")			\
	mhi_pm_state(POR,			"POWER ON RESET")		\
	mhi_pm_state(M0,			"M0")				\
	mhi_pm_state(M2,			"M2")				\
	mhi_pm_state(M3_ENTER,			"M?->M3")			\
	mhi_pm_state(M3,			"M3")				\
	mhi_pm_state(M3_EXIT,			"M3->M0")			\
	mhi_pm_state(FW_DL_ERR,			"Firmware Download Error")	\
	mhi_pm_state(SYS_ERR_DETECT,		"SYS ERROR Detect")		\
	mhi_pm_state(SYS_ERR_PROCESS,		"SYS ERROR Process")		\
	mhi_pm_state(SYS_ERR_FAIL,		"SYS ERROR Failure")		\
	mhi_pm_state(SHUTDOWN_PROCESS,		"SHUTDOWN Process")		\
	mhi_pm_state_end(LD_ERR_FATAL_DETECT,	"Linkdown or Error Fatal Detect")

#define MHI_PM_DISABLE					BIT(0)
#define MHI_PM_POR					BIT(1)
#define MHI_PM_M0					BIT(2)
#define MHI_PM_M2					BIT(3)
#define MHI_PM_M3_ENTER					BIT(4)
#define MHI_PM_M3					BIT(5)
#define MHI_PM_M3_EXIT					BIT(6)
/* firmware download failure state */
#define MHI_PM_FW_DL_ERR				BIT(7)
#define MHI_PM_SYS_ERR_DETECT				BIT(8)
#define MHI_PM_SYS_ERR_PROCESS				BIT(9)
#define MHI_PM_SYS_ERR_FAIL				BIT(10)
#define MHI_PM_SHUTDOWN_PROCESS				BIT(11)
/* link not accessible */
#define MHI_PM_LD_ERR_FATAL_DETECT			BIT(12)
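
/*
 * The MHI_PM_* masks above mirror enum mhi_pm_state: each mask is BIT() of the
 * corresponding enum value, e.g. MHI_PM_M0 == BIT(MHI_PM_STATE_M0). A
 * single-bit pm_state can therefore be mapped back to a printable name through
 * a table expanded from MHI_PM_STATE_LIST; to_mhi_pm_state_str() declared below
 * is the real interface, and a minimal illustrative sketch (table name assumed)
 * would be:
 *
 *	const char *name = mhi_pm_state_str[__fls(pm_state)];
 */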

#define MHI_REG_ACCESS_VALID(pm_state)			((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
						MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
						MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
						MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |  \
						MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state)			(pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state)			(pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl)			(mhi_cntrl->pm_state & mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state)		(pm_state & (MHI_PM_M0 | \
							MHI_PM_M2 | MHI_PM_M3_EXIT))
#define MHI_WAKE_DB_SET_VALID(pm_state)			(pm_state & MHI_PM_M2)
#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state)		MHI_WAKE_DB_CLEAR_VALID(pm_state)
#define MHI_EVENT_ACCESS_INVALID(pm_state)		(pm_state == MHI_PM_DISABLE || \
							MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state)		(pm_state & \
							(MHI_PM_M3_ENTER | MHI_PM_M3))
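
/*
 * The pm_state checks above are evaluated against mhi_cntrl->pm_state under
 * mhi_cntrl->pm_lock. A minimal usage sketch (illustrative only, assuming the
 * register offset and value are already known):
 *
 *	read_lock_bh(&mhi_cntrl->pm_lock);
 *	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
 *		mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, offset, val);
 *	read_unlock_bh(&mhi_cntrl->pm_lock);
 */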

#define NR_OF_CMD_RINGS					1
#define CMD_EL_PER_RING					128
#define PRIMARY_CMD_RING				0
#define MHI_DEV_WAKE_DB					127
#define MHI_MAX_MTU					0xffff
#define MHI_RANDOM_U32_NONZERO(bmsk)			(get_random_u32_inclusive(1, bmsk))

enum mhi_er_type {
	MHI_ER_TYPE_INVALID = 0x0,
	MHI_ER_TYPE_VALID = 0x1,
};

struct db_cfg {
	bool reset_req;
	bool db_mode;
	u32 pollcfg;
	enum mhi_db_brst_mode brstmode;
	dma_addr_t db_val;
	void (*process_db)(struct mhi_controller *mhi_cntrl,
			   struct db_cfg *db_cfg, void __iomem *io_addr,
			   dma_addr_t db_val);
};

struct mhi_pm_transitions {
	enum mhi_pm_state from_state;
	u32 to_states;
};

struct state_transition {
	struct list_head node;
	enum dev_st_transition state;
};

struct mhi_ring {
	dma_addr_t dma_handle;
	dma_addr_t iommu_base;
	__le64 *ctxt_wp; /* point to ctxt wp */
	void *pre_aligned;
	void *base;
	void *rp;
	void *wp;
	size_t el_size;
	size_t len;
	size_t elements;
	size_t alloc_size;
	void __iomem *db_addr;
};
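
/*
 * In struct mhi_ring, rp and wp advance through [base, base + len) in steps of
 * el_size and wrap back to base at the end. A hypothetical helper illustrating
 * the wrap-around (the core uses its own internal ring helpers; this is only a
 * sketch):
 *
 *	static void *mhi_ring_next(struct mhi_ring *ring, void *ptr)
 *	{
 *		ptr += ring->el_size;
 *		if (ptr >= ring->base + ring->len)
 *			ptr = ring->base;
 *		return ptr;
 *	}
 */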

struct mhi_cmd {
	struct mhi_ring ring;
	spinlock_t lock;
};

struct mhi_buf_info {
	void *v_addr;
	void *bb_addr;
	void *wp;
	void *cb_buf;
	dma_addr_t p_addr;
	size_t len;
	enum dma_data_direction dir;
	bool used; /* Indicates whether the buffer is used or not */
	bool pre_mapped; /* Already pre-mapped by client */
};

struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *mhi_chan; /* dedicated to channel */
	u32 er_index;
	u32 intmod;
	u32 irq;
	int chan; /* this event ring is dedicated to a channel (optional) */
	u32 priority;
	enum mhi_er_data_type data_type;
	struct mhi_ring ring;
	struct db_cfg db_cfg;
	struct tasklet_struct task;
	spinlock_t lock;
	int (*process_event)(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota);
	bool hw_ring;
	bool cl_manage;
	bool offload_ev; /* managed by a device driver */
};

struct mhi_chan {
	const char *name;
	/*
	 * Important: When consuming, increment tre_ring first and when
	 * releasing, decrement buf_ring first. If tre_ring has space, buf_ring
	 * is guaranteed to have space so we do not need to check both rings.
	 */
	struct mhi_ring buf_ring;
	struct mhi_ring tre_ring;
	u32 chan;
	u32 er_index;
	u32 intmod;
	enum mhi_ch_type type;
	enum dma_data_direction dir;
	struct db_cfg db_cfg;
	enum mhi_ch_ee_mask ee_mask;
	enum mhi_ch_state ch_state;
	enum mhi_ev_ccs ccs;
	struct mhi_device *mhi_dev;
	void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result);
	struct mutex mutex;
	struct completion completion;
	rwlock_t lock;
	struct list_head node;
	bool lpm_notify;
	bool configured;
	bool offload_ch;
	bool pre_alloc;
	bool wake_capable;
};
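
/*
 * Sketch of the buf_ring/tre_ring ordering rule documented in struct mhi_chan
 * (helper names are illustrative, not the core's actual functions):
 *
 *	queue:    claim tre_ring->wp and advance tre_ring first,
 *	          then claim buf_ring->wp and advance buf_ring;
 *	complete: release the buf_ring element first, then the tre_ring element.
 *
 * Assuming both rings hold the same number of elements, this ordering means
 * space in tre_ring implies space in buf_ring, so only tre_ring is checked.
 */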

/* Default MHI timeout */
#define MHI_TIMEOUT_MS (1000)

/* debugfs related functions */
#ifdef CONFIG_MHI_BUS_DEBUG
void mhi_create_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl);
void mhi_debugfs_init(void);
void mhi_debugfs_exit(void);
#else
static inline void mhi_create_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_destroy_debugfs(struct mhi_controller *mhi_cntrl)
{
}

static inline void mhi_debugfs_init(void)
{
}

static inline void mhi_debugfs_exit(void)
{
}
#endif

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl);

int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);

int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info **image_info, size_t alloc_size);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
			 struct image_info *image_info);

/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
					struct mhi_controller *mhi_cntrl,
					enum mhi_pm_state state);
const char *to_mhi_pm_state_str(u32 state);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl);
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd);
int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
{
	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
{
	pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
}

/* Register access methods */
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg,
		     void __iomem *db_addr, dma_addr_t db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_mode, void __iomem *db_addr,
			     dma_addr_t db_val);
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out);
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 *out);
int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset, u32 mask,
				    u32 val, u32 delayus, u32 timeout_ms);
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val);
int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
				     void __iomem *base, u32 offset, u32 mask,
				     u32 val);
void mhi_ring_er_db(struct mhi_event *mhi_event);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan);

/* Initialization methods */
int mhi_init_mmio(struct mhi_controller *mhi_cntrl);
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl);
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl);
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
		      struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);

/* Automatically allocate and queue inbound buffers */
#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan, unsigned int flags);

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl,
		    struct mhi_chan *mhi_chan);

/* Event processing methods */
void mhi_ctrl_ev_task(unsigned long data);
void mhi_ev_task(unsigned long data);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event, u32 event_quota);

/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info);
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info);
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info);

#endif /* _MHI_INT_H */