xref: /freebsd/sys/dev/ufshci/ufshci_private.h (revision 9d0d55e398bfea1ebc2ef18e8d83edcf7e55f1b2)
1 /*-
2  * Copyright (c) 2025, Samsung Electronics Co., Ltd.
3  * Written by Jaeyoon Choi
4  *
5  * SPDX-License-Identifier: BSD-2-Clause
6  */
7 
8 #ifndef __UFSHCI_PRIVATE_H__
9 #define __UFSHCI_PRIVATE_H__
10 
11 #ifdef _KERNEL
12 #include <sys/types.h>
13 #else /* !_KERNEL */
14 #include <stdbool.h>
15 #include <stdint.h>
16 #endif /* _KERNEL */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/bio.h>
21 #include <sys/bus.h>
22 #include <sys/counter.h>
23 #include <sys/kernel.h>
24 #include <sys/lock.h>
25 #include <sys/malloc.h>
26 #include <sys/memdesc.h>
27 #include <sys/module.h>
28 #include <sys/mutex.h>
29 #include <sys/rman.h>
30 #include <sys/taskqueue.h>
31 
32 #include <machine/bus.h>
33 
34 #include "ufshci.h"
35 
36 MALLOC_DECLARE(M_UFSHCI);
37 
38 #define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
39 #define UFSHCI_UIC_CMD_TIMEOUT_MS     (500)  /* in milliseconds */
40 #define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10)   /* in seconds */
41 #define UFSHCI_MIN_TIMEOUT_PERIOD     (5)    /* in seconds */
42 #define UFSHCI_MAX_TIMEOUT_PERIOD     (120)  /* in seconds */
43 
44 #define UFSHCI_DEFAULT_RETRY_COUNT    (4)
45 
46 #define UFSHCI_UTR_ENTRIES	      (32)
47 #define UFSHCI_UTRM_ENTRIES	      (8)
48 
49 struct ufshci_controller;
50 
/*
 * Per-command status block used with ufshci_completion_poll(): the
 * completion callback (ufshci_completion_poll_cb) records the result
 * here and the poller spins until 'done' becomes non-zero.
 */
51 struct ufshci_completion_poll_status {
52 	struct ufshci_completion cpl; /* completion data (presumably filled
				       * by the callback — confirm against
				       * ufshci_completion_poll_cb()) */
53 	int done;  /* non-zero once complete; read with atomic_load_acq_int()
		    * in ufshci_completion_poll() */
54 	bool error; /* true if the command failed (callback's 'error' arg) */
55 };
56 
/*
 * One UTP request as tracked by the driver core.  Allocated by the
 * ufshci_allocate_request_*() helpers below; linked on a queue via
 * 'stailq' until a tracker slot is bound to it.
 */
57 struct ufshci_request {
58 	struct ufshci_upiu request_upiu; /* UPIU to be sent to the device */
59 	size_t request_size;             /* valid bytes in request_upiu */
60 	size_t response_size;            /* expected response UPIU size */
61 
62 	struct memdesc payload;          /* data buffer (vaddr or bio form) */
63 	enum ufshci_data_direction data_direction;
64 	ufshci_cb_fn_t cb_fn;            /* completion callback */
65 	void *cb_arg;                    /* opaque argument passed to cb_fn */
66 	bool is_admin;                   /* submitted via the admin path */
67 	int32_t retries;                 /* retry counter — semantics TODO
					  * confirm (count used vs. remaining) */
68 	bool payload_valid;              /* 'payload' has been initialized */
69 	bool timeout;                    /* subject to timeout handling;
					  * defaults to true at allocation */
70 	bool spare[2]; /* Future use */
71 	STAILQ_ENTRY(ufshci_request) stailq; /* pending-list linkage */
72 };
73 
/*
 * Lifecycle states of a tracker slot (struct ufshci_tracker::slot_state).
 * Per-state notes below are interpretations of the names; confirm against
 * the request-queue implementation.
 */
74 enum ufshci_slot_state {
75 	UFSHCI_SLOT_STATE_FREE = 0x0,      /* slot unused */
76 	UFSHCI_SLOT_STATE_RESERVED = 0x1,  /* reserved, not yet issued */
77 	UFSHCI_SLOT_STATE_SCHEDULED = 0x2, /* handed to the controller */
78 	UFSHCI_SLOT_STATE_TIMEOUT = 0x3,   /* exceeded its deadline */
79 	UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4, /* needs recovery */
80 };
81 
/*
 * Per-slot state for one outstanding request.  A hardware queue owns an
 * array of trackers (one per slot in single-doorbell mode; see act_tr).
 */
82 struct ufshci_tracker {
83 	struct ufshci_request *req;      /* request bound to this slot */
84 	struct ufshci_req_queue *req_queue; /* owning request queue */
85 	struct ufshci_hw_queue *hwq;     /* owning hardware queue */
86 	uint8_t slot_num;                /* slot index within the queue */
87 	enum ufshci_slot_state slot_state;
88 	size_t response_size;            /* expected response UPIU size */
89 	sbintime_t deadline;             /* timeout deadline — TODO confirm
					  * absolute vs. relative */
90 
91 	bus_dmamap_t payload_dma_map;    /* DMA map for the data payload */
92 	uint64_t payload_addr;           /* bus address of mapped payload */
93 
94 	struct ufshci_utp_cmd_desc *ucd; /* this slot's UTP command descriptor */
95 	bus_addr_t ucd_bus_addr;         /* bus address of that descriptor */
96 
97 	uint16_t prdt_off;               /* PRDT offset within the descriptor */
98 	uint16_t prdt_entry_cnt;         /* PRDT entries in use */
99 };
100 
/* Request submission scheme the controller is operating in. */
101 enum ufshci_queue_mode {
102 	UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
103 	UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
104 };
105 
106 /*
107  * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
108  * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To
109  * minimize duplicated code between SDB and MCQ, mode dependent operations are
110  * extracted into ufshci_qops.
111  */
112 struct ufshci_qops {
	/* Allocate queue memory and trackers for req_queue. */
113 	int (*construct)(struct ufshci_controller *ctrlr,
114 	    struct ufshci_req_queue *req_queue, uint32_t num_entries,
115 	    bool is_task_mgmt);
	/* Release everything construct() allocated. */
116 	void (*destroy)(struct ufshci_controller *ctrlr,
117 	    struct ufshci_req_queue *req_queue);
	/* Return the hardware queue backing req_queue. */
118 	struct ufshci_hw_queue *(*get_hw_queue)(
119 	    struct ufshci_req_queue *req_queue);
	/* Program/enable the queue on the controller. */
120 	int (*enable)(struct ufshci_controller *ctrlr,
121 	    struct ufshci_req_queue *req_queue);
	/* Reserve a free slot; tracker returned through *tr. */
122 	int (*reserve_slot)(struct ufshci_req_queue *req_queue,
123 	    struct ufshci_tracker **tr);
	/* Reserve a slot for an admin command. */
124 	int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
125 	    struct ufshci_tracker **tr);
	/* Tell the controller that tr's slot is ready to execute. */
126 	void (*ring_doorbell)(struct ufshci_controller *ctrlr,
127 	    struct ufshci_tracker *tr);
	/* Acknowledge/clear the completion notification for tr's slot. */
128 	void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
129 	    struct ufshci_tracker *tr);
	/* Drain pending completions; see
	 * ufshci_req_queue_process_completions() for return semantics. */
130 	bool (*process_cpl)(struct ufshci_req_queue *req_queue);
	/* Number of commands currently outstanding in hardware. */
131 	int (*get_inflight_io)(struct ufshci_controller *ctrlr);
132 };
133 
134 #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
135 
136 /*
137  * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
138  * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
139  * cq_head are not used in SDB but used in MCQ.
140  */
141 struct ufshci_hw_queue {
142 	uint32_t id;     /* queue index (UFSHCI_SDB_Q in SDB mode) */
143 	int domain;      /* NUMA domain for allocations */
144 	int cpu;         /* CPU affinity for this queue — TODO confirm */
145 
146 	struct ufshci_utp_xfer_req_desc *utrd; /* UTP transfer request
						* descriptor ring */
147 
148 	bus_dma_tag_t dma_tag_queue; /* DMA tag for the descriptor ring */
149 	bus_dmamap_t queuemem_map;   /* DMA map of the descriptor ring */
150 	bus_addr_t req_queue_addr;   /* bus address of the ring (programmed
				      * into the controller) */
151 
152 	uint32_t num_entries;  /* descriptor slots in the ring */
153 	uint32_t num_trackers; /* trackers allocated for this queue */
154 
155 	/*
156 	 * A Request List using the single doorbell method uses a dedicated
157 	 * ufshci_tracker, one per slot.
158 	 */
159 	struct ufshci_tracker **act_tr;
160 
161 	uint32_t sq_head; /* MCQ mode */
162 	uint32_t sq_tail; /* MCQ mode */
163 	uint32_t cq_head; /* MCQ mode */
164 
165 	uint32_t phase; /* completion phase bit for ring wrap-around —
			 * TODO confirm (MCQ only) */
	/* Statistics counters (debugging/sysctl). */
166 	int64_t num_cmds;
167 	int64_t num_intr_handler_calls;
168 	int64_t num_retries;
169 	int64_t num_failures;
170 
171 	struct mtx_padalign qlock; /* serializes access to this queue */
172 };
173 
174 struct ufshci_req_queue {
175 	struct ufshci_controller *ctrlr; /* back-pointer to the controller */
176 	int domain;                      /* NUMA domain for allocations */
177 
178 	/*
179 	 * queue_mode: active transfer scheme
180 	 * UFSHCI_Q_MODE_SDB - legacy single-doorbell list
181 	 * UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+)
182 	 */
183 	enum ufshci_queue_mode queue_mode;
184 
185 	uint8_t num_q;                /* number of hardware queues in hwq */
186 	struct ufshci_hw_queue *hwq;
187 
188 	struct ufshci_qops qops;      /* mode-specific (SDB/MCQ) operations */
189 
190 	bool is_task_mgmt;            /* true for the task-management queue */
191 	uint32_t num_entries;
192 	uint32_t num_trackers;
193 
194 	/* Shared DMA resource: the UTP command descriptor array. */
195 	struct ufshci_utp_cmd_desc *ucd;
196 
197 	bus_dma_tag_t dma_tag_ucd;     /* tag for the descriptor array */
198 	bus_dma_tag_t dma_tag_payload; /* tag for per-request payloads */
199 
200 	bus_dmamap_t ucdmem_map;       /* map of the descriptor array */
201 
202 	bus_addr_t ucd_addr;           /* bus address of the descriptor array */
203 };
204 
/*
 * Cached state describing the attached UFS device (as opposed to the
 * host controller itself).
 */
205 struct ufshci_device {
206 	uint32_t max_lun_count; /* LUN limit of the device (see also
				 * ufshci_controller::max_lun_count) */
207 
208 	struct ufshci_device_descriptor dev_desc;   /* cached Device descriptor */
209 	struct ufshci_geometry_descriptor geo_desc; /* cached Geometry descriptor */
210 
211 	uint32_t unipro_version; /* UniPro version reported by the device */
212 };
213 
214 /*
215  * One of these per allocated device.
216  */
217 struct ufshci_controller {
218 	device_t dev; /* newbus device handle */
219 
	/* Bitmask of UFSHCI_QUIRK_* workarounds active for this part. */
220 	uint32_t quirks;
221 #define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
222 	1 /* QEMU does not support UIC POWER MODE */
223 #define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
224 	2 /* Need an additional 200 ms of PA_TActivate */
225 #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
226 	4 /* Need to wait 1250us after power mode change */
227 
228 	uint32_t ref_clk; /* reference clock setting — TODO confirm units
			   * (Hz vs. an enum selector) */
229 
	/* CAM SCSI transport glue (see ufshci_sim_attach()). */
230 	struct cam_sim *ufshci_sim;
231 	struct cam_path *ufshci_path;
232 
233 	struct mtx sc_mtx;       /* softc lock */
234 	uint32_t sc_unit;        /* unit number */
235 	uint8_t sc_name[16];     /* device name for diagnostics */
236 
237 	struct ufshci_device ufs_dev; /* state of the attached UFS device */
238 
	/* Register BAR mapping used by the ufshci_mmio_* accessors. */
239 	bus_space_tag_t bus_tag;
240 	bus_space_handle_t bus_handle;
241 	int resource_id;
242 	struct resource *resource;
243 
244 	/* Currently, there is no UFSHCI that supports MSI, MSI-X.  */
245 	int msi_count;
246 
247 	/* Fields for tracking progress during controller initialization. */
248 	struct intr_config_hook config_hook;
249 
250 	/* For shared legacy interrupt. */
251 	int rid;
252 	struct resource *res;
253 	void *tag;
254 
	/* UFSHCI specification version implemented by the host controller. */
255 	uint32_t major_version;
256 	uint32_t minor_version;
257 
258 	uint32_t num_io_queues;  /* I/O queues in use */
259 	uint32_t max_hw_pend_io; /* max commands outstanding in hardware */
260 
261 	/* Maximum logical unit number */
262 	uint32_t max_lun_count;
263 
264 	/* Maximum i/o size in bytes */
265 	uint32_t max_xfer_size;
266 
267 	/* Controller capacity (likely the CAP capabilities register value —
	 * TODO confirm) */
268 	uint32_t cap;
269 
270 	/* Page size and log2(page_size) - 12 that we're currently using */
271 	uint32_t page_size;
272 
273 	/* Timeout value on device initialization */
274 	uint32_t device_init_timeout_in_ms;
275 
276 	/* Timeout value on UIC command */
277 	uint32_t uic_cmd_timeout_in_ms;
278 
279 	/* UTMR/UTR queue timeout period in seconds */
280 	uint32_t timeout_period;
281 
282 	/* UTMR/UTR queue retry count */
283 	uint32_t retry_count;
284 
285 	/* UFS Host Controller Interface Registers */
286 	struct ufshci_registers *regs;
287 
288 	/* UFS Transport Protocol Layer (UTP) */
289 	struct ufshci_req_queue task_mgmt_req_queue;
290 	struct ufshci_req_queue transfer_req_queue;
291 	bool is_single_db_supported; /* 0 = supported */
292 	bool is_mcq_supported;	     /* 1 = supported */
293 
294 	/* UFS Interconnect Layer (UIC) */
295 	struct mtx uic_cmd_lock; /* serializes UIC command issue */
296 	uint32_t unipro_version;
297 	uint8_t hs_gear;         /* negotiated high-speed gear */
298 	uint32_t tx_lanes;       /* active lanes */
299 	uint32_t rx_lanes;
300 	uint32_t max_rx_hs_gear; /* capabilities reported by the link */
301 	uint32_t max_tx_lanes;
302 	uint32_t max_rx_lanes;
303 
304 	bool is_failed; /* controller marked failed; pending requests are
			 * aborted (see ufshci_req_queue_fail()) */
305 };
306 
307 #define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)
308 
309 #define ufshci_mmio_read_4(sc, reg)                       \
310 	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
311 	    ufshci_mmio_offsetof(reg))
312 
313 #define ufshci_mmio_write_4(sc, reg, val)                  \
314 	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
315 	    ufshci_mmio_offsetof(reg), val)
316 
317 #define ufshci_printf(ctrlr, fmt, args...) \
318 	device_printf(ctrlr->dev, fmt, ##args)
319 
320 /* UFSHCI */
321 void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
322     bool error);
323 
324 /* SIM */
325 int ufshci_sim_attach(struct ufshci_controller *ctrlr);
326 void ufshci_sim_detach(struct ufshci_controller *ctrlr);
327 
328 /* Controller */
329 int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
330 void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
331 int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
332 /* ctrlr defined as void * to allow use with config_intrhook. */
333 void ufshci_ctrlr_start_config_hook(void *arg);
334 void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
335 
336 int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
337     struct ufshci_request *req);
338 int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
339     struct ufshci_request *req);
340 int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr);
341 
342 void ufshci_reg_dump(struct ufshci_controller *ctrlr);
343 
344 /* Device */
345 int ufshci_dev_init(struct ufshci_controller *ctrlr);
346 int ufshci_dev_reset(struct ufshci_controller *ctrlr);
347 int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
348 int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
349 int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
350 int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
351 int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
352 
353 /* Controller Command */
354 void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
355     ufshci_cb_fn_t cb_fn, void *cb_arg);
356 void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
357     ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
358 void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
359     ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
360     uint32_t data_len, uint8_t lun, bool is_write);
361 
362 /* Request Queue */
363 bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
364 int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
365 int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
366 void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
367 void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
368 int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
369 int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
370 void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
371     struct ufshci_hw_queue *hwq);
372 int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
373     struct ufshci_request *req, bool is_admin);
374 void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);
375 
376 /* Request Single Doorbell Queue */
377 int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
378     struct ufshci_req_queue *req_queue, uint32_t num_entries,
379     bool is_task_mgmt);
380 void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
381     struct ufshci_req_queue *req_queue);
382 struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
383     struct ufshci_req_queue *req_queue);
384 int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
385     struct ufshci_req_queue *req_queue);
386 int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
387     struct ufshci_tracker **tr);
388 void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
389     struct ufshci_tracker *tr);
390 void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
391     struct ufshci_tracker *tr);
392 bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
393 int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
394 
395 /* UIC Command */
396 int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
397 int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
398 int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
399 int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
400     uint32_t *return_value);
401 int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
402     uint32_t value);
403 int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
404     uint16_t attribute, uint32_t *return_value);
405 int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
406     uint16_t attribute, uint32_t value);
407 int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
408 
409 /* SYSCTL */
410 void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);
411 
412 int ufshci_attach(device_t dev);
413 int ufshci_detach(device_t dev);
414 
415 /*
416  * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
417  * limited contexts where the caller knows it's OK to block briefly while the
418  * command runs. The ISR will run the callback which will set status->done to
419  * true, usually within microseconds. If not, then after one second timeout
420  * handler should reset the controller and abort all outstanding requests
421  * including this polled one. If still not after ten seconds, then something is
422  * wrong with the driver, and panic is the only way to recover.
423  *
424  * Most commands using this interface aren't actual I/O to the drive's media so
425  * complete within a few microseconds. Adaptively spin for one tick to catch the
426  * vast majority of these without waiting for a tick plus scheduling delays.
427  * Since these are on startup, this drastically reduces startup time.
428  */
429 static __inline void
ufshci_completion_poll(struct ufshci_completion_poll_status * status)430 ufshci_completion_poll(struct ufshci_completion_poll_status *status)
431 {
432 	int timeout = ticks + 10 * hz;
433 	sbintime_t delta_t = SBT_1US;
434 
435 	while (!atomic_load_acq_int(&status->done)) {
436 		if (timeout - ticks < 0)
437 			panic(
438 			    "UFSHCI polled command failed to complete within 10s.");
439 		pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
440 		delta_t = min(SBT_1MS, delta_t * 3 / 2);
441 	}
442 }
443 
444 static __inline void
ufshci_single_map(void * arg,bus_dma_segment_t * seg,int nseg,int error)445 ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
446 {
447 	uint64_t *bus_addr = (uint64_t *)arg;
448 
449 	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
450 	if (error != 0)
451 		printf("ufshci_single_map err %d\n", error);
452 	*bus_addr = seg[0].ds_addr;
453 }
454 
455 static __inline struct ufshci_request *
_ufshci_allocate_request(const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)456 _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
457 {
458 	struct ufshci_request *req;
459 
460 	KASSERT(how == M_WAITOK || how == M_NOWAIT,
461 	    ("nvme_allocate_request: invalid how %d", how));
462 
463 	req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
464 	if (req != NULL) {
465 		req->cb_fn = cb_fn;
466 		req->cb_arg = cb_arg;
467 		req->timeout = true;
468 	}
469 	return (req);
470 }
471 
472 static __inline struct ufshci_request *
ufshci_allocate_request_vaddr(void * payload,uint32_t payload_size,const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)473 ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
474     const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
475 {
476 	struct ufshci_request *req;
477 
478 	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
479 	if (req != NULL) {
480 		if (payload_size) {
481 			req->payload = memdesc_vaddr(payload, payload_size);
482 			req->payload_valid = true;
483 		}
484 	}
485 	return (req);
486 }
487 
488 static __inline struct ufshci_request *
ufshci_allocate_request_bio(struct bio * bio,const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)489 ufshci_allocate_request_bio(struct bio *bio, const int how,
490     ufshci_cb_fn_t cb_fn, void *cb_arg)
491 {
492 	struct ufshci_request *req;
493 
494 	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
495 	if (req != NULL) {
496 		req->payload = memdesc_bio(bio);
497 		req->payload_valid = true;
498 	}
499 	return (req);
500 }
501 
502 #define ufshci_free_request(req) free(req, M_UFSHCI)
503 
504 void ufshci_ctrlr_shared_handler(void *arg);
505 
506 #endif /* __UFSHCI_PRIVATE_H__ */
507