xref: /freebsd/sys/dev/ufshci/ufshci_private.h (revision 4ba91e076ee84101112d8296785098ae31dac35e)
1 /*-
2  * Copyright (c) 2025, Samsung Electronics Co., Ltd.
3  * Written by Jaeyoon Choi
4  *
5  * SPDX-License-Identifier: BSD-2-Clause
6  */
7 
8 #ifndef __UFSHCI_PRIVATE_H__
9 #define __UFSHCI_PRIVATE_H__
10 
11 #ifdef _KERNEL
12 #include <sys/types.h>
13 #else /* !_KERNEL */
14 #include <stdbool.h>
15 #include <stdint.h>
16 #endif /* _KERNEL */
17 
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/bio.h>
21 #include <sys/bus.h>
22 #include <sys/counter.h>
23 #include <sys/kernel.h>
24 #include <sys/lock.h>
25 #include <sys/malloc.h>
26 #include <sys/memdesc.h>
27 #include <sys/module.h>
28 #include <sys/mutex.h>
29 #include <sys/rman.h>
30 #include <sys/taskqueue.h>
31 
32 #include <machine/bus.h>
33 
34 #include "ufshci.h"
35 
36 MALLOC_DECLARE(M_UFSHCI);
37 
/*
 * Driver-wide tunables: command timeouts, the sysctl-adjustable range for the
 * per-queue timeout period, default retry count, and the fixed slot counts of
 * the UTP Transfer Request (UTR) and Task Management (UTRM) lists.
 */
#define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
#define UFSHCI_UIC_CMD_TIMEOUT_MS     (500)  /* in milliseconds */
#define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10)   /* in seconds */
#define UFSHCI_MIN_TIMEOUT_PERIOD     (5)    /* in seconds */
#define UFSHCI_MAX_TIMEOUT_PERIOD     (120)  /* in seconds */

#define UFSHCI_DEFAULT_RETRY_COUNT    (4)

#define UFSHCI_UTR_ENTRIES	      (32)
#define UFSHCI_UTRM_ENTRIES	      (8)
48 
49 struct ufshci_controller;
50 
/*
 * Status shared between a polled submitter and the completion callback
 * (ufshci_completion_poll_cb).  The callback fills in 'cpl'/'error' and then
 * sets 'done'; ufshci_completion_poll() spins on 'done' with acquire
 * semantics, so 'done' acts as the release/acquire flag for the other fields.
 */
struct ufshci_completion_poll_status {
	struct ufshci_completion cpl; /* copy of the completion record */
	int done;		      /* set non-zero by the callback */
	bool error;		      /* true if the command failed */
};
56 
/*
 * One outstanding UPIU request.  Allocated by the ufshci_allocate_request_*()
 * helpers below (M_ZERO'd, with 'timeout' enabled) and freed with
 * ufshci_free_request().
 */
struct ufshci_request {
	struct ufshci_upiu request_upiu; /* request UPIU to be sent */
	size_t request_size;		 /* valid bytes in request_upiu */
	size_t response_size;		 /* expected response UPIU size */

	/* Data buffer descriptor; only meaningful when payload_valid. */
	struct memdesc payload;
	enum ufshci_data_direction data_direction;
	ufshci_cb_fn_t cb_fn; /* completion callback */
	void *cb_arg;	      /* opaque argument passed to cb_fn */
	bool is_admin;	      /* submit via the admin (reserved) slot path */
	int32_t retries;      /* remaining retry budget */
	bool payload_valid;   /* 'payload' describes a data buffer */
	bool timeout;	      /* subject to the queue timeout handler */
	bool spare[2]; /* Future use */
	STAILQ_ENTRY(ufshci_request) stailq;
};
73 
/* Lifecycle state of a request slot (tracked per ufshci_tracker). */
enum ufshci_slot_state {
	UFSHCI_SLOT_STATE_FREE = 0x0,
	UFSHCI_SLOT_STATE_RESERVED = 0x1,
	UFSHCI_SLOT_STATE_SCHEDULED = 0x2,
	UFSHCI_SLOT_STATE_TIMEOUT = 0x3,
	UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4,
};
81 
/*
 * Per-slot bookkeeping for an in-flight request: links the request to its
 * queue/slot, DMA mappings, and the UTP command descriptor for that slot.
 */
struct ufshci_tracker {
	struct ufshci_request *req;	   /* request occupying this slot */
	struct ufshci_req_queue *req_queue;
	struct ufshci_hw_queue *hwq;
	uint8_t slot_num;		   /* slot index within the queue */
	enum ufshci_slot_state slot_state;
	size_t response_size;		   /* actual response UPIU size */
	sbintime_t deadline;		   /* absolute timeout deadline */

	/* DMA mapping for the request's data payload. */
	bus_dmamap_t payload_dma_map;
	uint64_t payload_addr;

	/* UTP command descriptor for this slot and its bus address. */
	struct ufshci_utp_cmd_desc *ucd;
	bus_addr_t ucd_bus_addr;

	/* PRDT location (offset) and entry count within the UCD. */
	uint16_t prdt_off;
	uint16_t prdt_entry_cnt;
};
100 
/* Request submission scheme selected for a queue. */
enum ufshci_queue_mode {
	UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
	UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
};
105 
106 /*
107  * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
108  * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To
109  * minimize duplicated code between SDB and MCQ, mode dependent operations are
110  * extracted into ufshci_qops.
111  */
struct ufshci_qops {
	/* Allocate queue memory/trackers for SDB or MCQ operation. */
	int (*construct)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue, uint32_t num_entries,
	    bool is_task_mgmt);
	/* Release everything construct() allocated. */
	void (*destroy)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue);
	/* Map a request queue to the hardware queue servicing it. */
	struct ufshci_hw_queue *(*get_hw_queue)(
	    struct ufshci_req_queue *req_queue);
	/* Program the controller registers and start the queue. */
	int (*enable)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue);
	/* Claim a free slot; returns via *tr. */
	int (*reserve_slot)(struct ufshci_req_queue *req_queue,
	    struct ufshci_tracker **tr);
	/* Claim the slot reserved for admin commands. */
	int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
	    struct ufshci_tracker **tr);
	/* Notify the controller that tr's slot is ready to execute. */
	void (*ring_doorbell)(struct ufshci_controller *ctrlr,
	    struct ufshci_tracker *tr);
	/* True once the controller has consumed the given slot. */
	bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
	    uint8_t slot);
	/* Acknowledge the completion notification for tr's slot. */
	void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
	    struct ufshci_tracker *tr);
	/* Reap completed slots; returns whether any were processed. */
	bool (*process_cpl)(struct ufshci_req_queue *req_queue);
	/* Number of commands currently outstanding in hardware. */
	int (*get_inflight_io)(struct ufshci_controller *ctrlr);
};
135 
136 #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
137 
138 /*
139  * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
140  * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
141  * cq_head are not used in SDB but used in MCQ.
142  */
struct ufshci_hw_queue {
	uint32_t id;	/* hardware queue index */
	int domain;	/* NUMA domain for allocations */
	int cpu;	/* CPU this queue is bound to */

	/* Descriptor ring: transfer (UTR) or task-management (UTMR) flavor. */
	union {
		struct ufshci_utp_xfer_req_desc *utrd;
		struct ufshci_utp_task_mgmt_req_desc *utmrd;
	};

	/* DMA resources backing the descriptor ring. */
	bus_dma_tag_t dma_tag_queue;
	bus_dmamap_t queuemem_map;
	bus_addr_t req_queue_addr;

	/* Per-slot UTP command descriptor bus addresses. */
	bus_addr_t *ucd_bus_addr;

	uint32_t num_entries;	/* descriptor ring size */
	uint32_t num_trackers;	/* usable slots (may be < num_entries) */

	/*
	 * A Request List using the single doorbell method uses a dedicated
	 * ufshci_tracker, one per slot.
	 */
	struct ufshci_tracker **act_tr;

	uint32_t sq_head; /* MCQ mode */
	uint32_t sq_tail; /* MCQ mode */
	uint32_t cq_head; /* MCQ mode */

	uint32_t phase;			/* completion phase bit (MCQ) */

	/* Statistics counters. */
	int64_t num_cmds;
	int64_t num_intr_handler_calls;
	int64_t num_retries;
	int64_t num_failures;

	struct mtx_padalign qlock;	/* protects queue state above */
};
180 
struct ufshci_req_queue {
	struct ufshci_controller *ctrlr;	/* owning controller */
	int domain;				/* NUMA domain */

	/*
	 * queue_mode: active transfer scheme
	 * UFSHCI_Q_MODE_SDB - legacy single-doorbell list
	 * UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+)
	 */
	enum ufshci_queue_mode queue_mode;

	uint8_t num_q;			/* number of hardware queues */
	struct ufshci_hw_queue *hwq;	/* array of num_q hardware queues */

	struct ufshci_qops qops;	/* mode-specific operations */

	bool is_task_mgmt;		/* UTMR queue rather than UTR */
	uint32_t num_entries;
	uint32_t num_trackers;

	/* Shared DMA resource */
	struct ufshci_utp_cmd_desc *ucd;

	bus_dma_tag_t dma_tag_ucd;	/* tag for the UCD array */
	bus_dma_tag_t dma_tag_payload;	/* tag for request payloads */

	bus_dmamap_t ucdmem_map;
};
209 
/* State describing the attached UFS device (as opposed to the host). */
struct ufshci_device {
	uint32_t max_lun_count;	/* LUNs supported by the device */

	/* Descriptors read from the device during initialization. */
	struct ufshci_device_descriptor dev_desc;
	struct ufshci_geometry_descriptor geo_desc;

	uint32_t unipro_version;
};
218 
219 /*
220  * One of these per allocated device.
221  */
struct ufshci_controller {
	device_t dev;

	uint32_t quirks;	/* bitmask of the UFSHCI_QUIRK_* below */
#define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
	1 /* QEMU does not support UIC POWER MODE */
#define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
	2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
	4 /* Need to wait 1250us after power mode change */

	uint32_t ref_clk;	/* reference clock frequency selector */

	/* CAM attachment. */
	struct cam_sim *ufshci_sim;
	struct cam_path *ufshci_path;

	struct mtx sc_mtx;
	uint32_t sc_unit;
	uint8_t sc_name[16];

	struct ufshci_device ufs_dev;	/* attached UFS device state */

	/* Register BAR mapping. */
	bus_space_tag_t bus_tag;
	bus_space_handle_t bus_handle;
	int resource_id;
	struct resource *resource;

	/* Currently, there is no UFSHCI that supports MSI, MSI-X.  */
	int msi_count;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook config_hook;

	/* For shared legacy interrupt. */
	int rid;
	struct resource *res;
	void *tag;

	/* UFSHCI spec version implemented by the host controller. */
	uint32_t major_version;
	uint32_t minor_version;

	uint32_t num_io_queues;
	uint32_t max_hw_pend_io;

	/* Maximum logical unit number */
	uint32_t max_lun_count;

	/* Maximum i/o size in bytes */
	uint32_t max_xfer_size;

	/*
	 * Controller capacity
	 * NOTE(review): presumably the host controller CAP (capabilities)
	 * register value; "capacity" looks like a typo - confirm.
	 */
	uint32_t cap;

	/* Page size and log2(page_size) - 12 that we're currently using */
	uint32_t page_size;

	/* Timeout value on device initialization */
	uint32_t device_init_timeout_in_ms;

	/* Timeout value on UIC command */
	uint32_t uic_cmd_timeout_in_ms;

	/* UTMR/UTR queue timeout period in seconds */
	uint32_t timeout_period;

	/* UTMR/UTR queue retry count */
	uint32_t retry_count;

	/* UFS Host Controller Interface Registers */
	struct ufshci_registers *regs;

	/* UFS Transport Protocol Layer (UTP) */
	struct ufshci_req_queue task_mgmt_req_queue;
	struct ufshci_req_queue transfer_req_queue;
	bool is_single_db_supported; /* 0 = supported */
	bool is_mcq_supported;	     /* 1 = supported */

	/* UFS Interconnect Layer (UIC) */
	struct mtx uic_cmd_lock;
	uint32_t unipro_version;
	uint8_t hs_gear;	/* negotiated high-speed gear */
	uint32_t tx_lanes;
	uint32_t rx_lanes;
	uint32_t max_rx_hs_gear;
	uint32_t max_tx_lanes;
	uint32_t max_rx_lanes;

	bool is_failed;		/* controller declared dead; fail requests */
};
311 
/* Byte offset of a named register within struct ufshci_registers. */
#define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)

/* 32-bit MMIO read of a named controller register. */
#define ufshci_mmio_read_4(sc, reg)                       \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    ufshci_mmio_offsetof(reg))

/* 32-bit MMIO write of a named controller register. */
#define ufshci_mmio_write_4(sc, reg, val)                  \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    ufshci_mmio_offsetof(reg), val)

/* device_printf() wrapper keyed to this controller's device. */
#define ufshci_printf(ctrlr, fmt, args...) \
	device_printf(ctrlr->dev, fmt, ##args)
324 
/* UFSHCI */
/* Completion callback used with ufshci_completion_poll() below. */
void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
    bool error);

/* SIM */
/* CAM SIM attach/detach for exposing LUNs to the SCSI layer. */
int ufshci_sim_attach(struct ufshci_controller *ctrlr);
void ufshci_sim_detach(struct ufshci_controller *ctrlr);

/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);

/* Submission entry points for the three request classes. */
int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr);
348 
/* Dump controller registers for debugging. */
void ufshci_reg_dump(struct ufshci_controller *ctrlr);

/* Device */
/* Device bring-up sequence: reset, clocks, UniPro, power modes, descriptors. */
int ufshci_dev_init(struct ufshci_controller *ctrlr);
int ufshci_dev_reset(struct ufshci_controller *ctrlr);
int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
359 
/* Controller Command */
/* Builders that construct a request UPIU and submit it with a callback. */
void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
    uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
    uint32_t data_len, uint8_t lun, bool is_write);
371 
/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
/* Construct/destroy/enable pairs for the UTMR and UTR queues. */
int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
/* Fail all outstanding trackers on a dead queue. */
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin);
void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);

/* Request Single Doorbell Queue */
/* SDB implementations of the ufshci_qops callbacks. */
int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries,
    bool is_task_mgmt);
void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
    struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr);
void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot);
bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot);
void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
412 
/* UIC Command */
/* DME primitives over the UIC command interface (local and peer). */
int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
    uint32_t *return_value);
int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
    uint32_t value);
int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t *return_value);
int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t value);
int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);

/* SYSCTL */
void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);

/* Bus attach/detach entry points shared by bus front-ends. */
int ufshci_attach(device_t dev);
int ufshci_detach(device_t dev);
432 
433 /*
434  * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
435  * limited contexts where the caller knows it's OK to block briefly while the
436  * command runs. The ISR will run the callback which will set status->done to
437  * true, usually within microseconds. If not, then after one second timeout
438  * handler should reset the controller and abort all outstanding requests
439  * including this polled one. If still not after ten seconds, then something is
440  * wrong with the driver, and panic is the only way to recover.
441  *
442  * Most commands using this interface aren't actual I/O to the drive's media so
443  * complete within a few microseconds. Adaptively spin for one tick to catch the
444  * vast majority of these without waiting for a tick plus scheduling delays.
445  * Since these are on startup, this drastically reduces startup time.
446  */
447 static __inline void
448 ufshci_completion_poll(struct ufshci_completion_poll_status *status)
449 {
450 	int timeout = ticks + 10 * hz;
451 	sbintime_t delta_t = SBT_1US;
452 
453 	while (!atomic_load_acq_int(&status->done)) {
454 		if (timeout - ticks < 0)
455 			panic(
456 			    "UFSHCI polled command failed to complete within 10s.");
457 		pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
458 		delta_t = min(SBT_1MS, delta_t * 3 / 2);
459 	}
460 }
461 
462 static __inline void
463 ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
464 {
465 	uint64_t *bus_addr = (uint64_t *)arg;
466 
467 	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
468 	if (error != 0)
469 		printf("ufshci_single_map err %d\n", error);
470 	*bus_addr = seg[0].ds_addr;
471 }
472 
473 static __inline struct ufshci_request *
474 _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
475 {
476 	struct ufshci_request *req;
477 
478 	KASSERT(how == M_WAITOK || how == M_NOWAIT,
479 	    ("nvme_allocate_request: invalid how %d", how));
480 
481 	req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
482 	if (req != NULL) {
483 		req->cb_fn = cb_fn;
484 		req->cb_arg = cb_arg;
485 		req->timeout = true;
486 	}
487 	return (req);
488 }
489 
490 static __inline struct ufshci_request *
491 ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
492     const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
493 {
494 	struct ufshci_request *req;
495 
496 	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
497 	if (req != NULL) {
498 		if (payload_size) {
499 			req->payload = memdesc_vaddr(payload, payload_size);
500 			req->payload_valid = true;
501 		}
502 	}
503 	return (req);
504 }
505 
506 static __inline struct ufshci_request *
507 ufshci_allocate_request_bio(struct bio *bio, const int how,
508     ufshci_cb_fn_t cb_fn, void *cb_arg)
509 {
510 	struct ufshci_request *req;
511 
512 	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
513 	if (req != NULL) {
514 		req->payload = memdesc_bio(bio);
515 		req->payload_valid = true;
516 	}
517 	return (req);
518 }
519 
/* Release a request obtained from the ufshci_allocate_request_*() helpers. */
#define ufshci_free_request(req) free(req, M_UFSHCI)

/* Shared legacy interrupt handler (arg is the ufshci_controller). */
void ufshci_ctrlr_shared_handler(void *arg);
523 
524 #endif /* __UFSHCI_PRIVATE_H__ */
525