1 /*-
2 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
3 * Written by Jaeyoon Choi
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8 #ifndef __UFSHCI_PRIVATE_H__
9 #define __UFSHCI_PRIVATE_H__
10
11 #ifdef _KERNEL
12 #include <sys/types.h>
13 #else /* !_KERNEL */
14 #include <stdbool.h>
15 #include <stdint.h>
16 #endif /* _KERNEL */
17
18 #include <sys/param.h>
19 #include <sys/systm.h>
20 #include <sys/bio.h>
21 #include <sys/bus.h>
22 #include <sys/counter.h>
23 #include <sys/kernel.h>
24 #include <sys/lock.h>
25 #include <sys/malloc.h>
26 #include <sys/memdesc.h>
27 #include <sys/module.h>
28 #include <sys/mutex.h>
29 #include <sys/rman.h>
30 #include <sys/taskqueue.h>
31
32 #include <machine/bus.h>
33
34 #include "ufshci.h"
35
36 MALLOC_DECLARE(M_UFSHCI);
37
38 #define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
39 #define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */
40 #define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */
41 #define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */
42 #define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */
43
44 #define UFSHCI_DEFAULT_RETRY_COUNT (4)
45
46 #define UFSHCI_UTR_ENTRIES (32)
47 #define UFSHCI_UTRM_ENTRIES (8)
48
49 #define UFSHCI_SECTOR_SIZE (512)
50
51 struct ufshci_controller;
52
/*
 * Status shared between a caller spinning in ufshci_completion_poll() and
 * the ufshci_completion_poll_cb() completion callback.  'done' is read with
 * acquire semantics by the poller; presumably the callback fills 'cpl' and
 * 'error' before setting 'done' — see ufshci_completion_poll_cb().
 */
struct ufshci_completion_poll_status {
	struct ufshci_completion cpl;	/* completion data for the request */
	int done;			/* non-zero once the callback has run */
	bool error;			/* true if the command failed */
};
58
/*
 * One outstanding UFS request: the request UPIU to issue, an optional data
 * payload descriptor, and the callback invoked on completion.
 */
struct ufshci_request {
	struct ufshci_upiu request_upiu;	/* UPIU sent to the device */
	size_t request_size;			/* valid bytes of request_upiu */
	size_t response_size;			/* expected response UPIU size */

	struct memdesc payload;		/* data buffer; only valid when
					 * payload_valid is true */
	enum ufshci_data_direction data_direction;
	ufshci_cb_fn_t cb_fn;		/* completion callback */
	void *cb_arg;			/* opaque argument passed to cb_fn */
	bool is_admin;			/* submitted through the admin path */
	int32_t retries;		/* retry count for this request */
	bool payload_valid;		/* 'payload' describes a data buffer */
	bool timeout;			/* subject to timeout handling */
	bool spare[2];			/* Future use */
	STAILQ_ENTRY(ufshci_request) stailq;	/* queue linkage */
};
75
/*
 * Per-slot (tracker) lifecycle state.  The names suggest the progression
 * FREE -> RESERVED -> SCHEDULED, with TIMEOUT / NEED_ERROR_HANDLING used by
 * the timeout and error-recovery paths.
 */
enum ufshci_slot_state {
	UFSHCI_SLOT_STATE_FREE = 0x0,
	UFSHCI_SLOT_STATE_RESERVED = 0x1,
	UFSHCI_SLOT_STATE_SCHEDULED = 0x2,
	UFSHCI_SLOT_STATE_TIMEOUT = 0x3,
	UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4,
};
83
/*
 * Tracks one in-flight request occupying one slot of a hardware queue,
 * together with the DMA resources tied to that slot.
 */
struct ufshci_tracker {
	struct ufshci_request *req;		/* request occupying this slot */
	struct ufshci_req_queue *req_queue;	/* owning request queue */
	struct ufshci_hw_queue *hwq;		/* owning hardware queue */
	uint8_t slot_num;			/* slot index in the queue */
	enum ufshci_slot_state slot_state;
	size_t response_size;			/* size of received response */
	sbintime_t deadline;			/* timeout deadline */

	/* DMA mapping of the data payload. */
	bus_dmamap_t payload_dma_map;
	uint64_t payload_addr;			/* payload bus address */

	/* UTP command descriptor for this slot and its bus address. */
	struct ufshci_utp_cmd_desc *ucd;
	bus_addr_t ucd_bus_addr;

	/* PRDT location within the command descriptor (offset/entry count). */
	uint16_t prdt_off;
	uint16_t prdt_entry_cnt;
};
102
/* Request submission scheme a request queue is operating in. */
enum ufshci_queue_mode {
	UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
	UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
};
107
108 /*
109 * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
110 * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To
111 * minimize duplicated code between SDB and MCQ, mode dependent operations are
112 * extracted into ufshci_qops.
113 */
114 struct ufshci_qops {
115 int (*construct)(struct ufshci_controller *ctrlr,
116 struct ufshci_req_queue *req_queue, uint32_t num_entries,
117 bool is_task_mgmt);
118 void (*destroy)(struct ufshci_controller *ctrlr,
119 struct ufshci_req_queue *req_queue);
120 struct ufshci_hw_queue *(*get_hw_queue)(
121 struct ufshci_req_queue *req_queue);
122 int (*enable)(struct ufshci_controller *ctrlr,
123 struct ufshci_req_queue *req_queue);
124 int (*reserve_slot)(struct ufshci_req_queue *req_queue,
125 struct ufshci_tracker **tr);
126 int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
127 struct ufshci_tracker **tr);
128 void (*ring_doorbell)(struct ufshci_controller *ctrlr,
129 struct ufshci_tracker *tr);
130 bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
131 uint8_t slot);
132 void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
133 struct ufshci_tracker *tr);
134 bool (*process_cpl)(struct ufshci_req_queue *req_queue);
135 int (*get_inflight_io)(struct ufshci_controller *ctrlr);
136 };
137
138 #define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
139
140 /*
141 * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
142 * (ring buffer) modes. Fields are shared; some such as sq_head, sq_tail and
143 * cq_head are not used in SDB but used in MCQ.
144 */
145 struct ufshci_hw_queue {
146 uint32_t id;
147 int domain;
148 int cpu;
149
150 union {
151 struct ufshci_utp_xfer_req_desc *utrd;
152 struct ufshci_utp_task_mgmt_req_desc *utmrd;
153 };
154
155 bus_dma_tag_t dma_tag_queue;
156 bus_dmamap_t queuemem_map;
157 bus_addr_t req_queue_addr;
158
159 bus_addr_t *ucd_bus_addr;
160
161 uint32_t num_entries;
162 uint32_t num_trackers;
163
164 /*
165 * A Request List using the single doorbell method uses a dedicated
166 * ufshci_tracker, one per slot.
167 */
168 struct ufshci_tracker **act_tr;
169
170 uint32_t sq_head; /* MCQ mode */
171 uint32_t sq_tail; /* MCQ mode */
172 uint32_t cq_head; /* MCQ mode */
173
174 uint32_t phase;
175 int64_t num_cmds;
176 int64_t num_intr_handler_calls;
177 int64_t num_retries;
178 int64_t num_failures;
179
180 struct mtx_padalign qlock;
181 };
182
/*
 * A logical request queue (one for transfer requests, one for task
 * management) that fans out to one or more hardware queues depending on
 * the active queue mode.
 */
struct ufshci_req_queue {
	struct ufshci_controller *ctrlr;	/* parent controller */
	int domain;

	/*
	 * queue_mode: active transfer scheme
	 *  UFSHCI_Q_MODE_SDB - legacy single-doorbell list
	 *  UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+)
	 */
	enum ufshci_queue_mode queue_mode;

	uint8_t num_q;			/* number of hardware queues */
	struct ufshci_hw_queue *hwq;	/* hardware queue state */

	struct ufshci_qops qops;	/* mode-dependent queue operations */

	bool is_task_mgmt;		/* true for the UTMR queue */
	uint32_t num_entries;
	uint32_t num_trackers;

	/* Shared DMA resource */
	struct ufshci_utp_cmd_desc *ucd;	/* UTP command descriptor pool */

	bus_dma_tag_t dma_tag_ucd;	/* tag for command descriptors */
	bus_dma_tag_t dma_tag_payload;	/* tag for data payload buffers */

	bus_dmamap_t ucdmem_map;
};
211
/*
 * State of the attached UFS device (target), populated during device
 * initialization (see ufshci_dev_init() and ufshci_dev_get_descriptor()).
 */
struct ufshci_device {
	uint32_t max_lun_count;		/* number of supported LUNs */

	/* Descriptors read from the device. */
	struct ufshci_device_descriptor dev_desc;
	struct ufshci_geometry_descriptor geo_desc;

	uint32_t unipro_version;	/* device-side UniPro version */

	/* WriteBooster */
	bool is_wb_enabled;
	bool is_wb_flush_enabled;
	uint32_t wb_buffer_type;
	uint32_t wb_buffer_size_mb;
	uint32_t wb_user_space_config_option;
	uint8_t wb_dedicated_lu;	/* LU dedicated to the WB buffer */
	uint32_t write_booster_flush_threshold;
};
229
230 /*
231 * One of these per allocated device.
232 */
233 struct ufshci_controller {
234 device_t dev;
235
236 uint32_t quirks;
237 #define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
238 1 /* QEMU does not support UIC POWER MODE */
239 #define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
240 2 /* Need an additional 200 ms of PA_TActivate */
241 #define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
242 4 /* Need to wait 1250us after power mode change */
243 #define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
244 8 /* Need to change the number of lanes before changing HS-GEAR. */
245 uint32_t ref_clk;
246
247 struct cam_sim *ufshci_sim;
248 struct cam_path *ufshci_path;
249
250 struct mtx sc_mtx;
251 uint32_t sc_unit;
252 uint8_t sc_name[16];
253
254 struct ufshci_device ufs_dev;
255
256 bus_space_tag_t bus_tag;
257 bus_space_handle_t bus_handle;
258 int resource_id;
259 struct resource *resource;
260
261 /* Currently, there is no UFSHCI that supports MSI, MSI-X. */
262 int msi_count;
263
264 /* Fields for tracking progress during controller initialization. */
265 struct intr_config_hook config_hook;
266
267 /* For shared legacy interrupt. */
268 int rid;
269 struct resource *res;
270 void *tag;
271
272 uint32_t major_version;
273 uint32_t minor_version;
274
275 uint32_t num_io_queues;
276 uint32_t max_hw_pend_io;
277
278 /* Maximum logical unit number */
279 uint32_t max_lun_count;
280
281 /* Maximum i/o size in bytes */
282 uint32_t max_xfer_size;
283
284 /* Controller capacity */
285 uint32_t cap;
286
287 /* Page size and log2(page_size) - 12 that we're currently using */
288 uint32_t page_size;
289
290 /* Timeout value on device initialization */
291 uint32_t device_init_timeout_in_ms;
292
293 /* Timeout value on UIC command */
294 uint32_t uic_cmd_timeout_in_ms;
295
296 /* UTMR/UTR queue timeout period in seconds */
297 uint32_t timeout_period;
298
299 /* UTMR/UTR queue retry count */
300 uint32_t retry_count;
301
302 /* UFS Host Controller Interface Registers */
303 struct ufshci_registers *regs;
304
305 /* UFS Transport Protocol Layer (UTP) */
306 struct ufshci_req_queue task_mgmt_req_queue;
307 struct ufshci_req_queue transfer_req_queue;
308 bool is_single_db_supported; /* 0 = supported */
309 bool is_mcq_supported; /* 1 = supported */
310
311 /* UFS Interconnect Layer (UIC) */
312 struct mtx uic_cmd_lock;
313 uint32_t unipro_version;
314 uint8_t hs_gear;
315 uint32_t tx_lanes;
316 uint32_t rx_lanes;
317 uint32_t max_rx_hs_gear;
318 uint32_t max_tx_lanes;
319 uint32_t max_rx_lanes;
320
321 bool is_failed;
322 };
323
324 #define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)
325
326 #define ufshci_mmio_read_4(sc, reg) \
327 bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
328 ufshci_mmio_offsetof(reg))
329
330 #define ufshci_mmio_write_4(sc, reg, val) \
331 bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
332 ufshci_mmio_offsetof(reg), val)
333
334 #define ufshci_printf(ctrlr, fmt, args...) \
335 device_printf(ctrlr->dev, fmt, ##args)
336
337 /* UFSHCI */
338 void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
339 bool error);
340
341 /* SIM */
342 int ufshci_sim_attach(struct ufshci_controller *ctrlr);
343 void ufshci_sim_detach(struct ufshci_controller *ctrlr);
344
345 /* Controller */
346 int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
347 void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
348 int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
349 /* ctrlr defined as void * to allow use with config_intrhook. */
350 void ufshci_ctrlr_start_config_hook(void *arg);
351 void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
352
353 int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
354 struct ufshci_request *req);
355 int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
356 struct ufshci_request *req);
357 int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
358 struct ufshci_request *req);
359 int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr);
360
361 void ufshci_reg_dump(struct ufshci_controller *ctrlr);
362
363 /* Device */
364 int ufshci_dev_init(struct ufshci_controller *ctrlr);
365 int ufshci_dev_reset(struct ufshci_controller *ctrlr);
366 int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
367 int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
368 int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
369 int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
370 int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
371 int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
372
373 /* Controller Command */
374 void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
375 ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
376 uint8_t task_tag, uint8_t iid);
377 void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
378 ufshci_cb_fn_t cb_fn, void *cb_arg);
379 void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
380 ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
381 void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
382 ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
383 uint32_t data_len, uint8_t lun, bool is_write);
384
385 /* Request Queue */
386 bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
387 int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
388 int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
389 void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
390 void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
391 int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
392 int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
393 void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
394 struct ufshci_hw_queue *hwq);
395 int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
396 struct ufshci_request *req, bool is_admin);
397 void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);
398
399 /* Request Single Doorbell Queue */
400 int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
401 struct ufshci_req_queue *req_queue, uint32_t num_entries,
402 bool is_task_mgmt);
403 void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
404 struct ufshci_req_queue *req_queue);
405 struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
406 struct ufshci_req_queue *req_queue);
407 int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
408 struct ufshci_req_queue *req_queue);
409 int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
410 struct ufshci_tracker **tr);
411 void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
412 struct ufshci_tracker *tr);
413 void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
414 struct ufshci_tracker *tr);
415 bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
416 uint8_t slot);
417 bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
418 uint8_t slot);
419 void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
420 struct ufshci_tracker *tr);
421 void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
422 struct ufshci_tracker *tr);
423 bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
424 int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
425
426 /* UIC Command */
427 int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
428 int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
429 int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
430 int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
431 uint32_t *return_value);
432 int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
433 uint32_t value);
434 int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
435 uint16_t attribute, uint32_t *return_value);
436 int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
437 uint16_t attribute, uint32_t value);
438 int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
439
440 /* SYSCTL */
441 void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);
442
443 int ufshci_attach(device_t dev);
444 int ufshci_detach(device_t dev);
445
446 /*
447 * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
448 * limited contexts where the caller knows it's OK to block briefly while the
449 * command runs. The ISR will run the callback which will set status->done to
450 * true, usually within microseconds. If not, then after one second timeout
451 * handler should reset the controller and abort all outstanding requests
452 * including this polled one. If still not after ten seconds, then something is
453 * wrong with the driver, and panic is the only way to recover.
454 *
455 * Most commands using this interface aren't actual I/O to the drive's media so
456 * complete within a few microseconds. Adaptively spin for one tick to catch the
457 * vast majority of these without waiting for a tick plus scheduling delays.
458 * Since these are on startup, this drastically reduces startup time.
459 */
460 static __inline void
ufshci_completion_poll(struct ufshci_completion_poll_status * status)461 ufshci_completion_poll(struct ufshci_completion_poll_status *status)
462 {
463 int timeout = ticks + 10 * hz;
464 sbintime_t delta_t = SBT_1US;
465
466 while (!atomic_load_acq_int(&status->done)) {
467 if (timeout - ticks < 0)
468 panic(
469 "UFSHCI polled command failed to complete within 10s.");
470 pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
471 delta_t = min(SBT_1MS, delta_t * 3 / 2);
472 }
473 }
474
475 static __inline void
ufshci_single_map(void * arg,bus_dma_segment_t * seg,int nseg,int error)476 ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
477 {
478 uint64_t *bus_addr = (uint64_t *)arg;
479
480 KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
481 if (error != 0)
482 printf("ufshci_single_map err %d\n", error);
483 *bus_addr = seg[0].ds_addr;
484 }
485
486 static __inline struct ufshci_request *
_ufshci_allocate_request(const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)487 _ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
488 {
489 struct ufshci_request *req;
490
491 KASSERT(how == M_WAITOK || how == M_NOWAIT,
492 ("nvme_allocate_request: invalid how %d", how));
493
494 req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
495 if (req != NULL) {
496 req->cb_fn = cb_fn;
497 req->cb_arg = cb_arg;
498 req->timeout = true;
499 }
500 return (req);
501 }
502
503 static __inline struct ufshci_request *
ufshci_allocate_request_vaddr(void * payload,uint32_t payload_size,const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)504 ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
505 const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
506 {
507 struct ufshci_request *req;
508
509 req = _ufshci_allocate_request(how, cb_fn, cb_arg);
510 if (req != NULL) {
511 if (payload_size) {
512 req->payload = memdesc_vaddr(payload, payload_size);
513 req->payload_valid = true;
514 }
515 }
516 return (req);
517 }
518
519 static __inline struct ufshci_request *
ufshci_allocate_request_bio(struct bio * bio,const int how,ufshci_cb_fn_t cb_fn,void * cb_arg)520 ufshci_allocate_request_bio(struct bio *bio, const int how,
521 ufshci_cb_fn_t cb_fn, void *cb_arg)
522 {
523 struct ufshci_request *req;
524
525 req = _ufshci_allocate_request(how, cb_fn, cb_arg);
526 if (req != NULL) {
527 req->payload = memdesc_bio(bio);
528 req->payload_valid = true;
529 }
530 return (req);
531 }
532
533 #define ufshci_free_request(req) free(req, M_UFSHCI)
534
535 void ufshci_ctrlr_shared_handler(void *arg);
536
537 #endif /* __UFSHCI_PRIVATE_H__ */
538