/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#ifndef __UFSHCI_PRIVATE_H__
#define __UFSHCI_PRIVATE_H__

#ifdef _KERNEL
#include <sys/types.h>
#else /* !_KERNEL */
#include <stdbool.h>
#include <stdint.h>
#endif /* _KERNEL */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/power.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>

#include "ufshci.h"

MALLOC_DECLARE(M_UFSHCI);

#define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
#define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */
#define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */
#define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */
#define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */

#define UFSHCI_DEFAULT_RETRY_COUNT (4)

#define UFSHCI_UTR_ENTRIES (32)
#define UFSHCI_UTRM_ENTRIES (8)

#define UFSHCI_SECTOR_SIZE (512)

struct ufshci_controller;

struct ufshci_completion_poll_status {
	struct ufshci_completion cpl;
	int done;
	bool error;
};

struct ufshci_request {
	struct ufshci_upiu request_upiu;
	size_t request_size;
	size_t response_size;

	struct memdesc payload;
	enum ufshci_data_direction data_direction;
	ufshci_cb_fn_t cb_fn;
	void *cb_arg;
	bool is_admin;
	int32_t retries;
	bool payload_valid;
	bool spare[2]; /* Future use */
	STAILQ_ENTRY(ufshci_request) stailq;
};

enum ufshci_slot_state {
	UFSHCI_SLOT_STATE_FREE = 0x0,
	UFSHCI_SLOT_STATE_RESERVED = 0x1,
	UFSHCI_SLOT_STATE_SCHEDULED = 0x2,
	UFSHCI_SLOT_STATE_TIMEOUT = 0x3,
	UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4,
};

struct ufshci_tracker {
	TAILQ_ENTRY(ufshci_tracker) tailq;
	struct ufshci_request *req;
	struct ufshci_req_queue *req_queue;
	struct ufshci_hw_queue *hwq;
	uint8_t slot_num;
	enum ufshci_slot_state slot_state;
	size_t response_size;
	sbintime_t deadline;

	bus_dmamap_t payload_dma_map;
	uint64_t payload_addr;

	struct ufshci_utp_cmd_desc *ucd;
	bus_addr_t ucd_bus_addr;

	uint16_t prdt_off;
	uint16_t prdt_entry_cnt;
};

enum ufshci_queue_mode {
	UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
	UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
};

/*
 * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
 * default and additionally supports Multi-Circular Queue (MCQ) mode in UFS
 * 4.0. To minimize code duplication between SDB and MCQ, the mode-dependent
 * operations are extracted into ufshci_qops.
 */
struct ufshci_qops {
	int (*construct)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue, uint32_t num_entries,
	    bool is_task_mgmt);
	void (*destroy)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue);
	struct ufshci_hw_queue *(*get_hw_queue)(
	    struct ufshci_req_queue *req_queue);
	int (*enable)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue);
	void (*disable)(struct ufshci_controller *ctrlr,
	    struct ufshci_req_queue *req_queue);
	int (*reserve_slot)(struct ufshci_req_queue *req_queue,
	    struct ufshci_tracker **tr);
	int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
	    struct ufshci_tracker **tr);
	void (*ring_doorbell)(struct ufshci_controller *ctrlr,
	    struct ufshci_tracker *tr);
	bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
	    uint8_t slot);
	void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
	    struct ufshci_tracker *tr);
	bool (*process_cpl)(struct ufshci_req_queue *req_queue);
	int (*get_inflight_io)(struct ufshci_controller *ctrlr);
};
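
/*
 * Usage sketch (hypothetical wiring, not taken verbatim from the driver):
 * an SDB-mode request queue could fill in its ufshci_qops from the
 * ufshci_req_sdb_* helpers declared later in this header:
 *
 *	req_queue->qops = (struct ufshci_qops) {
 *		.construct = ufshci_req_sdb_construct,
 *		.destroy = ufshci_req_sdb_destroy,
 *		.get_hw_queue = ufshci_req_sdb_get_hw_queue,
 *		.enable = ufshci_req_sdb_enable,
 *		.disable = ufshci_req_sdb_disable,
 *		.reserve_slot = ufshci_req_sdb_reserve_slot,
 *		.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
 *		.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
 *		.is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
 *		.clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
 *		.process_cpl = ufshci_req_sdb_process_cpl,
 *		.get_inflight_io = ufshci_req_sdb_get_inflight_io,
 *	};
 */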

#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */

enum ufshci_recovery {
	RECOVERY_NONE = 0, /* Normal operations */
	RECOVERY_WAITING, /* Waiting for the reset to complete */
};

/*
 * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
 * (ring buffer) modes. Fields are shared; some, such as sq_head, sq_tail and
 * cq_head, are used only in MCQ mode and not in SDB mode.
 */
struct ufshci_hw_queue {
	struct ufshci_controller *ctrlr;
	struct ufshci_req_queue *req_queue;
	uint32_t id;
	int domain;
	int cpu;

	struct callout timer; /* protected by recovery_lock */
	bool timer_armed; /* protected by recovery_lock */
	enum ufshci_recovery recovery_state; /* protected by recovery_lock */

	union {
		struct ufshci_utp_xfer_req_desc *utrd;
		struct ufshci_utp_task_mgmt_req_desc *utmrd;
	};

	bus_dma_tag_t dma_tag_queue;
	bus_dmamap_t queuemem_map;
	bus_addr_t req_queue_addr;

	bus_addr_t *ucd_bus_addr;

	uint32_t num_entries;
	uint32_t num_trackers;

	TAILQ_HEAD(, ufshci_tracker) free_tr;
	TAILQ_HEAD(, ufshci_tracker) outstanding_tr;

	/*
	 * A Request List using the single doorbell method uses a dedicated
	 * ufshci_tracker, one per slot.
	 */
	struct ufshci_tracker **act_tr;

	uint32_t sq_head; /* MCQ mode */
	uint32_t sq_tail; /* MCQ mode */
	uint32_t cq_head; /* MCQ mode */

	uint32_t phase;
	int64_t num_cmds;
	int64_t num_intr_handler_calls;
	int64_t num_retries;
	int64_t num_failures;

	/*
	 * Each lock may be acquired independently.
	 * When both are required, acquire them in this order to avoid
	 * deadlocks. (recovery_lock -> qlock)
	 */
	struct mtx_padalign qlock;
	struct mtx_padalign recovery_lock;
};
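
/*
 * Lock-ordering sketch (illustrative only): when a code path needs both
 * per-queue locks, take recovery_lock before qlock, per the comment above:
 *
 *	mtx_lock(&hwq->recovery_lock);
 *	mtx_lock(&hwq->qlock);
 *	... examine or modify outstanding trackers ...
 *	mtx_unlock(&hwq->qlock);
 *	mtx_unlock(&hwq->recovery_lock);
 */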

struct ufshci_req_queue {
	struct ufshci_controller *ctrlr;
	int domain;

	/*
	 * queue_mode: active transfer scheme
	 *  UFSHCI_Q_MODE_SDB: legacy single-doorbell list
	 *  UFSHCI_Q_MODE_MCQ: modern multi-circular queue (UFSHCI 4.0+)
	 */
	enum ufshci_queue_mode queue_mode;

	uint8_t num_q;
	struct ufshci_hw_queue *hwq;

	struct ufshci_qops qops;

	bool is_task_mgmt;
	uint32_t num_entries;
	uint32_t num_trackers;

	/* Shared DMA resource */
	struct ufshci_utp_cmd_desc *ucd;

	bus_dma_tag_t dma_tag_ucd;
	bus_dma_tag_t dma_tag_payload;

	bus_dmamap_t ucdmem_map;
};

enum ufshci_dev_pwr {
	UFSHCI_DEV_PWR_ACTIVE = 0,
	UFSHCI_DEV_PWR_SLEEP,
	UFSHCI_DEV_PWR_POWERDOWN,
	UFSHCI_DEV_PWR_DEEPSLEEP,
	UFSHCI_DEV_PWR_COUNT,
};

enum ufshci_uic_link_state {
	UFSHCI_UIC_LINK_STATE_OFF = 0,
	UFSHCI_UIC_LINK_STATE_ACTIVE,
	UFSHCI_UIC_LINK_STATE_HIBERNATE,
	UFSHCI_UIC_LINK_STATE_BROKEN,
};

struct ufshci_power_entry {
	enum ufshci_dev_pwr dev_pwr;
	uint8_t ssu_pc; /* SSU Power Condition */
	enum ufshci_uic_link_state link_state;
};

/* SSU Power Condition 0x40 (DeepSleep) is defined in the UFS specification */
static const struct ufshci_power_entry power_map[POWER_STYPE_COUNT] = {
	[POWER_STYPE_AWAKE] = { UFSHCI_DEV_PWR_ACTIVE, SSS_PC_ACTIVE,
	    UFSHCI_UIC_LINK_STATE_ACTIVE },
	[POWER_STYPE_STANDBY] = { UFSHCI_DEV_PWR_SLEEP, SSS_PC_IDLE,
	    UFSHCI_UIC_LINK_STATE_HIBERNATE },
	[POWER_STYPE_SUSPEND_TO_MEM] = { UFSHCI_DEV_PWR_POWERDOWN,
	    SSS_PC_STANDBY, UFSHCI_UIC_LINK_STATE_HIBERNATE },
	[POWER_STYPE_SUSPEND_TO_IDLE] = { UFSHCI_DEV_PWR_SLEEP, SSS_PC_IDLE,
	    UFSHCI_UIC_LINK_STATE_HIBERNATE },
	[POWER_STYPE_HIBERNATE] = { UFSHCI_DEV_PWR_DEEPSLEEP, 0x40,
	    UFSHCI_UIC_LINK_STATE_OFF },
	[POWER_STYPE_POWEROFF] = { UFSHCI_DEV_PWR_POWERDOWN, SSS_PC_STANDBY,
	    UFSHCI_UIC_LINK_STATE_OFF },
};
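
/*
 * Illustrative sketch (assumed flow, simplified; error handling and quirk
 * checks omitted): a suspend path can index power_map directly by the
 * generic power_stype to pick the SSU power condition and target link state:
 *
 *	const struct ufshci_power_entry *entry = &power_map[stype];
 *
 *	error = ufshci_sim_send_ssu(ctrlr, true, entry->ssu_pc, false);
 *	if (error == 0)
 *		error = ufshci_dev_link_state_transition(ctrlr,
 *		    entry->link_state);
 */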

struct ufshci_device {
	uint32_t max_lun_count;

	struct ufshci_device_descriptor dev_desc;
	struct ufshci_geometry_descriptor geo_desc;

	uint32_t unipro_version;

	/* WriteBooster */
	bool is_wb_enabled;
	bool is_wb_flush_enabled;
	uint32_t wb_buffer_type;
	uint32_t wb_buffer_size_mb;
	uint32_t wb_user_space_config_option;
	uint8_t wb_dedicated_lu;
	uint32_t write_booster_flush_threshold;

	/* Power mode */
	bool power_mode_supported;
	enum ufshci_dev_pwr power_mode;
	enum ufshci_uic_link_state link_state;

	/* Auto Hibernation */
	bool auto_hibernation_supported;
	uint32_t ahit;
};

/*
 * One of these per allocated device.
 */
struct ufshci_controller {
	device_t dev;

	uint32_t quirks;
#define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
	1 /* QEMU does not support UIC POWER MODE */
#define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
	2 /* Need an additional 200 ms of PA_TActivate */
#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
	4 /* Need to wait 1250us after power mode change */
#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
	8 /* Need to change the number of lanes before changing HS-GEAR. */
#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
	16 /* QEMU does not support Task Management Request */
#define UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS \
	32 /* QEMU does not support Well known logical units */
#define UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE \
	64 /* Some controllers have the Auto hibernate feature enabled but it \
	      does not work. */
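/*
 * Quirks are tested as simple bit flags, e.g. (illustrative):
 *
 *	if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE)
 *		DELAY(1250);
 */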

	uint32_t ref_clk;

	struct cam_sim *ufshci_sim;
	struct cam_path *ufshci_path;

	struct cam_periph *ufs_device_wlun_periph;
	struct mtx ufs_device_wlun_mtx;

	struct mtx sc_mtx;
	uint32_t sc_unit;
	uint8_t sc_name[16];

	struct ufshci_device ufs_dev;

	bus_space_tag_t bus_tag;
	bus_space_handle_t bus_handle;
	int resource_id;
	struct resource *resource;

	/* No UFSHCI controller currently supports MSI or MSI-X. */
	int msi_count;

	/* Fields for tracking progress during controller initialization. */
	struct intr_config_hook config_hook;

	struct task reset_task;
	struct taskqueue *taskqueue;

	/* For shared legacy interrupt. */
	int rid;
	struct resource *res;
	void *tag;

	uint32_t major_version;
	uint32_t minor_version;

	uint32_t enable_aborts;

	uint32_t num_io_queues;
	uint32_t max_hw_pend_io;

	/* Maximum logical unit number */
	uint32_t max_lun_count;

	/* Maximum i/o size in bytes */
	uint32_t max_xfer_size;

	/* Controller capabilities */
	uint32_t cap;

	/* Page size we're currently using */
	uint32_t page_size;

	/* Timeout value on device initialization */
	uint32_t device_init_timeout_in_ms;

	/* Timeout value on UIC command */
	uint32_t uic_cmd_timeout_in_ms;

	/* UTMR/UTR queue timeout period in seconds */
	uint32_t timeout_period;

	/* UTMR/UTR queue retry count */
	uint32_t retry_count;

	/* UFS Host Controller Interface Registers */
	struct ufshci_registers *regs;

	/* UFS Transport Protocol Layer (UTP) */
	struct ufshci_req_queue task_mgmt_req_queue;
	struct ufshci_req_queue transfer_req_queue;
	bool is_single_db_supported; /* Capability bit is 0 when supported */
	bool is_mcq_supported; /* Capability bit is 1 when supported */

	/* UFS Interconnect Layer (UIC) */
	struct mtx uic_cmd_lock;
	uint32_t unipro_version;
	uint8_t hs_gear;
	uint32_t tx_lanes;
	uint32_t rx_lanes;
	uint32_t max_rx_hs_gear;
	uint32_t max_tx_lanes;
	uint32_t max_rx_lanes;

	bool is_failed;
};

#define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)

#define ufshci_mmio_read_4(sc, reg) \
	bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
	    ufshci_mmio_offsetof(reg))

#define ufshci_mmio_write_4(sc, reg, val) \
	bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
	    ufshci_mmio_offsetof(reg), val)

#define ufshci_printf(ctrlr, fmt, args...) \
	device_printf(ctrlr->dev, fmt, ##args)
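
/*
 * Usage sketch: registers are addressed by the field name they have in
 * struct ufshci_registers (see ufshci.h). Assuming the Interrupt Status
 * register field is named "is" (an assumption here), an interrupt handler
 * could read and then clear it with:
 *
 *	uint32_t status = ufshci_mmio_read_4(ctrlr, is);
 *
 *	ufshci_mmio_write_4(ctrlr, is, status);
 *
 * The second call works because the UFSHCI IS register is write-1-to-clear.
 */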

/* UFSHCI */
void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
    bool error);

/* SIM */
uint8_t ufshci_sim_translate_scsi_to_ufs_lun(lun_id_t scsi_lun);
uint64_t ufshci_sim_translate_ufs_to_scsi_lun(uint8_t ufs_lun);
int ufshci_sim_attach(struct ufshci_controller *ctrlr);
void ufshci_sim_detach(struct ufshci_controller *ctrlr);
struct cam_periph *ufshci_sim_find_periph(struct ufshci_controller *ctrlr,
    uint8_t wlun);
int ufshci_sim_send_ssu(struct ufshci_controller *ctrlr, bool start,
    uint8_t pwr_cond, bool immed);

/* Controller */
int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
int ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr,
    enum power_stype stype);
int ufshci_ctrlr_resume(struct ufshci_controller *ctrlr,
    enum power_stype stype);
int ufshci_ctrlr_disable(struct ufshci_controller *ctrlr);
/* ctrlr defined as void * to allow use with config_intrhook. */
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);

int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req);
int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr);

void ufshci_reg_dump(struct ufshci_controller *ctrlr);

/* Device */
int ufshci_dev_init(struct ufshci_controller *ctrlr);
int ufshci_dev_reset(struct ufshci_controller *ctrlr);
int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
void ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr);
void ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr);
int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
void ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr);
int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
int ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr,
    uint8_t *power_mode);
int ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr,
    enum ufshci_uic_link_state target_state);

/* Controller Command */
void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
    uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
    ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
    uint32_t data_len, uint8_t lun, bool is_write);

/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin);
void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);

/* Request Single Doorbell Queue */
int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries,
    bool is_task_mgmt);
void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue);
struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
    struct ufshci_req_queue *req_queue);
void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr);
void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot);
bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot);
void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);

/* UIC Command */
int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_hibernation_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
    uint32_t *return_value);
int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
    uint32_t value);
int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t *return_value);
int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
    uint16_t attribute, uint32_t value);
int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_hibernate_enter(struct ufshci_controller *ctrlr);
int ufshci_uic_send_dme_hibernate_exit(struct ufshci_controller *ctrlr);

/* SYSCTL */
void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);

int ufshci_attach(device_t dev);
int ufshci_detach(device_t dev);

/*
 * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
 * limited contexts where the caller knows it's OK to block briefly while the
 * command runs. The ISR will run the callback which will set status->done to
 * true, usually within microseconds. If not, then after one second the
 * timeout handler should reset the controller and abort all outstanding
 * requests, including this polled one. If it still has not completed after
 * ten seconds, then something is wrong with the driver and panic is the only
 * way to recover.
 *
 * Most commands using this interface aren't actual I/O to the drive's media,
 * so they complete within a few microseconds. Sleep adaptively, starting at
 * 1 us and backing off toward 1 ms, to catch the vast majority of these
 * without paying a full tick plus scheduling delays. Since these commands run
 * during startup, this drastically reduces startup time.
 */
static __inline void
ufshci_completion_poll(struct ufshci_completion_poll_status *status)
{
	int timeout = ticks + 10 * hz;
	sbintime_t delta_t = SBT_1US;

	while (!atomic_load_acq_int(&status->done)) {
		if (timeout - ticks < 0)
			panic(
			    "UFSHCI polled command failed to complete within 10s.");
		pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}
}

static __inline void
ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	uint64_t *bus_addr = (uint64_t *)arg;

	KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
	if (error != 0)
		printf("ufshci_single_map err %d\n", error);
	*bus_addr = seg[0].ds_addr;
}

static __inline struct ufshci_request *
_ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("ufshci_allocate_request: invalid how %d", how));

	req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
	}
	return (req);
}

static __inline struct ufshci_request *
ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
    const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		if (payload_size) {
			req->payload = memdesc_vaddr(payload, payload_size);
			req->payload_valid = true;
		}
	}
	return (req);
}

static __inline struct ufshci_request *
ufshci_allocate_request_bio(struct bio *bio, const int how,
    ufshci_cb_fn_t cb_fn, void *cb_arg)
{
	struct ufshci_request *req;

	req = _ufshci_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_bio(bio);
		req->payload_valid = true;
	}
	return (req);
}
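
/*
 * Polled-request sketch (hypothetical; "buf"/"buf_len" are placeholders and
 * error handling is omitted): allocate a request whose callback is
 * ufshci_completion_poll_cb, submit it, then wait with
 * ufshci_completion_poll() until the ISR completes it:
 *
 *	struct ufshci_completion_poll_status status;
 *	struct ufshci_request *req;
 *
 *	memset(&status, 0, sizeof(status));
 *	req = ufshci_allocate_request_vaddr(buf, buf_len, M_WAITOK,
 *	    ufshci_completion_poll_cb, &status);
 *	ufshci_ctrlr_submit_admin_request(ctrlr, req);
 *	ufshci_completion_poll(&status);
 *	if (status.error)
 *		ufshci_printf(ctrlr, "polled request failed\n");
 */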

#define ufshci_free_request(req) free(req, M_UFSHCI)

void ufshci_ctrlr_shared_handler(void *arg);

#endif /* __UFSHCI_PRIVATE_H__ */