/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/module.h>

14 #include "sys/kassert.h"
15 #include "ufshci_private.h"
16 #include "ufshci_reg.h"
17
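/*
 * Tear down what ufshci_req_sdb_cmd_desc_construct() created: the
 * per-tracker payload DMA maps, the UCD DMA memory and tag, and the UCD
 * bus address array. Written to be safe on a partially constructed queue.
 */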
static void
ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	/*
	 * The payload DMA tag may not exist if construction failed early;
	 * only tear down the per-tracker maps when it does.
	 */
	if (req_queue->dma_tag_payload != NULL) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			bus_dmamap_destroy(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}
		bus_dma_tag_destroy(req_queue->dma_tag_payload);
		req_queue->dma_tag_payload = NULL;
	}

	if (req_queue->ucd) {
		bus_dmamap_unload(req_queue->dma_tag_ucd,
		    req_queue->ucdmem_map);
		bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
		    req_queue->ucdmem_map);
		req_queue->ucd = NULL;
	}

	if (req_queue->dma_tag_ucd) {
		bus_dma_tag_destroy(req_queue->dma_tag_ucd);
		req_queue->dma_tag_ucd = NULL;
	}

	free(hwq->ucd_bus_addr, M_UFSHCI);
	hwq->ucd_bus_addr = NULL;
}

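/*
 * busdma load callback for the UCD pool. The UCD tag caps each segment at
 * sizeof(struct ufshci_utp_cmd_desc), so segment i is expected to hold the
 * bus address of tracker i's command descriptor.
 */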
static void
ufshci_ucd_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_hw_queue *hwq = arg;
	int i;

	if (error != 0) {
		printf("ufshci: Failed to map UCD, error = %d\n", error);
		return;
	}

	if (hwq->num_trackers != nseg) {
		printf(
		    "ufshci: Failed to map UCD, num_trackers = %d, nseg = %d\n",
		    hwq->num_trackers, nseg);
		return;
	}

	for (i = 0; i < nseg; i++) {
		hwq->ucd_bus_addr[i] = seg[i].ds_addr;
	}
}

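/*
 * Allocate and map the UTP Command Descriptor (UCD) pool and create a
 * payload DMA map for each tracker. Only the transfer request queue needs
 * this; task management requests carry no separate command descriptor.
 */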
static int
ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
    uint32_t num_entries, struct ufshci_controller *ctrlr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	size_t ucd_allocsz, payload_allocsz;
	uint8_t *ucdmem;
	int i, error;

	hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
	    req_queue->num_trackers, M_UFSHCI, M_ZERO | M_NOWAIT);
	if (hwq->ucd_bus_addr == NULL)
		return (ENOMEM);

	/*
	 * Each component must be page aligned, and an individual PRDT
	 * cannot cross a page boundary.
	 */
	ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
	ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
	payload_allocsz = num_entries * ctrlr->max_xfer_size;

	/*
	 * Allocate physical memory for UTP Command Descriptor (UCD)
	 * Note: UFSHCI UCD format is restricted to 128-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ucd_allocsz,
	    howmany(ucd_allocsz, sizeof(struct ufshci_utp_cmd_desc)),
	    sizeof(struct ufshci_utp_cmd_desc), 0, NULL, NULL,
	    &req_queue->dma_tag_ucd);
	if (error != 0) {
		ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
		ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
		goto out;
	}

	if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
	    ucdmem, ucd_allocsz, ufshci_ucd_map, hwq, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
		/*
		 * req_queue->ucd is not set until the load succeeds, so
		 * free the freshly allocated buffer instead.
		 */
		bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
		    req_queue->ucdmem_map);
		goto out;
	}

	req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;

	/*
	 * Create the DMA tag for payload buffers described by the PRDT.
	 * Note: UFSHCI PRDT format is restricted to 8-byte alignment; the
	 * extra segment allows for a payload that does not start on a page
	 * boundary.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
	if (error != 0) {
		ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
		    error);
		goto out;
	}

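	/*
	 * Slice the UCD pool across the trackers: tracker i owns the i-th
	 * descriptor, whose bus address was recorded by ufshci_ucd_map().
	 */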
	for (i = 0; i < req_queue->num_trackers; i++) {
		bus_dmamap_create(req_queue->dma_tag_payload, 0,
		    &hwq->act_tr[i]->payload_dma_map);

		hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
		hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];

		ucdmem += sizeof(struct ufshci_utp_cmd_desc);
	}

	return (0);
out:
	ufshci_req_sdb_cmd_desc_destroy(req_queue);
	return (ENOMEM);
}

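/*
 * Construct a Single Doorbell (SDB) request queue: one hardware queue whose
 * ring is the UTRD or UTMRD list, with one tracker per doorbell slot.
 */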
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
	struct ufshci_hw_queue *hwq;
	size_t desc_size, alloc_size;
	uint64_t queuemem_phys;
	uint8_t *queuemem;
	struct ufshci_tracker *tr;
	const size_t lock_name_len = 32;
	char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
	const char *base;
	int i, error;

	req_queue->ctrlr = ctrlr;
	req_queue->is_task_mgmt = is_task_mgmt;
	req_queue->num_entries = num_entries;
	/*
	 * In Single Doorbell mode, the number of queue entries and the number
	 * of trackers are the same.
	 */
	req_queue->num_trackers = num_entries;

	/* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */
	req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
	    M_ZERO | M_NOWAIT);
	if (req_queue->hwq == NULL)
		return (ENOMEM);
	hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	hwq->num_entries = req_queue->num_entries;
	hwq->num_trackers = req_queue->num_trackers;
	hwq->ctrlr = ctrlr;
	hwq->req_queue = req_queue;

	base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
	snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
	    UFSHCI_SDB_Q);
	snprintf(recovery_lock_name, sizeof(recovery_lock_name),
	    "%s #%d recovery lock", base, UFSHCI_SDB_Q);

	mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
	mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);

	callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
	hwq->timer_armed = false;
	hwq->recovery_state = RECOVERY_WAITING;

	/*
	 * Allocate physical memory for request queue (UTP Transfer Request
	 * Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
	 * Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
	 */
	desc_size = is_task_mgmt ?
	    sizeof(struct ufshci_utp_task_mgmt_req_desc) :
	    sizeof(struct ufshci_utp_xfer_req_desc);
	alloc_size = num_entries * desc_size;
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
	if (error != 0) {
		ufshci_printf(ctrlr, "request queue tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
		ufshci_printf(ctrlr,
		    "failed to allocate request queue memory\n");
		goto out;
	}

	if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
	    alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load request queue memory\n");
		/*
		 * hwq->utrd is not set until after a successful load, so
		 * free the buffer that was just allocated.
		 */
		bus_dmamem_free(hwq->dma_tag_queue, queuemem,
		    hwq->queuemem_map);
		goto out;
	}

	hwq->num_cmds = 0;
	hwq->num_intr_handler_calls = 0;
	hwq->num_retries = 0;
	hwq->num_failures = 0;
	hwq->req_queue_addr = queuemem_phys;

	/* Allocate trackers */
	hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
	    req_queue->num_entries,
	    M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

	TAILQ_INIT(&hwq->free_tr);
	TAILQ_INIT(&hwq->outstanding_tr);

	for (i = 0; i < req_queue->num_trackers; i++) {
		tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
		    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

		tr->req_queue = req_queue;
		tr->slot_num = i;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;
		TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);

		hwq->act_tr[i] = tr;
	}

	if (is_task_mgmt) {
		/* UTP Task Management Request (UTMR) */
		uint32_t utmrlba, utmrlbau;

		hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;

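		/*
		 * Program UTMRLBA/UTMRLBAU with the 64-bit physical address
		 * of the UTMRD list.
		 */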
		utmrlba = hwq->req_queue_addr & 0xffffffff;
		utmrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
		ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
	} else {
		/* UTP Transfer Request (UTR) */
		uint32_t utrlba, utrlbau;

		hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;

		/*
		 * Allocate physical memory for the command descriptor.
		 * UTP Transfer Request (UTR) requires memory for a separate
		 * command in addition to the queue.
		 */
		if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
		    ctrlr) != 0) {
			ufshci_printf(ctrlr,
			    "failed to construct cmd descriptor memory\n");
			goto out;
		}

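		/*
		 * Program UTRLBA/UTRLBAU with the 64-bit physical address
		 * of the UTRD list.
		 */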
		utrlba = hwq->req_queue_addr & 0xffffffff;
		utrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
		ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
	}

	return (0);
out:
	ufshci_req_sdb_destroy(ctrlr, req_queue);
	return (ENOMEM);
}

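/*
 * Tear down everything ufshci_req_sdb_construct() set up: the recovery
 * timer, the command descriptor pool (transfer queue only), the trackers,
 * the descriptor ring, and the queue locks.
 */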
void
ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	mtx_lock(&hwq->recovery_lock);
	hwq->timer_armed = false;
	mtx_unlock(&hwq->recovery_lock);
	callout_drain(&hwq->timer);

	if (!req_queue->is_task_mgmt)
		ufshci_req_sdb_cmd_desc_destroy(req_queue);

	if (hwq->act_tr) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			free(tr, M_UFSHCI);
		}
		free(hwq->act_tr, M_UFSHCI);
		hwq->act_tr = NULL;
	}

	if (hwq->utrd != NULL) {
		bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
		bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
		    hwq->queuemem_map);
		hwq->utrd = NULL;
	}

	if (hwq->dma_tag_queue) {
		bus_dma_tag_destroy(hwq->dma_tag_queue);
		hwq->dma_tag_queue = NULL;
	}

	if (mtx_initialized(&hwq->recovery_lock))
		mtx_destroy(&hwq->recovery_lock);
	if (mtx_initialized(&hwq->qlock))
		mtx_destroy(&hwq->qlock);

	free(req_queue->hwq, M_UFSHCI);
}

struct ufshci_hw_queue *
ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
{
	return (&req_queue->hwq[UFSHCI_SDB_Q]);
}

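/*
 * Quiesce the queue: drop back to RECOVERY_WAITING and push every
 * outstanding tracker's deadline to SBT_MAX so nothing can time out while
 * the queue is disabled.
 */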
void
ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr, *tr_temp;

	mtx_lock(&hwq->recovery_lock);
	mtx_lock(&hwq->qlock);

	hwq->recovery_state = RECOVERY_WAITING;
	TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
		tr->deadline = SBT_MAX;
	}

	mtx_unlock(&hwq->qlock);
	mtx_unlock(&hwq->recovery_lock);
}

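/*
 * Start the request list: check that the controller reports the list as
 * ready, verify the doorbell (and, for UTR, the completion notification
 * register) is clear, then set the run-stop bit. The caller is expected to
 * hold the recovery lock and the queue lock.
 */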
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];

	if (req_queue->is_task_mgmt) {
		uint32_t hcs, utmrldbr, utmrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP task management request list is not ready\n");
			return (ENXIO);
		}

		utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
		if (utmrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP task management request list doorbell is not clear\n");
			return (ENXIO);
		}

		utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
		ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
	} else {
		uint32_t hcs, utrldbr, utrlcnr, utrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list is not ready\n");
			return (ENXIO);
		}

		utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
		if (utrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list doorbell is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list doorbell\n");
			ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
		}

		utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
		if (utrlcnr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list notification is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list notification\n");
			ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
		}

		utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
		ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
	}

	if (mtx_initialized(&hwq->recovery_lock))
		mtx_assert(&hwq->recovery_lock, MA_OWNED);
	if (mtx_initialized(&hwq->qlock))
		mtx_assert(&hwq->qlock, MA_OWNED);
	KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq"));

	hwq->recovery_state = RECOVERY_NONE;

	return (0);
}

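/*
 * Reserve a free doorbell slot by linear search. This assumes the caller
 * serializes reservations; the tracker state is not locked here.
 */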
int
ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	uint8_t i;

	for (i = 0; i < req_queue->num_entries; i++) {
		if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
			*tr = hwq->act_tr[i];
			(*tr)->hwq = hwq;
			return (0);
		}
	}
	return (EBUSY);
}

456
457 void
ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller * ctrlr,struct ufshci_tracker * tr)458 ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
459 struct ufshci_tracker *tr)
460 {
461 /*
462 * NOP
463 * UTP Task Management does not have a Completion Notification
464 * Register.
465 */
466 }
467
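/*
 * UTRLCNR is write-1-to-clear: writing this slot's bit acknowledges the
 * completion notification so the controller can post a new one for the
 * slot.
 */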
void
ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrlcnr;

	utrlcnr = 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
}

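/*
 * Ring a doorbell by setting the slot's bit. The slot then belongs to the
 * controller until it clears the bit on completion.
 */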
void
ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utmrldbr = 0;

	utmrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

void
ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrldbr = 0;

	utrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

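/*
 * A doorbell bit that is still set means the controller has not yet
 * completed the request; a cleared bit means the slot is done (or was
 * never issued).
 */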
bool
ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utmrldbr;

	utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
	return (!(utmrldbr & (1 << slot)));
}

bool
ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utrldbr;

	utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
	return (!(utrldbr & (1 << slot)));
}

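/*
 * Completion path: scan every slot and complete each scheduled tracker
 * whose doorbell bit the controller has cleared. Returns true if at least
 * one tracker completed.
 */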
bool
ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	uint8_t slot;
	bool done = false;

	mtx_assert(&hwq->recovery_lock, MA_OWNED);

	hwq->num_intr_handler_calls++;

	bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (slot = 0; slot < req_queue->num_entries; slot++) {
		tr = hwq->act_tr[slot];

		KASSERT(tr, ("there is no tracker assigned to the slot"));
		/*
		 * When the response is delivered from the device, the doorbell
		 * is cleared.
		 */
		if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
		    req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
			slot)) {
			ufshci_req_queue_complete_tracker(tr);
			done = true;
		}
	}

	return (done);
}

555
556 int
ufshci_req_sdb_get_inflight_io(struct ufshci_controller * ctrlr)557 ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
558 {
559 /* TODO: Implement inflight io*/
560
561 return (0);
562 }
563