/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>

#include "ufshci_private.h"

static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);

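/*
 * Queue operation tables for the legacy Single Doorbell (SDB) mode.
 * The UTP Task Management Request (UTMR) and UTP Transfer Request (UTR)
 * queues share the same construct/destroy/slot management helpers; only
 * the doorbell and completion-notification callbacks differ, since each
 * queue type has its own doorbell registers.
 */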
static const struct ufshci_qops sdb_utmr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

static const struct ufshci_qops sdb_utr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

int
ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * UTP Task Management Request only supports Legacy Single Doorbell
	 * Queue.
	 */
	req_queue = &ctrlr->task_mgmt_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utmr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
	    /*is_task_mgmt*/ true);

	return (error);
}

void
ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
	    &ctrlr->task_mgmt_req_queue);
}

void
ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
{
	ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
	    &ctrlr->task_mgmt_req_queue);
}

int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
	    &ctrlr->task_mgmt_req_queue));
}

int
ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * MCQ mode is not supported yet, so default to legacy Single Doorbell
	 * (SDB) mode.
	 * TODO: Determine the queue mode by checking the capability registers.
	 */
	req_queue = &ctrlr->transfer_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
	    /*is_task_mgmt*/ false);

	return (error);
}

void
ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->transfer_req_queue.qops.destroy(ctrlr,
	    &ctrlr->transfer_req_queue);
}

void
ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
{
	ctrlr->transfer_req_queue.qops.disable(ctrlr,
	    &ctrlr->transfer_req_queue);
}

int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
	    &ctrlr->transfer_req_queue));
}

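/*
 * Check a completed command for errors. Both the Overall Command Status
 * (OCS) from the request descriptor and the response code from the
 * response UPIU header must indicate success; any failure is logged.
 */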
static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
	bool is_error = false;

	/* Check request descriptor */
	if (ocs != UFSHCI_DESC_SUCCESS) {
		ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
		is_error = true;
	}

	/* Check response UPIU header */
	if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
		ufshci_printf(req_queue->ctrlr,
		    "Function(0x%x) Invalid response code = 0x%x\n",
		    response->header.ext_iid_or_function,
		    response->header.response);
		is_error = true;
	}

	return (is_error);
}

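/*
 * Complete a tracker by hand when the controller never produced a real
 * completion (e.g. an aborted command or a DMA mapping failure). The given
 * OCS is written into the transfer request descriptor and the given
 * response code into the response UPIU before running the normal
 * completion path. Must be called without the queue lock held.
 */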
static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
	struct ufshci_utp_xfer_req_desc *desc;
	struct ufshci_upiu_header *resp_header;

	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

	resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
	resp_header->response = rc;

	desc = &tr->hwq->utrd[tr->slot_num];
	desc->overall_command_status = ocs;

	ufshci_req_queue_complete_tracker(tr);
}

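/*
 * Complete a request that never reached the hardware: synthesize a
 * completion with the given response code, invoke the callback, and free
 * the request.
 */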
static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
	struct ufshci_completion cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.response_upiu.header.response = rc;
	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	if (error) {
		ufshci_printf(req_queue->ctrlr,
		    "Manual complete request error: 0x%x\n", error);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl, error);

	ufshci_free_request(req);
}

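/*
 * Fail every active tracker on the hardware queue, completing each request
 * with an ABORTED/GENERAL_FAILURE status. Reserved slots are completed at
 * the request level; scheduled slots go through the manual tracker
 * completion path.
 */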
void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	struct ufshci_req_queue *req_queue;
	struct ufshci_tracker *tr;
	struct ufshci_request *req;
	int i;

	if (!mtx_initialized(&hwq->qlock))
		return;

	mtx_lock(&hwq->qlock);

	req_queue = &ctrlr->transfer_req_queue;

	for (i = 0; i < req_queue->num_entries; i++) {
		tr = hwq->act_tr[i];
		req = tr->req;

		if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_request(req_queue, req,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		} else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
			/*
			 * Do not remove the tracker. The abort_tracker path
			 * will do that for us.
			 */
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_tracker(tr,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		}
	}

	mtx_unlock(&hwq->qlock);
}

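/*
 * Handle a single completed tracker: copy the response out of the task
 * management descriptor or the UTP command descriptor, check it for
 * errors, invoke the request callback (unless the command is being
 * retried), and return the tracker to the free list.
 */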
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
	struct ufshci_req_queue *req_queue = tr->req_queue;
	struct ufshci_hw_queue *hwq = tr->hwq;
	struct ufshci_request *req = tr->req;
	struct ufshci_completion cpl;
	uint8_t ocs;
	bool retry, error, retriable;

	mtx_assert(&hwq->qlock, MA_NOTOWNED);

	/*
	 * Copy the response from the Request Descriptor or UTP Command
	 * Descriptor.
	 */
	cpl.size = tr->response_size;
	if (req_queue->is_task_mgmt) {
		memcpy(&cpl.response_upiu,
		    (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);

		ocs = hwq->utmrd[tr->slot_num].overall_command_status;
	} else {
		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
		    cpl.size);

		ocs = hwq->utrd[tr->slot_num].overall_command_status;
	}

	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	/* TODO: Implement retry */
	// retriable = ufshci_completion_is_retry(cpl);
	retriable = false;
	retry = error && retriable &&
	    req->retries < req_queue->ctrlr->retry_count;
	if (retry)
		hwq->num_retries++;
	if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
		hwq->num_failures++;

	KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
	KASSERT(cpl.response_upiu.header.task_tag ==
		req->request_upiu.header.task_tag,
	    ("response task_tag does not match request task_tag\n"));

	if (!retry) {
		if (req->payload_valid) {
			bus_dmamap_sync(req_queue->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		/* Invoke the completion callback */
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, &cpl, error);
	}

	mtx_lock(&hwq->qlock);

	/* Clear the UTRL Completion Notification register */
	req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

	if (retry) {
		req->retries++;
		ufshci_req_queue_submit_tracker(req_queue, tr,
		    req->data_direction);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}

		/* Clear tracker */
		ufshci_free_request(req);
		tr->req = NULL;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;

		TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
	}

	mtx_unlock(&hwq->qlock);
}

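/*
 * Drain pending completions on the queue. The recovery lock serializes
 * normal completion processing with the timeout/recovery path.
 */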
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq;
	bool done;

	hwq = req_queue->qops.get_hw_queue(req_queue);

	mtx_lock(&hwq->recovery_lock);
	done = req_queue->qops.process_cpl(req_queue);
	mtx_unlock(&hwq->recovery_lock);

	return (done);
}

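/*
 * bus_dma callback that fills in the PRDT. Each DMA segment's physical
 * address is split into lower/upper 32-bit halves, and the data byte count
 * field is zero-based, hence the (ds_len - 1).
 */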
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_tracker *tr = arg;
	struct ufshci_prdt_entry *prdt_entry;
	int i;

	/*
	 * If the mapping operation failed, return immediately. The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		ufshci_printf(tr->req_queue->ctrlr,
		    "Failed to map payload %d\n", error);
		return;
	}

	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

	tr->prdt_entry_cnt = nseg;

	for (i = 0; i < nseg; i++) {
		prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
		    0xffffffff;
		prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
		    32;
		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

		++prdt_entry;
	}

	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

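/*
 * Prepare the PRDT area of the UTP command descriptor and load the request
 * payload for DMA. On a mapping failure the tracker is completed manually
 * with an INVALID_PRDT_ATTRIBUTES status.
 */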
static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
	struct ufshci_request *req = tr->req;
	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
	int error;

	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

	/* Fill the PRDT entries with the payload */
	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
		 *
		 * ufshci_req_queue_manual_complete_tracker must not be called
		 * with the req_queue lock held.
		 */
		ufshci_printf(tr->req_queue->ctrlr,
		    "bus_dmamap_load_mem returned with error: 0x%x!\n", error);

		mtx_unlock(&tr->hwq->qlock);
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
		mtx_lock(&tr->hwq->qlock);
	}
}

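/*
 * Build a UTP Task Management Request descriptor: interrupt on completion,
 * OCS initialized to "invalid", and the request UPIU copied into the
 * descriptor.
 */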
static void
ufshci_req_queue_fill_utmr_descriptor(
    struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
{
	memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;

	memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
}

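/*
 * Build a UTP Transfer Request descriptor. The response UPIU offset and
 * length and the PRDT offset are expressed in dwords, while the PRDT
 * length is the number of PRDT entries.
 */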
static void
ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
	uint8_t command_type;
	/* Value to convert bytes to dwords */
	const uint16_t dword_size = 4;

	/*
	 * Set the command type to UFS storage.
	 * The UFS 4.1 spec defines only 'UFS Storage' as a command type.
	 */
	command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;

	memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
	desc->command_type = command_type;
	desc->data_direction = data_direction;
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
	    0xffffffff);
	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
	    32);

	desc->response_upiu_offset = response_off / dword_size;
	desc->response_upiu_length = response_len / dword_size;
	desc->prdt_offset = prdt_off / dword_size;
	desc->prdt_length = prdt_entry_cnt;
}

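/*
 * Escalating timeout recovery. Steps 2-4 (logical unit, target device and
 * bus reset) are not implemented yet, so recovery currently proceeds
 * straight to a host controller reset.
 */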
static void
ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	/* TODO: Step 2. Logical unit reset */
	/* TODO: Step 3. Target device reset */
	/* TODO: Step 4. Bus reset */

	/*
	 * Step 5. All previous recovery steps have failed.
	 * Reset the host controller.
	 */
	ufshci_printf(ctrlr,
	    "Recovery step 5: Resetting controller due to a timeout.\n");
	hwq->recovery_state = RECOVERY_WAITING;

	ufshci_ctrlr_reset(ctrlr);
}

static void
ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
    bool error)
{
	struct ufshci_tracker *tr = arg;

	/*
	 * We still need to check the active tracker array, to cover the race
	 * where an I/O timed out at the same time the controller was
	 * completing it. An abort request is always issued on the Task
	 * Management Request queue, but it affects either a Task Management
	 * Request or an I/O (UTRL) queue, so take the appropriate queue lock
	 * for the original command's queue, since we'll need it to avoid
	 * races with the completion code and to complete the command
	 * manually.
	 */
	mtx_lock(&tr->hwq->qlock);
	if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
		mtx_unlock(&tr->hwq->qlock);
		/*
		 * An I/O has timed out, and the controller was unable to abort
		 * it for some reason. And we've not processed a completion for
		 * it yet. Construct a fake completion status, and then complete
		 * the I/O's tracker manually.
		 */
		ufshci_printf(tr->hwq->ctrlr,
		    "abort task request failed, aborting task manually\n");
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);

		if ((status->response_upiu.task_mgmt_response_upiu
			.output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
		    (status->response_upiu.task_mgmt_response_upiu
			.output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
			ufshci_printf(tr->hwq->ctrlr,
			    "Warning: the abort task request completed "
			    "successfully, but the original task is still "
			    "incomplete.\n");
			return;
		}

		/* The abort task failed. Perform recovery steps 2-5. */
		ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
	} else {
		mtx_unlock(&tr->hwq->qlock);
	}
}

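/*
 * Watchdog callout. It runs every 0.5 seconds while commands are
 * outstanding, polls for missed completions, and kicks off the timeout
 * recovery sequence for any tracker whose deadline has passed.
 */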
static void
ufshci_req_queue_timeout(void *arg)
{
	struct ufshci_hw_queue *hwq = arg;
	struct ufshci_controller *ctrlr = hwq->ctrlr;
	struct ufshci_tracker *tr;
	sbintime_t now;
	bool idle = true;
	bool fast;

	mtx_assert(&hwq->recovery_lock, MA_OWNED);

	/*
	 * If the controller is failed, then stop polling. This ensures that
	 * any failure processing that races with the hwq timeout will fail
	 * safely.
	 */
	if (ctrlr->is_failed) {
		ufshci_printf(ctrlr,
		    "Failed controller, stopping watchdog timeout.\n");
		hwq->timer_armed = false;
		return;
	}

	/*
	 * Shutdown condition: We set hwq->timer_armed to false in
	 * ufshci_req_sdb_destroy before calling callout_drain. When we call
	 * that, this routine might get called one last time. Exit w/o setting
	 * a timeout. None of the watchdog stuff needs to be done since we're
	 * destroying the hwq.
	 */
	if (!hwq->timer_armed) {
		ufshci_printf(ctrlr,
		    "Timeout fired during ufshci_utr_req_queue_destroy\n");
		return;
	}

	switch (hwq->recovery_state) {
	case RECOVERY_NONE:
		/*
		 * See if there's any recovery needed. First, do a fast check
		 * to see if anything could have timed out. If not, then skip
		 * everything else.
		 */
		fast = false;
		mtx_lock(&hwq->qlock);
		now = getsbinuptime();
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			/*
			 * If the first real transaction is not in timeout, then
			 * we're done. Otherwise, we try recovery.
			 */
			idle = false;
			if (now <= tr->deadline)
				fast = true;
			break;
		}
		mtx_unlock(&hwq->qlock);
		if (idle || fast)
			break;

		/*
		 * There's a stale transaction at the start of the queue whose
		 * deadline has passed. Poll the completions as a last-ditch
		 * effort in case an interrupt has been missed.
		 */
		hwq->req_queue->qops.process_cpl(hwq->req_queue);

		/*
		 * Now that we've run the ISR, re-check to see if there are any
		 * timed out commands and abort them or reset the card if so.
		 */
		mtx_lock(&hwq->qlock);
		idle = true;
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			/*
			 * If we know this tracker hasn't timed out, we also
			 * know all subsequent ones haven't timed out. The tr
			 * queue is in submission order and all normal commands
			 * in a queue have the same timeout (or the timeout was
			 * changed by the user, in which case we still time out
			 * eventually).
			 */
			idle = false;
			if (now <= tr->deadline)
				break;

			/*
			 * Timeout recovery is performed in five steps. If
			 * recovery fails at any step, the process continues to
			 * the next one:
			 * Step 1. Abort task
			 * Step 2. Logical unit reset (TODO)
			 * Step 3. Target device reset (TODO)
			 * Step 4. Bus reset (TODO)
			 * Step 5. Host controller reset
			 *
			 * If the timeout occurred in the Task Management
			 * Request queue, skip Step 1.
			 */
			if (ctrlr->enable_aborts &&
			    !hwq->req_queue->is_task_mgmt &&
			    tr->req->cb_fn != ufshci_abort_complete) {
				/*
				 * Step 1. The timeout expired; abort the task.
				 *
				 * This isn't an abort command, so ask for a
				 * hardware abort. This goes to the Task
				 * Management Request queue which will reset
				 * the task if it times out.
				 */
				ufshci_printf(ctrlr,
				    "Recovery step 1: Timeout occurred, aborting task %d.\n",
				    tr->req->request_upiu.header.task_tag);
				ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
				    ufshci_abort_complete, tr,
				    UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
				    tr->req->request_upiu.header.lun,
				    tr->req->request_upiu.header.task_tag, 0);
			} else {
				/* Recovery steps 2-5 */
				ufshci_req_queue_timeout_recovery(ctrlr, hwq);
				idle = false;
				break;
			}
		}
		mtx_unlock(&hwq->qlock);
		break;

	case RECOVERY_WAITING:
		/*
		 * These messages aren't interesting while we're suspended. We
		 * put the queues into waiting state while suspending.
		 * Suspending takes a while, so we'll see these during that
		 * time and they aren't diagnostic. At other times, they
		 * indicate a problem that's worth complaining about.
		 */
		if (!device_is_suspended(ctrlr->dev))
			ufshci_printf(ctrlr, "Waiting for reset to complete\n");
		idle = false; /* We want to keep polling */
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
	} else {
		hwq->timer_armed = false;
	}
}

/*
 * Submit the tracker to the hardware.
 */
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
	struct ufshci_controller *ctrlr = req_queue->ctrlr;
	struct ufshci_request *req = tr->req;
	struct ufshci_hw_queue *hwq;
	uint64_t ucd_paddr;
	uint16_t request_len, response_off, response_len;
	uint8_t slot_num = tr->slot_num;
	int timeout;

	hwq = req_queue->qops.get_hw_queue(req_queue);

	mtx_assert(&hwq->qlock, MA_OWNED);

	if (req->cb_fn == ufshci_completion_poll_cb)
		timeout = 1;
	else
		timeout = ctrlr->timeout_period;
	tr->deadline = getsbinuptime() + timeout * SBT_1S;
	if (!hwq->timer_armed) {
		hwq->timer_armed = true;
		/*
		 * Arm the watchdog; it wakes up every 0.5 seconds to check
		 * whether any deadline has passed.
		 */
		callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
		    ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
	}

	if (req_queue->is_task_mgmt) {
		/* Prepare UTP Task Management Request Descriptor. */
		ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
		    req);
	} else {
		request_len = req->request_size;
		response_off = UFSHCI_UTP_XFER_REQ_SIZE;
		response_len = req->response_size;

		/* Prepare UTP Command Descriptor */
		memcpy(tr->ucd, &req->request_upiu, request_len);
		memset((uint8_t *)tr->ucd + response_off, 0, response_len);

		/* Prepare PRDT */
		if (req->payload_valid)
			ufshci_req_queue_prepare_prdt(tr);

		/* Prepare UTP Transfer Request Descriptor. */
		ucd_paddr = tr->ucd_bus_addr;
		ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
		    data_direction, ucd_paddr, response_off, response_len,
		    tr->prdt_off, tr->prdt_entry_cnt);

		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

	/* Ring the doorbell */
	req_queue->qops.ring_doorbell(ctrlr, tr);
}

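/*
 * Reserve a tracker slot, attach the request to it, and submit it to the
 * hardware. Called with the hardware queue lock held.
 */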
static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
	struct ufshci_tracker *tr = NULL;
	int error;

	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

	error = req_queue->qops.reserve_slot(req_queue, &tr);
	if (error != 0) {
		ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
		return (error);
	}
	KASSERT(tr, ("There is no tracker allocated."));

	if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
	    tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
		return (EBUSY);

	/* Set the task_tag value to slot_num for traceability. */
	req->request_upiu.header.task_tag = tr->slot_num;

	tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
	tr->response_size = req->response_size;
	tr->deadline = SBT_MAX;
	tr->req = req;

	TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);

	ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

	return (0);
}

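/*
 * Public submission entry point: takes the hardware queue lock around the
 * unlocked submission path above.
 */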
int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin)
{
	struct ufshci_hw_queue *hwq;
	int error;

	/* TODO: MCQs should use a separate Admin queue. */

	hwq = req_queue->qops.get_hw_queue(req_queue);
	KASSERT(hwq, ("There is no HW queue allocated."));

	mtx_lock(&hwq->qlock);
	error = _ufshci_req_queue_submit_request(req_queue, req);
	mtx_unlock(&hwq->qlock);

	return (error);
}