xref: /freebsd/sys/dev/ufshci/ufshci_req_queue.c (revision 6b841d70960a3a0ec4e43392683053878c403f9c)
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>

#include "sys/kassert.h"
#include "ufshci_private.h"

static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);

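/* Single doorbell (SDB) queue operations for UTP Task Management Requests. */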
static const struct ufshci_qops sdb_utmr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

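/* Single doorbell (SDB) queue operations for UTP Transfer Requests. */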
static const struct ufshci_qops sdb_utr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

int
ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * UTP Task Management Request only supports Legacy Single Doorbell
	 * Queue.
	 */
	req_queue = &ctrlr->task_mgmt_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utmr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
	    /*is_task_mgmt*/ true);

	return (error);
}

void
ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
	    &ctrlr->task_mgmt_req_queue);
}

void
ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
{
	ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
	    &ctrlr->task_mgmt_req_queue);
}

int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
	    &ctrlr->task_mgmt_req_queue));
}

int
ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * MCQ mode is not supported yet, so default to SDB mode.
	 * TODO: Determine the queue mode by checking the Capability Registers.
	 */
	req_queue = &ctrlr->transfer_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
	    /*is_task_mgmt*/ false);

	return (error);
}

void
ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->transfer_req_queue.qops.destroy(ctrlr,
	    &ctrlr->transfer_req_queue);
}

void
ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
{
	ctrlr->transfer_req_queue.qops.disable(ctrlr,
	    &ctrlr->transfer_req_queue);
}

int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
	    &ctrlr->transfer_req_queue));
}

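/*
 * Check a completed command for errors: either an unsuccessful Overall
 * Command Status (OCS) in the request descriptor or a failure response code
 * in the response UPIU header.
 */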
static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
	bool is_error = false;

	/* Check request descriptor */
	if (ocs != UFSHCI_DESC_SUCCESS) {
		ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
		is_error = true;
	}

	/* Check response UPIU header */
	if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
		ufshci_printf(req_queue->ctrlr,
		    "Invalid response code = 0x%x\n",
		    response->header.response);
		is_error = true;
	}

	return (is_error);
}

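/*
 * Complete a tracker in software: write the given OCS and response code into
 * the tracker's descriptor and response UPIU as if the controller had
 * produced them, then run the normal completion path. Must be called without
 * the queue lock held.
 */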
static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
	struct ufshci_utp_xfer_req_desc *desc;
	struct ufshci_upiu_header *resp_header;

	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

	resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
	resp_header->response = rc;

	desc = &tr->hwq->utrd[tr->slot_num];
	desc->overall_command_status = ocs;

	ufshci_req_queue_complete_tracker(tr);
}

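/*
 * Complete a request in software without going through the hardware:
 * synthesize a completion with the given OCS and response code, invoke the
 * callback and free the request.
 */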
static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
	struct ufshci_completion cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.response_upiu.header.response = rc;
	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	if (error) {
		ufshci_printf(req_queue->ctrlr,
		    "Manual complete request error:0x%x", error);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl, error);

	ufshci_free_request(req);
}

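/*
 * Fail every outstanding request on the queue, e.g. when the controller has
 * failed. Reserved requests are completed manually; scheduled trackers go
 * through the manual tracker completion path.
 */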
void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	struct ufshci_req_queue *req_queue;
	struct ufshci_tracker *tr;
	struct ufshci_request *req;
	int i;

	if (!mtx_initialized(&hwq->qlock))
		return;

	mtx_lock(&hwq->qlock);

	req_queue = &ctrlr->transfer_req_queue;

	for (i = 0; i < req_queue->num_entries; i++) {
		tr = hwq->act_tr[i];
		req = tr->req;

		if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_request(req_queue, req,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		} else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
			/*
			 * Do not remove the tracker. The abort_tracker path
			 * will do that for us.
			 */
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_tracker(tr,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		}
	}

	mtx_unlock(&hwq->qlock);
}

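/*
 * Handle the completion of a tracker: copy the response out of the request
 * descriptor or UTP command descriptor, invoke the callback, and either
 * retry the request or return the tracker to the free list.
 */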
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
	struct ufshci_req_queue *req_queue = tr->req_queue;
	struct ufshci_hw_queue *hwq = tr->hwq;
	struct ufshci_request *req = tr->req;
	struct ufshci_completion cpl;
	uint8_t ocs;
	bool retry, error, retriable;

	mtx_assert(&hwq->qlock, MA_NOTOWNED);

	/*
	 * Copy the response from the Request Descriptor or UTP Command
	 * Descriptor.
	 */
	cpl.size = tr->response_size;
	if (req_queue->is_task_mgmt) {
		memcpy(&cpl.response_upiu,
		    (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);

		ocs = hwq->utmrd[tr->slot_num].overall_command_status;
	} else {
		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
		    cpl.size);

		ocs = hwq->utrd[tr->slot_num].overall_command_status;
	}

	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	/* TODO: Implement retry */
	// retriable = ufshci_completion_is_retry(cpl);
	retriable = false;
	retry = error && retriable &&
	    req->retries < req_queue->ctrlr->retry_count;
	if (retry)
		hwq->num_retries++;
	if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
		hwq->num_failures++;

	KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
	KASSERT(cpl.response_upiu.header.task_tag ==
		req->request_upiu.header.task_tag,
	    ("response task_tag does not match request task_tag\n"));

	if (!retry) {
		if (req->payload_valid) {
			bus_dmamap_sync(req_queue->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		/* Copy response from the command descriptor */
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, &cpl, error);
	}

	mtx_lock(&hwq->qlock);

	/* Clear the UTRL Completion Notification register */
	req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

	if (retry) {
		req->retries++;
		ufshci_req_queue_submit_tracker(req_queue, tr,
		    req->data_direction);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}

		/* Clear tracker */
		ufshci_free_request(req);
		tr->req = NULL;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;

		TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
	}

	mtx_unlock(&tr->hwq->qlock);
}

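/*
 * Process any pending completions for the queue. The queue-specific handler
 * runs under the recovery lock.
 */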
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq;
	bool done;

	hwq = req_queue->qops.get_hw_queue(req_queue);

	mtx_lock(&hwq->recovery_lock);
	done = req_queue->qops.process_cpl(req_queue);
	mtx_unlock(&hwq->recovery_lock);

	return (done);
}

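/*
 * busdma callback that fills the tracker's PRDT once the payload has been
 * mapped into DMA segments.
 */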
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_tracker *tr = arg;
	struct ufshci_prdt_entry *prdt_entry;
	int i;

	/*
	 * If the mapping operation failed, return immediately. The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		ufshci_printf(tr->req_queue->ctrlr,
		    "Failed to map payload %d\n", error);
		return;
	}

	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

	tr->prdt_entry_cnt = nseg;

	for (i = 0; i < nseg; i++) {
		prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
		    0xffffffff;
		prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
		    32;
		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

		++prdt_entry;
	}

	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

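/*
 * Build the PRDT for a tracker by DMA-mapping the request payload. On a
 * mapping failure the tracker is completed manually with
 * UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
 */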
static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
	struct ufshci_request *req = tr->req;
	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
	int error;

	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

	/* Fill the PRDT entries with the payload */
	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
		 *
		 * ufshci_req_queue_manual_complete_tracker must not be called
		 * with the req_queue lock held.
		 */
		ufshci_printf(tr->req_queue->ctrlr,
		    "bus_dmamap_load_mem returned with error:0x%x!\n", error);

		mtx_unlock(&tr->hwq->qlock);
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
		mtx_lock(&tr->hwq->qlock);
	}
}

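/* Initialize a UTP Task Management Request Descriptor from a request. */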
static void
ufshci_req_queue_fill_utmr_descriptor(
    struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
{
	memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;

	memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
}

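/*
 * Initialize a UTP Transfer Request Descriptor. The response UPIU offset,
 * response UPIU length and PRDT offset are converted from bytes to dwords;
 * the PRDT length is an entry count.
 */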
static void
ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
	uint8_t command_type;
	/* Value to convert bytes to dwords */
	const uint16_t dword_size = 4;

	/*
	 * Set command type to UFS storage.
	 * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
	 */
	command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;

	memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
	desc->command_type = command_type;
	desc->data_direction = data_direction;
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
	    0xffffffff);
	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
	    32);

	desc->response_upiu_offset = response_off / dword_size;
	desc->response_upiu_length = response_len / dword_size;
	desc->prdt_offset = prdt_off / dword_size;
	desc->prdt_length = prdt_entry_cnt;
}

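/*
 * Escalate timeout recovery. Steps 2-4 (logical unit reset, target device
 * reset, bus reset) are not implemented yet, so this currently falls through
 * to step 5 and resets the host controller.
 */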
static void
ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	/* TODO: Step 2. Logical unit reset */
	/* TODO: Step 3. Target device reset */
	/* TODO: Step 4. Bus reset */

	/*
	 * Step 5. All previous commands have timed out and recovery has
	 * failed, so reset the host controller.
	 */
	ufshci_printf(ctrlr,
	    "Recovery step 5: Resetting controller due to a timeout.\n");
	hwq->recovery_state = RECOVERY_WAITING;

	ufshci_ctrlr_reset(ctrlr);
}

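/*
 * Completion callback for the ABORT TASK task management request issued by
 * the timeout handler.
 */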
static void
ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
    bool error)
{
	struct ufshci_tracker *tr = arg;

	/*
	 * We still need to check the active tracker array, to cover the race
	 * where the I/O timed out at the same time the controller was
	 * completing it. An abort request is always issued on the Task
	 * Management Request queue, but it affects either a Task Management
	 * Request or an I/O (UTRL) queue, so take the lock of the original
	 * command's queue: we need it to avoid races with the completion code
	 * and to complete the command manually.
	 */
	mtx_lock(&tr->hwq->qlock);
	if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
		mtx_unlock(&tr->hwq->qlock);
		/*
		 * An I/O has timed out, the controller was unable to abort it
		 * for some reason, and we have not processed a completion for
		 * it yet. Construct a fake completion status and complete the
		 * I/O's tracker manually.
		 */
		ufshci_printf(tr->hwq->ctrlr,
		    "abort task request failed, aborting task manually\n");
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);

		if ((status->response_upiu.task_mgmt_response_upiu
			    .output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
		    (status->response_upiu.task_mgmt_response_upiu
			    .output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
			ufshci_printf(tr->hwq->ctrlr,
			    "Warning: the abort task request completed \
			    successfully, but the original task is still incomplete.");
			return;
		}

		/* Abort Task failed. Perform recovery steps 2-5 */
		ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
	} else {
		mtx_unlock(&tr->hwq->qlock);
	}
}

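/*
 * Watchdog callout. While commands are outstanding it runs every half
 * second, polls for missed completions and starts timeout recovery for any
 * command whose deadline has passed.
 */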
static void
ufshci_req_queue_timeout(void *arg)
{
	struct ufshci_hw_queue *hwq = arg;
	struct ufshci_controller *ctrlr = hwq->ctrlr;
	struct ufshci_tracker *tr;
	sbintime_t now;
	bool idle = true;
	bool fast;

	mtx_assert(&hwq->recovery_lock, MA_OWNED);

	/*
	 * If the controller has failed, then stop polling. This ensures that
	 * any failure processing that races with the hwq timeout will fail
	 * safely.
	 */
	if (ctrlr->is_failed) {
		ufshci_printf(ctrlr,
		    "Failed controller, stopping watchdog timeout.\n");
		hwq->timer_armed = false;
		return;
	}

	/*
	 * Shutdown condition: We set hwq->timer_armed to false in
	 * ufshci_req_sdb_destroy before calling callout_drain. When we call
	 * that, this routine might get called one last time. Exit w/o setting a
	 * timeout. None of the watchdog stuff needs to be done since we're
	 * destroying the hwq.
	 */
	if (!hwq->timer_armed) {
		ufshci_printf(ctrlr,
		    "Timeout fired during ufshci_utr_req_queue_destroy\n");
		return;
	}

	switch (hwq->recovery_state) {
	case RECOVERY_NONE:
		/*
		 * See if there's any recovery needed. First, do a fast check to
		 * see if anything could have timed out. If not, then skip
		 * everything else.
		 */
		fast = false;
		mtx_lock(&hwq->qlock);
		now = getsbinuptime();
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			/*
			 * If the first real transaction is not in timeout, then
			 * we're done. Otherwise, we try recovery.
			 */
			idle = false;
			if (now <= tr->deadline)
				fast = true;
			break;
		}
		mtx_unlock(&hwq->qlock);
		if (idle || fast)
			break;

		/*
		 * There's a stale transaction at the start of the queue whose
		 * deadline has passed. Poll the completions as a last-ditch
		 * effort in case an interrupt has been missed.
		 */
		hwq->req_queue->qops.process_cpl(hwq->req_queue);

		/*
		 * Now that we've run the ISR, re-check to see if there are any
		 * timed-out commands and abort them or reset the card if so.
		 */
		mtx_lock(&hwq->qlock);
		idle = true;
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			/*
			 * If we know this tracker hasn't timed out, we also
			 * know all subsequent ones haven't timed out. The tr
			 * queue is in submission order and all normal commands
			 * in a queue have the same timeout (or the timeout was
			 * changed by the user, but we eventually timeout then).
			 */
			idle = false;
			if (now <= tr->deadline)
				break;

			/*
			 * Timeout recovery is performed in five steps. If
			 * recovery fails at any step, the process continues to
			 * the next one:
			 * Step 1. Abort task
			 * Step 2. Logical unit reset 	(TODO)
			 * Step 3. Target device reset 	(TODO)
			 * Step 4. Bus reset 		(TODO)
			 * Step 5. Host controller reset
			 *
			 * If the timeout occurred in the Task Management
			 * Request queue, ignore Step 1.
			 */
			if (ctrlr->enable_aborts &&
			    !hwq->req_queue->is_task_mgmt &&
			    tr->req->cb_fn != ufshci_abort_complete) {
				/*
				 * Step 1. Timeout expired, abort the task.
				 *
				 * This isn't an abort command, ask for a
				 * hardware abort. This goes to the Task
				 * Management Request queue which will reset the
				 * task if it times out.
				 */
				ufshci_printf(ctrlr,
				    "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
				    tr->req->request_upiu.header.task_tag);
				ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
				    ufshci_abort_complete, tr,
				    UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
				    tr->req->request_upiu.header.lun,
				    tr->req->request_upiu.header.task_tag, 0);
			} else {
				/* Recovery Step 2-5 */
				ufshci_req_queue_timeout_recovery(ctrlr, hwq);
				idle = false;
				break;
			}
		}
		mtx_unlock(&hwq->qlock);
		break;

	case RECOVERY_WAITING:
		/*
		 * These messages aren't interesting while we're suspended. We
		 * put the queues into waiting state while suspending.
		 * Suspending takes a while, so we'll see these during that time
		 * and they aren't diagnostic. At other times, they indicate a
		 * problem that's worth complaining about.
		 */
		if (!device_is_suspended(ctrlr->dev))
			ufshci_printf(ctrlr, "Waiting for reset to complete\n");
		idle = false; /* We want to keep polling */
		break;
	}

	/*
	 * Rearm the timeout.
	 */
	if (!idle) {
		callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
	} else {
		hwq->timer_armed = false;
	}
}

/*
 * Submit the tracker to the hardware.
 */
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
	struct ufshci_controller *ctrlr = req_queue->ctrlr;
	struct ufshci_request *req = tr->req;
	struct ufshci_hw_queue *hwq;
	uint64_t ucd_paddr;
	uint16_t request_len, response_off, response_len;
	uint8_t slot_num = tr->slot_num;
	int timeout;

	hwq = req_queue->qops.get_hw_queue(req_queue);

	mtx_assert(&hwq->qlock, MA_OWNED);

	if (req->cb_fn == ufshci_completion_poll_cb)
		timeout = 1;
	else
		timeout = ctrlr->timeout_period;
	tr->deadline = getsbinuptime() + timeout * SBT_1S;
	if (!hwq->timer_armed) {
		hwq->timer_armed = true;
		/*
		 * It wakes up once every 0.5 seconds to check if the deadline
		 * has passed.
		 */
		callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
		    ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
	}

	if (req_queue->is_task_mgmt) {
		/* Prepare UTP Task Management Request Descriptor. */
		ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
		    req);
	} else {
		request_len = req->request_size;
		response_off = UFSHCI_UTP_XFER_REQ_SIZE;
		response_len = req->response_size;

		/* Prepare UTP Command Descriptor */
		memcpy(tr->ucd, &req->request_upiu, request_len);
		memset((uint8_t *)tr->ucd + response_off, 0, response_len);

		/* Prepare PRDT */
		if (req->payload_valid)
			ufshci_req_queue_prepare_prdt(tr);

		/* Prepare UTP Transfer Request Descriptor. */
		ucd_paddr = tr->ucd_bus_addr;
		ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
		    data_direction, ucd_paddr, response_off, response_len,
		    tr->prdt_off, tr->prdt_entry_cnt);

		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

	/* Ring the doorbell */
	req_queue->qops.ring_doorbell(ctrlr, tr);
}

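/*
 * Reserve a tracker for the request and submit it to the hardware. Called
 * with the queue lock held.
 */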
static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
	struct ufshci_tracker *tr = NULL;
	int error;

	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

	error = req_queue->qops.reserve_slot(req_queue, &tr);
	if (error != 0) {
		ufshci_printf(req_queue->ctrlr, "Failed to get tracker");
		return (error);
	}
	KASSERT(tr, ("There is no tracker allocated."));

	if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
	    tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
		return (EBUSY);

	/* Set the task_tag value to slot_num for traceability. */
	req->request_upiu.header.task_tag = tr->slot_num;

	tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
	tr->response_size = req->response_size;
	tr->deadline = SBT_MAX;
	tr->req = req;

	TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);

	ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

	return (0);
}

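/*
 * Entry point for submitting a request: acquire the queue lock and hand the
 * request to _ufshci_req_queue_submit_request().
 */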
int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin)
{
	struct ufshci_hw_queue *hwq;
	uint32_t error;

	/* TODO: MCQs should use a separate Admin queue. */

	hwq = req_queue->qops.get_hw_queue(req_queue);
	KASSERT(hwq, ("There is no HW queue allocated."));

	mtx_lock(&hwq->qlock);
	error = _ufshci_req_queue_submit_request(req_queue, req);
	mtx_unlock(&hwq->qlock);

	return (error);
}