xref: /freebsd/sys/dev/ufshci/ufshci_req_queue.c (revision 1349a733cf2828e0040cabef89eeadc3ff00c40b)
1 /*-
2  * Copyright (c) 2025, Samsung Electronics Co., Ltd.
3  * Written by Jaeyoon Choi
4  *
5  * SPDX-License-Identifier: BSD-2-Clause
6  */
7 
8 #include <sys/param.h>
9 #include <sys/bus.h>
10 #include <sys/conf.h>
11 #include <sys/domainset.h>
12 #include <sys/module.h>
13 
14 #include <cam/scsi/scsi_all.h>
15 
16 #include "sys/kassert.h"
17 #include "ufshci_private.h"
18 
19 static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
20     struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
21 
/*
 * Queue operations for the legacy Single Doorbell (SDB) mode.
 * Note that SDB has no dedicated admin queue, so admin slots are
 * reserved through the same path as regular slots.
 */
static const struct ufshci_qops sdb_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_ring_doorbell,
	.clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
34 
35 int
ufshci_utm_req_queue_construct(struct ufshci_controller * ctrlr)36 ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
37 {
38 	struct ufshci_req_queue *req_queue;
39 	int error;
40 
41 	/*
42 	 * UTP Task Management Request only supports Legacy Single Doorbell
43 	 * Queue.
44 	 */
45 	req_queue = &ctrlr->task_mgmt_req_queue;
46 	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
47 	req_queue->qops = sdb_qops;
48 
49 	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
50 	    /*is_task_mgmt*/ true);
51 
52 	return (error);
53 }
54 
55 void
ufshci_utm_req_queue_destroy(struct ufshci_controller * ctrlr)56 ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
57 {
58 	ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
59 	    &ctrlr->task_mgmt_req_queue);
60 }
61 
62 int
ufshci_utm_req_queue_enable(struct ufshci_controller * ctrlr)63 ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
64 {
65 	return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
66 	    &ctrlr->task_mgmt_req_queue));
67 }
68 
69 int
ufshci_ut_req_queue_construct(struct ufshci_controller * ctrlr)70 ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
71 {
72 	struct ufshci_req_queue *req_queue;
73 	int error;
74 
75 	/*
76 	 * Currently, it does not support MCQ mode, so it should be set to SDB
77 	 * mode by default.
78 	 * TODO: Determine queue mode by checking Capability Registers
79 	 */
80 	req_queue = &ctrlr->transfer_req_queue;
81 	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
82 	req_queue->qops = sdb_qops;
83 
84 	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
85 	    /*is_task_mgmt*/ false);
86 
87 	return (error);
88 }
89 
90 void
ufshci_ut_req_queue_destroy(struct ufshci_controller * ctrlr)91 ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
92 {
93 	ctrlr->transfer_req_queue.qops.destroy(ctrlr,
94 	    &ctrlr->transfer_req_queue);
95 }
96 
97 int
ufshci_ut_req_queue_enable(struct ufshci_controller * ctrlr)98 ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
99 {
100 	return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
101 	    &ctrlr->transfer_req_queue));
102 }
103 
104 static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue * req_queue,uint8_t ocs,union ufshci_reponse_upiu * response)105 ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
106     uint8_t ocs, union ufshci_reponse_upiu *response)
107 {
108 	bool is_error = false;
109 
110 	/* Check request descriptor */
111 	if (ocs != UFSHCI_DESC_SUCCESS) {
112 		ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
113 		is_error = true;
114 	}
115 
116 	/* Check response UPIU header */
117 	if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
118 		ufshci_printf(req_queue->ctrlr,
119 		    "Invalid response code = 0x%x\n",
120 		    response->header.response);
121 		is_error = true;
122 	}
123 
124 	return (is_error);
125 }
126 
127 static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker * tr,uint8_t ocs,uint8_t rc)128 ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
129     uint8_t rc)
130 {
131 	struct ufshci_utp_xfer_req_desc *desc;
132 	struct ufshci_upiu_header *resp_header;
133 
134 	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
135 
136 	resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
137 	resp_header->response = rc;
138 
139 	desc = &tr->hwq->utrd[tr->slot_num];
140 	desc->overall_command_status = ocs;
141 
142 	ufshci_req_queue_complete_tracker(tr);
143 }
144 
145 static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue * req_queue,struct ufshci_request * req,uint8_t ocs,uint8_t rc)146 ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
147     struct ufshci_request *req, uint8_t ocs, uint8_t rc)
148 {
149 	struct ufshci_completion cpl;
150 	bool error;
151 
152 	memset(&cpl, 0, sizeof(cpl));
153 	cpl.response_upiu.header.response = rc;
154 	error = ufshci_req_queue_response_is_error(req_queue, ocs,
155 	    &cpl.response_upiu);
156 
157 	if (error) {
158 		ufshci_printf(req_queue->ctrlr,
159 		    "Manual complete request error:0x%x", error);
160 	}
161 
162 	if (req->cb_fn)
163 		req->cb_fn(req->cb_arg, &cpl, error);
164 
165 	ufshci_free_request(req);
166 }
167 
168 void
ufshci_req_queue_fail(struct ufshci_controller * ctrlr,struct ufshci_hw_queue * hwq)169 ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
170     struct ufshci_hw_queue *hwq)
171 {
172 	struct ufshci_req_queue *req_queue;
173 	struct ufshci_tracker *tr;
174 	struct ufshci_request *req;
175 	int i;
176 
177 	if (!mtx_initialized(&hwq->qlock))
178 		return;
179 
180 	mtx_lock(&hwq->qlock);
181 
182 	req_queue = &ctrlr->transfer_req_queue;
183 
184 	for (i = 0; i < req_queue->num_entries; i++) {
185 		tr = hwq->act_tr[i];
186 		req = tr->req;
187 
188 		if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
189 			mtx_unlock(&hwq->qlock);
190 			ufshci_req_queue_manual_complete_request(req_queue, req,
191 			    UFSHCI_DESC_ABORTED,
192 			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
193 			mtx_lock(&hwq->qlock);
194 		} else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
195 			/*
196 			 * Do not remove the tracker. The abort_tracker path
197 			 * will do that for us.
198 			 */
199 			mtx_unlock(&hwq->qlock);
200 			ufshci_req_queue_manual_complete_tracker(tr,
201 			    UFSHCI_DESC_ABORTED,
202 			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
203 			mtx_lock(&hwq->qlock);
204 		}
205 	}
206 
207 	mtx_unlock(&hwq->qlock);
208 }
209 
210 void
ufshci_req_queue_complete_tracker(struct ufshci_tracker * tr)211 ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
212 {
213 	struct ufshci_req_queue *req_queue = tr->req_queue;
214 	struct ufshci_request *req = tr->req;
215 	struct ufshci_completion cpl;
216 	struct ufshci_utp_xfer_req_desc *desc;
217 	uint8_t ocs;
218 	bool retry, error, retriable;
219 
220 	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
221 
222 	bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
223 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
224 
225 	cpl.size = tr->response_size;
226 	memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
227 
228 	desc = &tr->hwq->utrd[tr->slot_num];
229 	ocs = desc->overall_command_status;
230 
231 	error = ufshci_req_queue_response_is_error(req_queue, ocs,
232 	    &cpl.response_upiu);
233 
234 	/* TODO: Implement retry */
235 	// retriable = ufshci_completion_is_retry(cpl);
236 	retriable = false;
237 	retry = error && retriable &&
238 	    req->retries < req_queue->ctrlr->retry_count;
239 	if (retry)
240 		tr->hwq->num_retries++;
241 	if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
242 		tr->hwq->num_failures++;
243 
244 	KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
245 	KASSERT(cpl.response_upiu.header.task_tag ==
246 		req->request_upiu.header.task_tag,
247 	    ("response task_tag does not match request task_tag\n"));
248 
249 	if (!retry) {
250 		if (req->payload_valid) {
251 			bus_dmamap_sync(req_queue->dma_tag_payload,
252 			    tr->payload_dma_map,
253 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
254 		}
255 		/* Copy response from the command descriptor */
256 		if (req->cb_fn)
257 			req->cb_fn(req->cb_arg, &cpl, error);
258 	}
259 
260 	mtx_lock(&tr->hwq->qlock);
261 
262 	/* Clear the UTRL Completion Notification register */
263 	req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
264 
265 	if (retry) {
266 		req->retries++;
267 		ufshci_req_queue_submit_tracker(req_queue, tr,
268 		    req->data_direction);
269 	} else {
270 		if (req->payload_valid) {
271 			bus_dmamap_unload(req_queue->dma_tag_payload,
272 			    tr->payload_dma_map);
273 		}
274 
275 		/* Clear tracker */
276 		ufshci_free_request(req);
277 		tr->req = NULL;
278 		tr->slot_state = UFSHCI_SLOT_STATE_FREE;
279 	}
280 
281 	mtx_unlock(&tr->hwq->qlock);
282 }
283 
284 bool
ufshci_req_queue_process_completions(struct ufshci_req_queue * req_queue)285 ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
286 {
287 	return (req_queue->qops.process_cpl(req_queue));
288 }
289 
290 static void
ufshci_payload_map(void * arg,bus_dma_segment_t * seg,int nseg,int error)291 ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
292 {
293 	struct ufshci_tracker *tr = arg;
294 	struct ufshci_prdt_entry *prdt_entry;
295 	int i;
296 
297 	/*
298 	 * If the mapping operation failed, return immediately. The caller
299 	 * is responsible for detecting the error status and failing the
300 	 * tracker manually.
301 	 */
302 	if (error != 0) {
303 		ufshci_printf(tr->req_queue->ctrlr,
304 		    "Failed to map payload %d\n", error);
305 		return;
306 	}
307 
308 	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;
309 
310 	tr->prdt_entry_cnt = nseg;
311 
312 	for (i = 0; i < nseg; i++) {
313 		prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
314 		    0xffffffff;
315 		prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
316 		    32;
317 		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);
318 
319 		++prdt_entry;
320 	}
321 
322 	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
323 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
324 }
325 
326 static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker * tr)327 ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
328 {
329 	struct ufshci_request *req = tr->req;
330 	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
331 	int error;
332 
333 	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;
334 
335 	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));
336 
337 	/* Filling PRDT enrties with payload */
338 	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
339 	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
340 	    BUS_DMA_NOWAIT);
341 	if (error != 0) {
342 		/*
343 		 * The dmamap operation failed, so we manually fail the
344 		 *  tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
345 		 *
346 		 * ufshci_req_queue_manual_complete_tracker must not be called
347 		 *  with the req_queue lock held.
348 		 */
349 		ufshci_printf(tr->req_queue->ctrlr,
350 		    "bus_dmamap_load_mem returned with error:0x%x!\n", error);
351 
352 		mtx_unlock(&tr->hwq->qlock);
353 		ufshci_req_queue_manual_complete_tracker(tr,
354 		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
355 		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
356 		mtx_lock(&tr->hwq->qlock);
357 	}
358 }
359 
360 static void
ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc * desc,uint8_t data_direction,const uint64_t paddr,const uint16_t response_off,const uint16_t response_len,const uint16_t prdt_off,const uint16_t prdt_entry_cnt)361 ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
362     uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
363     const uint16_t response_len, const uint16_t prdt_off,
364     const uint16_t prdt_entry_cnt)
365 {
366 	uint8_t command_type;
367 	/* Value to convert bytes to dwords */
368 	const uint16_t dword_size = 4;
369 
370 	/*
371 	 * Set command type to UFS storage.
372 	 * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
373 	 */
374 	command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;
375 
376 	memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
377 	desc->command_type = command_type;
378 	desc->data_direction = data_direction;
379 	desc->interrupt = true;
380 	/* Set the initial value to Invalid. */
381 	desc->overall_command_status = UFSHCI_OCS_INVALID;
382 	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
383 	    0xffffffff);
384 	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
385 	    32);
386 
387 	desc->response_upiu_offset = response_off / dword_size;
388 	desc->response_upiu_length = response_len / dword_size;
389 	desc->prdt_offset = prdt_off / dword_size;
390 	desc->prdt_length = prdt_entry_cnt;
391 }
392 
393 /*
394  * Submit the tracker to the hardware.
395  */
396 static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue * req_queue,struct ufshci_tracker * tr,enum ufshci_data_direction data_direction)397 ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
398     struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
399 {
400 	struct ufshci_controller *ctrlr = req_queue->ctrlr;
401 	struct ufshci_request *req = tr->req;
402 	uint64_t ucd_paddr;
403 	uint16_t request_len, response_off, response_len;
404 	uint8_t slot_num = tr->slot_num;
405 
406 	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
407 
408 	/* TODO: Check timeout */
409 
410 	request_len = req->request_size;
411 	response_off = UFSHCI_UTP_XFER_REQ_SIZE;
412 	response_len = req->response_size;
413 
414 	/* Prepare UTP Command Descriptor */
415 	memcpy(tr->ucd, &req->request_upiu, request_len);
416 	memset((uint8_t *)tr->ucd + response_off, 0, response_len);
417 
418 	/* Prepare PRDT */
419 	if (req->payload_valid)
420 		ufshci_req_queue_prepare_prdt(tr);
421 
422 	bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
423 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
424 
425 	/* Prepare UTP Transfer Request Descriptor. */
426 	ucd_paddr = tr->ucd_bus_addr;
427 	ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
428 	    data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
429 	    tr->prdt_entry_cnt);
430 
431 	bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
432 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
433 
434 	tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;
435 
436 	/* Ring the doorbell */
437 	req_queue->qops.ring_doorbell(ctrlr, tr);
438 }
439 
440 static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue * req_queue,struct ufshci_request * req)441 _ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
442     struct ufshci_request *req)
443 {
444 	struct ufshci_tracker *tr = NULL;
445 	int error;
446 
447 	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
448 
449 	error = req_queue->qops.reserve_slot(req_queue, &tr);
450 	if (error != 0) {
451 		ufshci_printf(req_queue->ctrlr, "Failed to get tracker");
452 		return (error);
453 	}
454 	KASSERT(tr, ("There is no tracker allocated."));
455 
456 	if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
457 	    tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
458 		return (EBUSY);
459 
460 	/* Set the task_tag value to slot_num for traceability. */
461 	req->request_upiu.header.task_tag = tr->slot_num;
462 
463 	tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
464 	tr->response_size = req->response_size;
465 	tr->deadline = SBT_MAX;
466 	tr->req = req;
467 
468 	ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
469 
470 	return (0);
471 }
472 
473 int
ufshci_req_queue_submit_request(struct ufshci_req_queue * req_queue,struct ufshci_request * req,bool is_admin)474 ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
475     struct ufshci_request *req, bool is_admin)
476 {
477 	struct ufshci_hw_queue *hwq;
478 	uint32_t error;
479 
480 	/* TODO: MCQs should use a separate Admin queue. */
481 
482 	hwq = req_queue->qops.get_hw_queue(req_queue);
483 	KASSERT(hwq, ("There is no HW queue allocated."));
484 
485 	mtx_lock(&hwq->qlock);
486 	error = _ufshci_req_queue_submit_request(req_queue, req);
487 	mtx_unlock(&hwq->qlock);
488 
489 	return (error);
490 }
491