xref: /freebsd/sys/dev/ufshci/ufshci_req_sdb.c (revision 2be8ce8405ed8e9a9417ca5c3198bbc9a67a825b)
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

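/*
 * Tear down the per-tracker payload DMA maps and release the UTP Command
 * Descriptor (UCD) memory, its DMA map, and the UCD/payload DMA tags. Safe
 * to call on a partially constructed queue: each resource is checked before
 * it is released.
 */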
static void
ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	if (req_queue->dma_tag_payload) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			bus_dmamap_destroy(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}
		bus_dma_tag_destroy(req_queue->dma_tag_payload);
		req_queue->dma_tag_payload = NULL;
	}

	if (req_queue->ucd) {
		bus_dmamap_unload(req_queue->dma_tag_ucd,
		    req_queue->ucdmem_map);
		bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
		    req_queue->ucdmem_map);
		req_queue->ucd = NULL;
	}

	if (req_queue->dma_tag_ucd) {
		bus_dma_tag_destroy(req_queue->dma_tag_ucd);
		req_queue->dma_tag_ucd = NULL;
	}
}

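/*
 * bus_dmamap_load() callback for the UCD area. Every UCD must land in its
 * own DMA segment, so the segment count has to match the tracker count;
 * record each segment's bus address for the corresponding tracker.
 */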
static void
ufshci_ucd_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_hw_queue *hwq = arg;
	int i;

	if (error != 0) {
		printf("ufshci: Failed to map UCD, error = %d\n", error);
		return;
	}

	if (hwq->num_trackers != nseg) {
		printf(
		    "ufshci: Failed to map UCD, num_trackers = %d, nseg = %d\n",
		    hwq->num_trackers, nseg);
		return;
	}

	for (i = 0; i < nseg; i++) {
		hwq->ucd_bus_addr[i] = seg[i].ds_addr;
	}
}

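/*
 * Allocate and map the UCD area and create the payload (PRDT) DMA tag, then
 * hand each tracker its UCD slice and bus address. Unwinds through
 * ufshci_req_sdb_cmd_desc_destroy() and returns ENOMEM on failure.
 */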
static int
ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
    uint32_t num_entries, struct ufshci_controller *ctrlr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	size_t ucd_allocsz, payload_allocsz;
	uint8_t *ucdmem;
	int i, error;

	/*
	 * Each component must be page aligned, and an individual PRDT must
	 * not cross a page boundary.
	 */
	ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
	ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
	payload_allocsz = num_entries * ctrlr->max_xfer_size;

	/*
	 * Allocate physical memory for the UTP Command Descriptors (UCD).
	 * Note: The UFSHCI UCD format requires 128-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ucd_allocsz,
	    howmany(ucd_allocsz, sizeof(struct ufshci_utp_cmd_desc)),
	    sizeof(struct ufshci_utp_cmd_desc), 0, NULL, NULL,
	    &req_queue->dma_tag_ucd);
	if (error != 0) {
		ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
		ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
		goto out;
	}

	if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		ucdmem, ucd_allocsz, ufshci_ucd_map, hwq, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
		/* req_queue->ucd is not set yet; free the raw buffer. */
		bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
		    req_queue->ucdmem_map);
		goto out;
	}

	req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;

	/*
	 * Allocate physical memory for the PRDT.
	 * Note: The UFSHCI PRDT format requires 8-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
	if (error != 0) {
		ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
		    error);
		goto out;
	}

	for (i = 0; i < req_queue->num_trackers; i++) {
		bus_dmamap_create(req_queue->dma_tag_payload, 0,
		    &hwq->act_tr[i]->payload_dma_map);

		hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
		hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];

		ucdmem += sizeof(struct ufshci_utp_cmd_desc);
	}

	return (0);
out:
	ufshci_req_sdb_cmd_desc_destroy(req_queue);
	return (ENOMEM);
}

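/*
 * Construct a Single Doorbell (SDB) request queue: allocate the hardware
 * queue and its trackers, allocate and map the DMA'able descriptor ring
 * (UTRD or UTMRD), and program the list base address registers. Transfer
 * queues additionally get UCD/PRDT memory via
 * ufshci_req_sdb_cmd_desc_construct().
 */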
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
	struct ufshci_hw_queue *hwq;
	size_t desc_size, alloc_size;
	uint64_t queuemem_phys;
	uint8_t *queuemem;
	struct ufshci_tracker *tr;
	int i, error;

	req_queue->ctrlr = ctrlr;
	req_queue->is_task_mgmt = is_task_mgmt;
	req_queue->num_entries = num_entries;
	/*
	 * In Single Doorbell mode, the number of queue entries and the number
	 * of trackers are the same.
	 */
	req_queue->num_trackers = num_entries;

	/*
	 * Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0)
	 * Construction runs in a sleepable (attach) context, so M_WAITOK
	 * allocations are safe here, matching the tracker allocations below.
	 */
	req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
	    M_ZERO | M_WAITOK);
	hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	hwq->num_entries = req_queue->num_entries;
	hwq->num_trackers = req_queue->num_trackers;
	hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
	    req_queue->num_trackers, M_UFSHCI, M_ZERO | M_WAITOK);

	mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);

	/*
	 * Allocate physical memory for the request queue (UTP Transfer
	 * Request Descriptors (UTRD) or UTP Task Management Request
	 * Descriptors (UTMRD)).
	 * Note: The UTRD/UTMRD format requires 1024-byte alignment.
	 */
	desc_size = is_task_mgmt ?
	    sizeof(struct ufshci_utp_task_mgmt_req_desc) :
	    sizeof(struct ufshci_utp_xfer_req_desc);
	alloc_size = num_entries * desc_size;
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
	if (error != 0) {
		ufshci_printf(ctrlr, "request queue tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
		ufshci_printf(ctrlr,
		    "failed to allocate request queue memory\n");
		goto out;
	}

	if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
		alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load request queue memory\n");
		/* The map is not loaded; free the raw buffer directly. */
		bus_dmamem_free(hwq->dma_tag_queue, queuemem,
		    hwq->queuemem_map);
		goto out;
	}

	hwq->num_cmds = 0;
	hwq->num_intr_handler_calls = 0;
	hwq->num_retries = 0;
	hwq->num_failures = 0;
	hwq->req_queue_addr = queuemem_phys;

	/* Allocate trackers */
	hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
	    req_queue->num_entries, M_UFSHCI,
	    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

	for (i = 0; i < req_queue->num_trackers; i++) {
		tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
		    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

		tr->req_queue = req_queue;
		tr->slot_num = i;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;

		hwq->act_tr[i] = tr;
	}

	if (is_task_mgmt) {
		/* UTP Task Management Request (UTMR) */
		uint32_t utmrlba, utmrlbau;

		hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;

		utmrlba = hwq->req_queue_addr & 0xffffffff;
		utmrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
		ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
	} else {
		/* UTP Transfer Request (UTR) */
		uint32_t utrlba, utrlbau;

		hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;

		/*
		 * Allocate physical memory for the command descriptors.
		 * UTP Transfer Requests (UTR) require memory for a separate
		 * command descriptor in addition to the queue itself.
		 */
		if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
			ctrlr) != 0) {
			ufshci_printf(ctrlr,
			    "failed to construct cmd descriptor memory\n");
			/*
			 * The loaded queue memory is unloaded and freed by
			 * ufshci_req_sdb_destroy(); freeing it here as well
			 * would be a double free.
			 */
			goto out;
		}

		utrlba = hwq->req_queue_addr & 0xffffffff;
		utrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
		ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
	}

	return (0);
out:
	ufshci_req_sdb_destroy(ctrlr, req_queue);
	return (ENOMEM);
}

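/*
 * Release everything ufshci_req_sdb_construct() allocated: command
 * descriptors (transfer queues only), trackers, the descriptor ring, its
 * DMA tag, the queue lock, and the hardware queue itself.
 */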
void
ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	if (!req_queue->is_task_mgmt)
		ufshci_req_sdb_cmd_desc_destroy(req_queue);

	if (hwq->act_tr) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			free(tr, M_UFSHCI);
		}
		free(hwq->act_tr, M_UFSHCI);
		hwq->act_tr = NULL;
	}

	if (hwq->utrd != NULL) {
		bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
		bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
		    hwq->queuemem_map);
		hwq->utrd = NULL;
	}

	if (hwq->dma_tag_queue) {
		bus_dma_tag_destroy(hwq->dma_tag_queue);
		hwq->dma_tag_queue = NULL;
	}

	if (mtx_initialized(&hwq->qlock))
		mtx_destroy(&hwq->qlock);

	free(hwq->ucd_bus_addr, M_UFSHCI);
	free(req_queue->hwq, M_UFSHCI);
}

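/* Single Doorbell mode has exactly one hardware queue, UFSHCI_SDB_Q. */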
struct ufshci_hw_queue *
ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
{
	return (&req_queue->hwq[UFSHCI_SDB_Q]);
}

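/*
 * Start the request list: check that the host controller reports the list
 * ready and that the doorbell (and, for transfer requests, the completion
 * notification register) is clear, then set the run-stop bit.
 */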
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	if (req_queue->is_task_mgmt) {
		uint32_t hcs, utmrldbr, utmrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP task management request list is not ready\n");
			return (ENXIO);
		}

		utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
		if (utmrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP task management request list doorbell is not clear\n");
			return (ENXIO);
		}

		utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
		ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
	} else {
		uint32_t hcs, utrldbr, utrlcnr, utrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list is not ready\n");
			return (ENXIO);
		}

		utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
		if (utrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list doorbell is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list doorbell\n");
			ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
		}

		utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
		if (utrlcnr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list completion notification is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list completion notification\n");
			ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
		}

		utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
		ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
	}

	return (0);
}

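/*
 * Find a free slot by linear scan; SDB queues are small (the UFSHCI spec
 * allows at most 32 transfer slots), so this is cheap. Returns EBUSY when
 * every tracker is in use.
 */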
int
ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	uint32_t i;

	for (i = 0; i < req_queue->num_entries; i++) {
		if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
			*tr = hwq->act_tr[i];
			(*tr)->hwq = hwq;
			return (0);
		}
	}
	return (EBUSY);
}

void
ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	/*
	 * NOP
	 * UTP Task Management does not have a Completion Notification
	 * Register.
	 */
}

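/*
 * Clear this tracker's bit in the UTP Transfer Request List Completion
 * Notification Register (UTRLCNR) by writing 1 to it.
 */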
void
ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrlcnr;

	utrlcnr = 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
}

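/*
 * Ring the task management doorbell: set this tracker's bit in UTMRLDBR to
 * hand the slot to the controller.
 */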
void
ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utmrldbr = 0;

	utmrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

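/*
 * Ring the transfer request doorbell: set this tracker's bit in UTRLDBR to
 * hand the slot to the controller.
 */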
void
ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrldbr = 0;

	utrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

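/*
 * The controller clears a doorbell bit once it has consumed the slot, so a
 * cleared bit means the request has completed (or was never issued).
 */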
bool
ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utmrldbr;

	utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
	return (!(utmrldbr & (1 << slot)));
}

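/* Transfer request variant of the doorbell-cleared test above. */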
bool
ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utrldbr;

	utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
	return (!(utrldbr & (1 << slot)));
}

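/*
 * Completion handling: sync the descriptor ring, then scan every slot and
 * complete each scheduled tracker whose doorbell bit the controller has
 * cleared. Returns true if at least one tracker completed.
 */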
bool
ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	uint8_t slot;
	bool done = false;

	hwq->num_intr_handler_calls++;

	bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (slot = 0; slot < req_queue->num_entries; slot++) {
		tr = hwq->act_tr[slot];

		KASSERT(tr, ("there is no tracker assigned to the slot"));
		/*
		 * When the response is delivered from the device, the doorbell
		 * is cleared.
		 */
		if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
		    req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
			slot)) {
			ufshci_req_queue_complete_tracker(tr);
			done = true;
		}
	}

	return (done);
}

int
ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
{
	/* TODO: Implement inflight I/O counting. */

	return (0);
}