xref: /freebsd/sys/dev/ufshci/ufshci_req_sdb.c (revision 4b15965daa99044daf184221b7c283bf7f2d7e66)
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

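/*
 * Legacy Single Doorbell (SDB) mode: one UTP Transfer Request List (and
 * one UTP Task Management Request List) whose slots are claimed, rung,
 * and completed through per-slot bits in the doorbell registers.
 */
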
static void
ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	/* act_tr may not have been allocated if construction failed early. */
	if (hwq->act_tr) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			bus_dmamap_destroy(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
			free(tr, M_UFSHCI);
		}
		free(hwq->act_tr, M_UFSHCI);
		hwq->act_tr = NULL;
	}

	if (req_queue->ucd) {
		bus_dmamap_unload(req_queue->dma_tag_ucd,
		    req_queue->ucdmem_map);
		bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
		    req_queue->ucdmem_map);
		req_queue->ucd = NULL;
	}

	if (req_queue->dma_tag_ucd) {
		bus_dma_tag_destroy(req_queue->dma_tag_ucd);
		req_queue->dma_tag_ucd = NULL;
	}

	if (req_queue->dma_tag_payload) {
		bus_dma_tag_destroy(req_queue->dma_tag_payload);
		req_queue->dma_tag_payload = NULL;
	}
}

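/*
 * bus_dmamap_load() callback for the UCD area: record the bus address of
 * each command-descriptor segment so that the trackers created in
 * ufshci_req_sdb_cmd_desc_construct() can be wired to their slots.
 */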
static void
ufshci_ucd_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_hw_queue *hwq = arg;
	int i;

	if (error != 0) {
		printf("ufshci: Failed to map UCD, error = %d\n", error);
		return;
	}

	if (hwq->num_trackers != nseg) {
		printf(
		    "ufshci: Failed to map UCD, num_trackers = %d, nseg = %d\n",
		    hwq->num_trackers, nseg);
		return;
	}

	for (i = 0; i < nseg; i++) {
		hwq->ucd_bus_addr[i] = seg[i].ds_addr;
	}
}

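/*
 * A UTP Command Descriptor (UCD) bundles the command UPIU, the response
 * UPIU area, and the PRDT describing the data buffer for a single slot.
 */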
static int
ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
    uint32_t num_entries, struct ufshci_controller *ctrlr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	size_t ucd_allocsz, payload_allocsz;
	uint8_t *ucdmem;
	int i, error;

	/*
	 * Each component must be page aligned, and individual PRDT entries
	 * cannot cross a page boundary.
	 */
	ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
	ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
	payload_allocsz = num_entries * ctrlr->max_xfer_size;

	/*
	 * Allocate physical memory for the UTP Command Descriptors (UCD).
	 * Note: the UFSHCI UCD format requires 128-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ucd_allocsz,
	    howmany(ucd_allocsz, sizeof(struct ufshci_utp_cmd_desc)),
	    sizeof(struct ufshci_utp_cmd_desc), 0, NULL, NULL,
	    &req_queue->dma_tag_ucd);
	if (error != 0) {
		ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
		ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
		goto out;
	}

	/*
	 * req_queue->ucd is not set until the load succeeds, so free the raw
	 * ucdmem allocation on failure.
	 */
	if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		ucdmem, ucd_allocsz, ufshci_ucd_map, hwq, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
		bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
		    req_queue->ucdmem_map);
		goto out;
	}

	req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;

	/*
	 * Allocate physical memory for the PRDT.
	 * Note: the UFSHCI PRDT format requires 8-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
	if (error != 0) {
		ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
		    error);
		goto out;
	}

	hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
	    req_queue->num_entries, M_UFSHCI,
	    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

	for (i = 0; i < req_queue->num_trackers; i++) {
		tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
		    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

		bus_dmamap_create(req_queue->dma_tag_payload, 0,
		    &tr->payload_dma_map);

		tr->req_queue = req_queue;
		tr->slot_num = i;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;

		/* Each tracker owns one 128-byte-aligned UCD slot. */
		tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
		tr->ucd_bus_addr = hwq->ucd_bus_addr[i];

		ucdmem += sizeof(struct ufshci_utp_cmd_desc);

		hwq->act_tr[i] = tr;
	}

	return (0);
out:
	ufshci_req_sdb_cmd_desc_destroy(req_queue);
	return (ENOMEM);
}

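/*
 * SDB mode has no completion queue: the controller reports completion by
 * clearing the slot's bit in the UTRLDBR doorbell register.
 */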
static bool
ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utrldbr;

	utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
	return (!(utrldbr & (1 << slot)));
}

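/*
 * Typical call sequence, as a sketch (the actual callers live in the
 * controller and request-queue code):
 *
 *	ufshci_req_sdb_construct(ctrlr, req_queue, 32, false);
 *	ufshci_req_sdb_enable(ctrlr, req_queue);
 *	...
 *	ufshci_req_sdb_reserve_slot(req_queue, &tr);
 *	ufshci_req_sdb_ring_doorbell(ctrlr, tr);
 *	...		(interrupt or poll)
 *	ufshci_req_sdb_process_cpl(req_queue);
 *	...
 *	ufshci_req_sdb_destroy(ctrlr, req_queue);
 */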
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
	struct ufshci_hw_queue *hwq;
	size_t allocsz;
	uint64_t queuemem_phys;
	uint8_t *queuemem;
	int error;

	req_queue->ctrlr = ctrlr;
	req_queue->is_task_mgmt = is_task_mgmt;
	req_queue->num_entries = num_entries;
	/*
	 * In Single Doorbell mode, the number of queue entries and the number
	 * of trackers are the same.
	 */
	req_queue->num_trackers = num_entries;

	/* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */
	req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
	    M_ZERO | M_WAITOK);
	hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	hwq->num_entries = req_queue->num_entries;
	hwq->num_trackers = req_queue->num_trackers;
	hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
	    req_queue->num_trackers, M_UFSHCI, M_ZERO | M_WAITOK);

	mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);

	/*
	 * Allocate physical memory for the request queue (UTP Transfer
	 * Request Descriptors (UTRD) or UTP Task Management Request
	 * Descriptors (UTMRD)).
	 * Note: the UTRD/UTMRD format requires 1024-byte alignment.
	 */
	allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
	if (error != 0) {
		ufshci_printf(ctrlr, "request queue tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
		ufshci_printf(ctrlr,
		    "failed to allocate request queue memory\n");
		goto out;
	}

	/*
	 * hwq->utrd is not set until the load succeeds, so free the raw
	 * queuemem allocation on failure.
	 */
	if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
		allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load request queue memory\n");
		bus_dmamem_free(hwq->dma_tag_queue, queuemem,
		    hwq->queuemem_map);
		goto out;
	}

	hwq->num_cmds = 0;
	hwq->num_intr_handler_calls = 0;
	hwq->num_retries = 0;
	hwq->num_failures = 0;
	hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
	hwq->req_queue_addr = queuemem_phys;

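	/*
	 * Program the 64-bit list base address: the low 32 bits go into the
	 * xxLBA register, the high 32 bits into the matching xxLBAU register.
	 */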
	if (is_task_mgmt) {
		/* UTP Task Management Request (UTMR) */
		uint32_t utmrlba, utmrlbau;

		utmrlba = hwq->req_queue_addr & 0xffffffff;
		utmrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
		ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
	} else {
		/* UTP Transfer Request (UTR) */
		uint32_t utrlba, utrlbau;

		/*
		 * Allocate physical memory for the command descriptors.
		 * UTP Transfer Requests (UTR) need a separate command
		 * descriptor area in addition to the queue itself.  On
		 * failure, ufshci_req_sdb_destroy() below unloads and frees
		 * the queue memory; freeing it here as well would be a
		 * double free.
		 */
		if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
			ctrlr) != 0) {
			ufshci_printf(ctrlr,
			    "failed to construct cmd descriptor memory\n");
			goto out;
		}

		utrlba = hwq->req_queue_addr & 0xffffffff;
		utrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
		ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
	}

	return (0);
out:
	ufshci_req_sdb_destroy(ctrlr, req_queue);
	return (ENOMEM);
}

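/*
 * Tear everything down in reverse order of construction.  Safe to call on
 * a partially constructed queue, since every step is guarded by a check.
 */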
void
ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];

	if (!req_queue->is_task_mgmt)
		ufshci_req_sdb_cmd_desc_destroy(req_queue);

	if (hwq->utrd != NULL) {
		bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
		bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
		    hwq->queuemem_map);
		hwq->utrd = NULL;
	}

	if (hwq->dma_tag_queue) {
		bus_dma_tag_destroy(hwq->dma_tag_queue);
		hwq->dma_tag_queue = NULL;
	}

	if (mtx_initialized(&hwq->qlock))
		mtx_destroy(&hwq->qlock);

	free(hwq->ucd_bus_addr, M_UFSHCI);
	free(req_queue->hwq, M_UFSHCI);
}

struct ufshci_hw_queue *
ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
{
	return (&req_queue->hwq[UFSHCI_SDB_Q]);
}

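/*
 * Start a request list: check the list-ready bit in HCS, make sure the
 * doorbell (and, for transfer requests, the completion notification
 * register) is clear, then set the list's run-stop bit.
 */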
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	if (req_queue->is_task_mgmt) {
		uint32_t hcs, utmrldbr, utmrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP task management request list is not ready\n");
			return (ENXIO);
		}

		utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
		if (utmrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP task management request list doorbell is not clear\n");
			return (ENXIO);
		}

		utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
		ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
	} else {
		uint32_t hcs, utrldbr, utrlcnr, utrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list is not ready\n");
			return (ENXIO);
		}

		utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
		if (utrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list doorbell is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list doorbell\n");
			ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
		}

		utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
		if (utrlcnr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list notification is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list notification\n");
			ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
		}

		utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
		ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
	}

	return (0);
}

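/*
 * Claim a free slot by linear scan.  UTRLDBR is a 32-bit register, so SDB
 * mode has at most 32 slots and the scan stays cheap.
 */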
int
ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	uint8_t i;

	for (i = 0; i < req_queue->num_entries; i++) {
		if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
			*tr = hwq->act_tr[i];
			(*tr)->hwq = hwq;
			return (0);
		}
	}
	return (EBUSY);
}

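/*
 * UTRLCNR is write-1-to-clear: writing the slot's bit acknowledges the
 * completion notification for that slot only.
 */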
void
ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrlcnr;

	utrlcnr = 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
}

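/*
 * Hand a slot to the controller.  UTRLDBR bits are set by writing '1' and
 * writing '0' has no effect, so no read-modify-write cycle is needed.
 */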
void
ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrldbr = 0;

	utrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

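/*
 * Poll every slot for completions: a command is done once its tracker is
 * in the SCHEDULED state and the controller has cleared the corresponding
 * doorbell bit.
 */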
bool
ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	uint8_t slot;
	bool done = false;

	hwq->num_intr_handler_calls++;

	bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (slot = 0; slot < req_queue->num_entries; slot++) {
		tr = hwq->act_tr[slot];

		KASSERT(tr, ("there is no tracker assigned to the slot"));
		/*
		 * When the response is delivered from the device, the doorbell
		 * is cleared.
		 */
		if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
		    ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
			slot)) {
			ufshci_req_queue_complete_tracker(tr);
			done = true;
		}
	}

	return (done);
}

int
ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
{
	/* TODO: Implement inflight I/O tracking. */

	return (0);
}
455