xref: /freebsd/sys/dev/ufshci/ufshci_req_sdb.c (revision ee3960cba1068e12fb032a68c46d74841d9edab3)
/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

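/*
 * Tear down the UTP Command Descriptor (UCD) resources owned by a Single
 * Doorbell request queue: the per-slot trackers and their payload DMA maps,
 * the tracker pointer array, and the UCD DMA memory and tags. Each resource
 * is checked before it is released, so this is safe to call to unwind a
 * partially completed construction.
 */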
static void
ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	int i;

	if (hwq->act_tr != NULL) {
		for (i = 0; i < req_queue->num_trackers; i++) {
			tr = hwq->act_tr[i];
			if (tr == NULL)
				continue;
			bus_dmamap_destroy(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
			free(tr, M_UFSHCI);
		}
		free(hwq->act_tr, M_UFSHCI);
		hwq->act_tr = NULL;
	}

	if (req_queue->ucd) {
		bus_dmamap_unload(req_queue->dma_tag_ucd,
		    req_queue->ucdmem_map);
		bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
		    req_queue->ucdmem_map);
		req_queue->ucd = NULL;
	}

	if (req_queue->dma_tag_ucd) {
		bus_dma_tag_destroy(req_queue->dma_tag_ucd);
		req_queue->dma_tag_ucd = NULL;
	}

	if (req_queue->dma_tag_payload) {
		bus_dma_tag_destroy(req_queue->dma_tag_payload);
		req_queue->dma_tag_payload = NULL;
	}
}

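/*
 * Allocate and map the UCD DMA memory and the per-slot trackers for a
 * Single Doorbell queue. Each of the num_entries slots receives one
 * 128-byte-aligned UTP Command Descriptor and a payload DMA map created
 * from a PRDT-capable tag. Returns 0 on success or ENOMEM, with partial
 * allocations released via ufshci_req_sdb_cmd_desc_destroy().
 */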
static int
ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
    uint32_t num_entries, struct ufshci_controller *ctrlr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	size_t ucd_allocsz, payload_allocsz;
	uint64_t ucdmem_phys;
	uint8_t *ucdmem;
	int i, error;

	/*
	 * Each component must be page aligned, and individual PRDT segments
	 * cannot cross a page boundary.
	 */
	ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
	ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
	payload_allocsz = num_entries * ctrlr->max_xfer_size;

	/*
	 * Allocate physical memory for the UTP Command Descriptors (UCD).
	 * Note: The UFSHCI UCD format requires 128-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    ucd_allocsz, howmany(ucd_allocsz, ctrlr->page_size),
	    ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_ucd);
	if (error != 0) {
		ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
		ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
		goto out;
	}

	if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		ucdmem, ucd_allocsz, ufshci_single_map, &ucdmem_phys, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
		bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
		    req_queue->ucdmem_map);
		goto out;
	}

	req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
	req_queue->ucd_addr = ucdmem_phys;

	/*
	 * Allocate physical memory for the PRDT.
	 * Note: The UFSHCI PRDT format requires 8-byte alignment.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
	    ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
	if (error != 0) {
		ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
		    error);
		goto out;
	}

	hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
	    req_queue->num_entries, M_UFSHCI,
	    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

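	/*
	 * Carve the UCD memory into per-slot trackers: slot i owns the i-th
	 * UTP Command Descriptor, advancing both the KVA and the bus address
	 * by sizeof(struct ufshci_utp_cmd_desc) per slot.
	 */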
	for (i = 0; i < req_queue->num_trackers; i++) {
		tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
		    DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);

		bus_dmamap_create(req_queue->dma_tag_payload, 0,
		    &tr->payload_dma_map);

		tr->req_queue = req_queue;
		tr->slot_num = i;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;

		tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
		tr->ucd_bus_addr = ucdmem_phys;

		ucdmem += sizeof(struct ufshci_utp_cmd_desc);
		ucdmem_phys += sizeof(struct ufshci_utp_cmd_desc);

		hwq->act_tr[i] = tr;
	}

	return (0);
out:
	ufshci_req_sdb_cmd_desc_destroy(req_queue);
	return (ENOMEM);
}

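/*
 * In Single Doorbell mode the controller clears a slot's UTRLDBR bit when
 * the corresponding request completes, so a cleared bit means the slot's
 * request is no longer outstanding.
 */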
static bool
ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
    uint8_t slot)
{
	uint32_t utrldbr;

	utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
	return (!(utrldbr & (1 << slot)));
}

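/*
 * Construct a Single Doorbell request queue: allocate the UTRD/UTMRD ring,
 * program its base address into the host controller, and, for the transfer
 * queue, build the accompanying command descriptors. Returns 0 on success
 * or ENOMEM.
 */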
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
	struct ufshci_hw_queue *hwq;
	size_t allocsz;
	uint64_t queuemem_phys;
	uint8_t *queuemem;
	int error;

	req_queue->ctrlr = ctrlr;
	req_queue->is_task_mgmt = is_task_mgmt;
	req_queue->num_entries = num_entries;
	/*
	 * In Single Doorbell mode, the number of queue entries equals the
	 * number of trackers.
	 */
	req_queue->num_trackers = num_entries;

	/* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */
	req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
	    M_ZERO | M_WAITOK);
	hwq = &req_queue->hwq[UFSHCI_SDB_Q];

	mtx_init(&hwq->qlock, "ufshci req_queue lock", NULL, MTX_DEF);

	/*
	 * Allocate physical memory for the request queue (UTP Transfer
	 * Request Descriptors (UTRD) or UTP Task Management Request
	 * Descriptors (UTMRD)).
	 * Note: The UTRD/UTMRD list base requires 1024-byte alignment.
	 */
	allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
	error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
	    ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
	if (error != 0) {
		ufshci_printf(ctrlr, "request queue tag create failed %d\n",
		    error);
		goto out;
	}

	if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
		BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
		ufshci_printf(ctrlr,
		    "failed to allocate request queue memory\n");
		goto out;
	}

	if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
		allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
		ufshci_printf(ctrlr, "failed to load request queue memory\n");
		bus_dmamem_free(hwq->dma_tag_queue, queuemem,
		    hwq->queuemem_map);
		goto out;
	}

	hwq->num_cmds = 0;
	hwq->num_intr_handler_calls = 0;
	hwq->num_retries = 0;
	hwq->num_failures = 0;
	hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
	hwq->req_queue_addr = queuemem_phys;

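	/*
	 * Program the list base address into the host controller: the 64-bit
	 * bus address is split across the UTMRLBA/UTMRLBAU (task management)
	 * or UTRLBA/UTRLBAU (transfer) register pairs.
	 */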
	if (is_task_mgmt) {
		/* UTP Task Management Request (UTMR) */
		uint32_t utmrlba, utmrlbau;

		utmrlba = hwq->req_queue_addr & 0xffffffff;
		utmrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
		ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
	} else {
		/* UTP Transfer Request (UTR) */
		uint32_t utrlba, utrlbau;

		/*
		 * Allocate physical memory for the command descriptors.
		 * UTP Transfer Requests (UTR) need per-slot command
		 * descriptors in addition to the request list itself.
		 */
		if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
			ctrlr) != 0) {
			ufshci_printf(ctrlr,
			    "failed to construct cmd descriptor memory\n");
			/* The queue memory is released in the out path. */
			goto out;
		}

		utrlba = hwq->req_queue_addr & 0xffffffff;
		utrlbau = hwq->req_queue_addr >> 32;
		ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
		ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
	}

	return (0);
out:
	ufshci_req_sdb_destroy(ctrlr, req_queue);
	return (ENOMEM);
}

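/*
 * Release everything ufshci_req_sdb_construct() allocated: the command
 * descriptors (transfer queues only), the UTRD/UTMRD ring and its DMA tag,
 * the queue lock, and the hardware queue structure itself. Also used to
 * unwind a failed construction.
 */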
void
ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];

	if (!req_queue->is_task_mgmt)
		ufshci_req_sdb_cmd_desc_destroy(req_queue);

	if (hwq->utrd != NULL) {
		bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
		bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
		    hwq->queuemem_map);
		hwq->utrd = NULL;
	}

	if (hwq->dma_tag_queue) {
		bus_dma_tag_destroy(hwq->dma_tag_queue);
		hwq->dma_tag_queue = NULL;
	}

	if (mtx_initialized(&hwq->qlock))
		mtx_destroy(&hwq->qlock);

	free(req_queue->hwq, M_UFSHCI);
	req_queue->hwq = NULL;
}

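/* Single Doorbell mode has exactly one hardware queue, UFSHCI_SDB_Q. */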
struct ufshci_hw_queue *
ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
{
	return (&req_queue->hwq[UFSHCI_SDB_Q]);
}

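/*
 * Start the request list: verify that the controller reports the list as
 * ready (HCS.UTMRLRDY/HCS.UTRLRDY) and that no doorbell or completion
 * notification bits are left set, then set the run-stop bit
 * (UTMRLRSR/UTRLRSR) so the controller begins processing requests.
 */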
int
ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	if (req_queue->is_task_mgmt) {
		uint32_t hcs, utmrldbr, utmrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP task management request list is not ready\n");
			return (ENXIO);
		}

		utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
		if (utmrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP task management request list doorbell is not clear\n");
			return (ENXIO);
		}

		utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
		ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
	} else {
		uint32_t hcs, utrldbr, utrlcnr, utrlrsr;

		hcs = ufshci_mmio_read_4(ctrlr, hcs);
		if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list is not ready\n");
			return (ENXIO);
		}

		utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
		if (utrldbr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list doorbell is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list doorbell\n");
			ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
		}

		utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
		if (utrlcnr != 0) {
			ufshci_printf(ctrlr,
			    "UTP transfer request list notification is not clear\n");
			ufshci_printf(ctrlr,
			    "Clearing the UTP transfer request list notification\n");
			ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
		}

		utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
		ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
	}

	return (0);
}

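/*
 * Find a free tracker by linear scan over the slots. The slot state is not
 * changed here; the caller transitions the returned tracker out of
 * UFSHCI_SLOT_STATE_FREE. Returns EBUSY when every slot is in use.
 */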
int
ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker **tr)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	uint8_t i;

	for (i = 0; i < req_queue->num_entries; i++) {
		if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
			*tr = hwq->act_tr[i];
			(*tr)->hwq = hwq;
			return (0);
		}
	}
	return (EBUSY);
}

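/*
 * UTRLCNR is write-1-to-clear: writing the slot's bit acknowledges the
 * completion notification for that slot.
 */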
void
ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrlcnr;

	utrlcnr = 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
}

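/*
 * Ring the doorbell for a scheduled tracker by setting its slot bit in
 * UTRLDBR, handing the slot's UTRD to the controller for execution.
 */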
void
ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
    struct ufshci_tracker *tr)
{
	uint32_t utrldbr = 0;

	utrldbr |= 1 << tr->slot_num;
	ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);

	tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
}

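/*
 * Completion handling for Single Doorbell mode: sync the descriptor ring,
 * then complete every scheduled tracker whose doorbell bit the controller
 * has cleared. Returns true if at least one tracker was completed.
 */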
bool
ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
	struct ufshci_tracker *tr;
	uint8_t slot;
	bool done = false;

	hwq->num_intr_handler_calls++;

	bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (slot = 0; slot < req_queue->num_entries; slot++) {
		tr = hwq->act_tr[slot];

		KASSERT(tr != NULL,
		    ("there is no tracker assigned to the slot"));
		/*
		 * When the response is delivered from the device, the
		 * controller clears the corresponding doorbell bit.
		 */
		if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
		    ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
			slot)) {
			ufshci_req_queue_complete_tracker(tr);
			done = true;
		}
	}

	return (done);
}

int
ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
{
	/* TODO: Implement in-flight I/O counting. */

	return (0);
}
428