xref: /freebsd/sys/dev/ufshci/ufshci_ctrlr.c (revision 4ba91e076ee84101112d8296785098ae31dac35e)
1 /*-
2  * Copyright (c) 2025, Samsung Electronics Co., Ltd.
3  * Written by Jaeyoon Choi
4  *
5  * SPDX-License-Identifier: BSD-2-Clause
6  */
7 
8 #include <sys/param.h>
9 #include <sys/bus.h>
10 #include <sys/conf.h>
11 
12 #include "ufshci_private.h"
13 #include "ufshci_reg.h"
14 
/*
 * Bring the host controller into the enabled state by toggling the HCE
 * (Host Controller Enable) bit and polling until the hardware reports it
 * as set.
 *
 * Returns 0 on success, or ENXIO if HCE does not come up within
 * ctrlr->device_init_timeout_in_ms.
 */
static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	/* Absolute deadline, in ticks, for the HCE bit to read back as 1. */
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	/* Poll interval: starts at 1us, grows 1.5x per retry, capped at 1ms. */
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/*
	 * If UFS host controller is already enabled, disable it.
	 * NOTE(review): no wait for HCE to read back 0 before re-enabling —
	 * confirm the controller tolerates an immediate disable/enable pair.
	 */
	if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
		hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
		ufshci_mmio_write_4(ctrlr, hce, hce);
	}

	/* Enable UFS host controller */
	hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
	ufshci_mmio_write_4(ctrlr, hce, hce);

	/*
	 * During the controller initialization, the value of the HCE bit is
	 * unstable, so we need to read the HCE value after some time after
	 * initialization is complete.
	 */
	pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));

	/* Wait for the HCE flag to change */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
		/* Subtraction keeps the comparison correct across tick wrap. */
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to enable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

		/* Back off exponentially between polls. */
		pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}
60 
/*
 * One-time construction of the controller software state and hardware
 * bring-up: read version/capabilities, enable the host controller, start
 * the UniPro link, and allocate the UTMR/UTR request queues.
 *
 * Returns 0 on success or an errno on failure.  On failure the mutexes
 * initialized here are torn down by ufshci_ctrlr_destruct() via the
 * caller's detach path.
 */
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
	uint32_t ver, cap, hcs, ie;
	uint32_t timeout_period, retry_count;
	int error;

	ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
	ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
	ctrlr->dev = dev;
	ctrlr->sc_unit = device_get_unit(dev);

	snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
	    device_get_nameunit(dev));

	mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
	    MTX_DEF | MTX_RECURSE);

	mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
	    MTX_DEF);

	/* Report the UFSHCI spec version implemented by the controller. */
	ver = ufshci_mmio_read_4(ctrlr, ver);
	ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
	ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
	ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
	    ctrlr->minor_version);

	/* Read Device Capabilities */
	ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
	ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
	/*
	 * TODO: This driver does not yet support multi-queue.
	 * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
	 * multi-queue support is available.
	 */
	ctrlr->is_mcq_supported = false;
	/* Fail if neither legacy single doorbell nor MCQ can be used. */
	if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
		return (ENXIO);
	/*
	 * The maximum transfer size supported by UFSHCI spec is 65535 * 256 KiB
	 * However, we limit the maximum transfer size to 1MiB(256 * 4KiB) for
	 * performance reason.
	 */
	ctrlr->page_size = PAGE_SIZE;
	ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;

	/* Clamp the tunable I/O timeout into the supported range. */
	timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
	ctrlr->retry_count = retry_count;

	/* Disable all interrupts */
	ufshci_mmio_write_4(ctrlr, ie, 0);

	/* Enable Host Controller */
	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/*
	 * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
	 * controller has successfully received a Link Startup UIC command
	 * response and the UFS device has found a physical link to the
	 * controller.
	 */
	hcs = ufshci_mmio_read_4(ctrlr, hcs);
	if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
		ufshci_printf(ctrlr, "UFS device not found\n");
		return (ENXIO);
	}

	/* Enable additional interrupts by programming the IE register. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE);  /* UTR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_UEE);    /* UIC Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_DFEE);   /* Device Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE);  /* UTP Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE);  /* Host Ctrlr Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE);  /* System Bus Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE);  /* Crypto Engine Fatal Error */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */

	/* Allocate and initialize UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* TODO: Separate IO and Admin slot */
	/* max_hw_pend_io is the number of slots in the transfer_req_queue */
	ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;

	return (0);
}
172 
/*
 * Tear down the controller in the reverse order of construction:
 * queues, interrupt hookup, IRQ resource, SIM, memory BAR, then mutexes.
 * Safe to call when attach failed early: if the memory resource was never
 * mapped, only the mutexes are destroyed.
 */
void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
	/* Nothing beyond the mutexes exists if the BAR was never mapped. */
	if (ctrlr->resource == NULL)
		goto nores;

	/* TODO: Flush In-flight IOs */

	/* Release resources */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	/* Detach the CAM SIM under the softc lock. */
	mtx_lock(&ctrlr->sc_mtx);

	ufshci_sim_detach(ctrlr);

	mtx_unlock(&ctrlr->sc_mtx);

	bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
	    ctrlr->resource);
nores:
	mtx_destroy(&ctrlr->uic_cmd_lock);
	mtx_destroy(&ctrlr->sc_mtx);

	return;
}
206 
/*
 * Reset the controller: tear down the request queues, re-enable the host
 * controller and restart the link, then rebuild the queues and restore
 * the saved interrupt-enable mask.
 *
 * Returns 0 on success or an errno on failure.
 * NOTE(review): on the early error returns interrupts remain disabled and
 * the queues remain destroyed — confirm callers treat a failed reset as
 * fatal for the controller.
 */
int
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
	uint32_t ie;
	int error;

	/* Backup and disable all interrupts */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ufshci_mmio_write_4(ctrlr, ie, 0);

	/* Release resources */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	/* Reset Host Controller */
	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/* Enable interrupts (restore the mask saved above) */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* Allocate and initialize UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	return (0);
}
246 
247 int
248 ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
249     struct ufshci_request *req)
250 {
251 	return (
252 	    ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
253 		/*is_admin*/ false));
254 }
255 
256 int
257 ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
258     struct ufshci_request *req)
259 {
260 	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
261 	    /*is_admin*/ true));
262 }
263 
264 int
265 ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
266     struct ufshci_request *req)
267 {
268 	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
269 	    /*is_admin*/ false));
270 }
271 
272 int
273 ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
274 {
275 	struct ufshci_completion_poll_status status;
276 
277 	status.done = 0;
278 	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
279 	ufshci_completion_poll(&status);
280 	if (status.error) {
281 		ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
282 		return (ENXIO);
283 	}
284 
285 	return (0);
286 }
287 
/*
 * Mark the controller as failed and fail all pending requests on the
 * transfer queue.
 *
 * NOTE(review): the admin_also parameter is currently unused — the
 * transfer queue (which carries admin requests too) is always failed,
 * and the task management queue is not yet handled (see TODO).
 */
static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
{
	printf("ufshci(4): ufshci_ctrlr_fail\n");

	ctrlr->is_failed = true;

	/* TODO: task_mgmt_req_queue should be handled as fail */

	ufshci_req_queue_fail(ctrlr,
	    &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
}
300 
/*
 * Post-queue-enable device bring-up sequence: verify the device answers a
 * NOP, initialize the UFS device, clocks, UniPro and power modes, fetch
 * the device descriptors, and finally attach the CAM SIM.  Any failure
 * marks the controller failed and aborts the sequence.
 */
static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
{
	TSENTER();

	/* Sanity-check the data path with a NOP OUT/NOP IN exchange. */
	if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize UFS target device */
	if (ufshci_dev_init(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize Reference Clock */
	if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize unipro */
	if (ufshci_dev_init_unipro(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/*
	 * Initialize UIC Power Mode
	 * QEMU UFS devices do not support unipro and power mode.
	 */
	if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
	    ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize UFS Power Mode */
	if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Read Controller Descriptor (Device, Geometry)*/
	if (ufshci_dev_get_descriptor(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* TODO: Configure Write Protect */

	/* TODO: Configure Background Operations */

	/* TODO: Configure Write Booster */

	/* Expose the device to CAM. */
	if (ufshci_sim_attach(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	TSEXIT();
}
364 
365 void
366 ufshci_ctrlr_start_config_hook(void *arg)
367 {
368 	struct ufshci_controller *ctrlr = arg;
369 
370 	TSENTER();
371 
372 	if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
373 	    ufshci_utr_req_queue_enable(ctrlr) == 0)
374 		ufshci_ctrlr_start(ctrlr);
375 	else
376 		ufshci_ctrlr_fail(ctrlr, false);
377 
378 	ufshci_sysctl_initialize_ctrlr(ctrlr);
379 	config_intrhook_disestablish(&ctrlr->config_hook);
380 
381 	TSEXIT();
382 }
383 
384 /*
385  * Poll all the queues enabled on the device for completion.
386  */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
	uint32_t is;

	/* Interrupt Status register; bits are cleared by writing 1 back. */
	is = ufshci_mmio_read_4(ctrlr, is);

	/* UIC error */
	if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
		uint32_t uecpa, uecdl, uecn, uect, uecdme;

		/* UECPA for Host UIC Error Code within PHY Adapter Layer */
		uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
		if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
			ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
		}
		/* UECDL for Host UIC Error Code within Data Link Layer */
		uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
		if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
		}
		/* UECN for Host UIC Error Code within Network Layer */
		uecn = ufshci_mmio_read_4(ctrlr, uecn);
		if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
			ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
		}
		/* UECT for Host UIC Error Code within Transport Layer */
		uect = ufshci_mmio_read_4(ctrlr, uect);
		if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
			ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECT_REG_EC, uect));
		}
		/* UECDME for Host UIC Error Code within DME subcomponent */
		uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
		if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
		}
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
	}
	/* Device Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
		ufshci_printf(ctrlr, "Device fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
	}
	/* UTP Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
		ufshci_printf(ctrlr, "UTP error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
	}
	/* Host Controller Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
		ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
	}
	/* System Bus Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
		ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
	}
	/* Crypto Engine Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
		ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
	}
	/*
	 * Completion statuses: acknowledge the status bit first, then drain
	 * the queue, so a completion arriving in between re-raises the bit.
	 */
	/* UTP Task Management Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->task_mgmt_req_queue);
	}
	/* UTP Transfer Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->transfer_req_queue);
	}
	/* MCQ CQ Event Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
		/* TODO: We need to process completion Queue Pairs */
		ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
	}
}
474 
/*
 * Interrupt handler for the shared/single-vector case: there is only one
 * vector, so service every queue by polling the controller.  Note that
 * interrupts are not masked here while polling.
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
	/* Delegate all queue servicing to the common poll routine. */
	ufshci_ctrlr_poll((struct ufshci_controller *)arg);
}
487 
/*
 * Dump the controller's identification, interrupt, status, and UIC error
 * registers to the console for debugging.
 */
void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
	ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");

	UFSHCI_DUMP_REG(ctrlr, cap);
	UFSHCI_DUMP_REG(ctrlr, mcqcap);
	UFSHCI_DUMP_REG(ctrlr, ver);
	UFSHCI_DUMP_REG(ctrlr, ext_cap);
	UFSHCI_DUMP_REG(ctrlr, hcpid);
	UFSHCI_DUMP_REG(ctrlr, hcmid);
	UFSHCI_DUMP_REG(ctrlr, ahit);
	UFSHCI_DUMP_REG(ctrlr, is);
	UFSHCI_DUMP_REG(ctrlr, ie);
	UFSHCI_DUMP_REG(ctrlr, hcsext);
	UFSHCI_DUMP_REG(ctrlr, hcs);
	UFSHCI_DUMP_REG(ctrlr, hce);
	UFSHCI_DUMP_REG(ctrlr, uecpa);
	UFSHCI_DUMP_REG(ctrlr, uecdl);
	UFSHCI_DUMP_REG(ctrlr, uecn);
	UFSHCI_DUMP_REG(ctrlr, uect);
	UFSHCI_DUMP_REG(ctrlr, uecdme);

	ufshci_printf(ctrlr, "========================================\n");
}
513