/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint32_t hce;

        hce = ufshci_mmio_read_4(ctrlr, hce);

        /* If UFS host controller is already enabled, disable it. */
        if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
                hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
                ufshci_mmio_write_4(ctrlr, hce, hce);
        }

        /* Enable UFS host controller */
        hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
        ufshci_mmio_write_4(ctrlr, hce, hce);

        /*
         * During controller initialization the value of the HCE bit is
         * unstable, so wait briefly before reading the HCE value back.
         */
        pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));

        /* Wait for the HCE flag to change */
        while (1) {
                hce = ufshci_mmio_read_4(ctrlr, hce);
                if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
                        break;
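                /* Compare by subtraction so the check survives tick wraparound. */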
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "host controller failed to enable "
                            "within %d ms\n",
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

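                /* Back off exponentially (x1.5 per retry), capped at 1 ms. */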
                pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
        uint32_t ver, cap, hcs, ie;
        uint32_t timeout_period, retry_count;
        int error;

        ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
        ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
        ctrlr->dev = dev;
        ctrlr->sc_unit = device_get_unit(dev);

        snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
            device_get_nameunit(dev));

        mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
            MTX_DEF | MTX_RECURSE);

        mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
            MTX_DEF);

        ver = ufshci_mmio_read_4(ctrlr, ver);
        ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
        ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
        ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
            ctrlr->minor_version);

        /* Read Device Capabilities */
        ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
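        /* CAP.LSDBS == 0 means the legacy single doorbell mode is supported. */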
        ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
        /*
         * TODO: This driver does not yet support multi-queue.
         * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
         * multi-queue support is available.
         */
        ctrlr->is_mcq_supported = false;
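        /* Fail if neither the legacy single doorbell mode nor MCQ is available. */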
        if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
                return (ENXIO);
        /*
         * The maximum transfer size supported by the UFSHCI spec is
         * 65535 * 256 KiB. However, we limit the maximum transfer size to
         * 1 MiB (256 * 4 KiB) for performance reasons.
         */
        ctrlr->page_size = PAGE_SIZE;
        ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;

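        /* Fetch the timeout-period tunable and clamp it to its legal range. */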
        timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
        TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
        timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
        timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
        ctrlr->timeout_period = timeout_period;

        retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
        TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
        ctrlr->retry_count = retry_count;

        /* Disable all interrupts */
        ufshci_mmio_write_4(ctrlr, ie, 0);

        /* Enable Host Controller */
        error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
        if (error)
                return (error);

        /* Send DME_LINKSTARTUP command to start the link startup procedure */
        error = ufshci_uic_send_dme_link_startup(ctrlr);
        if (error)
                return (error);

        /*
         * The device_present(UFSHCI_HCS_REG_DP) bit becomes true once the
         * host controller has successfully received a Link Startup UIC
         * command response and the UFS device has established a physical
         * link to the controller.
         */
        hcs = ufshci_mmio_read_4(ctrlr, hcs);
        if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
                ufshci_printf(ctrlr, "UFS device not found\n");
                return (ENXIO);
        }

        /* Enable additional interrupts by programming the IE register. */
        ie = ufshci_mmio_read_4(ctrlr, ie);
        ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE);  /* UTR Completion */
        ie |= UFSHCIM(UFSHCI_IE_REG_UEE);    /* UIC Error */
        ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
        ie |= UFSHCIM(UFSHCI_IE_REG_DFEE);   /* Device Fatal Error */
        ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE);  /* UTP Error */
        ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE);  /* Host Ctrlr Fatal Error */
        ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE);  /* System Bus Fatal Error */
        ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE);  /* Crypto Engine Fatal Error */
        ufshci_mmio_write_4(ctrlr, ie, ie);

        /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */

        /* Allocate and initialize UTP Task Management Request List. */
        error = ufshci_utm_req_queue_construct(ctrlr);
        if (error)
                return (error);

        /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
        error = ufshci_ut_req_queue_construct(ctrlr);
        if (error)
                return (error);

        /* TODO: Separate IO and Admin slot */
        /* max_hw_pend_io is the number of slots in the transfer_req_queue */
        ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries;

        return (0);
}

void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
        if (ctrlr->resource == NULL)
                goto nores;

        /* TODO: Flush In-flight IOs */

        /* Release resources */
        ufshci_utm_req_queue_destroy(ctrlr);
        ufshci_ut_req_queue_destroy(ctrlr);

        if (ctrlr->tag)
                bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

        if (ctrlr->res)
                bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
                    rman_get_rid(ctrlr->res), ctrlr->res);

        mtx_lock(&ctrlr->sc_mtx);

        ufshci_sim_detach(ctrlr);

        mtx_unlock(&ctrlr->sc_mtx);

        bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
            ctrlr->resource);
nores:
        mtx_destroy(&ctrlr->uic_cmd_lock);
        mtx_destroy(&ctrlr->sc_mtx);

        return;
}

int
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
        uint32_t ie;
        int error;

        /* Backup and disable all interrupts */
        ie = ufshci_mmio_read_4(ctrlr, ie);
        ufshci_mmio_write_4(ctrlr, ie, 0);

        /* Release resources */
        ufshci_utm_req_queue_destroy(ctrlr);
        ufshci_ut_req_queue_destroy(ctrlr);

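        /*
         * ufshci_ctrlr_enable_host_ctrlr() clears HCE before setting it,
         * which performs a full host controller reset.
         */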
        /* Reset Host Controller */
        error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
        if (error)
                return (error);

        /* Send DME_LINKSTARTUP command to start the link startup procedure */
        error = ufshci_uic_send_dme_link_startup(ctrlr);
        if (error)
                return (error);

        /* Enable interrupts */
        ufshci_mmio_write_4(ctrlr, ie, ie);

        /* Allocate and initialize UTP Task Management Request List. */
        error = ufshci_utm_req_queue_construct(ctrlr);
        if (error)
                return (error);

        /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
        error = ufshci_ut_req_queue_construct(ctrlr);
        if (error)
                return (error);

        return (0);
}

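/*
 * Admin and I/O requests currently share the single transfer request queue;
 * see the "Separate IO and Admin slot" TODO in ufshci_ctrlr_construct().
 */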
int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
        return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
            /*is_admin*/ true));
}

int
ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
        return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
            /*is_admin*/ false));
}

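/*
 * Verify that the device is responsive by issuing a NOP OUT UPIU and
 * polling for its NOP IN completion.
 */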
int
ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
{
        struct ufshci_completion_poll_status status;

        status.done = 0;
        ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
                return (ENXIO);
        }

        return (0);
}

static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
{
        printf("ufshci(4): ufshci_ctrlr_fail\n");

        ctrlr->is_failed = true;

        /* TODO: Requests in task_mgmt_req_queue should also be failed. */

        ufshci_req_queue_fail(ctrlr,
            &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
}

static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
{
        TSENTER();

        if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* Initialize UFS target device */
        if (ufshci_dev_init(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* Initialize Reference Clock */
        if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* Initialize UniPro */
        if (ufshci_dev_init_unipro(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /*
         * Initialize UIC Power Mode
         * QEMU UFS devices do not support UniPro or power mode changes.
         */
        if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
            ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* Initialize UFS Power Mode */
        if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* Read Controller Descriptor (Device, Geometry) */
        if (ufshci_dev_get_descriptor(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        /* TODO: Configure Write Protect */

        /* TODO: Configure Background Operations */

        /* TODO: Configure Write Booster */

        if (ufshci_sim_attach(ctrlr) != 0) {
                ufshci_ctrlr_fail(ctrlr, false);
                return;
        }

        TSEXIT();
}

void
ufshci_ctrlr_start_config_hook(void *arg)
{
        struct ufshci_controller *ctrlr = arg;

        TSENTER();

        if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
            ufshci_ut_req_queue_enable(ctrlr) == 0)
                ufshci_ctrlr_start(ctrlr);
        else
                ufshci_ctrlr_fail(ctrlr, false);

        ufshci_sysctl_initialize_ctrlr(ctrlr);
        config_intrhook_disestablish(&ctrlr->config_hook);

        TSEXIT();
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
        uint32_t is;

        is = ufshci_mmio_read_4(ctrlr, is);

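        /* Interrupt status bits are write-1-to-clear; ack each as handled. */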
        /* UIC error */
        if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
                uint32_t uecpa, uecdl, uecn, uect, uecdme;

                /* UECPA for Host UIC Error Code within PHY Adapter Layer */
                uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
                if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
                        ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
                            UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
                }
                /* UECDL for Host UIC Error Code within Data Link Layer */
                uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
                if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
                        ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
                            UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
                }
                /* UECN for Host UIC Error Code within Network Layer */
                uecn = ufshci_mmio_read_4(ctrlr, uecn);
                if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
                        ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
                            UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
                }
                /* UECT for Host UIC Error Code within Transport Layer */
                uect = ufshci_mmio_read_4(ctrlr, uect);
                if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
                        ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
                            UFSHCIV(UFSHCI_UECT_REG_EC, uect));
                }
                /* UECDME for Host UIC Error Code within DME subcomponent */
                uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
                if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
                        ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
                            UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
                }
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
        }
        /* Device Fatal Error Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
                ufshci_printf(ctrlr, "Device fatal error on ISR\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
        }
        /* UTP Error Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
                ufshci_printf(ctrlr, "UTP error on ISR\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
        }
        /* Host Controller Fatal Error Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
                ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
        }
        /* System Bus Fatal Error Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
                ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
        }
        /* Crypto Engine Fatal Error Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
                ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
        }
        /* UTP Task Management Request Completion Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
                ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
                /* TODO: Implement UTMR completion */
        }
        /* UTP Transfer Request Completion Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
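                /*
                 * Clear UTRCS before draining the queue so completions that
                 * arrive during processing re-assert the interrupt.
                 */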
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
                ufshci_req_queue_process_completions(
                    &ctrlr->transfer_req_queue);
        }
        /* MCQ CQ Event Status */
        if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
                /* TODO: Process the MCQ completion queues. */
                ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
                ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
        }
}

/*
 * Poll the single-vector interrupt case: num_io_queues will be 1 and
 * there's only a single vector. While we're polling, we mask further
 * interrupts in the controller.
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
        struct ufshci_controller *ctrlr = arg;

        ufshci_ctrlr_poll(ctrlr);
}

void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
        ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");

        UFSHCI_DUMP_REG(ctrlr, cap);
        UFSHCI_DUMP_REG(ctrlr, mcqcap);
        UFSHCI_DUMP_REG(ctrlr, ver);
        UFSHCI_DUMP_REG(ctrlr, ext_cap);
        UFSHCI_DUMP_REG(ctrlr, hcpid);
        UFSHCI_DUMP_REG(ctrlr, hcmid);
        UFSHCI_DUMP_REG(ctrlr, ahit);
        UFSHCI_DUMP_REG(ctrlr, is);
        UFSHCI_DUMP_REG(ctrlr, ie);
        UFSHCI_DUMP_REG(ctrlr, hcsext);
        UFSHCI_DUMP_REG(ctrlr, hcs);
        UFSHCI_DUMP_REG(ctrlr, hce);
        UFSHCI_DUMP_REG(ctrlr, uecpa);
        UFSHCI_DUMP_REG(ctrlr, uecdl);
        UFSHCI_DUMP_REG(ctrlr, uecn);
        UFSHCI_DUMP_REG(ctrlr, uect);
        UFSHCI_DUMP_REG(ctrlr, uecdme);

        ufshci_printf(ctrlr, "========================================\n");
}