/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

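/*
 * Mark the controller as failed and fail all outstanding requests on
 * both the task management and transfer request queues.
 */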
static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
{
	ctrlr->is_failed = true;

	ufshci_req_queue_fail(ctrlr,
	    ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
		&ctrlr->task_mgmt_req_queue));
	ufshci_req_queue_fail(ctrlr,
	    ctrlr->transfer_req_queue.qops.get_hw_queue(
		&ctrlr->transfer_req_queue));
}

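/*
 * Bring the controller and the attached UFS device to an operational
 * state. Called from both the initial config hook and the reset path;
 * any failure marks the controller as failed.
 */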
static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
{
	TSENTER();

	/*
	 * If `resetting` is true, we are on the reset path.
	 * Re-enable request queues here because ufshci_ctrlr_reset_task()
	 * disables them during reset.
	 */
	if (resetting) {
		if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
			ufshci_ctrlr_fail(ctrlr);
			return;
		}
		if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
			ufshci_ctrlr_fail(ctrlr);
			return;
		}
	}

	if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the UFS target device */
	if (ufshci_dev_init(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the reference clock */
	if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize UniPro */
	if (ufshci_dev_init_unipro(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * Initialize the UIC power mode.
	 * QEMU UFS devices do not support UniPro or UIC power mode changes.
	 */
	if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
	    ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	ufshci_dev_init_uic_link_state(ctrlr);

	/* Read the device descriptors (Device, Geometry) */
	if (ufshci_dev_get_descriptor(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	if (ufshci_dev_config_write_booster(ctrlr)) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	ufshci_dev_init_auto_hibernate(ctrlr);

	/* TODO: Configure Write Protect */

	/* TODO: Configure Background Operations */

	/*
	 * If we are resetting (e.g. after a timeout), the controller is
	 * already attached to the SIM and does not need to be attached
	 * again.
	 */
	if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the UFS power mode */
	if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	TSEXIT();
}

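/*
 * Disable the host controller by clearing HCE, then poll with
 * exponential backoff until the hardware reports itself disabled.
 */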
static int
ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/* If the UFS host controller is already enabled, disable it. */
	if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
		hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
		ufshci_mmio_write_4(ctrlr, hce, hce);
	}

	/* Wait for the HCE flag to change */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
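		/* 'ticks' wraps, so compare via a signed difference. */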
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to disable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

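		/* Back off exponentially, capping the sleep at 1 ms. */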
		pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}

static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/* Enable the UFS host controller */
	hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
	ufshci_mmio_write_4(ctrlr, hce, hce);

	/*
	 * During controller initialization the value of the HCE bit is
	 * unstable, so wait a short while before reading it back.
	 */
	pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));

	/* Wait for the HCE flag to change */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to enable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

		pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}

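/* Mask all interrupts, then disable the host controller. */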
int
ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
{
	int error;

	/* Disable all interrupts */
	ufshci_mmio_write_4(ctrlr, ie, 0);

	error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
	return (error);
}

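/*
 * Enable the host controller, start the UniPro link, verify that a
 * device is present, and unmask the interrupts this driver services.
 */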
static int
ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
{
	uint32_t ie, hcs;
	int error;

	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/*
	 * The device_present (UFSHCI_HCS_REG_DP) bit becomes true if the host
	 * controller has successfully received a Link Startup UIC command
	 * response and the UFS device has established a physical link to the
	 * controller.
	 */
	hcs = ufshci_mmio_read_4(ctrlr, hcs);
	if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
		ufshci_printf(ctrlr, "UFS device not found\n");
		return (ENXIO);
	}

	/* Enable additional interrupts by programming the IE register. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE);  /* UTR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_UEE);    /* UIC Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_DFEE);   /* Device Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE);  /* UTP Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE);  /* Host Ctrlr Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE);  /* System Bus Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE);  /* Crypto Engine Fatal Error */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* TODO: Initialize Interrupt Aggregation Control Register (UTRIACR) */

	return (0);
}

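/*
 * Perform a full controller reset: disable the controller, then
 * re-enable it and bring the link back up.
 */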
static int
ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
{
	int error;

	error = ufshci_ctrlr_disable(ctrlr);
	if (error)
		return (error);

	error = ufshci_ctrlr_enable(ctrlr);
	return (error);
}

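/*
 * Reset task, run from the taskqueue. Disables both request queues,
 * resets the hardware, and restarts the controller; the queues are
 * re-enabled in ufshci_ctrlr_start().
 */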
static void
ufshci_ctrlr_reset_task(void *arg, int pending)
{
	struct ufshci_controller *ctrlr = arg;
	int error;

	/* Release resources */
	ufshci_utmr_req_queue_disable(ctrlr);
	ufshci_utr_req_queue_disable(ctrlr);

	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error)
		return (ufshci_ctrlr_fail(ctrlr));

	ufshci_ctrlr_start(ctrlr, true);
}

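/*
 * One-time controller construction, called at attach time: read the
 * capability registers, reset the hardware, and set up the request
 * queues and the reset taskqueue.
 */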
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
	uint32_t ver, cap, ahit;
	uint32_t timeout_period, retry_count;
	int error;

	ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
	ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
	ctrlr->dev = dev;
	ctrlr->sc_unit = device_get_unit(dev);

	snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
	    device_get_nameunit(dev));

	mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
	    MTX_DEF | MTX_RECURSE);

	mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
	    MTX_DEF);

	ver = ufshci_mmio_read_4(ctrlr, ver);
	ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
	ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
	ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
	    ctrlr->minor_version);

	/* Read device capabilities */
	ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
	ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
	/*
	 * TODO: This driver does not yet support multi-queue.
	 * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
	 * multi-queue support is available.
	 */
	ctrlr->is_mcq_supported = false;
	if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
		return (ENXIO);
	/*
	 * The maximum transfer size supported by the UFSHCI spec is
	 * 65535 * 256 KiB. However, we limit the maximum transfer size to
	 * 1 MiB (256 * 4 KiB) for performance reasons.
	 */
	ctrlr->page_size = PAGE_SIZE;
	ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;

	timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
	ctrlr->retry_count = retry_count;

	ctrlr->enable_aborts = 1;
	if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
		ctrlr->enable_aborts = 0;
	else
		TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
		    &ctrlr->enable_aborts);

	/* Reset the UFSHCI controller */
	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error)
		return (error);

	/* Read the UECPA register to clear it */
	ufshci_mmio_read_4(ctrlr, uecpa);

	/* Disable auto-hibernate */
	ahit = 0;
	ufshci_mmio_write_4(ctrlr, ahit, ahit);

	/* Allocate and initialize the UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize the UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* TODO: Separate IO and Admin slot */

	/*
	 * max_hw_pend_io is the number of slots in the transfer_req_queue.
	 * Reduce num_entries by one to reserve an admin slot.
	 */
	ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;

	/* Create a thread for the taskqueue. */
	ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");

	TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);

	return (0);
}

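/*
 * Tear down the controller: destroy the request queues, release the
 * interrupt and register resources, detach from CAM, and destroy the
 * locks.
 */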
void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
	if (ctrlr->resource == NULL)
		goto nores;

	/* TODO: Flush In-flight IOs */

	/* Release resources */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	mtx_lock(&ctrlr->sc_mtx);

	ufshci_sim_detach(ctrlr);

	mtx_unlock(&ctrlr->sc_mtx);

	bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
	    ctrlr->resource);
nores:
	KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
	    ("destroying uic_cmd_lock while still owned"));
	mtx_destroy(&ctrlr->uic_cmd_lock);

	KASSERT(!mtx_owned(&ctrlr->sc_mtx),
	    ("destroying sc_mtx while still owned"));
	mtx_destroy(&ctrlr->sc_mtx);

	return;
}

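/* Schedule an asynchronous controller reset on the taskqueue. */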
void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

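/*
 * Request submission wrappers. Task management requests go to the UTMR
 * queue; admin and I/O requests share the transfer request queue and
 * are distinguished by the is_admin flag.
 */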
int
ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue,
	    req, /*is_admin*/ false));
}

int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
	    /*is_admin*/ true));
}

int
ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
	    /*is_admin*/ false));
}

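/* Synchronously send a NOP OUT UPIU and poll for its completion. */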
int
ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
{
	struct ufshci_completion_poll_status status;

	status.done = 0;
	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
	ufshci_completion_poll(&status);
	if (status.error) {
		ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
		return (ENXIO);
	}

	return (0);
}

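/*
 * Config intrhook callback, run once interrupts are available: enable
 * the request queues, start the controller, and register the sysctl
 * nodes.
 */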
void
ufshci_ctrlr_start_config_hook(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	TSENTER();

	if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
	    ufshci_utr_req_queue_enable(ctrlr) == 0)
		ufshci_ctrlr_start(ctrlr, false);
	else
		ufshci_ctrlr_fail(ctrlr);

	ufshci_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	TSEXIT();
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
	uint32_t is;

	is = ufshci_mmio_read_4(ctrlr, is);

	/* UIC error */
	if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
		uint32_t uecpa, uecdl, uecn, uect, uecdme;

		/* UECPA for Host UIC Error Code within PHY Adapter Layer */
		uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
		if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
			ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
		}
		/* UECDL for Host UIC Error Code within Data Link Layer */
		uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
		if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
		}
		/* UECN for Host UIC Error Code within Network Layer */
		uecn = ufshci_mmio_read_4(ctrlr, uecn);
		if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
			ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
		}
		/* UECT for Host UIC Error Code within Transport Layer */
		uect = ufshci_mmio_read_4(ctrlr, uect);
		if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
			ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECT_REG_EC, uect));
		}
		/* UECDME for Host UIC Error Code within DME subcomponent */
		uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
		if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
		}
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
	}
	/* Device Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
		ufshci_printf(ctrlr, "Device fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
	}
	/* UTP Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
		ufshci_printf(ctrlr, "UTP error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
	}
	/* Host Controller Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
		ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
	}
	/* System Bus Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
		ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
	}
	/* Crypto Engine Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
		ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
	}
	/* UTP Task Management Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->task_mgmt_req_queue);
	}
	/* UTP Transfer Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->transfer_req_queue);
	}
	/* MCQ CQ Event Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
		/* TODO: We need to process completion Queue Pairs */
		ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
	}
}

/*
 * Handler for the single-vector (shared) interrupt case: there is only
 * one vector, so poll every enabled queue for completions.
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	ufshci_ctrlr_poll(ctrlr);
}

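/* Dump the host controller registers for debugging. */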
void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
	ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");

	UFSHCI_DUMP_REG(ctrlr, cap);
	UFSHCI_DUMP_REG(ctrlr, mcqcap);
	UFSHCI_DUMP_REG(ctrlr, ver);
	UFSHCI_DUMP_REG(ctrlr, ext_cap);
	UFSHCI_DUMP_REG(ctrlr, hcpid);
	UFSHCI_DUMP_REG(ctrlr, hcmid);
	UFSHCI_DUMP_REG(ctrlr, ahit);
	UFSHCI_DUMP_REG(ctrlr, is);
	UFSHCI_DUMP_REG(ctrlr, ie);
	UFSHCI_DUMP_REG(ctrlr, hcsext);
	UFSHCI_DUMP_REG(ctrlr, hcs);
	UFSHCI_DUMP_REG(ctrlr, hce);
	UFSHCI_DUMP_REG(ctrlr, uecpa);
	UFSHCI_DUMP_REG(ctrlr, uecdl);
	UFSHCI_DUMP_REG(ctrlr, uecn);
	UFSHCI_DUMP_REG(ctrlr, uect);
	UFSHCI_DUMP_REG(ctrlr, uecdme);

	ufshci_printf(ctrlr, "========================================\n");
}

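/*
 * Suspend: move the device to the target power condition via START
 * STOP UNIT, then transition the UniPro link state.
 */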
int
ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr, enum power_stype stype)
{
	int error;

	if (!ctrlr->ufs_dev.power_mode_supported)
		return (0);

	/* TODO: Need to flush the request queue */

	if (ctrlr->ufs_device_wlun_periph) {
		ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr;
		error = ufshci_sim_send_ssu(ctrlr, /*start*/ false,
		    power_map[stype].ssu_pc, /*immed*/ false);
		if (error) {
			ufshci_printf(ctrlr,
			    "Failed to send SSU in suspend handler\n");
			return (error);
		}
	}

	/* Change the link state */
	error = ufshci_dev_link_state_transition(ctrlr,
	    power_map[stype].link_state);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to transition link state in suspend handler\n");
		return (error);
	}

	return (0);
}

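/*
 * Resume: bring the link back up first, then restore the device power
 * condition via START STOP UNIT and re-enable auto-hibernate.
 */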
int
ufshci_ctrlr_resume(struct ufshci_controller *ctrlr, enum power_stype stype)
{
	int error;

	if (!ctrlr->ufs_dev.power_mode_supported)
		return (0);

	/* Change the link state */
	error = ufshci_dev_link_state_transition(ctrlr,
	    power_map[stype].link_state);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to transition link state in resume handler\n");
		return (error);
	}

	if (ctrlr->ufs_device_wlun_periph) {
		ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr;
		error = ufshci_sim_send_ssu(ctrlr, /*start*/ false,
		    power_map[stype].ssu_pc, /*immed*/ false);
		if (error) {
			ufshci_printf(ctrlr,
			    "Failed to send SSU in resume handler\n");
			return (error);
		}
	}

	ufshci_dev_enable_auto_hibernate(ctrlr);

	return (0);
}