/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
{
	ctrlr->is_failed = true;

	ufshci_req_queue_fail(ctrlr,
	    ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
		&ctrlr->task_mgmt_req_queue));
	ufshci_req_queue_fail(ctrlr,
	    ctrlr->transfer_req_queue.qops.get_hw_queue(
		&ctrlr->transfer_req_queue));
}

static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
{
	TSENTER();

	/*
	 * If `resetting` is true, we are on the reset path.
	 * Re-enable the request queues here because
	 * ufshci_ctrlr_reset_task() disables them during reset.
	 */
	if (resetting) {
		if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
			ufshci_ctrlr_fail(ctrlr);
			return;
		}
		if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
			ufshci_ctrlr_fail(ctrlr);
			return;
		}
	}

	if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the UFS target device */
	if (ufshci_dev_init(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the reference clock */
	if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize UniPro */
	if (ufshci_dev_init_unipro(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/*
	 * Initialize the UIC power mode.
	 * QEMU UFS devices do not support UniPro and power mode changes.
	 */
	if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
	    ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Initialize the UFS power mode */
	if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Read the controller descriptors (Device, Geometry) */
	if (ufshci_dev_get_descriptor(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* Configure Write Booster */
	if (ufshci_dev_config_write_booster(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	/* TODO: Configure Write Protect */

	/* TODO: Configure Background Operations */

	/*
	 * If the reset is due to a timeout, the controller is already
	 * attached to the SIM and does not need to be attached again.
	 */
	if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	TSEXIT();
}
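/*
 * The two helpers below toggle the HCE (Host Controller Enable) bit and
 * poll, with exponential backoff, until the controller reports the
 * requested state or the device-init timeout expires.
 */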
static int
ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/* If the UFS host controller is already enabled, disable it. */
	if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
		hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
		ufshci_mmio_write_4(ctrlr, hce, hce);
	}

	/* Wait for the HCE bit to clear. */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to disable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

		pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}

static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/* Enable the UFS host controller. */
	hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
	ufshci_mmio_write_4(ctrlr, hce, hce);

	/*
	 * The HCE bit reads back as unstable while the controller is
	 * initializing, so wait briefly before starting to poll its value.
	 */
	pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));

	/* Wait for the HCE bit to be set. */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to enable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

		pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}

static int
ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
{
	int error;

	/* Disable all interrupts. */
	ufshci_mmio_write_4(ctrlr, ie, 0);

	error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
	return (error);
}
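/*
 * Bring the link up: enable the host controller, start the UniPro link
 * startup procedure with DME_LINKSTARTUP, verify that a device is present,
 * and unmask the interrupt sources the driver handles.
 */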
static int
ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
{
	uint32_t ie, hcs;
	int error;

	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/*
	 * The device-present bit (UFSHCI_HCS_REG_DP) becomes true once the
	 * host controller has successfully received a Link Startup UIC
	 * command response and the UFS device has found a physical link to
	 * the controller.
	 */
	hcs = ufshci_mmio_read_4(ctrlr, hcs);
	if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
		ufshci_printf(ctrlr, "UFS device not found\n");
		return (ENXIO);
	}

	/* Enable additional interrupts by programming the IE register. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE);  /* UTR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_UEE);    /* UIC Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_DFEE);   /* Device Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE);  /* UTP Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE);  /* Host Ctrlr Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE);  /* System Bus Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE);  /* Crypto Engine Fatal Error */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* TODO: Initialize the Interrupt Aggregation Control Register
	 * (UTRIACR). */

	return (0);
}

static int
ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
{
	int error;

	error = ufshci_ctrlr_disable(ctrlr);
	if (error)
		return (error);

	error = ufshci_ctrlr_enable(ctrlr);
	return (error);
}

static void
ufshci_ctrlr_reset_task(void *arg, int pending)
{
	struct ufshci_controller *ctrlr = arg;
	int error;

	/* Disable the request queues while the controller is reset. */
	ufshci_utmr_req_queue_disable(ctrlr);
	ufshci_utr_req_queue_disable(ctrlr);

	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error)
		return (ufshci_ctrlr_fail(ctrlr));

	ufshci_ctrlr_start(ctrlr, true);
}
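/*
 * One-time controller construction: read the version and capability
 * registers, reset the controller, and allocate the UTP task management
 * and transfer request queues.
 */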
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
	uint32_t ver, cap, ahit;
	uint32_t timeout_period, retry_count;
	int error;

	ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
	ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
	ctrlr->dev = dev;
	ctrlr->sc_unit = device_get_unit(dev);

	snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
	    device_get_nameunit(dev));

	mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
	    MTX_DEF | MTX_RECURSE);

	mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
	    MTX_DEF);

	ver = ufshci_mmio_read_4(ctrlr, ver);
	ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
	ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
	ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
	    ctrlr->minor_version);

	/* Read the device capabilities. */
	ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
	ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
	/*
	 * TODO: This driver does not yet support multi-queue.
	 * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine
	 * whether multi-queue support is available.
	 */
	ctrlr->is_mcq_supported = false;
	if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
		return (ENXIO);
	/*
	 * The maximum transfer size supported by the UFSHCI spec is
	 * 65535 * 256 KiB. However, we limit the maximum transfer size to
	 * 1 MiB (256 * 4 KiB) for performance reasons.
	 */
	ctrlr->page_size = PAGE_SIZE;
	ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;

	timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
	ctrlr->retry_count = retry_count;

	ctrlr->enable_aborts = 1;
	if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
		ctrlr->enable_aborts = 0;
	else
		TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
		    &ctrlr->enable_aborts);

	/* Reset the UFSHCI controller. */
	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error)
		return (error);

	/* Read the UECPA register to clear it. */
	ufshci_mmio_read_4(ctrlr, uecpa);

	/* Disable Auto-Hibernate. */
	ahit = 0;
	ufshci_mmio_write_4(ctrlr, ahit, ahit);

	/* Allocate and initialize the UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize the UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* TODO: Separate the I/O and admin slots. */

	/*
	 * max_hw_pend_io is the number of slots in the transfer_req_queue.
	 * Reduce num_entries by one to reserve an admin slot.
	 */
	ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;

	/* Create a thread for the taskqueue. */
	ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");

	TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);

	return (0);
}

void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
	if (ctrlr->resource == NULL)
		goto nores;

	/* TODO: Flush in-flight I/Os. */

	/* Release resources. */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	mtx_lock(&ctrlr->sc_mtx);

	ufshci_sim_detach(ctrlr);

	mtx_unlock(&ctrlr->sc_mtx);

	bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
	    ctrlr->resource);
nores:
	KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
	    ("destroying uic_cmd_lock while still owned"));
	mtx_destroy(&ctrlr->uic_cmd_lock);

	KASSERT(!mtx_owned(&ctrlr->sc_mtx),
	    ("destroying sc_mtx while still owned"));
	mtx_destroy(&ctrlr->sc_mtx);
}

void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}

int
ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue,
	    req, /*is_admin*/ false));
}
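/*
 * Admin requests are carried on the transfer request queue with is_admin
 * set; ufshci_ctrlr_construct() reserves one queue slot for this purpose.
 */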
int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue,
	    req, /*is_admin*/ true));
}

int
ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue,
	    req, /*is_admin*/ false));
}

int
ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
{
	struct ufshci_completion_poll_status status;

	status.done = 0;
	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
	ufshci_completion_poll(&status);
	if (status.error) {
		ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
		return (ENXIO);
	}

	return (0);
}
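/*
 * Boot-time bring-up, run from config_intrhook context once interrupts are
 * available: enable the request queues, start the controller, and register
 * the sysctl nodes.
 */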
void
ufshci_ctrlr_start_config_hook(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	TSENTER();

	if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
	    ufshci_utr_req_queue_enable(ctrlr) == 0)
		ufshci_ctrlr_start(ctrlr, false);
	else
		ufshci_ctrlr_fail(ctrlr);

	ufshci_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	TSEXIT();
}

/*
 * Poll all the queues enabled on the device for completion.
 * Status bits in the IS register are write-one-to-clear, so each handled
 * condition is acknowledged by writing its bit back.
 */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
	uint32_t is;

	is = ufshci_mmio_read_4(ctrlr, is);

	/* UIC error */
	if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
		uint32_t uecpa, uecdl, uecn, uect, uecdme;

		/* UECPA for Host UIC Error Code within PHY Adapter Layer */
		uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
		if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
			ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
		}
		/* UECDL for Host UIC Error Code within Data Link Layer */
		uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
		if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
		}
		/* UECN for Host UIC Error Code within Network Layer */
		uecn = ufshci_mmio_read_4(ctrlr, uecn);
		if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
			ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
		}
		/* UECT for Host UIC Error Code within Transport Layer */
		uect = ufshci_mmio_read_4(ctrlr, uect);
		if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
			ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECT_REG_EC, uect));
		}
		/* UECDME for Host UIC Error Code within DME subcomponent */
		uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
		if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
		}
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
	}
	/* Device Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
		ufshci_printf(ctrlr, "Device fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
	}
	/* UTP Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
		ufshci_printf(ctrlr, "UTP error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
	}
	/* Host Controller Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
		ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
	}
	/* System Bus Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
		ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
	}
	/* Crypto Engine Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
		ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
	}
	/* UTP Task Management Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->task_mgmt_req_queue);
	}
	/* UTP Transfer Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->transfer_req_queue);
	}
	/* MCQ CQ Event Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
		/* TODO: Process the MCQ completion queue pairs. */
		ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
	}
}

/*
 * Shared handler for the single-vector interrupt case: there is only one
 * vector, so poll every interrupt source.
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	ufshci_ctrlr_poll(ctrlr);
}

void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
	ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");

	UFSHCI_DUMP_REG(ctrlr, cap);
	UFSHCI_DUMP_REG(ctrlr, mcqcap);
	UFSHCI_DUMP_REG(ctrlr, ver);
	UFSHCI_DUMP_REG(ctrlr, ext_cap);
	UFSHCI_DUMP_REG(ctrlr, hcpid);
	UFSHCI_DUMP_REG(ctrlr, hcmid);
	UFSHCI_DUMP_REG(ctrlr, ahit);
	UFSHCI_DUMP_REG(ctrlr, is);
	UFSHCI_DUMP_REG(ctrlr, ie);
	UFSHCI_DUMP_REG(ctrlr, hcsext);
	UFSHCI_DUMP_REG(ctrlr, hcs);
	UFSHCI_DUMP_REG(ctrlr, hce);
	UFSHCI_DUMP_REG(ctrlr, uecpa);
	UFSHCI_DUMP_REG(ctrlr, uecdl);
	UFSHCI_DUMP_REG(ctrlr, uecn);
	UFSHCI_DUMP_REG(ctrlr, uect);
	UFSHCI_DUMP_REG(ctrlr, uecdme);

	ufshci_printf(ctrlr, "========================================\n");
}