/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
	int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
	sbintime_t delta_t = SBT_1US;
	uint32_t hce;

	hce = ufshci_mmio_read_4(ctrlr, hce);

	/* If the UFS host controller is already enabled, disable it. */
	if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
		hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
		ufshci_mmio_write_4(ctrlr, hce, hce);
	}

	/* Enable the UFS host controller. */
	hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
	ufshci_mmio_write_4(ctrlr, hce, hce);

	/*
	 * During controller initialization the value of the HCE bit is
	 * unstable, so wait a short time before reading it back.
	 */
	pause_sbt("ufshci_hce", ustosbt(100), 0, C_PREL(1));

	/*
	 * Wait for the HCE flag to change, polling with an exponential
	 * backoff: the interval starts at 1 us, grows by 1.5x per iteration,
	 * and is capped at 1 ms.
	 */
	while (1) {
		hce = ufshci_mmio_read_4(ctrlr, hce);
		if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
			break;
		if (timeout - ticks < 0) {
			ufshci_printf(ctrlr,
			    "host controller failed to enable "
			    "within %d ms\n",
			    ctrlr->device_init_timeout_in_ms);
			return (ENXIO);
		}

		pause_sbt("ufshci_hce", delta_t, 0, C_PREL(1));
		delta_t = min(SBT_1MS, delta_t * 3 / 2);
	}

	return (0);
}
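/*
 * Bring-up sequence (summarized from the function body below): enable the
 * host controller (HCE), start the link (DME_LINKSTARTUP), verify device
 * presence (HCS.DP), construct the UTMR/UTR request queues, and finally
 * unmask interrupts via the IE register.
 */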
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
	uint32_t ver, cap, hcs, ie, ahit;
	uint32_t timeout_period, retry_count;
	int error;

	ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
	ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
	ctrlr->dev = dev;
	ctrlr->sc_unit = device_get_unit(dev);

	snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
	    device_get_nameunit(dev));

	mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
	    MTX_DEF | MTX_RECURSE);

	mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
	    MTX_DEF);

	ver = ufshci_mmio_read_4(ctrlr, ver);
	ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
	ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
	ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
	    ctrlr->minor_version);

	/* Read device capabilities. */
	ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
	ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
	/*
	 * TODO: This driver does not yet support multi-queue.
	 * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine
	 * whether multi-queue support is available.
	 */
	ctrlr->is_mcq_supported = false;
	/* Fail unless legacy single doorbell mode or MCQ is usable. */
	if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
		return (ENXIO);
	/*
	 * The maximum transfer size supported by the UFSHCI spec is
	 * 65535 * 256 KiB. However, we limit the maximum transfer size to
	 * 1 MiB (256 * 4 KiB) for performance reasons.
	 */
	ctrlr->page_size = PAGE_SIZE;
	ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;

	timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;

	retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
	ctrlr->retry_count = retry_count;

	/* Disable all interrupts. */
	ufshci_mmio_write_4(ctrlr, ie, 0);

	/* Enable the host controller. */
	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure. */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/* Read the UECPA register to clear it. */
	ufshci_mmio_read_4(ctrlr, uecpa);

	/* Disable auto-hibernate. */
	ahit = 0;
	ufshci_mmio_write_4(ctrlr, ahit, ahit);

	/*
	 * The device present (UFSHCI_HCS_REG_DP) bit becomes true if the host
	 * controller has successfully received a Link Startup UIC command
	 * response and the UFS device has found a physical link to the
	 * controller.
	 */
	hcs = ufshci_mmio_read_4(ctrlr, hcs);
	if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
		ufshci_printf(ctrlr, "UFS device not found\n");
		return (ENXIO);
	}

	/* Allocate and initialize the UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize the UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Enable additional interrupts by programming the IE register. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE);  /* UTR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_UEE);    /* UIC Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
	ie |= UFSHCIM(UFSHCI_IE_REG_DFEE);   /* Device Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE);  /* UTP Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE);  /* Host Ctrlr Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE);  /* System Bus Fatal Error */
	ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE);  /* Crypto Engine Fatal Error */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* TODO: Initialize the Interrupt Aggregation Control Register
	 * (UTRIACR). */

	/* TODO: Separate I/O and admin slots. */
	/*
	 * max_hw_pend_io is the number of slots in the transfer_req_queue.
	 * Reduce num_entries by one to reserve an admin slot.
	 */
	ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;

	return (0);
}
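/*
 * The timeout and retry tunables fetched above may be overridden from
 * loader.conf(5). The values below are purely illustrative:
 *
 *	hw.ufshci.timeout_period="30"
 *	hw.ufshci.retry_count="4"
 */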
void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
	if (ctrlr->resource == NULL)
		goto nores;

	/* TODO: Flush in-flight I/Os. */

	/* Release resources. */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);

	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);

	mtx_lock(&ctrlr->sc_mtx);

	ufshci_sim_detach(ctrlr);

	mtx_unlock(&ctrlr->sc_mtx);

	bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
	    ctrlr->resource);
nores:
	mtx_destroy(&ctrlr->uic_cmd_lock);
	mtx_destroy(&ctrlr->sc_mtx);

	return;
}
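/*
 * Reset replays the bring-up sequence from ufshci_ctrlr_construct(): the
 * request queues are destroyed, the host controller is re-enabled, the link
 * is restarted, and the queues are rebuilt. The IE register is saved across
 * the reset and restored afterwards.
 */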
int
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
	uint32_t ie;
	int error;

	/* Back up and disable all interrupts. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ufshci_mmio_write_4(ctrlr, ie, 0);

	/* Release resources. */
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);

	/* Reset the host controller. */
	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error)
		return (error);

	/* Send DME_LINKSTARTUP command to start the link startup procedure. */
	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error)
		return (error);

	/* Restore the saved interrupt mask. */
	ufshci_mmio_write_4(ctrlr, ie, ie);

	/* Allocate and initialize the UTP Task Management Request List. */
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	/* Allocate and initialize the UTP Transfer Request List or SQ/CQ. */
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);

	return (0);
}

int
ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue,
	    req, /*is_admin*/ false));
}

int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
	    /*is_admin*/ true));
}

int
ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
	    /*is_admin*/ false));
}

int
ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
{
	struct ufshci_completion_poll_status status;

	status.done = 0;
	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
	ufshci_completion_poll(&status);
	if (status.error) {
		ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
		return (ENXIO);
	}

	return (0);
}
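/*
 * ufshci_ctrlr_send_nop() above shows the driver's synchronous command
 * pattern: clear status.done, submit with ufshci_completion_poll_cb as the
 * completion callback, then spin in ufshci_completion_poll() until the
 * callback marks the request done. Any other command issued synchronously
 * would follow the same three steps.
 */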
static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr, bool admin_also)
{
	printf("ufshci(4): ufshci_ctrlr_fail\n");

	ctrlr->is_failed = true;

	/* TODO: task_mgmt_req_queue should also be failed. */

	ufshci_req_queue_fail(ctrlr,
	    &ctrlr->transfer_req_queue.hwq[UFSHCI_SDB_Q]);
}

static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr)
{
	TSENTER();

	if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize the UFS target device. */
	if (ufshci_dev_init(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize the reference clock. */
	if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize UniPro. */
	if (ufshci_dev_init_unipro(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/*
	 * Initialize the UIC power mode.
	 * QEMU UFS devices do not support UniPro and power mode.
	 */
	if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
	    ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Initialize the UFS power mode. */
	if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* Read the controller descriptors (Device, Geometry). */
	if (ufshci_dev_get_descriptor(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	if (ufshci_dev_config_write_booster(ctrlr)) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	/* TODO: Configure background operations. */

	if (ufshci_sim_attach(ctrlr) != 0) {
		ufshci_ctrlr_fail(ctrlr, false);
		return;
	}

	TSEXIT();
}

void
ufshci_ctrlr_start_config_hook(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	TSENTER();

	if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
	    ufshci_utr_req_queue_enable(ctrlr) == 0)
		ufshci_ctrlr_start(ctrlr);
	else
		ufshci_ctrlr_fail(ctrlr, false);

	ufshci_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	TSEXIT();
}

/*
 * Poll all the queues enabled on the device for completion.
 */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
	uint32_t is;

	is = ufshci_mmio_read_4(ctrlr, is);

	/* UIC error */
	if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
		uint32_t uecpa, uecdl, uecn, uect, uecdme;

		/* UECPA for Host UIC Error Code within PHY Adapter Layer */
		uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
		if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
			ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
		}
		/* UECDL for Host UIC Error Code within Data Link Layer */
		uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
		if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
		}
		/* UECN for Host UIC Error Code within Network Layer */
		uecn = ufshci_mmio_read_4(ctrlr, uecn);
		if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
			ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
		}
		/* UECT for Host UIC Error Code within Transport Layer */
		uect = ufshci_mmio_read_4(ctrlr, uect);
		if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
			ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECT_REG_EC, uect));
		}
		/* UECDME for Host UIC Error Code within DME subcomponent */
		uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
		if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
		}
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
	}
	/* Device Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
		ufshci_printf(ctrlr, "Device fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
	}
	/* UTP Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
		ufshci_printf(ctrlr, "UTP error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
	}
	/* Host Controller Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
		ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
	}
	/* System Bus Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
		ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
	}
	/* Crypto Engine Fatal Error Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
		ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
	}
	/* UTP Task Management Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->task_mgmt_req_queue);
	}
	/* UTP Transfer Request Completion Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->transfer_req_queue);
	}
	/* MCQ CQ Event Status */
	if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
		/* TODO: We need to process completion queue pairs. */
		ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
	}
}
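/*
 * Note: each interrupt status bit handled above is cleared by writing that
 * bit back to the IS register (write-1-to-clear), leaving unrelated status
 * bits untouched.
 */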
/*
 * Interrupt handler for the single-vector case: every interrupt source is
 * serviced by one pass of ufshci_ctrlr_poll().
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	ufshci_ctrlr_poll(ctrlr);
}

void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
	ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");

	UFSHCI_DUMP_REG(ctrlr, cap);
	UFSHCI_DUMP_REG(ctrlr, mcqcap);
	UFSHCI_DUMP_REG(ctrlr, ver);
	UFSHCI_DUMP_REG(ctrlr, ext_cap);
	UFSHCI_DUMP_REG(ctrlr, hcpid);
	UFSHCI_DUMP_REG(ctrlr, hcmid);
	UFSHCI_DUMP_REG(ctrlr, ahit);
	UFSHCI_DUMP_REG(ctrlr, is);
	UFSHCI_DUMP_REG(ctrlr, ie);
	UFSHCI_DUMP_REG(ctrlr, hcsext);
	UFSHCI_DUMP_REG(ctrlr, hcs);
	UFSHCI_DUMP_REG(ctrlr, hce);
	UFSHCI_DUMP_REG(ctrlr, uecpa);
	UFSHCI_DUMP_REG(ctrlr, uecdl);
	UFSHCI_DUMP_REG(ctrlr, uecn);
	UFSHCI_DUMP_REG(ctrlr, uect);
	UFSHCI_DUMP_REG(ctrlr, uecdme);

	ufshci_printf(ctrlr, "========================================\n");
}