/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr,
                    "Failed to send Read Descriptor query request!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
            desc, sizeof(struct ufshci_unit_descriptor)));
}

static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}

static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

static int
ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

static int
ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t *value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
                return (ENXIO);
        }

        *value = status.cpl.response_upiu.query_response_upiu.value_64;

        return (0);
}

static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}

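/*
 * Device initialization handshake: the host sets the fDeviceInit flag and
 * the device clears it once its initialization completes. The loop below
 * polls the flag through the query helpers above, backing off from 1us up
 * to 1ms between reads, until the flag clears or device_init_timeout_in_ms
 * expires.
 */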
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "device init did not become %d "
                            "within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}

int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        index = 0;      /* bRefClkFreq is a device-type attribute */
        selector = 0;   /* bRefClkFreq is a device-type attribute */

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}

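/*
 * UniPro link attributes are read with DME_GET (local side) and
 * DME_PEER_GET (device side). Note that PA_TActivate is expressed in units
 * of PA_Granularity, so the Lakefield quirk below adds two granularity
 * steps, which matches the quoted 200us when both sides use the 100us
 * granularity setting.
 */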
int
ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
{
        uint32_t pa_granularity, peer_pa_granularity;
        uint32_t t_activate, peer_t_activate;

        /*
         * UniPro Version:
         * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
         *   1 = 1.40, 0 = Reserved
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
            &ctrlr->unipro_version))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
            &ctrlr->ufs_dev.unipro_version))
                return (ENXIO);

        /*
         * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time
         * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
            &peer_pa_granularity))
                return (ENXIO);

        /*
         * PA_TActivate: Time to wait before activating a burst in order to
         * wake up the peer M-RX.
         * UniPro automatically sets timing information such as PA_TActivate
         * through the PACP_CAP_EXT1_ind command during the Link Startup
         * operation.
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: 200us must be added to
                 * the peer's PA_TActivate.
                 */
                if (pa_granularity == peer_pa_granularity) {
                        peer_t_activate = t_activate + 2;
                        if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
                            peer_t_activate))
                                return (ENXIO);
                }
        }

        return (0);
}

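/*
 * Gear and power mode negotiation: the active lane counts are raised to
 * their maximums first, then the HS gear, terminations, HS series and
 * timeout values are programmed, and finally PA_PWRMode is written to
 * trigger the change. PA_PWRMode packs the TX mode in bits [3:0] and the
 * RX mode in bits [7:4], so Fast Mode in both directions is
 * (1 << 4) | 1 = 0x11.
 */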
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        uint32_t power_mode, peer_granularity;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to the maximum */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
                /* Before changing gears, first change the number of lanes. */
                if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
                        return (ENXIO);
                if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                        return (ENXIO);

                /* Wait for the power mode change to complete. */
                if (ufshci_uic_power_mode_ready(ctrlr)) {
                        ufshci_reg_dump(ctrlr);
                        return (ENXIO);
                }
        }

        /* Set HS-GEAR to the max gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set TX/RX PWRMode */
        power_mode = (fast_mode << rx_bit_shift) | fast_mode;
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: wait 1250us and then
                 * clear the DME error.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_granularity))
                        return (ENXIO);
        }

        return (0);
}

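/*
 * Auto-hibernate: the AHIT register combines an idle timer value with a
 * timer-scale field. The values programmed below (timer = 150, scale = 3)
 * are meant to yield the 150 ms default noted in the comment, assuming
 * scale 3 selects a 1 ms granularity as defined by the UFSHCI AHIT layout.
 */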
void
ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr)
{
        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        ufshci_mmio_write_4(ctrlr, ahit, ctrlr->ufs_dev.ahit);
}

void
ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.auto_hibernation_supported =
            UFSHCIV(UFSHCI_CAP_REG_AUTOH8, ctrlr->cap) &&
            !(ctrlr->quirks & UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE);

        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        /* The default value for auto hibernation is 150 ms */
        ctrlr->ufs_dev.ahit = 0;
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_AH8ITV, 150);
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_TS, 3);

        ufshci_dev_enable_auto_hibernate(ctrlr);
}

void
ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.link_state = UFSHCI_UIC_LINK_STATE_ACTIVE;
}

int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.power_mode_supported = false;

        if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
                return (0);

        ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr,
            UFSHCI_WLUN_UFS_DEVICE);
        if (ctrlr->ufs_device_wlun_periph == NULL) {
                ufshci_printf(ctrlr,
                    "Well-known LUN `UFS Device (0x50)` not found\n");
                return (0);
        }

        ctrlr->ufs_dev.power_mode_supported = true;
        ctrlr->ufs_dev.power_mode = UFSHCI_DEV_PWR_ACTIVE;

        return (0);
}

int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * The device density unit (kDeviceDensityUnit) is defined in the
         * spec as 512 bytes. qTotalRawDeviceCapacity uses big-endian byte
         * ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
            device_density_unit);

        return (0);
}

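/*
 * WriteBooster control. The helpers below set or clear the WriteBooster
 * enable, flush-during-hibernate and buffer-flush flags as a group;
 * ufshci_dev_config_write_booster() further down decides whether the
 * feature can be used on this device and disables it again on any failure.
 */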
static int
ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Enable WriteBooster */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = true;

        /* Enable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_set_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Enable WriteBooster buffer flush */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = true;

        return (0);
}

static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Disable WriteBooster buffer flush */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = false;

        /* Disable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_clear_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Disable WriteBooster */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = false;

        return (0);
}

static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint8_t buffer_lun;
        uint64_t life_time;
        uint32_t error;

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
        if (error)
                return (error);

        *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

        return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        bool is_life_time_left = false;
        uint64_t available_buffer_size, current_buffer_size;
        uint8_t buffer_lun;
        uint32_t error;

        *need_flush = false;

        if (!dev->is_wb_enabled)
                return (0);

        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                return (error);

        if (!is_life_time_left)
                return (ufshci_dev_disable_write_booster(ctrlr));

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
            &available_buffer_size);
        if (error)
                return (error);

        switch (dev->wb_user_space_config_option) {
        case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
                *need_flush = (available_buffer_size <=
                    UFSHCI_ATTR_WB_AVAILABLE_10);
                break;
        case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
                /*
                 * In PRESERVE USER SPACE mode, a flush should be performed
                 * when the current buffer size is greater than 0 and the
                 * available buffer size has dropped below
                 * write_booster_flush_threshold.
                 */
                error = ufshci_dev_read_attribute(ctrlr,
                    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
                    &current_buffer_size);
                if (error)
                        return (error);

                if (current_buffer_size == 0)
                        return (0);

                *need_flush = (available_buffer_size <
                    dev->write_booster_flush_threshold);
                break;
        default:
                ufshci_printf(ctrlr,
                    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value\n");
                return (EINVAL);
        }

        /*
         * TODO: Need to handle the WRITEBOOSTER_FLUSH_NEEDED exception case
         * from the wExceptionEventStatus attribute.
         */

        return (0);
}

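/*
 * The WriteBooster buffer size computed below follows the formula
 * alloc_units * bAllocationUnitSize * dSegmentSize / (1 MiB /
 * UFSHCI_SECTOR_SIZE), i.e. each allocation unit covers
 * bAllocationUnitSize * dSegmentSize sectors. As an illustrative example,
 * with bAllocationUnitSize = 1, dSegmentSize = 8192 and 512-byte sectors,
 * one allocation unit corresponds to 4 MiB.
 */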
int
ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint32_t extended_ufs_feature_support;
        uint32_t alloc_units;
        struct ufshci_unit_descriptor unit_desc;
        uint8_t lun;
        bool is_life_time_left;
        uint32_t mega_byte = 1024 * 1024;
        uint32_t error = 0;

        extended_ufs_feature_support = be32toh(
            dev->dev_desc.dExtendedUfsFeaturesSupport);
        if (!(extended_ufs_feature_support &
            UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
                /* This device does not support WriteBooster */
                return (0);
        }

        if (ufshci_dev_enable_write_booster(ctrlr))
                return (0);

        /* Get WriteBooster buffer parameters */
        dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
        dev->wb_user_space_config_option =
            dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;

        /*
         * Find the size of the write buffer.
         * With LU-dedicated (00h), the WriteBooster buffer is assigned
         * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
         * uses a single device-wide buffer shared by multiple LUs.
         */
        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
                alloc_units = be32toh(
                    dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
                ufshci_printf(ctrlr,
                    "WriteBooster buffer type = Shared, alloc_units=%d\n",
                    alloc_units);
        } else if (dev->wb_buffer_type ==
            UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
                ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
                for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
                        /* Find a dedicated buffer using a unit descriptor */
                        if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
                            &unit_desc))
                                continue;

                        alloc_units = be32toh(
                            unit_desc.dLUNumWriteBoosterBufferAllocUnits);
                        if (alloc_units) {
                                dev->wb_dedicated_lu = lun;
                                break;
                        }
                }
        } else {
                ufshci_printf(ctrlr,
                    "Unsupported WriteBooster buffer type: 0x%x\n",
                    dev->wb_buffer_type);
                goto out;
        }

        if (alloc_units == 0) {
                ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
                goto out;
        }

        dev->wb_buffer_size_mb = alloc_units *
            dev->geo_desc.bAllocationUnitSize *
            (be32toh(dev->geo_desc.dSegmentSize)) /
            (mega_byte / UFSHCI_SECTOR_SIZE);

        /* Set to flush when 40% of the available buffer size remains */
        dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;

        /*
         * Check whether any WriteBooster buffer lifetime is left.
         * The WriteBooster buffer lifetime reports the percentage of life
         * used, based on P/E cycles. If "preserve user space" is enabled,
         * writes to normal user space also consume WriteBooster life since
         * the area is shared.
         */
        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                goto out;

        if (!is_life_time_left) {
                ufshci_printf(ctrlr,
                    "There is no WriteBooster buffer lifetime left.\n");
                goto out;
        }

        ufshci_printf(ctrlr, "WriteBooster Enabled\n");
        return (0);
out:
        ufshci_dev_disable_write_booster(ctrlr);
        return (error);
}

int
ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr,
    uint8_t *power_mode)
{
        uint64_t value;
        int err;

        err = ufshci_dev_read_attribute(ctrlr, UFSHCI_ATTR_B_CURRENT_POWER_MODE,
            /*index*/ 0, /*selector*/ 0, &value);
        if (err)
                return (err);

        *power_mode = (uint8_t)value;

        return (0);
}

static int
ufshci_dev_hibernate_enter(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_enter(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

static int
ufshci_dev_hibernate_exit(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_exit(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

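/*
 * Link state transitions handled below: ACTIVE <-> HIBERNATE via DME
 * hibernate enter/exit, any state -> OFF by entering hibernate and then
 * disabling the controller, and -> BROKEN which only records the new
 * state. All other transitions fail with EINVAL.
 */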
int
ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr,
    enum ufshci_uic_link_state target_state)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error = 0;

        if (dev->link_state == target_state)
                return (0);

        switch (target_state) {
        case UFSHCI_UIC_LINK_STATE_OFF:
                error = ufshci_dev_hibernate_enter(ctrlr);
                if (error)
                        break;
                error = ufshci_ctrlr_disable(ctrlr);
                break;
        case UFSHCI_UIC_LINK_STATE_ACTIVE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_HIBERNATE)
                        error = ufshci_dev_hibernate_exit(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_HIBERNATE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_ACTIVE)
                        error = ufshci_dev_hibernate_enter(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_BROKEN:
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error)
                return (error);

        dev->link_state = target_state;

        return (0);
}