/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

/*
 * Issue a synchronous Query Request UPIU (READ DESCRIPTOR) and copy the
 * returned descriptor payload into 'desc'.  The caller supplies the
 * descriptor type/index/selector triple and the expected descriptor size;
 * completion is awaited by polling 'status'.
 *
 * Returns 0 on success or ENXIO if the query completed with an error.
 */
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
	struct ufshci_completion_poll_status status;
	struct ufshci_query_param param;

	param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
	param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
	param.type = desc_type;
	param.index = index;
	param.selector = selector;
	param.value = 0;
	param.desc_size = desc_size;

	status.done = 0;
	ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
	    &status, param);
	ufshci_completion_poll(&status);
	if (status.error) {
		ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
		return (ENXIO);
	}

	/* Copy the descriptor out of the query response's data segment. */
	memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
	    desc_size);

	return (0);
}

/* Read the Device descriptor (device-wide; index/selector are 0). */
static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
	return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
	    desc, sizeof(struct ufshci_device_descriptor)));
}

/* Read the Geometry descriptor (device-wide; index/selector are 0). */
static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
	return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
	    0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

/* Read the Unit descriptor of the given LUN (descriptor index = LUN). */
static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
	return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
	    desc, sizeof(struct ufshci_unit_descriptor)));
}
69 70 static int 71 ufshci_dev_read_flag(struct ufshci_controller *ctrlr, 72 enum ufshci_flags flag_type, uint8_t *flag) 73 { 74 struct ufshci_completion_poll_status status; 75 struct ufshci_query_param param; 76 77 param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; 78 param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG; 79 param.type = flag_type; 80 param.index = 0; 81 param.selector = 0; 82 param.value = 0; 83 84 status.done = 0; 85 ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, 86 &status, param); 87 ufshci_completion_poll(&status); 88 if (status.error) { 89 ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n"); 90 return (ENXIO); 91 } 92 93 *flag = status.cpl.response_upiu.query_response_upiu.flag_value; 94 95 return (0); 96 } 97 98 static int 99 ufshci_dev_set_flag(struct ufshci_controller *ctrlr, 100 enum ufshci_flags flag_type) 101 { 102 struct ufshci_completion_poll_status status; 103 struct ufshci_query_param param; 104 105 param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; 106 param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG; 107 param.type = flag_type; 108 param.index = 0; 109 param.selector = 0; 110 param.value = 0; 111 112 status.done = 0; 113 ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, 114 &status, param); 115 ufshci_completion_poll(&status); 116 if (status.error) { 117 ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n"); 118 return (ENXIO); 119 } 120 121 return (0); 122 } 123 124 static int 125 ufshci_dev_clear_flag(struct ufshci_controller *ctrlr, 126 enum ufshci_flags flag_type) 127 { 128 struct ufshci_completion_poll_status status; 129 struct ufshci_query_param param; 130 131 param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; 132 param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG; 133 param.type = flag_type; 134 param.index = 0; 135 param.selector = 0; 136 param.value = 0; 137 138 status.done = 0; 139 ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, 140 &status, param); 
141 ufshci_completion_poll(&status); 142 if (status.error) { 143 ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n"); 144 return (ENXIO); 145 } 146 147 return (0); 148 } 149 150 static int 151 ufshci_dev_read_attribute(struct ufshci_controller *ctrlr, 152 enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, 153 uint64_t *value) 154 { 155 struct ufshci_completion_poll_status status; 156 struct ufshci_query_param param; 157 158 param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST; 159 param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE; 160 param.type = attr_type; 161 param.index = index; 162 param.selector = selector; 163 param.value = 0; 164 165 status.done = 0; 166 ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, 167 &status, param); 168 ufshci_completion_poll(&status); 169 if (status.error) { 170 ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n"); 171 return (ENXIO); 172 } 173 174 *value = status.cpl.response_upiu.query_response_upiu.value_64; 175 176 return (0); 177 } 178 179 static int 180 ufshci_dev_write_attribute(struct ufshci_controller *ctrlr, 181 enum ufshci_attributes attr_type, uint8_t index, uint8_t selector, 182 uint64_t value) 183 { 184 struct ufshci_completion_poll_status status; 185 struct ufshci_query_param param; 186 187 param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST; 188 param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE; 189 param.type = attr_type; 190 param.index = index; 191 param.selector = selector; 192 param.value = value; 193 194 status.done = 0; 195 ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb, 196 &status, param); 197 ufshci_completion_poll(&status); 198 if (status.error) { 199 ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n"); 200 return (ENXIO); 201 } 202 203 return (0); 204 } 205 206 int 207 ufshci_dev_init(struct ufshci_controller *ctrlr) 208 { 209 int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms); 210 sbintime_t 
delta_t = SBT_1US; 211 uint8_t flag; 212 int error; 213 const uint8_t device_init_completed = 0; 214 215 error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT); 216 if (error) 217 return (error); 218 219 /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */ 220 while (1) { 221 error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT, 222 &flag); 223 if (error) 224 return (error); 225 if (flag == device_init_completed) 226 break; 227 if (timeout - ticks < 0) { 228 ufshci_printf(ctrlr, 229 "device init did not become %d " 230 "within %d ms\n", 231 device_init_completed, 232 ctrlr->device_init_timeout_in_ms); 233 return (ENXIO); 234 } 235 236 pause_sbt("ufshciinit", delta_t, 0, C_PREL(1)); 237 delta_t = min(SBT_1MS, delta_t * 3 / 2); 238 } 239 240 return (0); 241 } 242 243 int 244 ufshci_dev_reset(struct ufshci_controller *ctrlr) 245 { 246 if (ufshci_uic_send_dme_endpoint_reset(ctrlr)) 247 return (ENXIO); 248 249 return (ufshci_dev_init(ctrlr)); 250 } 251 252 int 253 ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr) 254 { 255 int error; 256 uint8_t index, selector; 257 258 index = 0; /* bRefClkFreq is device type attribute */ 259 selector = 0; /* bRefClkFreq is device type attribute */ 260 261 error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ, 262 index, selector, ctrlr->ref_clk); 263 if (error) 264 return (error); 265 266 return (0); 267 } 268 269 int 270 ufshci_dev_init_unipro(struct ufshci_controller *ctrlr) 271 { 272 uint32_t pa_granularity, peer_pa_granularity; 273 uint32_t t_activate, pear_t_activate; 274 275 /* 276 * Unipro Version: 277 * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41, 278 * 1 = 1.40, 0 = Reserved 279 */ 280 if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo, 281 &ctrlr->unipro_version)) 282 return (ENXIO); 283 if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo, 284 &ctrlr->ufs_dev.unipro_version)) 285 return (ENXIO); 286 287 /* 288 * PA_Granularity: Granularity for 
PA_TActivate and PA_Hibern8Time 289 * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us 290 */ 291 if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity)) 292 return (ENXIO); 293 if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, 294 &peer_pa_granularity)) 295 return (ENXIO); 296 297 /* 298 * PA_TActivate: Time to wait before activating a burst in order to 299 * wake-up peer M-RX 300 * UniPro automatically sets timing information such as PA_TActivate 301 * through the PACP_CAP_EXT1_ind command during Link Startup operation. 302 */ 303 if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate)) 304 return (ENXIO); 305 if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &pear_t_activate)) 306 return (ENXIO); 307 308 if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) { 309 /* 310 * Intel Lake-field UFSHCI has a quirk. We need to add 200us to 311 * the PEER's PA_TActivate. 312 */ 313 if (pa_granularity == peer_pa_granularity) { 314 pear_t_activate = t_activate + 2; 315 if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate, 316 pear_t_activate)) 317 return (ENXIO); 318 } 319 } 320 321 return (0); 322 } 323 324 int 325 ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr) 326 { 327 /* HSSerise: A = 1, B = 2 */ 328 const uint32_t hs_series = 2; 329 /* 330 * TX/RX PWRMode: 331 * - TX[3:0], RX[7:4] 332 * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5 333 */ 334 const uint32_t fast_mode = 1; 335 const uint32_t rx_bit_shift = 4; 336 uint32_t power_mode, peer_granularity; 337 338 /* Update lanes with available TX/RX lanes */ 339 if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes, 340 &ctrlr->max_tx_lanes)) 341 return (ENXIO); 342 if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes, 343 &ctrlr->max_rx_lanes)) 344 return (ENXIO); 345 346 /* Get max HS-GEAR value */ 347 if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear, 348 &ctrlr->max_rx_hs_gear)) 349 return (ENXIO); 350 351 /* Set the data lane to max */ 352 
ctrlr->tx_lanes = ctrlr->max_tx_lanes; 353 ctrlr->rx_lanes = ctrlr->max_rx_lanes; 354 if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes, 355 ctrlr->tx_lanes)) 356 return (ENXIO); 357 if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes, 358 ctrlr->rx_lanes)) 359 return (ENXIO); 360 361 if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) { 362 /* Before changing gears, first change the number of lanes. */ 363 if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode)) 364 return (ENXIO); 365 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) 366 return (ENXIO); 367 368 /* Wait for power mode changed. */ 369 if (ufshci_uic_power_mode_ready(ctrlr)) { 370 ufshci_reg_dump(ctrlr); 371 return (ENXIO); 372 } 373 } 374 375 /* Set HS-GEAR to max gear */ 376 ctrlr->hs_gear = ctrlr->max_rx_hs_gear; 377 if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear)) 378 return (ENXIO); 379 if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear)) 380 return (ENXIO); 381 382 /* 383 * Set termination 384 * - HS-MODE = ON / LS-MODE = OFF 385 */ 386 if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true)) 387 return (ENXIO); 388 if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true)) 389 return (ENXIO); 390 391 /* Set HSSerise (A = 1, B = 2) */ 392 if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series)) 393 return (ENXIO); 394 395 /* Set Timeout values */ 396 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0, 397 DL_FC0ProtectionTimeOutVal_Default)) 398 return (ENXIO); 399 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1, 400 DL_TC0ReplayTimeOutVal_Default)) 401 return (ENXIO); 402 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2, 403 DL_AFC0ReqTimeOutVal_Default)) 404 return (ENXIO); 405 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3, 406 DL_FC0ProtectionTimeOutVal_Default)) 407 return (ENXIO); 408 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4, 409 DL_TC0ReplayTimeOutVal_Default)) 410 return (ENXIO); 
411 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5, 412 DL_AFC0ReqTimeOutVal_Default)) 413 return (ENXIO); 414 415 if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal, 416 DL_FC0ProtectionTimeOutVal_Default)) 417 return (ENXIO); 418 if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal, 419 DL_TC0ReplayTimeOutVal_Default)) 420 return (ENXIO); 421 if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal, 422 DL_AFC0ReqTimeOutVal_Default)) 423 return (ENXIO); 424 425 /* Set TX/RX PWRMode */ 426 power_mode = (fast_mode << rx_bit_shift) | fast_mode; 427 if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode)) 428 return (ENXIO); 429 430 /* Wait for power mode changed. */ 431 if (ufshci_uic_power_mode_ready(ctrlr)) { 432 ufshci_reg_dump(ctrlr); 433 return (ENXIO); 434 } 435 436 /* Clear 'Power Mode completion status' */ 437 ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS)); 438 439 if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) { 440 /* 441 * Intel Lake-field UFSHCI has a quirk. 442 * We need to wait 1250us and clear dme error. 443 */ 444 pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1)); 445 446 /* Test with dme_peer_get to make sure there are no errors. */ 447 if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, 448 &peer_granularity)) 449 return (ENXIO); 450 } 451 452 return (0); 453 } 454 455 int 456 ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr) 457 { 458 /* TODO: Need to implement */ 459 460 return (0); 461 } 462 463 int 464 ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr) 465 { 466 struct ufshci_device *device = &ctrlr->ufs_dev; 467 /* 468 * The kDeviceDensityUnit is defined in the spec as 512. 469 * qTotalRawDeviceCapacity use big-endian byte ordering. 
470 */ 471 const uint32_t device_density_unit = 512; 472 uint32_t ver; 473 int error; 474 475 error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc); 476 if (error) 477 return (error); 478 479 ver = be16toh(device->dev_desc.wSpecVersion); 480 ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n", 481 UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver), 482 UFSHCIV(UFSHCI_VER_REG_VS, ver)); 483 ufshci_printf(ctrlr, "%u enabled LUNs found\n", 484 device->dev_desc.bNumberLU); 485 486 error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc); 487 if (error) 488 return (error); 489 490 if (device->geo_desc.bMaxNumberLU == 0) { 491 device->max_lun_count = 8; 492 } else if (device->geo_desc.bMaxNumberLU == 1) { 493 device->max_lun_count = 32; 494 } else { 495 ufshci_printf(ctrlr, 496 "Invalid Geometry Descriptor bMaxNumberLU value=%d\n", 497 device->geo_desc.bMaxNumberLU); 498 return (ENXIO); 499 } 500 ctrlr->max_lun_count = device->max_lun_count; 501 502 ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n", 503 be64toh(device->geo_desc.qTotalRawDeviceCapacity) * 504 device_density_unit); 505 506 return (0); 507 } 508 509 static int 510 ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr) 511 { 512 struct ufshci_device *dev = &ctrlr->ufs_dev; 513 int error; 514 515 /* Enable WriteBooster */ 516 error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); 517 if (error) { 518 ufshci_printf(ctrlr, "Failed to enable WriteBooster\n"); 519 return (error); 520 } 521 dev->is_wb_enabled = true; 522 523 /* Enable WriteBooster buffer flush during hibernate */ 524 error = ufshci_dev_set_flag(ctrlr, 525 UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); 526 if (error) { 527 ufshci_printf(ctrlr, 528 "Failed to enable WriteBooster buffer flush during hibernate\n"); 529 return (error); 530 } 531 532 /* Enable WriteBooster buffer flush */ 533 error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); 534 if 
(error) { 535 ufshci_printf(ctrlr, 536 "Failed to enable WriteBooster buffer flush\n"); 537 return (error); 538 } 539 dev->is_wb_flush_enabled = true; 540 541 return (0); 542 } 543 544 static int 545 ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr) 546 { 547 struct ufshci_device *dev = &ctrlr->ufs_dev; 548 int error; 549 550 /* Disable WriteBooster buffer flush */ 551 error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN); 552 if (error) { 553 ufshci_printf(ctrlr, 554 "Failed to disable WriteBooster buffer flush\n"); 555 return (error); 556 } 557 dev->is_wb_flush_enabled = false; 558 559 /* Disable WriteBooster buffer flush during hibernate */ 560 error = ufshci_dev_clear_flag(ctrlr, 561 UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE); 562 if (error) { 563 ufshci_printf(ctrlr, 564 "Failed to disable WriteBooster buffer flush during hibernate\n"); 565 return (error); 566 } 567 568 /* Disable WriteBooster */ 569 error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN); 570 if (error) { 571 ufshci_printf(ctrlr, "Failed to disable WriteBooster\n"); 572 return (error); 573 } 574 dev->is_wb_enabled = false; 575 576 return (0); 577 } 578 579 static int 580 ufshci_dev_is_write_booster_buffer_life_time_left( 581 struct ufshci_controller *ctrlr, bool *is_life_time_left) 582 { 583 struct ufshci_device *dev = &ctrlr->ufs_dev; 584 uint8_t buffer_lun; 585 uint64_t life_time; 586 uint32_t error; 587 588 if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) 589 buffer_lun = dev->wb_dedicated_lu; 590 else 591 buffer_lun = 0; 592 593 error = ufshci_dev_read_attribute(ctrlr, 594 UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time); 595 if (error) 596 return (error); 597 598 *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED); 599 600 return (0); 601 } 602 603 /* 604 * This function is not yet in use. It will be used when suspend/resume is 605 * implemented. 
606 */ 607 static __unused int 608 ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr, 609 bool *need_flush) 610 { 611 struct ufshci_device *dev = &ctrlr->ufs_dev; 612 bool is_life_time_left = false; 613 uint64_t available_buffer_size, current_buffer_size; 614 uint8_t buffer_lun; 615 uint32_t error; 616 617 *need_flush = false; 618 619 if (!dev->is_wb_enabled) 620 return (0); 621 622 error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, 623 &is_life_time_left); 624 if (error) 625 return (error); 626 627 if (!is_life_time_left) 628 return (ufshci_dev_disable_write_booster(ctrlr)); 629 630 if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) 631 buffer_lun = dev->wb_dedicated_lu; 632 else 633 buffer_lun = 0; 634 635 error = ufshci_dev_read_attribute(ctrlr, 636 UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0, 637 &available_buffer_size); 638 if (error) 639 return (error); 640 641 switch (dev->wb_user_space_config_option) { 642 case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION: 643 *need_flush = (available_buffer_size <= 644 UFSHCI_ATTR_WB_AVAILABLE_10); 645 break; 646 case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE: 647 /* 648 * In PRESERVE USER SPACE mode, flush should be performed when 649 * the current buffer is greater than 0 and the available buffer 650 * below write_booster_flush_threshold is left. 651 */ 652 error = ufshci_dev_read_attribute(ctrlr, 653 UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0, 654 ¤t_buffer_size); 655 if (error) 656 return (error); 657 658 if (current_buffer_size == 0) 659 return (0); 660 661 *need_flush = (available_buffer_size < 662 dev->write_booster_flush_threshold); 663 break; 664 default: 665 ufshci_printf(ctrlr, 666 "Invalid bWriteBoosterBufferPreserveUserSpaceEn value"); 667 return (EINVAL); 668 } 669 670 /* 671 * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from 672 * wExceptionEventStatus attribute. 
673 */ 674 675 return (0); 676 } 677 678 int 679 ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr) 680 { 681 struct ufshci_device *dev = &ctrlr->ufs_dev; 682 uint32_t extended_ufs_feature_support; 683 uint32_t alloc_units; 684 struct ufshci_unit_descriptor unit_desc; 685 uint8_t lun; 686 bool is_life_time_left; 687 uint32_t mega_byte = 1024 * 1024; 688 uint32_t error = 0; 689 690 extended_ufs_feature_support = be32toh( 691 dev->dev_desc.dExtendedUfsFeaturesSupport); 692 if (!(extended_ufs_feature_support & 693 UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) { 694 /* This device does not support Write Booster */ 695 return (0); 696 } 697 698 if (ufshci_dev_enable_write_booster(ctrlr)) 699 return (0); 700 701 /* Get WriteBooster buffer parameters */ 702 dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType; 703 dev->wb_user_space_config_option = 704 dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn; 705 706 /* 707 * Find the size of the write buffer. 708 * With LU-dedicated (00h), the WriteBooster buffer is assigned 709 * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h) 710 * uses a single device-wide buffer shared by multiple LUs. 
711 */ 712 if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) { 713 alloc_units = be32toh( 714 dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits); 715 ufshci_printf(ctrlr, 716 "WriteBooster buffer type = Shared, alloc_units=%d\n", 717 alloc_units); 718 } else if (dev->wb_buffer_type == 719 UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) { 720 ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n"); 721 for (lun = 0; lun < ctrlr->max_lun_count; lun++) { 722 /* Find a dedicated buffer using a unit descriptor */ 723 if (ufshci_dev_read_unit_descriptor(ctrlr, lun, 724 &unit_desc)) 725 continue; 726 727 alloc_units = be32toh( 728 unit_desc.dLUNumWriteBoosterBufferAllocUnits); 729 if (alloc_units) { 730 dev->wb_dedicated_lu = lun; 731 break; 732 } 733 } 734 } else { 735 ufshci_printf(ctrlr, 736 "Not supported WriteBooster buffer type: 0x%x\n", 737 dev->wb_buffer_type); 738 goto out; 739 } 740 741 if (alloc_units == 0) { 742 ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n"); 743 goto out; 744 } 745 746 dev->wb_buffer_size_mb = alloc_units * 747 dev->geo_desc.bAllocationUnitSize * 748 (be32toh(dev->geo_desc.dSegmentSize)) / 749 (mega_byte / UFSHCI_SECTOR_SIZE); 750 751 /* Set to flush when 40% of the available buffer size remains */ 752 dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40; 753 754 /* 755 * Check if WriteBooster Buffer lifetime is left. 756 * WriteBooster Buffer lifetime — percent of life used based on P/E 757 * cycles. If "preserve user space" is enabled, writes to normal user 758 * space also consume WB life since the area is shared. 
759 */ 760 error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr, 761 &is_life_time_left); 762 if (error) 763 goto out; 764 765 if (!is_life_time_left) { 766 ufshci_printf(ctrlr, 767 "There is no WriteBooster buffer life time left.\n"); 768 goto out; 769 } 770 771 ufshci_printf(ctrlr, "WriteBooster Enabled\n"); 772 return (0); 773 out: 774 ufshci_dev_disable_write_booster(ctrlr); 775 return (error); 776 } 777