// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Western Digital Corporation

#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>
#include <linux/string_choices.h>

#include <ufs/ufs.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"

static const char *ufs_pa_pwr_mode_to_string(enum ufs_pa_pwr_mode mode)
{
	switch (mode) {
	case FAST_MODE: return "FAST_MODE";
	case SLOW_MODE: return "SLOW_MODE";
	case FASTAUTO_MODE: return "FASTAUTO_MODE";
	case SLOWAUTO_MODE: return "SLOWAUTO_MODE";
	default: return "UNKNOWN";
	}
}

static const char *ufs_hs_gear_rate_to_string(enum ufs_hs_gear_rate rate)
{
	switch (rate) {
	case PA_HS_MODE_A: return "HS_RATE_A";
	case PA_HS_MODE_B: return "HS_RATE_B";
	default: return "UNKNOWN";
	}
}

static const char *ufs_pwm_gear_to_string(enum ufs_pwm_gear_tag gear)
{
	switch (gear) {
	case UFS_PWM_G1: return "PWM_GEAR1";
	case UFS_PWM_G2: return "PWM_GEAR2";
	case UFS_PWM_G3: return "PWM_GEAR3";
	case UFS_PWM_G4: return "PWM_GEAR4";
	case UFS_PWM_G5: return "PWM_GEAR5";
	case UFS_PWM_G6: return "PWM_GEAR6";
	case UFS_PWM_G7: return "PWM_GEAR7";
	default: return "UNKNOWN";
	}
}

static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
{
	switch (gear) {
	case UFS_HS_G1: return "HS_GEAR1";
	case UFS_HS_G2: return "HS_GEAR2";
	case UFS_HS_G3: return "HS_GEAR3";
	case UFS_HS_G4: return "HS_GEAR4";
	case UFS_HS_G5: return "HS_GEAR5";
	default: return "UNKNOWN";
	}
}

static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
{
	switch (hint) {
	case WB_RESIZE_HINT_KEEP:
		return "keep";
	case WB_RESIZE_HINT_DECREASE:
		return "decrease";
	case WB_RESIZE_HINT_INCREASE:
		return "increase";
	default:
		return "unknown";
	}
}

static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
{
	switch (status) {
	case WB_RESIZE_STATUS_IDLE:
		return "idle";
	case WB_RESIZE_STATUS_IN_PROGRESS:
		return "in_progress";
	case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
		return "complete_success";
	case WB_RESIZE_STATUS_GENERAL_FAILURE:
		return "general_failure";
	default:
		return "unknown";
	}
}

static const char * const ufs_hid_states[] = {
	[HID_IDLE] = "idle",
	[ANALYSIS_IN_PROGRESS] = "analysis_in_progress",
	[DEFRAG_REQUIRED] = "defrag_required",
	[DEFRAG_IN_PROGRESS] = "defrag_in_progress",
	[DEFRAG_COMPLETED] = "defrag_completed",
	[DEFRAG_NOT_REQUIRED] = "defrag_not_required",
};

static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
{
	if (state < NUM_UFS_HID_STATES)
		return ufs_hid_states[state];

	return "unknown";
}

static const char *ufshcd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE: return "OFF";
	case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
	case UIC_LINK_BROKEN_STATE: return "BROKEN";
	default: return "UNKNOWN";
	}
}

static const char *ufshcd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
	case UFS_SLEEP_PWR_MODE: return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
	case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
	default: return "UNKNOWN";
	}
}

static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count,
					     bool rpm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_dev_info *dev_info = &hba->dev_info;
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	if (value >= UFS_PM_LVL_MAX || value < hba->pm_lvl_min)
		return -EINVAL;

	if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
	    (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
	     !(dev_info->wspecversion >= 0x310)))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (rpm)
		hba->rpm_lvl = value;
	else
		hba->spm_lvl = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t rpm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}

static ssize_t rpm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}

static ssize_t rpm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}

static ssize_t rpm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
			ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}

static ssize_t spm_lvl_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}

static ssize_t spm_lvl_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}

static ssize_t spm_target_dev_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
			ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}

static ssize_t spm_target_link_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
			ufs_pm_lvl_states[hba->spm_lvl].link_state));
}

/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
	int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
	int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);

	for (; scale > 0; --scale)
		timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return timer;
}

/* Convert microseconds to Auto-Hibernate Idle Timer register value */
u32 ufshcd_us_to_ahit(unsigned int timer)
{
	unsigned int scale;

	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;

	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
EXPORT_SYMBOL_GPL(ufshcd_us_to_ahit);
static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
{
	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);
	*val = ufshcd_readl(hba, reg);
	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return 0;
}

static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}

static ssize_t auto_hibern8_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int timer;
	int ret = 0;

	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;

	if (kstrtouint(buf, 0, &timer))
		return -EINVAL;

	if (timer > UFSHCI_AHIBERN8_MAX)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));

out:
	up(&hba->host_sem);
	return ret ? ret : count;
}

static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
}

static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_enable;
	ssize_t res;

	if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
		&& ufshcd_enable_wb_if_scaling_up(hba))) {
		/*
		 * If the platform supports UFSHCD_CAP_CLK_SCALING, turning WB
		 * on/off is done while scaling the clocks up/down.
		 */
		dev_warn(dev, "It is not allowed to configure WB!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &wb_enable))
		return -EINVAL;

	if (wb_enable != 0 && wb_enable != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle(hba, wb_enable);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}
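/*
 * Illustrative usage of the wb_on attribute above (the sysfs path is a
 * placeholder and depends on how the host controller device is registered):
 *
 *   # echo 1 > /sys/bus/platform/drivers/ufshcd/<hba>/wb_on
 *
 * Writes are rejected with -EOPNOTSUPP when WriteBooster is managed by the
 * clock scaling code instead of user space.
 */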
static ssize_t rtc_update_ms_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.rtc_update_period);
}

static ssize_t rtc_update_ms_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int ms;
	bool resume_period_update = false;

	if (kstrtouint(buf, 0, &ms))
		return -EINVAL;

	if (!hba->dev_info.rtc_update_period && ms > 0)
		resume_period_update = true;
	/* Minimum and maximum update frequency should be synchronized with all UFS vendors */
	hba->dev_info.rtc_update_period = ms;

	if (resume_period_update)
		schedule_delayed_work(&hba->ufs_rtc_update_work,
				      msecs_to_jiffies(hba->dev_info.rtc_update_period));
	return count;
}

static ssize_t enable_wb_buf_flush_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
}

static ssize_t enable_wb_buf_flush_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int enable_wb_buf_flush;
	ssize_t res;

	if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
		dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
		return -EOPNOTSUPP;
	}

	if (kstrtouint(buf, 0, &enable_wb_buf_flush))
		return -EINVAL;

	if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}
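/*
 * Note on the wb_flush_threshold attribute below: the accepted range is
 * (0,10], i.e. the value is expressed in tenths of the WriteBooster buffer
 * (assuming UFS_WB_BUF_REMAIN_PERCENT(100) evaluates to 10). For example,
 * writing 4 selects a threshold of 40% of the available buffer.
 */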
static ssize_t wb_flush_threshold_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold);
}

static ssize_t wb_flush_threshold_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int wb_flush_threshold;

	if (kstrtouint(buf, 0, &wb_flush_threshold))
		return -EINVAL;

	/* The range of values for wb_flush_threshold is (0,10] */
	if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) ||
	    wb_flush_threshold == 0) {
		dev_err(dev, "The value of wb_flush_threshold is invalid!\n");
		return -EINVAL;
	}

	hba->vps->wb_flush_threshold = wb_flush_threshold;

	return count;
}

static const char * const wb_resize_en_mode[] = {
	[WB_RESIZE_EN_IDLE] = "idle",
	[WB_RESIZE_EN_DECREASE] = "decrease",
	[WB_RESIZE_EN_INCREASE] = "increase",
};

static ssize_t wb_resize_enable_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int mode;
	ssize_t res;

	if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
		|| !hba->dev_info.b_presrv_uspc_en
		|| !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
		return -EOPNOTSUPP;

	mode = sysfs_match_string(wb_resize_en_mode, buf);
	if (mode < 0)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		res = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	res = ufshcd_wb_set_resize_en(hba, mode);
	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return res < 0 ? res : count;
}

/**
 * pm_qos_enable_show - sysfs handler to show pm qos enable value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 *
 * Print 1 if PM QoS feature is enabled, 0 if disabled.
 *
 * Returns number of characters written to @buf.
 */
static ssize_t pm_qos_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	guard(mutex)(&hba->pm_qos_mutex);

	return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}

/**
 * pm_qos_enable_store - sysfs handler to store value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 * @count: stores buffer characters count
 *
 * Input 0 to disable PM QoS and 1 value to enable.
 * Default state: 1
 *
 * Return: number of characters written to @buf on success, < 0 upon failure.
 */
static ssize_t pm_qos_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	bool value;

	if (kstrtobool(buf, &value))
		return -EINVAL;

	if (value)
		ufshcd_pm_qos_init(hba);
	else
		ufshcd_pm_qos_exit(hba);

	return count;
}

static ssize_t critical_health_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}

static ssize_t device_lvl_exception_count_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (hba->dev_info.wspecversion < 0x410)
		return -EOPNOTSUPP;

	return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
}

static ssize_t device_lvl_exception_count_store(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int value;

	if (kstrtouint(buf, 0, &value))
		return -EINVAL;

	/* the only supported use case is to reset the dev_lvl_exception_count */
	if (value)
		return -EINVAL;

	atomic_set(&hba->dev_lvl_exception_count, 0);

	return count;
}

static ssize_t device_lvl_exception_id_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u64 exception_id;
	int err;

	ufshcd_rpm_get_sync(hba);
	err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
	ufshcd_rpm_put_sync(hba);

	if (err)
		return err;

	hba->dev_lvl_exception_id = exception_id;
	return sysfs_emit(buf, "%llu\n", exception_id);
}

static ssize_t dme_qos_notification_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%x\n", atomic_read(&hba->dme_qos_notification));
}

static ssize_t dme_qos_notification_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int value;

	if (kstrtouint(buf, 0, &value))
		return -EINVAL;

	/* the only supported use case is to reset the dme_qos_notification */
	if (value)
		return -EINVAL;

	atomic_set(&hba->dme_qos_notification, 0);

	return count;
}

static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
static DEVICE_ATTR_RW(device_lvl_exception_count);
static DEVICE_ATTR_RO(device_lvl_exception_id);
static DEVICE_ATTR_RW(dme_qos_notification);

static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
	&dev_attr_rpm_lvl.attr,
	&dev_attr_rpm_target_dev_state.attr,
	&dev_attr_rpm_target_link_state.attr,
	&dev_attr_spm_lvl.attr,
	&dev_attr_spm_target_dev_state.attr,
	&dev_attr_spm_target_link_state.attr,
	&dev_attr_auto_hibern8.attr,
	&dev_attr_wb_on.attr,
	&dev_attr_enable_wb_buf_flush.attr,
	&dev_attr_wb_flush_threshold.attr,
	&dev_attr_wb_resize_enable.attr,
	&dev_attr_rtc_update_ms.attr,
	&dev_attr_pm_qos_enable.attr,
	&dev_attr_critical_health.attr,
	&dev_attr_device_lvl_exception_count.attr,
	&dev_attr_device_lvl_exception_id.attr,
	&dev_attr_dme_qos_notification.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_default_group = {
	.attrs = ufs_sysfs_ufshcd_attrs,
};

static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
}

static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
				  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
}

static DEVICE_ATTR_RO(clock_scaling);
static DEVICE_ATTR_RO(write_booster);

/*
 * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
 * group.
 */
static struct attribute *ufs_sysfs_capabilities_attrs[] = {
	&dev_attr_clock_scaling.attr,
	&dev_attr_write_booster.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_capabilities_group = {
	.name = "capabilities",
	.attrs = ufs_sysfs_capabilities_attrs,
};

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
}

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int ret;
	u32 val;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%x\n", val);
}

static ssize_t man_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int ret;
	u32 val;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%x\n", val);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(product_id);
static DEVICE_ATTR_RO(man_id);

static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_product_id.attr,
	&dev_attr_man_id.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_ufshci_group = {
	.name = "ufshci_capabilities",
	.attrs = ufs_sysfs_ufshci_cap_attrs,
};

static ssize_t monitor_enable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
}

static ssize_t monitor_enable_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (value == hba->monitor.enabled)
		goto out_unlock;

	if (!value) {
		memset(&hba->monitor, 0, sizeof(hba->monitor));
	} else {
		hba->monitor.enabled = true;
		hba->monitor.enabled_ts = ktime_get();
	}

out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}
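/*
 * Illustrative monitor workflow (paths are placeholders):
 *
 *   # echo 4096 > .../monitor/monitor_chunk_size
 *   # echo 1 > .../monitor/monitor_enable
 *   ... run a workload ...
 *   # cat .../monitor/read_req_latency_avg
 *
 * monitor_chunk_size can only be changed while the monitor is disabled, and
 * the busy/latency counters below are reported in microseconds.
 */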
static ssize_t monitor_chunk_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
}

static ssize_t monitor_chunk_size_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long value, flags;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* Only allow chunk size change when monitor is disabled */
	if (!hba->monitor.enabled)
		hba->monitor.chunk_size = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t read_total_sectors_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
}

static ssize_t read_total_busy_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[READ]));
}

static ssize_t read_nr_requests_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
}

static ssize_t read_req_latency_avg_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	if (!m->nr_req[READ])
		return sysfs_emit(buf, "0\n");

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
						 m->nr_req[READ]));
}

static ssize_t read_req_latency_max_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[READ]));
}

static ssize_t read_req_latency_min_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[READ]));
}

static ssize_t read_req_latency_sum_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[READ]));
}

static ssize_t write_total_sectors_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
}

static ssize_t write_total_busy_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.total_busy[WRITE]));
}

static ssize_t write_nr_requests_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
}

static ssize_t write_req_latency_avg_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba_monitor *m = &hba->monitor;

	if (!m->nr_req[WRITE])
		return sysfs_emit(buf, "0\n");

	return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
						 m->nr_req[WRITE]));
}

static ssize_t write_req_latency_max_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_max[WRITE]));
}

static ssize_t write_req_latency_min_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_min[WRITE]));
}

static ssize_t write_req_latency_sum_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%llu\n",
			  ktime_to_us(hba->monitor.lat_sum[WRITE]));
}

static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);

static struct attribute *ufs_sysfs_monitor_attrs[] = {
	&dev_attr_monitor_enable.attr,
	&dev_attr_monitor_chunk_size.attr,
	&dev_attr_read_total_sectors.attr,
	&dev_attr_read_total_busy.attr,
	&dev_attr_read_nr_requests.attr,
	&dev_attr_read_req_latency_avg.attr,
	&dev_attr_read_req_latency_max.attr,
	&dev_attr_read_req_latency_min.attr,
	&dev_attr_read_req_latency_sum.attr,
	&dev_attr_write_total_sectors.attr,
	&dev_attr_write_total_busy.attr,
	&dev_attr_write_nr_requests.attr,
	&dev_attr_write_req_latency_avg.attr,
	&dev_attr_write_req_latency_max.attr,
	&dev_attr_write_req_latency_min.attr,
	&dev_attr_write_req_latency_sum.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_monitor_group = {
	.name = "monitor",
	.attrs = ufs_sysfs_monitor_attrs,
};

static ssize_t lane_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", hba->pwr_info.lane_rx);
}

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufs_pa_pwr_mode_to_string(hba->pwr_info.pwr_rx));
}

static ssize_t rate_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufs_hs_gear_rate_to_string(hba->pwr_info.hs_rate));
}

static ssize_t gear_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", hba->pwr_info.hs_rate ?
			  ufs_hs_gear_to_string(hba->pwr_info.gear_rx) :
			  ufs_pwm_gear_to_string(hba->pwr_info.gear_rx));
}

static ssize_t dev_pm_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(hba->curr_dev_pwr_mode));
}

static ssize_t link_state_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(hba->uic_link_state));
}

static DEVICE_ATTR_RO(lane);
static DEVICE_ATTR_RO(mode);
static DEVICE_ATTR_RO(rate);
static DEVICE_ATTR_RO(gear);
static DEVICE_ATTR_RO(dev_pm);
static DEVICE_ATTR_RO(link_state);

static struct attribute *ufs_power_info_attrs[] = {
	&dev_attr_lane.attr,
	&dev_attr_mode.attr,
	&dev_attr_rate.attr,
	&dev_attr_gear.attr,
	&dev_attr_dev_pm.attr,
	&dev_attr_link_state.attr,
	NULL
};

static const struct attribute_group ufs_sysfs_power_info_group = {
	.name = "power_info",
	.attrs = ufs_power_info_attrs,
};

static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
					 enum desc_idn desc_id,
					 u8 desc_index,
					 u8 param_offset,
					 u8 *sysfs_buf,
					 u8 param_size)
{
	u8 desc_buf[8] = {0};
	int ret;

	if (param_size > 8)
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
				     param_offset, desc_buf, param_size);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	switch (param_size) {
	case 1:
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
		break;
	case 2:
		ret = sysfs_emit(sysfs_buf, "0x%04X\n",
				 get_unaligned_be16(desc_buf));
		break;
	case 4:
		ret = sysfs_emit(sysfs_buf, "0x%08X\n",
				 get_unaligned_be32(desc_buf));
		break;
	case 8:
		ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
				 get_unaligned_be64(desc_buf));
		break;
	}

out:
	up(&hba->host_sem);
	return ret;
}

#define UFS_DESC_PARAM(_name, _puname, _duname, _size)	\
static ssize_t _name##_show(struct device *dev,	\
	struct device_attribute *attr, char *buf)	\
{	\
	struct ufs_hba *hba = dev_get_drvdata(dev);	\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname,	\
		0, _duname##_DESC_PARAM##_puname, buf, _size);	\
}	\
static DEVICE_ATTR_RO(_name)

#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size)	\
	UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_device_descriptor[] = {
	&dev_attr_device_type.attr,
	&dev_attr_device_class.attr,
	&dev_attr_device_sub_class.attr,
	&dev_attr_protocol.attr,
	&dev_attr_number_of_luns.attr,
	&dev_attr_number_of_wluns.attr,
	&dev_attr_boot_enable.attr,
	&dev_attr_descriptor_access_enable.attr,
	&dev_attr_initial_power_mode.attr,
	&dev_attr_high_priority_lun.attr,
	&dev_attr_secure_removal_type.attr,
	&dev_attr_support_security_lun.attr,
	&dev_attr_bkops_termination_latency.attr,
	&dev_attr_initial_active_icc_level.attr,
	&dev_attr_specification_version.attr,
	&dev_attr_manufacturing_date.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_rtt_capability.attr,
	&dev_attr_rtc_update.attr,
	&dev_attr_ufs_features.attr,
	&dev_attr_ffu_timeout.attr,
	&dev_attr_queue_depth.attr,
	&dev_attr_device_version.attr,
	&dev_attr_number_of_secure_wpa.attr,
	&dev_attr_psa_max_data_size.attr,
	&dev_attr_psa_state_timeout.attr,
	&dev_attr_ext_feature_sup.attr,
	&dev_attr_wb_presv_us_en.attr,
	&dev_attr_wb_type.attr,
	&dev_attr_wb_shared_alloc_units.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_device_descriptor_group = {
	.name = "device_descriptor",
	.attrs = ufs_sysfs_device_descriptor,
};

#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size)	\
	UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)

UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);

static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
	&dev_attr_unipro_version.attr,
	&dev_attr_mphy_version.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
	.name = "interconnect_descriptor",
	.attrs = ufs_sysfs_interconnect_descriptor,
};
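/*
 * For reference (illustrative expansion, not generated code): each
 * UFS_*_DESC_PARAM() invocation above and below expands through
 * UFS_DESC_PARAM(); e.g. UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1)
 * defines device_type_show(), which reads one byte at offset
 * DEVICE_DESC_PARAM_DEVICE_TYPE of the device descriptor via
 * ufs_sysfs_read_desc_param(), and declares DEVICE_ATTR_RO(device_type).
 */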
#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size)	\
	UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)

UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
	_SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
	_SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
	_NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
	_NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
	_ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
	_ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
	_ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
	_ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
	_ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
	_ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
	_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
	_ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);

static struct attribute *ufs_sysfs_geometry_descriptor[] = {
	&dev_attr_raw_device_capacity.attr,
	&dev_attr_max_number_of_luns.attr,
	&dev_attr_segment_size.attr,
	&dev_attr_allocation_unit_size.attr,
	&dev_attr_min_addressable_block_size.attr,
	&dev_attr_optimal_read_block_size.attr,
	&dev_attr_optimal_write_block_size.attr,
	&dev_attr_max_in_buffer_size.attr,
	&dev_attr_max_out_buffer_size.attr,
	&dev_attr_rpmb_rw_size.attr,
	&dev_attr_dyn_capacity_resource_policy.attr,
	&dev_attr_data_ordering.attr,
	&dev_attr_max_number_of_contexts.attr,
	&dev_attr_sys_data_tag_unit_size.attr,
	&dev_attr_sys_data_tag_resource_size.attr,
	&dev_attr_secure_removal_types.attr,
	&dev_attr_memory_types.attr,
	&dev_attr_sys_code_memory_max_alloc_units.attr,
	&dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
	&dev_attr_non_persist_memory_max_alloc_units.attr,
	&dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh1_memory_max_alloc_units.attr,
	&dev_attr_enh1_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh2_memory_max_alloc_units.attr,
	&dev_attr_enh2_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh3_memory_max_alloc_units.attr,
	&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
	&dev_attr_enh4_memory_max_alloc_units.attr,
	&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
	&dev_attr_wb_max_alloc_units.attr,
	&dev_attr_wb_max_wb_luns.attr,
	&dev_attr_wb_buff_cap_adj.attr,
	&dev_attr_wb_sup_red_type.attr,
	&dev_attr_wb_sup_wb_type.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
	.name = "geometry_descriptor",
	.attrs = ufs_sysfs_geometry_descriptor,
};

#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)	\
	UFS_DESC_PARAM(_name, _uname, HEALTH, _size)

UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);

static struct attribute *ufs_sysfs_health_descriptor[] = {
	&dev_attr_eol_info.attr,
	&dev_attr_life_time_estimation_a.attr,
	&dev_attr_life_time_estimation_b.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_health_descriptor_group = {
	.name = "health_descriptor",
	.attrs = ufs_sysfs_health_descriptor,
};

#define UFS_POWER_DESC_PARAM(_name, _uname, _index)	\
static ssize_t _name##_index##_show(struct device *dev,	\
	struct device_attribute *attr, char *buf)	\
{	\
	struct ufs_hba *hba = dev_get_drvdata(dev);	\
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0,	\
		PWR_DESC##_uname##_0 + _index * 2, buf, 2);	\
}	\
static DEVICE_ATTR_RO(_name##_index)

UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);

static struct attribute *ufs_sysfs_power_descriptor[] = {
	&dev_attr_active_icc_levels_vcc0.attr,
	&dev_attr_active_icc_levels_vcc1.attr,
	&dev_attr_active_icc_levels_vcc2.attr,
	&dev_attr_active_icc_levels_vcc3.attr,
	&dev_attr_active_icc_levels_vcc4.attr,
	&dev_attr_active_icc_levels_vcc5.attr,
	&dev_attr_active_icc_levels_vcc6.attr,
	&dev_attr_active_icc_levels_vcc7.attr,
	&dev_attr_active_icc_levels_vcc8.attr,
	&dev_attr_active_icc_levels_vcc9.attr,
	&dev_attr_active_icc_levels_vcc10.attr,
	&dev_attr_active_icc_levels_vcc11.attr,
	&dev_attr_active_icc_levels_vcc12.attr,
	&dev_attr_active_icc_levels_vcc13.attr,
	&dev_attr_active_icc_levels_vcc14.attr,
	&dev_attr_active_icc_levels_vcc15.attr,
	&dev_attr_active_icc_levels_vccq0.attr,
	&dev_attr_active_icc_levels_vccq1.attr,
	&dev_attr_active_icc_levels_vccq2.attr,
	&dev_attr_active_icc_levels_vccq3.attr,
	&dev_attr_active_icc_levels_vccq4.attr,
	&dev_attr_active_icc_levels_vccq5.attr,
	&dev_attr_active_icc_levels_vccq6.attr,
	&dev_attr_active_icc_levels_vccq7.attr,
	&dev_attr_active_icc_levels_vccq8.attr,
	&dev_attr_active_icc_levels_vccq9.attr,
	&dev_attr_active_icc_levels_vccq10.attr,
	&dev_attr_active_icc_levels_vccq11.attr,
	&dev_attr_active_icc_levels_vccq12.attr,
	&dev_attr_active_icc_levels_vccq13.attr,
	&dev_attr_active_icc_levels_vccq14.attr,
	&dev_attr_active_icc_levels_vccq15.attr,
	&dev_attr_active_icc_levels_vccq20.attr,
	&dev_attr_active_icc_levels_vccq21.attr,
	&dev_attr_active_icc_levels_vccq22.attr,
	&dev_attr_active_icc_levels_vccq23.attr,
	&dev_attr_active_icc_levels_vccq24.attr,
	&dev_attr_active_icc_levels_vccq25.attr,
	&dev_attr_active_icc_levels_vccq26.attr,
	&dev_attr_active_icc_levels_vccq27.attr,
	&dev_attr_active_icc_levels_vccq28.attr,
	&dev_attr_active_icc_levels_vccq29.attr,
	&dev_attr_active_icc_levels_vccq210.attr,
	&dev_attr_active_icc_levels_vccq211.attr,
	&dev_attr_active_icc_levels_vccq212.attr,
	&dev_attr_active_icc_levels_vccq213.attr,
	&dev_attr_active_icc_levels_vccq214.attr,
	&dev_attr_active_icc_levels_vccq215.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_power_descriptor_group = {
	.name = "power_descriptor",
	.attrs = ufs_sysfs_power_descriptor,
};

#define UFS_STRING_DESCRIPTOR(_name, _pname)	\
static ssize_t _name##_show(struct device *dev,	\
	struct device_attribute *attr, char *buf)	\
{	\
	u8 index;	\
	struct ufs_hba *hba = dev_get_drvdata(dev);	\
	int ret;	\
	int desc_len = QUERY_DESC_MAX_SIZE;	\
	u8 *desc_buf;	\
	\
	down(&hba->host_sem);	\
	if (!ufshcd_is_user_access_allowed(hba)) {	\
		up(&hba->host_sem);	\
		return -EBUSY;	\
	}	\
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);	\
	if (!desc_buf) {	\
		up(&hba->host_sem);	\
		return -ENOMEM;	\
	}	\
	ufshcd_rpm_get_sync(hba);	\
	ret = ufshcd_query_descriptor_retry(hba,	\
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
		0, 0, desc_buf, &desc_len);	\
	if (ret) {	\
		ret = -EINVAL;	\
		goto out;	\
	}	\
	index = desc_buf[DEVICE_DESC_PARAM##_pname];	\
	kfree(desc_buf);	\
	desc_buf = NULL;	\
	ret = ufshcd_read_string_desc(hba, index, &desc_buf,	\
				      SD_ASCII_STD);	\
	if (ret < 0)	\
		goto out;	\
	ret = sysfs_emit(buf, "%s\n", desc_buf);	\
out:	\
	ufshcd_rpm_put_sync(hba);	\
	kfree(desc_buf);	\
	up(&hba->host_sem);	\
	return ret;	\
}	\
static DEVICE_ATTR_RO(_name)

UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);

static struct attribute *ufs_sysfs_string_descriptors[] = {
	&dev_attr_manufacturer_name.attr,
	&dev_attr_product_name.attr,
	&dev_attr_oem_id.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_product_revision.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_string_descriptors_group = {
	.name = "string_descriptors",
	.attrs = ufs_sysfs_string_descriptors,
};

static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
	return idn >= QUERY_FLAG_IDN_WB_EN &&
		idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}
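/*
 * Illustrative read of one entry in the "flags" group generated below (the
 * path is a placeholder):
 *
 *   # cat .../flags/wb_enable
 *   true
 *
 * Each flag is reported as "true" or "false" via str_true_false().
 */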
#define UFS_FLAG(_name, _uname)	\
static ssize_t _name##_show(struct device *dev,	\
	struct device_attribute *attr, char *buf)	\
{	\
	bool flag;	\
	u8 index = 0;	\
	int ret;	\
	struct ufs_hba *hba = dev_get_drvdata(dev);	\
	\
	down(&hba->host_sem);	\
	if (!ufshcd_is_user_access_allowed(hba)) {	\
		up(&hba->host_sem);	\
		return -EBUSY;	\
	}	\
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))	\
		index = ufshcd_wb_get_query_index(hba);	\
	ufshcd_rpm_get_sync(hba);	\
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
		QUERY_FLAG_IDN##_uname, index, &flag);	\
	ufshcd_rpm_put_sync(hba);	\
	if (ret) {	\
		ret = -EINVAL;	\
		goto out;	\
	}	\
	ret = sysfs_emit(buf, "%s\n", str_true_false(flag));	\
out:	\
	up(&hba->host_sem);	\
	return ret;	\
}	\
static DEVICE_ATTR_RO(_name)

UFS_FLAG(device_init, _FDEVICEINIT);
UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
UFS_FLAG(bkops_enable, _BKOPS_EN);
UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);

static struct attribute *ufs_sysfs_device_flags[] = {
	&dev_attr_device_init.attr,
	&dev_attr_permanent_wpe.attr,
	&dev_attr_power_on_wpe.attr,
	&dev_attr_bkops_enable.attr,
	&dev_attr_life_span_mode_enable.attr,
	&dev_attr_phy_resource_removal.attr,
	&dev_attr_busy_rtc.attr,
	&dev_attr_disable_fw_update.attr,
	&dev_attr_wb_enable.attr,
	&dev_attr_wb_flush_en.attr,
	&dev_attr_wb_flush_during_h8.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_flags_group = {
	.name = "flags",
	.attrs = ufs_sysfs_device_flags,
};

static ssize_t max_number_of_rtt_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 rtt;
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
	ufshcd_rpm_put_sync(hba);

	if (ret)
		goto out;

	ret = sysfs_emit(buf, "0x%08X\n", rtt);

out:
	up(&hba->host_sem);
	return ret;
}

static ssize_t max_number_of_rtt_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct scsi_device *sdev;
	unsigned int memflags;
	unsigned int rtt;
	int ret;

	if (kstrtouint(buf, 0, &rtt))
		return -EINVAL;

	if (rtt > dev_info->rtt_cap) {
		dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
		return -EINVAL;
	}

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);

	memflags = memalloc_noio_save();
	shost_for_each_device(sdev, hba->host)
		blk_mq_freeze_queue_nomemsave(sdev->request_queue);

	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);

	shost_for_each_device(sdev, hba->host)
		blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
	memalloc_noio_restore(memflags);

	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_RW(max_number_of_rtt);

static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
	return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
		idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}

static int wb_read_resize_attrs(struct ufs_hba *hba,
			enum attr_idn idn, u32 *attr_val)
{
	u8 index = 0;
	int ret;

	if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
		|| !hba->dev_info.b_presrv_uspc_en
		|| !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
		return -EOPNOTSUPP;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	index = ufshcd_wb_get_query_index(hba);
	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			idn, index, 0, attr_val);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return ret;
}

static ssize_t wb_resize_hint_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	u32 value;

	ret = wb_read_resize_attrs(hba,
			QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
}

static DEVICE_ATTR_RO(wb_resize_hint);

static ssize_t wb_resize_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	u32 value;

	ret = wb_read_resize_attrs(hba,
			QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
}

static DEVICE_ATTR_RO(wb_resize_status);

#define UFS_ATTRIBUTE(_name, _uname)	\
static ssize_t _name##_show(struct device *dev,	\
	struct device_attribute *attr, char *buf)	\
{	\
	struct ufs_hba *hba = dev_get_drvdata(dev);	\
	u32 value;	\
	int ret;	\
	u8 index = 0;	\
	\
	down(&hba->host_sem);	\
	if (!ufshcd_is_user_access_allowed(hba)) {	\
		up(&hba->host_sem);	\
		return -EBUSY;	\
	}	\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))	\
		index = ufshcd_wb_get_query_index(hba);	\
	ufshcd_rpm_get_sync(hba);	\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
		QUERY_ATTR_IDN##_uname, index, 0, &value);	\
	ufshcd_rpm_put_sync(hba);	\
	if (ret) {	\
		ret = -EINVAL;	\
		goto out;	\
	}	\
	ret = sysfs_emit(buf, "0x%08X\n", value);	\
out:	\
	up(&hba->host_sem);	\
	return ret;	\
}	\
static DEVICE_ATTR_RO(_name)

UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);

static struct attribute *ufs_sysfs_attributes[] = {
	&dev_attr_boot_lun_enabled.attr,
	&dev_attr_current_power_mode.attr,
	&dev_attr_active_icc_level.attr,
	&dev_attr_ooo_data_enabled.attr,
	&dev_attr_bkops_status.attr,
	&dev_attr_purge_status.attr,
	&dev_attr_max_data_in_size.attr,
	&dev_attr_max_data_out_size.attr,
	&dev_attr_reference_clock_frequency.attr,
	&dev_attr_configuration_descriptor_lock.attr,
	&dev_attr_max_number_of_rtt.attr,
	&dev_attr_exception_event_control.attr,
	&dev_attr_exception_event_status.attr,
	&dev_attr_ffu_status.attr,
	&dev_attr_psa_state.attr,
	&dev_attr_psa_data_size.attr,
	&dev_attr_wb_flush_status.attr,
	&dev_attr_wb_avail_buf.attr,
	&dev_attr_wb_life_time_est.attr,
	&dev_attr_wb_cur_buf.attr,
	&dev_attr_wb_resize_hint.attr,
	&dev_attr_wb_resize_status.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_attributes_group = {
	.name = "attributes",
	.attrs = ufs_sysfs_attributes,
};

static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		enum attr_idn idn, u32 *attr_val)
{
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return ret;
}

static ssize_t analysis_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_WO(analysis_trigger);

static ssize_t defrag_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ?
		ret : count;
}

static DEVICE_ATTR_WO(defrag_trigger);

#define UFS_HID_AVAILABLE_SIZE_INVALID	0xFFFFFFFFU
static ssize_t fragmented_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
	if (ret)
		return ret;

	if (value == UFS_HID_AVAILABLE_SIZE_INVALID)
		return -ENODATA;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(fragmented_size);

static ssize_t defrag_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_HID_SIZE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t defrag_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_HID_SIZE, &value);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_RW(defrag_size);

static ssize_t progress_ratio_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(progress_ratio);

static ssize_t state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_HID_STATE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
}

static DEVICE_ATTR_RO(state);

static struct attribute *ufs_sysfs_hid[] = {
	&dev_attr_analysis_trigger.attr,
	&dev_attr_defrag_trigger.attr,
	&dev_attr_fragmented_size.attr,
	&dev_attr_defrag_size.attr,
	&dev_attr_progress_ratio.attr,
	&dev_attr_state.attr,
	NULL,
};

/* Hide the HID group unless the device advertises HID support. */
static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return hba->dev_info.hid_sup ?
		attr->mode : 0;
}

static const struct attribute_group ufs_sysfs_hid_group = {
	.name = "hid",
	.attrs = ufs_sysfs_hid,
	.is_visible = ufs_sysfs_hid_is_visible,
};

static const struct attribute_group *ufs_sysfs_groups[] = {
	&ufs_sysfs_default_group,
	&ufs_sysfs_capabilities_group,
	&ufs_sysfs_ufshci_group,
	&ufs_sysfs_monitor_group,
	&ufs_sysfs_power_info_group,
	&ufs_sysfs_device_descriptor_group,
	&ufs_sysfs_interconnect_descriptor_group,
	&ufs_sysfs_geometry_descriptor_group,
	&ufs_sysfs_health_descriptor_group,
	&ufs_sysfs_power_descriptor_group,
	&ufs_sysfs_string_descriptors_group,
	&ufs_sysfs_flags_group,
	&ufs_sysfs_attributes_group,
	&ufs_sysfs_hid_group,
	NULL,
};

#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \
static ssize_t _pname##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct ufs_hba *hba = shost_priv(sdev->host); \
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
		return -EINVAL; \
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
		lun, _duname##_DESC_PARAM##_puname, buf, _size); \
} \
static DEVICE_ATTR_RO(_pname)

#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
	UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)

UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_unit_descriptor[] = {
	&dev_attr_lu_enable.attr,
	&dev_attr_boot_lun_id.attr,
	&dev_attr_lun_write_protect.attr,
	&dev_attr_lun_queue_depth.attr,
	&dev_attr_psa_sensitive.attr,
	&dev_attr_lun_memory_type.attr,
	&dev_attr_data_reliability.attr,
	&dev_attr_logical_block_size.attr,
	&dev_attr_logical_block_count.attr,
	&dev_attr_erase_block_size.attr,
	&dev_attr_provisioning_type.attr,
	&dev_attr_physical_memory_resource_count.attr,
	&dev_attr_context_capabilities.attr,
	&dev_attr_large_unit_granularity.attr,
	&dev_attr_wb_buf_alloc_units.attr,
	NULL,
};

static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	umode_t mode = attr->mode;

	if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN)
		/* Boot and device WLUN have no unit descriptors */
		mode = 0;
	if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr)
		mode = 0;

	return mode;
}

const struct attribute_group ufs_sysfs_unit_descriptor_group = {
	.name = "unit_descriptor",
	.attrs = ufs_sysfs_unit_descriptor,
	.is_visible = ufs_unit_descriptor_is_visible,
};

static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 value;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);

out:
	up(&hba->host_sem);
	return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);

static struct attribute *ufs_sysfs_lun_attributes[] = {
	&dev_attr_dyn_cap_needed_attribute.attr,
	NULL,
};

const struct attribute_group ufs_sysfs_lun_attributes_group = {
	.attrs = ufs_sysfs_lun_attributes,
};

void ufs_sysfs_add_nodes(struct device *dev)
{
	int ret;

	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
	if (ret)
		dev_err(dev,
			"%s: sysfs groups creation failed (err = %d)\n",
			__func__, ret);
}

void ufs_sysfs_remove_nodes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}
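
/*
 * Illustrative sysfs layout (a sketch; exact paths depend on how the host
 * controller is registered on its bus). The groups created above by
 * ufs_sysfs_add_nodes() typically show up under the controller device, e.g.:
 *
 *   /sys/bus/platform/drivers/ufshcd/<dev>/flags/wb_enable
 *   /sys/bus/platform/drivers/ufshcd/<dev>/attributes/max_number_of_rtt
 *   /sys/bus/platform/drivers/ufshcd/<dev>/hid/state
 *
 * ufs_sysfs_unit_descriptor_group and ufs_sysfs_lun_attributes_group are
 * exported for use as per-LUN sdev groups, so each SCSI device gains e.g.:
 *
 *   /sys/class/scsi_device/<h:c:t:l>/device/unit_descriptor/lu_enable
 *   /sys/class/scsi_device/<h:c:t:l>/device/dyn_cap_needed_attribute
 */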