// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Western Digital Corporation

#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
#include <linux/unaligned.h>

#include <ufs/ufs.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"

static const char *ufs_pa_pwr_mode_to_string(enum ufs_pa_pwr_mode mode)
{
        switch (mode) {
        case FAST_MODE: return "FAST_MODE";
        case SLOW_MODE: return "SLOW_MODE";
        case FASTAUTO_MODE: return "FASTAUTO_MODE";
        case SLOWAUTO_MODE: return "SLOWAUTO_MODE";
        default: return "UNKNOWN";
        }
}

static const char *ufs_hs_gear_rate_to_string(enum ufs_hs_gear_rate rate)
{
        switch (rate) {
        case PA_HS_MODE_A: return "HS_RATE_A";
        case PA_HS_MODE_B: return "HS_RATE_B";
        default: return "UNKNOWN";
        }
}

static const char *ufs_pwm_gear_to_string(enum ufs_pwm_gear_tag gear)
{
        switch (gear) {
        case UFS_PWM_G1: return "PWM_GEAR1";
        case UFS_PWM_G2: return "PWM_GEAR2";
        case UFS_PWM_G3: return "PWM_GEAR3";
        case UFS_PWM_G4: return "PWM_GEAR4";
        case UFS_PWM_G5: return "PWM_GEAR5";
        case UFS_PWM_G6: return "PWM_GEAR6";
        case UFS_PWM_G7: return "PWM_GEAR7";
        default: return "UNKNOWN";
        }
}

static const char *ufs_hs_gear_to_string(enum ufs_hs_gear_tag gear)
{
        switch (gear) {
        case UFS_HS_G1: return "HS_GEAR1";
        case UFS_HS_G2: return "HS_GEAR2";
        case UFS_HS_G3: return "HS_GEAR3";
        case UFS_HS_G4: return "HS_GEAR4";
        case UFS_HS_G5: return "HS_GEAR5";
        default: return "UNKNOWN";
        }
}

static const char *ufs_wb_resize_hint_to_string(enum wb_resize_hint hint)
{
        switch (hint) {
        case WB_RESIZE_HINT_KEEP:
                return "keep";
        case WB_RESIZE_HINT_DECREASE:
                return "decrease";
        case WB_RESIZE_HINT_INCREASE:
                return "increase";
        default:
                return "unknown";
        }
}

static const char *ufs_wb_resize_status_to_string(enum wb_resize_status status)
{
        switch (status) {
        case WB_RESIZE_STATUS_IDLE:
                return "idle";
        case WB_RESIZE_STATUS_IN_PROGRESS:
                return "in_progress";
        case WB_RESIZE_STATUS_COMPLETE_SUCCESS:
                return "complete_success";
        case WB_RESIZE_STATUS_GENERAL_FAILURE:
                return "general_failure";
        default:
                return "unknown";
        }
}

static const char * const ufs_hid_states[] = {
        [HID_IDLE] = "idle",
        [ANALYSIS_IN_PROGRESS] = "analysis_in_progress",
        [DEFRAG_REQUIRED] = "defrag_required",
        [DEFRAG_IN_PROGRESS] = "defrag_in_progress",
        [DEFRAG_COMPLETED] = "defrag_completed",
        [DEFRAG_NOT_REQUIRED] = "defrag_not_required",
};

static const char *ufs_hid_state_to_string(enum ufs_hid_state state)
{
        if (state < NUM_UFS_HID_STATES)
                return ufs_hid_states[state];

        return "unknown";
}

static const char *ufshcd_uic_link_state_to_string(
                        enum uic_link_state state)
{
        switch (state) {
        case UIC_LINK_OFF_STATE: return "OFF";
        case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
        case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
        case UIC_LINK_BROKEN_STATE: return "BROKEN";
        default: return "UNKNOWN";
        }
}

static const char *ufshcd_ufs_dev_pwr_mode_to_string(
                        enum ufs_dev_pwr_mode state)
{
        switch (state) {
        case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
        case UFS_SLEEP_PWR_MODE: return "SLEEP";
        case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
        case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
        default: return "UNKNOWN";
        }
}

static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
                                             struct device_attribute *attr,
                                             const char *buf, size_t count,
                                             bool rpm)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_dev_info *dev_info = &hba->dev_info;
        unsigned long flags, value;

        if (kstrtoul(buf, 0, &value))
                return -EINVAL;

        if (value >= UFS_PM_LVL_MAX)
                return -EINVAL;

        if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
            (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
             dev_info->wspecversion < 0x310))
                return -EINVAL;

        spin_lock_irqsave(hba->host->host_lock, flags);
        if (rpm)
                hba->rpm_lvl = value;
        else
                hba->spm_lvl = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return count;
}
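
/*
 * Illustrative usage of the rpm_lvl/spm_lvl attributes built on the helper
 * above (the sysfs path is platform-specific and shown only as an example):
 *
 *   # echo 3 > /sys/bus/platform/devices/<ufs-dev>/rpm_lvl
 *
 * The written value is an index into ufs_pm_lvl_states[], so it must be
 * below UFS_PM_LVL_MAX; levels targeting DeepSleep additionally require
 * UFSHCD_CAP_DEEPSLEEP and a device reporting UFS 3.1 or later.
 */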

static ssize_t rpm_lvl_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}

static ssize_t rpm_lvl_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}

static ssize_t rpm_target_dev_state_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
                        ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}

static ssize_t rpm_target_link_state_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
                        ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}

static ssize_t spm_lvl_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}

static ssize_t spm_lvl_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}

static ssize_t spm_target_dev_state_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
                        ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}

static ssize_t spm_target_link_state_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
                        ufs_pm_lvl_states[hba->spm_lvl].link_state));
}

/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
        int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
        int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);

        for (; scale > 0; --scale)
                timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;

        return timer;
}

/* Convert microseconds to Auto-Hibernate Idle Timer register value */
static u32 ufshcd_us_to_ahit(unsigned int timer)
{
        unsigned int scale;

        for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
                timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;

        return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
               FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
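
/*
 * Worked example for the two conversions above, assuming the usual UFSHCI
 * encoding (a 10-bit timer field and UFSHCI_AHIBERN8_SCALE_FACTOR == 10):
 *
 *   ufshcd_us_to_ahit(150000): 150000 -> 15000 -> 1500 -> 150, i.e. three
 *   divisions, so the result encodes timer = 150 and scale = 3.
 *
 *   ufshcd_ahit_to_us(timer = 150, scale = 3) = 150 * 10 * 10 * 10
 *                                             = 150000 us.
 *
 * Values that are not exact multiples of the applied scale are truncated
 * by the integer divisions, so the round trip is not always lossless.
 */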

static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg)
{
        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                up(&hba->host_sem);
                return -EBUSY;
        }

        ufshcd_rpm_get_sync(hba);
        ufshcd_hold(hba);
        *val = ufshcd_readl(hba, reg);
        ufshcd_release(hba);
        ufshcd_rpm_put_sync(hba);

        up(&hba->host_sem);
        return 0;
}

static ssize_t auto_hibern8_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        u32 ahit;
        int ret;
        struct ufs_hba *hba = dev_get_drvdata(dev);

        if (!ufshcd_is_auto_hibern8_supported(hba))
                return -EOPNOTSUPP;

        ret = ufshcd_read_hci_reg(hba, &ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
        if (ret)
                return ret;

        return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
}

static ssize_t auto_hibern8_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int timer;
        int ret = 0;

        if (!ufshcd_is_auto_hibern8_supported(hba))
                return -EOPNOTSUPP;

        if (kstrtouint(buf, 0, &timer))
                return -EINVAL;

        if (timer > UFSHCI_AHIBERN8_MAX)
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                ret = -EBUSY;
                goto out;
        }

        ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));

out:
        up(&hba->host_sem);
        return ret ? ret : count;
}
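
/*
 * Example (illustrative path): set a 150 ms Auto-Hibernate idle timeout,
 * expressed in microseconds, then read back the (possibly rounded) value:
 *
 *   # echo 150000 > /sys/.../auto_hibern8
 *   # cat /sys/.../auto_hibern8
 *
 * Writes fail with -EOPNOTSUPP if the controller lacks Auto-Hibernate
 * support, and with -EINVAL above UFSHCI_AHIBERN8_MAX.
 */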

static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
}

static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int wb_enable;
        ssize_t res;

        if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
                && ufshcd_enable_wb_if_scaling_up(hba))) {
                /*
                 * If the platform supports UFSHCD_CAP_CLK_SCALING, WB is
                 * turned on/off automatically while scaling the clocks
                 * up/down, so manual configuration is not allowed here.
                 */
                dev_warn(dev, "It is not allowed to configure WB!\n");
                return -EOPNOTSUPP;
        }

        if (kstrtouint(buf, 0, &wb_enable))
                return -EINVAL;

        if (wb_enable != 0 && wb_enable != 1)
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                res = -EBUSY;
                goto out;
        }

        ufshcd_rpm_get_sync(hba);
        res = ufshcd_wb_toggle(hba, wb_enable);
        ufshcd_rpm_put_sync(hba);
out:
        up(&hba->host_sem);
        return res < 0 ? res : count;
}

static ssize_t rtc_update_ms_show(struct device *dev, struct device_attribute *attr,
                                  char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->dev_info.rtc_update_period);
}

static ssize_t rtc_update_ms_store(struct device *dev, struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int ms;
        bool resume_period_update = false;

        if (kstrtouint(buf, 0, &ms))
                return -EINVAL;

        if (!hba->dev_info.rtc_update_period && ms > 0)
                resume_period_update = true;
        /* Minimum and maximum update frequency should be synchronized with all UFS vendors */
        hba->dev_info.rtc_update_period = ms;

        if (resume_period_update)
                schedule_delayed_work(&hba->ufs_rtc_update_work,
                                      msecs_to_jiffies(hba->dev_info.rtc_update_period));
        return count;
}
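
/*
 * Example: a periodic RTC update every 10 seconds (the period is taken in
 * milliseconds). Per the logic above, writing a non-zero period while the
 * current period is zero is what (re-)arms ufs_rtc_update_work:
 *
 *   # echo 10000 > /sys/.../rtc_update_ms
 */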

static ssize_t enable_wb_buf_flush_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
}

static ssize_t enable_wb_buf_flush_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int enable_wb_buf_flush;
        ssize_t res;

        if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
                dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
                return -EOPNOTSUPP;
        }

        if (kstrtouint(buf, 0, &enable_wb_buf_flush))
                return -EINVAL;

        if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                res = -EBUSY;
                goto out;
        }

        ufshcd_rpm_get_sync(hba);
        res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
        ufshcd_rpm_put_sync(hba);

out:
        up(&hba->host_sem);
        return res < 0 ? res : count;
}

static ssize_t wb_flush_threshold_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold);
}

static ssize_t wb_flush_threshold_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int wb_flush_threshold;

        if (kstrtouint(buf, 0, &wb_flush_threshold))
                return -EINVAL;

        /* The range of values for wb_flush_threshold is (0,10] */
        if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) ||
            wb_flush_threshold == 0) {
                dev_err(dev, "The value of wb_flush_threshold is invalid!\n");
                return -EINVAL;
        }

        hba->vps->wb_flush_threshold = wb_flush_threshold;

        return count;
}
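
/*
 * The threshold is expressed in UFS_WB_BUF_REMAIN_PERCENT() units, which
 * from the (0,10] range above appear to be tenths of the WriteBooster
 * buffer. For example, writing 4 roughly requests flushing once less than
 * about 40% of the buffer remains available:
 *
 *   # echo 4 > /sys/.../wb_flush_threshold
 */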

static const char * const wb_resize_en_mode[] = {
        [WB_RESIZE_EN_IDLE] = "idle",
        [WB_RESIZE_EN_DECREASE] = "decrease",
        [WB_RESIZE_EN_INCREASE] = "increase",
};

static ssize_t wb_resize_enable_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        int mode;
        ssize_t res;

        if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
                || !hba->dev_info.b_presrv_uspc_en
                || !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
                return -EOPNOTSUPP;

        mode = sysfs_match_string(wb_resize_en_mode, buf);
        if (mode < 0)
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                res = -EBUSY;
                goto out;
        }

        ufshcd_rpm_get_sync(hba);
        res = ufshcd_wb_set_resize_en(hba, mode);
        ufshcd_rpm_put_sync(hba);

out:
        up(&hba->host_sem);
        return res < 0 ? res : count;
}

/**
 * pm_qos_enable_show - sysfs handler to show pm qos enable value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 *
 * Print 1 if PM QoS feature is enabled, 0 if disabled.
 *
 * Return: number of characters written to @buf.
 */
static ssize_t pm_qos_enable_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}

/**
 * pm_qos_enable_store - sysfs handler to store pm qos enable value
 * @dev: device associated with the UFS controller
 * @attr: sysfs attribute handle
 * @buf: buffer for sysfs file
 * @count: number of characters in @buf
 *
 * Input 0 to disable PM QoS and 1 to enable.
 * Default state: 1
 *
 * Return: @count on success, < 0 upon failure.
 */
static ssize_t pm_qos_enable_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        bool value;

        if (kstrtobool(buf, &value))
                return -EINVAL;

        if (value)
                ufshcd_pm_qos_init(hba);
        else
                ufshcd_pm_qos_exit(hba);

        return count;
}
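
/*
 * Example: kstrtobool() accepts the usual boolean spellings, so any of
 * these disable PM QoS (and "1"/"y"/"on" re-enable it):
 *
 *   # echo 0 > /sys/.../pm_qos_enable
 *   # echo n > /sys/.../pm_qos_enable
 */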

static ssize_t critical_health_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->critical_health_count);
}

static ssize_t device_lvl_exception_count_show(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        if (hba->dev_info.wspecversion < 0x410)
                return -EOPNOTSUPP;

        return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
}

static ssize_t device_lvl_exception_count_store(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned int value;

        if (kstrtouint(buf, 0, &value))
                return -EINVAL;

        /* The only supported use case is to reset dev_lvl_exception_count */
        if (value)
                return -EINVAL;

        atomic_set(&hba->dev_lvl_exception_count, 0);

        return count;
}

static ssize_t device_lvl_exception_id_show(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        u64 exception_id;
        int err;

        ufshcd_rpm_get_sync(hba);
        err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
        ufshcd_rpm_put_sync(hba);

        if (err)
                return err;

        hba->dev_lvl_exception_id = exception_id;
        return sysfs_emit(buf, "%llu\n", exception_id);
}

static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_WO(wb_resize_enable);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
static DEVICE_ATTR_RO(critical_health);
static DEVICE_ATTR_RW(device_lvl_exception_count);
static DEVICE_ATTR_RO(device_lvl_exception_id);

static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
        &dev_attr_rpm_lvl.attr,
        &dev_attr_rpm_target_dev_state.attr,
        &dev_attr_rpm_target_link_state.attr,
        &dev_attr_spm_lvl.attr,
        &dev_attr_spm_target_dev_state.attr,
        &dev_attr_spm_target_link_state.attr,
        &dev_attr_auto_hibern8.attr,
        &dev_attr_wb_on.attr,
        &dev_attr_enable_wb_buf_flush.attr,
        &dev_attr_wb_flush_threshold.attr,
        &dev_attr_wb_resize_enable.attr,
        &dev_attr_rtc_update_ms.attr,
        &dev_attr_pm_qos_enable.attr,
        &dev_attr_critical_health.attr,
        &dev_attr_device_lvl_exception_count.attr,
        &dev_attr_device_lvl_exception_id.attr,
        NULL
};

static const struct attribute_group ufs_sysfs_default_group = {
        .attrs = ufs_sysfs_ufshcd_attrs,
};

static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
                                  char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
}

static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
                                  char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
}

static DEVICE_ATTR_RO(clock_scaling);
static DEVICE_ATTR_RO(write_booster);

/*
 * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
 * group.
 */
static struct attribute *ufs_sysfs_capabilities_attrs[] = {
        &dev_attr_clock_scaling.attr,
        &dev_attr_write_booster.attr,
        NULL
};

static const struct attribute_group ufs_sysfs_capabilities_group = {
        .name = "capabilities",
        .attrs = ufs_sysfs_capabilities_attrs,
};

static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "0x%x\n", hba->ufs_version);
}

static ssize_t product_id_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        int ret;
        u32 val;
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_PID);
        if (ret)
                return ret;

        return sysfs_emit(buf, "0x%x\n", val);
}

static ssize_t man_id_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        int ret;
        u32 val;
        struct ufs_hba *hba = dev_get_drvdata(dev);

        ret = ufshcd_read_hci_reg(hba, &val, REG_CONTROLLER_MID);
        if (ret)
                return ret;

        return sysfs_emit(buf, "0x%x\n", val);
}

static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(product_id);
static DEVICE_ATTR_RO(man_id);

static struct attribute *ufs_sysfs_ufshci_cap_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_product_id.attr,
        &dev_attr_man_id.attr,
        NULL
};

static const struct attribute_group ufs_sysfs_ufshci_group = {
        .name = "ufshci_capabilities",
        .attrs = ufs_sysfs_ufshci_cap_attrs,
};

static ssize_t monitor_enable_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
}

static ssize_t monitor_enable_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long value, flags;

        if (kstrtoul(buf, 0, &value))
                return -EINVAL;

        value = !!value;
        spin_lock_irqsave(hba->host->host_lock, flags);
        if (value == hba->monitor.enabled)
                goto out_unlock;

        if (!value) {
                memset(&hba->monitor, 0, sizeof(hba->monitor));
        } else {
                hba->monitor.enabled = true;
                hba->monitor.enabled_ts = ktime_get();
        }

out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return count;
}

static ssize_t monitor_chunk_size_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
}

static ssize_t monitor_chunk_size_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t count)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        unsigned long value, flags;

        if (kstrtoul(buf, 0, &value))
                return -EINVAL;

        spin_lock_irqsave(hba->host->host_lock, flags);
        /* Only allow chunk size change when monitor is disabled */
        if (!hba->monitor.enabled)
                hba->monitor.chunk_size = value;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        return count;
}
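
/*
 * Sketch of a typical monitor session, run from the controller's monitor/
 * sysfs group: configure the chunk size while the monitor is disabled
 * (a chunk size of 0 presumably means no request-size filtering), then
 * enable it and read the accumulated counters:
 *
 *   # echo 0 > monitor_enable
 *   # echo 4096 > monitor_chunk_size
 *   # echo 1 > monitor_enable
 *   ... run a workload ...
 *   # cat read_nr_requests read_req_latency_avg
 */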

static ssize_t read_total_sectors_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
}

static ssize_t read_total_busy_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.total_busy[READ]));
}

static ssize_t read_nr_requests_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
}

static ssize_t read_req_latency_avg_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_hba_monitor *m = &hba->monitor;

        if (!m->nr_req[READ])
                return sysfs_emit(buf, "0\n");

        return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
                                                 m->nr_req[READ]));
}

static ssize_t read_req_latency_max_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_max[READ]));
}

static ssize_t read_req_latency_min_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_min[READ]));
}

static ssize_t read_req_latency_sum_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_sum[READ]));
}

static ssize_t write_total_sectors_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
}

static ssize_t write_total_busy_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.total_busy[WRITE]));
}

static ssize_t write_nr_requests_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
}

static ssize_t write_req_latency_avg_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_hba_monitor *m = &hba->monitor;

        if (!m->nr_req[WRITE])
                return sysfs_emit(buf, "0\n");

        return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
                                                 m->nr_req[WRITE]));
}

static ssize_t write_req_latency_max_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_max[WRITE]));
}

static ssize_t write_req_latency_min_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_min[WRITE]));
}

static ssize_t write_req_latency_sum_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n",
                          ktime_to_us(hba->monitor.lat_sum[WRITE]));
}

static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);

static struct attribute *ufs_sysfs_monitor_attrs[] = {
        &dev_attr_monitor_enable.attr,
        &dev_attr_monitor_chunk_size.attr,
        &dev_attr_read_total_sectors.attr,
        &dev_attr_read_total_busy.attr,
        &dev_attr_read_nr_requests.attr,
        &dev_attr_read_req_latency_avg.attr,
        &dev_attr_read_req_latency_max.attr,
        &dev_attr_read_req_latency_min.attr,
        &dev_attr_read_req_latency_sum.attr,
        &dev_attr_write_total_sectors.attr,
        &dev_attr_write_total_busy.attr,
        &dev_attr_write_nr_requests.attr,
        &dev_attr_write_req_latency_avg.attr,
        &dev_attr_write_req_latency_max.attr,
        &dev_attr_write_req_latency_min.attr,
        &dev_attr_write_req_latency_sum.attr,
        NULL
};

static const struct attribute_group ufs_sysfs_monitor_group = {
        .name = "monitor",
        .attrs = ufs_sysfs_monitor_attrs,
};

static ssize_t lane_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", hba->pwr_info.lane_rx);
}

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufs_pa_pwr_mode_to_string(hba->pwr_info.pwr_rx));
}

static ssize_t rate_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufs_hs_gear_rate_to_string(hba->pwr_info.hs_rate));
}

static ssize_t gear_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", hba->pwr_info.hs_rate ?
                          ufs_hs_gear_to_string(hba->pwr_info.gear_rx) :
                          ufs_pwm_gear_to_string(hba->pwr_info.gear_rx));
}

static ssize_t dev_pm_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(hba->curr_dev_pwr_mode));
}

static ssize_t link_state_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(hba->uic_link_state));
}

static DEVICE_ATTR_RO(lane);
static DEVICE_ATTR_RO(mode);
static DEVICE_ATTR_RO(rate);
static DEVICE_ATTR_RO(gear);
static DEVICE_ATTR_RO(dev_pm);
static DEVICE_ATTR_RO(link_state);

static struct attribute *ufs_power_info_attrs[] = {
        &dev_attr_lane.attr,
        &dev_attr_mode.attr,
        &dev_attr_rate.attr,
        &dev_attr_gear.attr,
        &dev_attr_dev_pm.attr,
        &dev_attr_link_state.attr,
        NULL
};

static const struct attribute_group ufs_sysfs_power_info_group = {
        .name = "power_info",
        .attrs = ufs_power_info_attrs,
};

static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
                                         enum desc_idn desc_id,
                                         u8 desc_index,
                                         u8 param_offset,
                                         u8 *sysfs_buf,
                                         u8 param_size)
{
        u8 desc_buf[8] = {0};
        int ret;

        if (param_size > 8)
                return -EINVAL;

        down(&hba->host_sem);
        if (!ufshcd_is_user_access_allowed(hba)) {
                ret = -EBUSY;
                goto out;
        }

        ufshcd_rpm_get_sync(hba);
        ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
                                     param_offset, desc_buf, param_size);
        ufshcd_rpm_put_sync(hba);
        if (ret) {
                ret = -EINVAL;
                goto out;
        }

        switch (param_size) {
        case 1:
                ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
                break;
        case 2:
                ret = sysfs_emit(sysfs_buf, "0x%04X\n",
                                 get_unaligned_be16(desc_buf));
                break;
        case 4:
                ret = sysfs_emit(sysfs_buf, "0x%08X\n",
                                 get_unaligned_be32(desc_buf));
                break;
        case 8:
                ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
                                 get_unaligned_be64(desc_buf));
                break;
        }

out:
        up(&hba->host_sem);
        return ret;
}

#define UFS_DESC_PARAM(_name, _puname, _duname, _size)                  \
static ssize_t _name##_show(struct device *dev,                         \
        struct device_attribute *attr, char *buf)                       \
{                                                                       \
        struct ufs_hba *hba = dev_get_drvdata(dev);                     \
        return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
                0, _duname##_DESC_PARAM##_puname, buf, _size);          \
}                                                                       \
static DEVICE_ATTR_RO(_name)

#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size)                     \
        UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
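
/*
 * For reference, UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1) below
 * expands roughly to:
 *
 *   static ssize_t device_type_show(struct device *dev,
 *                                   struct device_attribute *attr, char *buf)
 *   {
 *           struct ufs_hba *hba = dev_get_drvdata(dev);
 *           return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
 *                           DEVICE_DESC_PARAM_DEVICE_TYPE, buf, 1);
 *   }
 *   static DEVICE_ATTR_RO(device_type);
 */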

UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_device_descriptor[] = {
        &dev_attr_device_type.attr,
        &dev_attr_device_class.attr,
        &dev_attr_device_sub_class.attr,
        &dev_attr_protocol.attr,
        &dev_attr_number_of_luns.attr,
        &dev_attr_number_of_wluns.attr,
        &dev_attr_boot_enable.attr,
        &dev_attr_descriptor_access_enable.attr,
        &dev_attr_initial_power_mode.attr,
        &dev_attr_high_priority_lun.attr,
        &dev_attr_secure_removal_type.attr,
        &dev_attr_support_security_lun.attr,
        &dev_attr_bkops_termination_latency.attr,
        &dev_attr_initial_active_icc_level.attr,
        &dev_attr_specification_version.attr,
        &dev_attr_manufacturing_date.attr,
        &dev_attr_manufacturer_id.attr,
        &dev_attr_rtt_capability.attr,
        &dev_attr_rtc_update.attr,
        &dev_attr_ufs_features.attr,
        &dev_attr_ffu_timeout.attr,
        &dev_attr_queue_depth.attr,
        &dev_attr_device_version.attr,
        &dev_attr_number_of_secure_wpa.attr,
        &dev_attr_psa_max_data_size.attr,
        &dev_attr_psa_state_timeout.attr,
        &dev_attr_ext_feature_sup.attr,
        &dev_attr_wb_presv_us_en.attr,
        &dev_attr_wb_type.attr,
        &dev_attr_wb_shared_alloc_units.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_device_descriptor_group = {
        .name = "device_descriptor",
        .attrs = ufs_sysfs_device_descriptor,
};

#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size)               \
        UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)

UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);

static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
        &dev_attr_unipro_version.attr,
        &dev_attr_mphy_version.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
        .name = "interconnect_descriptor",
        .attrs = ufs_sysfs_interconnect_descriptor,
};

#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size)                   \
        UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)

UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
        _SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
        _SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
        _NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
        _NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
        _ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
        _ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
        _ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
        _ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
        _ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
        _ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
        _ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
        _ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);

static struct attribute *ufs_sysfs_geometry_descriptor[] = {
        &dev_attr_raw_device_capacity.attr,
        &dev_attr_max_number_of_luns.attr,
        &dev_attr_segment_size.attr,
        &dev_attr_allocation_unit_size.attr,
        &dev_attr_min_addressable_block_size.attr,
        &dev_attr_optimal_read_block_size.attr,
        &dev_attr_optimal_write_block_size.attr,
        &dev_attr_max_in_buffer_size.attr,
        &dev_attr_max_out_buffer_size.attr,
        &dev_attr_rpmb_rw_size.attr,
        &dev_attr_dyn_capacity_resource_policy.attr,
        &dev_attr_data_ordering.attr,
        &dev_attr_max_number_of_contexts.attr,
        &dev_attr_sys_data_tag_unit_size.attr,
        &dev_attr_sys_data_tag_resource_size.attr,
        &dev_attr_secure_removal_types.attr,
        &dev_attr_memory_types.attr,
        &dev_attr_sys_code_memory_max_alloc_units.attr,
        &dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
        &dev_attr_non_persist_memory_max_alloc_units.attr,
        &dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
        &dev_attr_enh1_memory_max_alloc_units.attr,
        &dev_attr_enh1_memory_capacity_adjustment_factor.attr,
        &dev_attr_enh2_memory_max_alloc_units.attr,
        &dev_attr_enh2_memory_capacity_adjustment_factor.attr,
        &dev_attr_enh3_memory_max_alloc_units.attr,
        &dev_attr_enh3_memory_capacity_adjustment_factor.attr,
        &dev_attr_enh4_memory_max_alloc_units.attr,
        &dev_attr_enh4_memory_capacity_adjustment_factor.attr,
        &dev_attr_wb_max_alloc_units.attr,
        &dev_attr_wb_max_wb_luns.attr,
        &dev_attr_wb_buff_cap_adj.attr,
        &dev_attr_wb_sup_red_type.attr,
        &dev_attr_wb_sup_wb_type.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
        .name = "geometry_descriptor",
        .attrs = ufs_sysfs_geometry_descriptor,
};

#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size)                     \
        UFS_DESC_PARAM(_name, _uname, HEALTH, _size)

UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);

static struct attribute *ufs_sysfs_health_descriptor[] = {
        &dev_attr_eol_info.attr,
        &dev_attr_life_time_estimation_a.attr,
        &dev_attr_life_time_estimation_b.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_health_descriptor_group = {
        .name = "health_descriptor",
        .attrs = ufs_sysfs_health_descriptor,
};

#define UFS_POWER_DESC_PARAM(_name, _uname, _index)                     \
static ssize_t _name##_index##_show(struct device *dev,                 \
        struct device_attribute *attr, char *buf)                       \
{                                                                       \
        struct ufs_hba *hba = dev_get_drvdata(dev);                     \
        return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0,  \
                PWR_DESC##_uname##_0 + _index * 2, buf, 2);             \
}                                                                       \
static DEVICE_ATTR_RO(_name##_index)
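
/*
 * Each expansion below defines one attribute per ICC level. For example,
 * UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0) creates
 * active_icc_levels_vcc0, which reads two bytes at offset
 * PWR_DESC_ACTIVE_LVLS_VCC_0 + 0 * 2 of the power descriptor.
 */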

UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);

static struct attribute *ufs_sysfs_power_descriptor[] = {
        &dev_attr_active_icc_levels_vcc0.attr,
        &dev_attr_active_icc_levels_vcc1.attr,
        &dev_attr_active_icc_levels_vcc2.attr,
        &dev_attr_active_icc_levels_vcc3.attr,
        &dev_attr_active_icc_levels_vcc4.attr,
        &dev_attr_active_icc_levels_vcc5.attr,
        &dev_attr_active_icc_levels_vcc6.attr,
        &dev_attr_active_icc_levels_vcc7.attr,
        &dev_attr_active_icc_levels_vcc8.attr,
        &dev_attr_active_icc_levels_vcc9.attr,
        &dev_attr_active_icc_levels_vcc10.attr,
        &dev_attr_active_icc_levels_vcc11.attr,
        &dev_attr_active_icc_levels_vcc12.attr,
        &dev_attr_active_icc_levels_vcc13.attr,
        &dev_attr_active_icc_levels_vcc14.attr,
        &dev_attr_active_icc_levels_vcc15.attr,
        &dev_attr_active_icc_levels_vccq0.attr,
        &dev_attr_active_icc_levels_vccq1.attr,
        &dev_attr_active_icc_levels_vccq2.attr,
        &dev_attr_active_icc_levels_vccq3.attr,
        &dev_attr_active_icc_levels_vccq4.attr,
        &dev_attr_active_icc_levels_vccq5.attr,
        &dev_attr_active_icc_levels_vccq6.attr,
        &dev_attr_active_icc_levels_vccq7.attr,
        &dev_attr_active_icc_levels_vccq8.attr,
        &dev_attr_active_icc_levels_vccq9.attr,
        &dev_attr_active_icc_levels_vccq10.attr,
        &dev_attr_active_icc_levels_vccq11.attr,
        &dev_attr_active_icc_levels_vccq12.attr,
        &dev_attr_active_icc_levels_vccq13.attr,
        &dev_attr_active_icc_levels_vccq14.attr,
        &dev_attr_active_icc_levels_vccq15.attr,
        &dev_attr_active_icc_levels_vccq20.attr,
        &dev_attr_active_icc_levels_vccq21.attr,
        &dev_attr_active_icc_levels_vccq22.attr,
        &dev_attr_active_icc_levels_vccq23.attr,
        &dev_attr_active_icc_levels_vccq24.attr,
        &dev_attr_active_icc_levels_vccq25.attr,
        &dev_attr_active_icc_levels_vccq26.attr,
        &dev_attr_active_icc_levels_vccq27.attr,
        &dev_attr_active_icc_levels_vccq28.attr,
        &dev_attr_active_icc_levels_vccq29.attr,
        &dev_attr_active_icc_levels_vccq210.attr,
        &dev_attr_active_icc_levels_vccq211.attr,
        &dev_attr_active_icc_levels_vccq212.attr,
        &dev_attr_active_icc_levels_vccq213.attr,
        &dev_attr_active_icc_levels_vccq214.attr,
        &dev_attr_active_icc_levels_vccq215.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_power_descriptor_group = {
        .name = "power_descriptor",
        .attrs = ufs_sysfs_power_descriptor,
};

#define UFS_STRING_DESCRIPTOR(_name, _pname)                            \
static ssize_t _name##_show(struct device *dev,                         \
        struct device_attribute *attr, char *buf)                       \
{                                                                       \
        u8 index;                                                       \
        struct ufs_hba *hba = dev_get_drvdata(dev);                     \
        int ret;                                                        \
        int desc_len = QUERY_DESC_MAX_SIZE;                             \
        u8 *desc_buf;                                                   \
                                                                        \
        down(&hba->host_sem);                                           \
        if (!ufshcd_is_user_access_allowed(hba)) {                      \
                up(&hba->host_sem);                                     \
                return -EBUSY;                                          \
        }                                                               \
        desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);            \
        if (!desc_buf) {                                                \
                up(&hba->host_sem);                                     \
                return -ENOMEM;                                         \
        }                                                               \
        ufshcd_rpm_get_sync(hba);                                       \
        ret = ufshcd_query_descriptor_retry(hba,                        \
                UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,     \
                0, 0, desc_buf, &desc_len);                             \
        if (ret) {                                                      \
                ret = -EINVAL;                                          \
                goto out;                                               \
        }                                                               \
        index = desc_buf[DEVICE_DESC_PARAM##_pname];                    \
        kfree(desc_buf);                                                \
        desc_buf = NULL;                                                \
        ret = ufshcd_read_string_desc(hba, index, &desc_buf,            \
                                      SD_ASCII_STD);                    \
        if (ret < 0)                                                    \
                goto out;                                               \
        ret = sysfs_emit(buf, "%s\n", desc_buf);                        \
out:                                                                    \
        ufshcd_rpm_put_sync(hba);                                       \
        kfree(desc_buf);                                                \
        up(&hba->host_sem);                                             \
        return ret;                                                     \
}                                                                       \
static DEVICE_ATTR_RO(_name)

UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);

static struct attribute *ufs_sysfs_string_descriptors[] = {
        &dev_attr_manufacturer_name.attr,
        &dev_attr_product_name.attr,
        &dev_attr_oem_id.attr,
        &dev_attr_serial_number.attr,
        &dev_attr_product_revision.attr,
        NULL,
};

static const struct attribute_group ufs_sysfs_string_descriptors_group = {
        .name = "string_descriptors",
        .attrs = ufs_sysfs_string_descriptors,
};

static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
        return idn >= QUERY_FLAG_IDN_WB_EN &&
               idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}

#define UFS_FLAG(_name, _uname)                                         \
static ssize_t _name##_show(struct device *dev,                         \
        struct device_attribute *attr, char *buf)                       \
{                                                                       \
        bool flag;                                                      \
        u8 index = 0;                                                   \
        int ret;                                                        \
        struct ufs_hba *hba = dev_get_drvdata(dev);                     \
                                                                        \
        down(&hba->host_sem);                                           \
        if (!ufshcd_is_user_access_allowed(hba)) {                      \
                up(&hba->host_sem);                                     \
                return -EBUSY;                                          \
        }                                                               \
        if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))                 \
                index = ufshcd_wb_get_query_index(hba);                 \
        ufshcd_rpm_get_sync(hba);                                       \
        ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,       \
                QUERY_FLAG_IDN##_uname, index, &flag);                  \
        ufshcd_rpm_put_sync(hba);                                       \
        if (ret) {                                                      \
                ret = -EINVAL;                                          \
                goto out;                                               \
        }                                                               \
        ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false");         \
out:                                                                    \
        up(&hba->host_sem);                                             \
        return ret;                                                     \
}                                                                       \
static DEVICE_ATTR_RO(_name)
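
/*
 * For example, UFS_FLAG(wb_enable, _WB_EN) below expands to a
 * wb_enable_show() that queries QUERY_FLAG_IDN_WB_EN (using the
 * WriteBooster query index, since that IDN falls in the WB flag range
 * tested by ufshcd_is_wb_flags()) and prints "true" or "false".
 */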
1525
1526 UFS_FLAG(device_init, _FDEVICEINIT);
1527 UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
1528 UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
1529 UFS_FLAG(bkops_enable, _BKOPS_EN);
1530 UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
1531 UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
1532 UFS_FLAG(busy_rtc, _BUSY_RTC);
1533 UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
1534 UFS_FLAG(wb_enable, _WB_EN);
1535 UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
1536 UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
1537
1538 static struct attribute *ufs_sysfs_device_flags[] = {
1539 &dev_attr_device_init.attr,
1540 &dev_attr_permanent_wpe.attr,
1541 &dev_attr_power_on_wpe.attr,
1542 &dev_attr_bkops_enable.attr,
1543 &dev_attr_life_span_mode_enable.attr,
1544 &dev_attr_phy_resource_removal.attr,
1545 &dev_attr_busy_rtc.attr,
1546 &dev_attr_disable_fw_update.attr,
1547 &dev_attr_wb_enable.attr,
1548 &dev_attr_wb_flush_en.attr,
1549 &dev_attr_wb_flush_during_h8.attr,
1550 NULL,
1551 };
1552
1553 static const struct attribute_group ufs_sysfs_flags_group = {
1554 .name = "flags",
1555 .attrs = ufs_sysfs_device_flags,
1556 };
1557
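/*
 * bMaxNumOfRTT caps how many READY TO TRANSFER UPIUs the device may keep
 * outstanding for write commands. The raw attribute value is reported in
 * hex, like the entries in the "attributes" group.
 */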
static ssize_t max_number_of_rtt_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 rtt;
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);
	ufshcd_rpm_put_sync(hba);

	if (ret)
		goto out;

	ret = sysfs_emit(buf, "0x%08X\n", rtt);

out:
	up(&hba->host_sem);
	return ret;
}

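/*
 * Changing bMaxNumOfRTT while SCSI requests are in flight could confuse
 * the device, so every request queue on the host is frozen around the
 * WRITE_ATTR query. The noio allocation scope prevents a deadlock where
 * memory reclaim would issue I/O against the very queues frozen here,
 * which is why the _nomemsave/_nomemrestore freeze variants are used.
 */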
static ssize_t max_number_of_rtt_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct scsi_device *sdev;
	unsigned int memflags;
	unsigned int rtt;
	int ret;

	if (kstrtouint(buf, 0, &rtt))
		return -EINVAL;

	if (rtt > dev_info->rtt_cap) {
		dev_err(dev, "rtt can be at most bDeviceRTTCap\n");
		return -EINVAL;
	}

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);

	memflags = memalloc_noio_save();
	shost_for_each_device(sdev, hba->host)
		blk_mq_freeze_queue_nomemsave(sdev->request_queue);

	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt);

	shost_for_each_device(sdev, hba->host)
		blk_mq_unfreeze_queue_nomemrestore(sdev->request_queue);
	memalloc_noio_restore(memflags);

	ufshcd_rpm_put_sync(hba);

out:
	up(&hba->host_sem);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_RW(max_number_of_rtt);

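/*
 * Like the WB flags, the WriteBooster attributes occupy a contiguous IDN
 * range and are queried with the WriteBooster buffer LU as index when
 * the device uses a LU-dedicated buffer.
 */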
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
	return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
		idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}

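/*
 * Read one of the WriteBooster buffer-resize attributes. Fails with
 * -EOPNOTSUPP unless WriteBooster is allowed and enabled, the buffer
 * preserves user space (b_presrv_uspc_en), and the device advertises
 * UFS_DEV_WB_BUF_RESIZE in its extended WriteBooster support field.
 */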
static int wb_read_resize_attrs(struct ufs_hba *hba,
		enum attr_idn idn, u32 *attr_val)
{
	u8 index = 0;
	int ret;

	if (!ufshcd_is_wb_allowed(hba) || !hba->dev_info.wb_enabled
		|| !hba->dev_info.b_presrv_uspc_en
		|| !(hba->dev_info.ext_wb_sup & UFS_DEV_WB_BUF_RESIZE))
		return -EOPNOTSUPP;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	index = ufshcd_wb_get_query_index(hba);
	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		idn, index, 0, attr_val);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return ret;
}

static ssize_t wb_resize_hint_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	u32 value;

	ret = wb_read_resize_attrs(hba,
		QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_wb_resize_hint_to_string(value));
}

static DEVICE_ATTR_RO(wb_resize_hint);

static ssize_t wb_resize_status_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;
	u32 value;

	ret = wb_read_resize_attrs(hba,
		QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_wb_resize_status_to_string(value));
}

static DEVICE_ATTR_RO(wb_resize_status);

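/*
 * UFS_ATTRIBUTE() generates a read-only sysfs show function that reads
 * one device attribute with a READ_ATTR query and prints it in hex,
 * e.g. (exact path depends on the platform):
 *
 *   $ cat /sys/bus/platform/drivers/ufshcd/<dev>/attributes/bkops_status
 *   0x00000000
 */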
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct ufs_hba *hba = dev_get_drvdata(dev); \
	u32 value; \
	int ret; \
	u8 index = 0; \
\
	down(&hba->host_sem); \
	if (!ufshcd_is_user_access_allowed(hba)) { \
		up(&hba->host_sem); \
		return -EBUSY; \
	} \
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
		index = ufshcd_wb_get_query_index(hba); \
	ufshcd_rpm_get_sync(hba); \
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
		QUERY_ATTR_IDN##_uname, index, 0, &value); \
	ufshcd_rpm_put_sync(hba); \
	if (ret) { \
		ret = -EINVAL; \
		goto out; \
	} \
	ret = sysfs_emit(buf, "0x%08X\n", value); \
out: \
	up(&hba->host_sem); \
	return ret; \
} \
static DEVICE_ATTR_RO(_name)

UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);

static struct attribute *ufs_sysfs_attributes[] = {
	&dev_attr_boot_lun_enabled.attr,
	&dev_attr_current_power_mode.attr,
	&dev_attr_active_icc_level.attr,
	&dev_attr_ooo_data_enabled.attr,
	&dev_attr_bkops_status.attr,
	&dev_attr_purge_status.attr,
	&dev_attr_max_data_in_size.attr,
	&dev_attr_max_data_out_size.attr,
	&dev_attr_reference_clock_frequency.attr,
	&dev_attr_configuration_descriptor_lock.attr,
	&dev_attr_max_number_of_rtt.attr,
	&dev_attr_exception_event_control.attr,
	&dev_attr_exception_event_status.attr,
	&dev_attr_ffu_status.attr,
	&dev_attr_psa_state.attr,
	&dev_attr_psa_data_size.attr,
	&dev_attr_wb_flush_status.attr,
	&dev_attr_wb_avail_buf.attr,
	&dev_attr_wb_life_time_est.attr,
	&dev_attr_wb_cur_buf.attr,
	&dev_attr_wb_resize_hint.attr,
	&dev_attr_wb_resize_status.attr,
	NULL,
};

static const struct attribute_group ufs_sysfs_attributes_group = {
	.name = "attributes",
	.attrs = ufs_sysfs_attributes,
};

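/*
 * Helper for the HID (host-initiated defragmentation) files below: a
 * single attribute query wrapped in the usual host_sem / runtime-PM
 * bracketing.
 */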
static int hid_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		enum attr_idn idn, u32 *attr_val)
{
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		up(&hba->host_sem);
		return -EBUSY;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, opcode, idn, 0, 0, attr_val);
	ufshcd_rpm_put_sync(hba);

	up(&hba->host_sem);
	return ret;
}

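/*
 * Writing "enable"/"disable" to analysis_trigger starts or stops
 * fragmentation analysis; defrag_trigger does the same for the actual
 * defragmentation pass. Both map onto the same defrag-operation
 * attribute, e.g.:
 *
 *   $ echo enable > .../hid/defrag_trigger
 */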
static ssize_t analysis_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_WO(analysis_trigger);


static ssize_t defrag_trigger_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 mode;
	int ret;

	if (sysfs_streq(buf, "enable"))
		mode = HID_ANALYSIS_AND_DEFRAG_ENABLE;
	else if (sysfs_streq(buf, "disable"))
		mode = HID_ANALYSIS_AND_DEFRAG_DISABLE;
	else
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_HID_DEFRAG_OPERATION, &mode);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_WO(defrag_trigger);


static ssize_t fragmented_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_HID_AVAILABLE_SIZE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(fragmented_size);

static ssize_t defrag_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_HID_SIZE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static ssize_t defrag_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_HID_SIZE, &value);

	return ret < 0 ? ret : count;
}

static DEVICE_ATTR_RW(defrag_size);

static ssize_t progress_ratio_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_HID_PROGRESS_RATIO, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", value);
}

static DEVICE_ATTR_RO(progress_ratio);

static ssize_t state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int ret;

	ret = hid_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_HID_STATE, &value);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", ufs_hid_state_to_string(value));
}

static DEVICE_ATTR_RO(state);

static struct attribute *ufs_sysfs_hid[] = {
	&dev_attr_analysis_trigger.attr,
	&dev_attr_defrag_trigger.attr,
	&dev_attr_fragmented_size.attr,
	&dev_attr_defrag_size.attr,
	&dev_attr_progress_ratio.attr,
	&dev_attr_state.attr,
	NULL,
};

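/* Expose the "hid" group only when the device reports HID support. */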
static umode_t ufs_sysfs_hid_is_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return hba->dev_info.hid_sup ? attr->mode : 0;
}

static const struct attribute_group ufs_sysfs_hid_group = {
	.name = "hid",
	.attrs = ufs_sysfs_hid,
	.is_visible = ufs_sysfs_hid_is_visible,
};

static const struct attribute_group *ufs_sysfs_groups[] = {
	&ufs_sysfs_default_group,
	&ufs_sysfs_capabilities_group,
	&ufs_sysfs_ufshci_group,
	&ufs_sysfs_monitor_group,
	&ufs_sysfs_power_info_group,
	&ufs_sysfs_device_descriptor_group,
	&ufs_sysfs_interconnect_descriptor_group,
	&ufs_sysfs_geometry_descriptor_group,
	&ufs_sysfs_health_descriptor_group,
	&ufs_sysfs_power_descriptor_group,
	&ufs_sysfs_string_descriptors_group,
	&ufs_sysfs_flags_group,
	&ufs_sysfs_attributes_group,
	&ufs_sysfs_hid_group,
	NULL,
};

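/*
 * UFS_LUN_DESC_PARAM() generates per-LUN show functions that read one
 * field of a LUN-scoped descriptor. These attach to the SCSI device
 * rather than the host controller, e.g. (exact path depends on the SCSI
 * topology):
 *
 *   $ cat /sys/class/scsi_device/<hctl>/device/unit_descriptor/lu_enable
 */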
#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \
static ssize_t _pname##_show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct scsi_device *sdev = to_scsi_device(dev); \
	struct ufs_hba *hba = shost_priv(sdev->host); \
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
		return -EINVAL; \
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
		lun, _duname##_DESC_PARAM##_puname, buf, _size); \
} \
static DEVICE_ATTR_RO(_pname)

#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
	UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)

UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);

static struct attribute *ufs_sysfs_unit_descriptor[] = {
	&dev_attr_lu_enable.attr,
	&dev_attr_boot_lun_id.attr,
	&dev_attr_lun_write_protect.attr,
	&dev_attr_lun_queue_depth.attr,
	&dev_attr_psa_sensitive.attr,
	&dev_attr_lun_memory_type.attr,
	&dev_attr_data_reliability.attr,
	&dev_attr_logical_block_size.attr,
	&dev_attr_logical_block_count.attr,
	&dev_attr_erase_block_size.attr,
	&dev_attr_provisioning_type.attr,
	&dev_attr_physical_memory_resource_count.attr,
	&dev_attr_context_capabilities.attr,
	&dev_attr_large_unit_granularity.attr,
	&dev_attr_wb_buf_alloc_units.attr,
	NULL,
};

static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj,
		struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	umode_t mode = attr->mode;

	if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN)
		/* Boot and device WLUN have no unit descriptors */
		mode = 0;
	if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr)
		mode = 0;

	return mode;
}

const struct attribute_group ufs_sysfs_unit_descriptor_group = {
	.name = "unit_descriptor",
	.attrs = ufs_sysfs_unit_descriptor,
	.is_visible = ufs_unit_descriptor_is_visible,
};

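/*
 * dDynCapNeeded is queried per LUN (the query index is the UPIU LUN),
 * which is why this attribute lives on each SCSI device rather than in
 * the host-level "attributes" group.
 */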
static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 value;
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	int ret;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_rpm_get_sync(hba);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	ufshcd_rpm_put_sync(hba);
	if (ret) {
		ret = -EINVAL;
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);

out:
	up(&hba->host_sem);
	return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);

static struct attribute *ufs_sysfs_lun_attributes[] = {
	&dev_attr_dyn_cap_needed_attribute.attr,
	NULL,
};

const struct attribute_group ufs_sysfs_lun_attributes_group = {
	.attrs = ufs_sysfs_lun_attributes,
};

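/*
 * Called from the ufshcd init path. A failure here only costs the sysfs
 * interface, so it is logged rather than propagated.
 */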
void ufs_sysfs_add_nodes(struct device *dev)
{
	int ret;

	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
	if (ret)
		dev_err(dev,
			"%s: sysfs groups creation failed (err = %d)\n",
			__func__, ret);
}

void ufs_sysfs_remove_nodes(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}