/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

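/*
 * Read a descriptor from the device: build a standard read query request,
 * send it, and poll for completion. On success the descriptor payload is
 * copied out of the query response UPIU into the caller's buffer.
 */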
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
            desc, sizeof(struct ufshci_unit_descriptor)));
}

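/*
 * Read the current value of a device flag (a single-byte boolean such as
 * fDeviceInit) using a standard read query request.
 */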
static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}

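/*
 * Set a device flag to true using a standard write query request. The
 * matching clear helper below resets the flag to false.
 */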
static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

static int
ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

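/*
 * Read an attribute value from the device. Attributes are addressed by
 * type, index and selector; the value is returned as a 64-bit integer
 * taken from the query response UPIU. A write counterpart follows below.
 */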
static int
ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t *value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
                return (ENXIO);
        }

        *value = status.cpl.response_upiu.query_response_upiu.value_64;

        return (0);
}

static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}

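/*
 * Run the fDeviceInit handshake: set the fDeviceInit flag, then poll it
 * until the device clears it to indicate that device initialization has
 * completed. The polling interval backs off from 1us, growing by half
 * each iteration and capped at 1ms, bounded overall by the controller's
 * device_init_timeout_in_ms.
 */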
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /* Wait for the device to clear the fDeviceInit flag. */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "fDeviceInit was not cleared to %d "
                            "within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

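/*
 * Reset the UFS device with a DME endpoint reset, then re-run the
 * fDeviceInit handshake to bring the device back to a usable state.
 */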
int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}

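/*
 * Program the bRefClkFreq attribute with the reference clock frequency
 * the controller provides to the device.
 */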
int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        index = 0;      /* bRefClkFreq is a device-type attribute */
        selector = 0;   /* bRefClkFreq is a device-type attribute */

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}

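/*
 * Query the UniPro link parameters set up at link startup: the local and
 * peer UniPro versions, PA_Granularity and PA_TActivate. Controllers with
 * the LONG_PEER_PA_TACTIVATE quirk also get the peer's PA_TActivate
 * bumped here.
 */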
int
ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
{
        uint32_t pa_granularity, peer_pa_granularity;
        uint32_t t_activate, peer_t_activate;

        /*
         * UniPro version:
         * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
         *   1 = 1.40, 0 = Reserved
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
            &ctrlr->unipro_version))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
            &ctrlr->ufs_dev.unipro_version))
                return (ENXIO);

        /*
         * PA_Granularity: granularity for PA_TActivate and PA_Hibern8Time
         * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
            &peer_pa_granularity))
                return (ENXIO);

        /*
         * PA_TActivate: time to wait before activating a burst in order to
         * wake up the peer M-RX.
         * UniPro automatically sets timing information such as PA_TActivate
         * through the PACP_CAP_EXT1_ind command during the Link Startup
         * operation.
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: we need to add 200us to
                 * the peer's PA_TActivate.
                 */
                if (pa_granularity == peer_pa_granularity) {
                        peer_t_activate = t_activate + 2;
                        if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
                            peer_t_activate))
                                return (ENXIO);
                }
        }

        return (0);
}

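/*
 * Negotiate the UIC power mode: activate the maximum number of TX/RX
 * data lanes, select the highest supported HS gear, enable the HS
 * terminations, program the power mode timeout values, and finally
 * request Fast mode in both directions.
 */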
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        uint32_t power_mode, peer_granularity;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to max */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
                /* Before changing gears, first change the number of lanes. */
                if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
                        return (ENXIO);
                if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                        return (ENXIO);

                /* Wait for the power mode change to complete. */
                if (ufshci_uic_power_mode_ready(ctrlr)) {
                        ufshci_reg_dump(ctrlr);
                        return (ENXIO);
                }
        }

        /* Set HS-GEAR to max gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set TX/RX PWRMode */
        power_mode = (fast_mode << rx_bit_shift) | fast_mode;
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        /* Clear the 'Power Mode completion status' bit */
        ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS));

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk:
                 * we need to wait 1250us and clear the DME error.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_granularity))
                        return (ENXIO);
        }

        return (0);
}

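/*
 * Check whether the device exposes the "UFS Device" well-known LUN, which
 * the driver uses for power mode control. The result is recorded in
 * ufs_dev.power_mode_supported.
 */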
int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.power_mode_supported = false;

        if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
                return (0);

        ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr,
            UFSHCI_WLUN_UFS_DEVICE);
        if (ctrlr->ufs_device_wlun_periph == NULL) {
                ufshci_printf(ctrlr,
                    "Well-known LUN `UFS Device (0x50)` not found\n");
                return (0);
        }

        ctrlr->ufs_dev.power_mode_supported = true;
        return (0);
}

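/*
 * Fetch the device and geometry descriptors and derive device-wide
 * parameters from them: the spec version, the number of enabled LUs,
 * the maximum LUN count and the total raw capacity.
 */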
int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * kDeviceDensityUnit is defined in the spec as 512.
         * qTotalRawDeviceCapacity uses big-endian byte ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
            device_density_unit);

        return (0);
}

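/*
 * Turn WriteBooster on: enable the feature itself, then buffer flushing
 * during hibernate, then explicit buffer flushing. The disable helper
 * below undoes these steps in reverse order.
 */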
static int
ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Enable WriteBooster */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = true;

        /* Enable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_set_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Enable WriteBooster buffer flush */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = true;

        return (0);
}

static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Disable WriteBooster buffer flush */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = false;

        /* Disable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_clear_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Disable WriteBooster */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = false;

        return (0);
}

static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint8_t buffer_lun;
        uint64_t life_time;
        uint32_t error;

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
        if (error)
                return (error);

        *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

        return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        bool is_life_time_left = false;
        uint64_t available_buffer_size, current_buffer_size;
        uint8_t buffer_lun;
        uint32_t error;

        *need_flush = false;

        if (!dev->is_wb_enabled)
                return (0);

        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                return (error);

        if (!is_life_time_left)
                return (ufshci_dev_disable_write_booster(ctrlr));

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
            &available_buffer_size);
        if (error)
                return (error);

        switch (dev->wb_user_space_config_option) {
        case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
                *need_flush = (available_buffer_size <=
                    UFSHCI_ATTR_WB_AVAILABLE_10);
                break;
        case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
                /*
                 * In PRESERVE USER SPACE mode, a flush should be performed
                 * when the current buffer size is greater than 0 and the
                 * available buffer size has dropped below
                 * write_booster_flush_threshold.
                 */
                error = ufshci_dev_read_attribute(ctrlr,
                    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
                    &current_buffer_size);
                if (error)
                        return (error);

                if (current_buffer_size == 0)
                        return (0);

                *need_flush = (available_buffer_size <
                    dev->write_booster_flush_threshold);
                break;
        default:
                ufshci_printf(ctrlr,
                    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value\n");
                return (EINVAL);
        }

        /*
         * TODO: Need to handle the WRITEBOOSTER_FLUSH_NEEDED exception case
         * from the wExceptionEventStatus attribute.
         */

        return (0);
}

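/*
 * Probe and configure WriteBooster: verify the feature bit in the device
 * descriptor, enable the feature, locate the backing buffer (shared or
 * dedicated to one LU), compute the buffer size, and confirm the buffer
 * has lifetime left. Any failure along the way disables WriteBooster.
 */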
int
ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint32_t extended_ufs_feature_support;
        uint32_t alloc_units = 0;
        struct ufshci_unit_descriptor unit_desc;
        uint8_t lun;
        bool is_life_time_left;
        uint32_t mega_byte = 1024 * 1024;
        uint32_t error = 0;

        extended_ufs_feature_support = be32toh(
            dev->dev_desc.dExtendedUfsFeaturesSupport);
        if (!(extended_ufs_feature_support &
            UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
                /* This device does not support WriteBooster */
                return (0);
        }

        if (ufshci_dev_enable_write_booster(ctrlr))
                return (0);

        /* Get WriteBooster buffer parameters */
        dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
        dev->wb_user_space_config_option =
            dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;

        /*
         * Find the size of the write buffer.
         * With LU-dedicated (00h), the WriteBooster buffer is assigned
         * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
         * uses a single device-wide buffer shared by multiple LUs.
         */
        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
                alloc_units = be32toh(
                    dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
                ufshci_printf(ctrlr,
                    "WriteBooster buffer type = Shared, alloc_units=%u\n",
                    alloc_units);
        } else if (dev->wb_buffer_type ==
            UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
                ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
                for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
                        /* Find a dedicated buffer using a unit descriptor */
                        if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
                            &unit_desc))
                                continue;

                        alloc_units = be32toh(
                            unit_desc.dLUNumWriteBoosterBufferAllocUnits);
                        if (alloc_units) {
                                dev->wb_dedicated_lu = lun;
                                break;
                        }
                }
        } else {
                ufshci_printf(ctrlr,
                    "Unsupported WriteBooster buffer type: 0x%x\n",
                    dev->wb_buffer_type);
                goto out;
        }

        if (alloc_units == 0) {
                ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
                goto out;
        }

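        /*
         * Convert allocation units to mebibytes. dSegmentSize is expressed
         * in sectors of UFSHCI_SECTOR_SIZE bytes, so the product below is a
         * sector count; dividing by (1 MiB / sector size) yields MiB.
         */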
        dev->wb_buffer_size_mb = alloc_units *
            dev->geo_desc.bAllocationUnitSize *
            (be32toh(dev->geo_desc.dSegmentSize)) /
            (mega_byte / UFSHCI_SECTOR_SIZE);

        /* Set to flush when 40% of the available buffer size remains */
        dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;

        /*
         * Check if any WriteBooster buffer lifetime is left.
         * The WriteBooster buffer lifetime is the percentage of life used,
         * based on P/E cycles. If "preserve user space" is enabled, writes
         * to normal user space also consume WB life since the area is
         * shared.
         */
        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                goto out;

        if (!is_life_time_left) {
                ufshci_printf(ctrlr,
                    "No WriteBooster buffer lifetime is left.\n");
                goto out;
        }

        ufshci_printf(ctrlr, "WriteBooster Enabled\n");
        return (0);
out:
        ufshci_dev_disable_write_booster(ctrlr);
        return (error);
}