/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

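/*
 * Read a descriptor from the device by issuing a Query Request UPIU with
 * the READ DESCRIPTOR opcode, polling synchronously for completion, and
 * copying desc_size bytes of the response payload into desc.
 */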
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
            desc, sizeof(struct ufshci_unit_descriptor)));
}

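/*
 * Read a single-byte UFS flag with the READ FLAG query opcode and return
 * its value through *flag.
 */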
static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}

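/*
 * Set a UFS flag to true with the SET FLAG query opcode; flags are
 * cleared with the CLEAR FLAG opcode (see ufshci_dev_clear_flag() below).
 */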
static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

static int
ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

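/*
 * Read a UFS attribute with the READ ATTRIBUTE query opcode. The value
 * returned by the device is passed back through *value, widened to 64
 * bits.
 */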
static int
ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t *value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
                return (ENXIO);
        }

        *value = status.cpl.response_upiu.query_response_upiu.value_64;

        return (0);
}

static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}

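/*
 * Device initialization handshake: set fDeviceInit and poll until the
 * device clears it, which per the UFS specification indicates that
 * device-side initialization has completed.
 */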
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /*
         * Wait for the device to clear UFSHCI_FLAG_F_DEVICE_INIT, polling
         * with exponential backoff (1 us initially, growing 1.5x per
         * iteration, capped at 1 ms).
         */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "fDeviceInit did not become %d within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}

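/*
 * Program the bRefClkFreq attribute with the reference clock frequency
 * supplied by the host. ctrlr->ref_clk is assumed to already hold the
 * encoding defined by the UFS spec (0 = 19.2 MHz, 1 = 26 MHz,
 * 2 = 38.4 MHz, 3 = 52 MHz).
 */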
int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        /* bRefClkFreq is a device-type attribute; index and selector are 0. */
        index = 0;
        selector = 0;

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}

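/*
 * Read the UniPro layer parameters (version, PA_Granularity,
 * PA_TActivate) from both the local host and the remote device, and
 * apply quirk-specific adjustments where required.
 */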
int
ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
{
        uint32_t pa_granularity, peer_pa_granularity;
        uint32_t t_activate, peer_t_activate;

        /*
         * UniPro version:
         * - 7~15 = above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
         *   1 = 1.40, 0 = reserved
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
            &ctrlr->unipro_version))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
            &ctrlr->ufs_dev.unipro_version))
                return (ENXIO);

        /*
         * PA_Granularity: granularity for PA_TActivate and PA_Hibern8Time
         * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
            &peer_pa_granularity))
                return (ENXIO);

        /*
         * PA_TActivate: time to wait before activating a burst in order to
         * wake up the peer M-RX.
         * UniPro automatically sets timing information such as PA_TActivate
         * through the PACP_CAP_EXT1_ind command during the Link Startup
         * operation.
         */
        if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
                return (ENXIO);
        if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: the peer's PA_TActivate
                 * must be raised by two granularity steps (200us at the 100us
                 * granularity). This is only safe when both sides use the
                 * same granularity.
                 */
                if (pa_granularity == peer_pa_granularity) {
                        peer_t_activate = t_activate + 2;
                        if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
                            peer_t_activate))
                                return (ENXIO);
                }
        }

        return (0);
}

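/*
 * Negotiate the UIC power mode: raise the active lane count and HS gear
 * to the maximum advertised, enable HS terminations, select HS series B,
 * program the DL-layer timeout values, and finally request Fast mode in
 * both directions through PA_PWRMode.
 */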
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        uint32_t power_mode, peer_granularity;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to the maximum */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
                /* Before changing gears, first change the number of lanes. */
                if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
                        return (ENXIO);
                if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                        return (ENXIO);

                /* Wait for the power mode change to complete. */
                if (ufshci_uic_power_mode_ready(ctrlr)) {
                        ufshci_reg_dump(ctrlr);
                        return (ENXIO);
                }
        }

        /* Set HS-GEAR to the maximum gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set TX/RX PWRMode */
        power_mode = (fast_mode << rx_bit_shift) | fast_mode;
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: wait 1250us after the
                 * power mode change so that any DME error can be cleared.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_granularity))
                        return (ENXIO);
        }

        return (0);
}

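/*
 * Auto-hibernate is armed through the AHIT register: a nonzero idle
 * timer value allows the host controller to enter the hibernate state
 * autonomously. This writes the value prepared by
 * ufshci_dev_init_auto_hibernate() below.
 */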
void
ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr)
{
        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        ufshci_mmio_write_4(ctrlr, ahit, ctrlr->ufs_dev.ahit);
}

void
ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.auto_hibernation_supported =
            UFSHCIV(UFSHCI_CAP_REG_AUTOH8, ctrlr->cap) &&
            !(ctrlr->quirks & UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE);

        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        /*
         * The default auto-hibernate idle timer is 150 ms (AH8ITV = 150
         * with TS = 3, a 1 ms timer scale).
         */
        ctrlr->ufs_dev.ahit = 0;
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_AH8ITV, 150);
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_TS, 3);

        ufshci_dev_enable_auto_hibernate(ctrlr);
}

void
ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.link_state = UFSHCI_UIC_LINK_STATE_ACTIVE;
}

int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.power_mode_supported = false;

        if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
                return (0);

        ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr,
            UFSHCI_WLUN_UFS_DEVICE);
        if (ctrlr->ufs_device_wlun_periph == NULL) {
                ufshci_printf(ctrlr,
                    "Well-known LUN `UFS Device (0x50)` not found\n");
                return (0);
        }

        ctrlr->ufs_dev.power_mode_supported = true;
        ctrlr->ufs_dev.power_mode = UFSHCI_DEV_PWR_ACTIVE;

        return (0);
}

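/*
 * Fetch and cache the device and geometry descriptors, derive the
 * maximum LUN count from bMaxNumberLU, and report the device's spec
 * version and raw capacity.
 */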
int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * kDeviceDensityUnit is defined in the spec as 512.
         * qTotalRawDeviceCapacity uses big-endian byte ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
            device_density_unit);

        return (0);
}

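/*
 * WriteBooster lets the device absorb writes in a faster buffer (a
 * pseudo-SLC region on typical devices). Enabling it sets three flags:
 * fWriteBoosterEn, buffer flush during hibernate, and explicit buffer
 * flush.
 */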
static int
ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Enable WriteBooster */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = true;

        /* Enable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_set_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Enable WriteBooster buffer flush */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = true;

        return (0);
}

static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Disable WriteBooster buffer flush */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = false;

        /* Disable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_clear_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Disable WriteBooster */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = false;

        return (0);
}

static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint8_t buffer_lun;
        uint64_t life_time;
        uint32_t error;

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
        if (error)
                return (error);

        *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

        return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        bool is_life_time_left = false;
        uint64_t available_buffer_size, current_buffer_size;
        uint8_t buffer_lun;
        uint32_t error;

        *need_flush = false;

        if (!dev->is_wb_enabled)
                return (0);

        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                return (error);

        if (!is_life_time_left)
                return (ufshci_dev_disable_write_booster(ctrlr));

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
            &available_buffer_size);
        if (error)
                return (error);

        switch (dev->wb_user_space_config_option) {
        case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
                *need_flush = (available_buffer_size <=
                    UFSHCI_ATTR_WB_AVAILABLE_10);
                break;
        case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
                /*
                 * In PRESERVE USER SPACE mode, a flush is needed when the
                 * current buffer size is greater than zero and the available
                 * buffer has dropped below write_booster_flush_threshold.
                 */
                error = ufshci_dev_read_attribute(ctrlr,
                    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
                    &current_buffer_size);
                if (error)
                        return (error);

                if (current_buffer_size == 0)
                        return (0);

                *need_flush = (available_buffer_size <
                    dev->write_booster_flush_threshold);
                break;
        default:
                ufshci_printf(ctrlr,
                    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
                return (EINVAL);
        }

        /*
         * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
         * wExceptionEventStatus attribute.
         */

        return (0);
}

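/*
 * Top-level WriteBooster setup: check the feature bit in
 * dExtendedUfsFeaturesSupport, enable WriteBooster, determine the buffer
 * type (shared vs. LU-dedicated) and its size, and verify that buffer
 * lifetime remains. On any failure, WriteBooster is disabled again.
 */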
int
ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint32_t extended_ufs_feature_support;
        uint32_t alloc_units = 0;
        struct ufshci_unit_descriptor unit_desc;
        uint8_t lun;
        bool is_life_time_left;
        uint32_t mega_byte = 1024 * 1024;
        uint32_t error = 0;

        extended_ufs_feature_support = be32toh(
            dev->dev_desc.dExtendedUfsFeaturesSupport);
        if (!(extended_ufs_feature_support &
            UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
                /* This device does not support WriteBooster */
                return (0);
        }

        if (ufshci_dev_enable_write_booster(ctrlr))
                return (0);

        /* Get WriteBooster buffer parameters */
        dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
        dev->wb_user_space_config_option =
            dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;

        /*
         * Find the size of the write buffer.
         * With LU-dedicated (00h), the WriteBooster buffer is assigned
         * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
         * uses a single device-wide buffer shared by multiple LUs.
         */
        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
                alloc_units = be32toh(
                    dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
                ufshci_printf(ctrlr,
                    "WriteBooster buffer type = Shared, alloc_units=%d\n",
                    alloc_units);
        } else if (dev->wb_buffer_type ==
            UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
                ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
                for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
                        /* Find the dedicated buffer using a unit descriptor */
                        if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
                            &unit_desc))
                                continue;

                        alloc_units = be32toh(
                            unit_desc.dLUNumWriteBoosterBufferAllocUnits);
                        if (alloc_units) {
                                dev->wb_dedicated_lu = lun;
                                break;
                        }
                }
        } else {
                ufshci_printf(ctrlr,
                    "Unsupported WriteBooster buffer type: 0x%x\n",
                    dev->wb_buffer_type);
                goto out;
        }

        if (alloc_units == 0) {
                ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
                goto out;
        }

        dev->wb_buffer_size_mb = alloc_units *
            dev->geo_desc.bAllocationUnitSize *
            (be32toh(dev->geo_desc.dSegmentSize)) /
            (mega_byte / UFSHCI_SECTOR_SIZE);

        /* Flush when only 40% of the available buffer size remains */
        dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;

        /*
         * Check whether WriteBooster buffer lifetime is left.
         * The lifetime estimate is the percentage of device life consumed by
         * P/E cycles. If "preserve user space" is enabled, writes to normal
         * user space also consume WB lifetime since the area is shared.
         */
        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                goto out;

        if (!is_life_time_left) {
                ufshci_printf(ctrlr,
                    "No WriteBooster buffer lifetime left.\n");
                goto out;
        }

        ufshci_printf(ctrlr, "WriteBooster Enabled\n");
        return (0);
out:
        ufshci_dev_disable_write_booster(ctrlr);
        return (error);
}

int
ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr,
    uint8_t *power_mode)
{
        uint64_t value;
        int err;

        err = ufshci_dev_read_attribute(ctrlr, UFSHCI_ATTR_B_CURRENT_POWER_MODE,
            /*index*/ 0, /*selector*/ 0, &value);
        if (err)
                return (err);

        *power_mode = (uint8_t)value;

        return (0);
}

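/*
 * Enter hibernate with a DME hibernate-enter UIC command, then wait for
 * the controller to report that the transition has completed; exit is
 * symmetric.
 */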
static int
ufshci_dev_hibernate_enter(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_enter(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

static int
ufshci_dev_hibernate_exit(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_exit(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

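/*
 * Transition the UIC link to the requested state. OFF enters hibernate
 * and then disables the controller; ACTIVE and HIBERNATE are reachable
 * only from each other; BROKEN is merely recorded. Invalid transitions
 * return EINVAL.
 */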
int
ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr,
    enum ufshci_uic_link_state target_state)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error = 0;

        if (dev->link_state == target_state)
                return (0);

        switch (target_state) {
        case UFSHCI_UIC_LINK_STATE_OFF:
                error = ufshci_dev_hibernate_enter(ctrlr);
                if (error)
                        break;
                error = ufshci_ctrlr_disable(ctrlr);
                break;
        case UFSHCI_UIC_LINK_STATE_ACTIVE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_HIBERNATE)
                        error = ufshci_dev_hibernate_exit(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_HIBERNATE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_ACTIVE)
                        error = ufshci_dev_hibernate_enter(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_BROKEN:
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error)
                return (error);

        dev->link_state = target_state;

        return (0);
}