/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

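/*
 * Read a descriptor from the device: send a READ DESCRIPTOR query request,
 * poll for its completion and copy the returned payload into 'desc'.
 */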
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr,
                    "Failed to send Read Descriptor query request!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

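/* Convenience wrappers that read the device, geometry and unit descriptors. */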
static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
            desc, sizeof(struct ufshci_unit_descriptor)));
}

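/* Read a device flag with a READ FLAG query request. */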
static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}

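/* Set a device flag with a SET FLAG query request. */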
static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

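/* Clear a device flag with a CLEAR FLAG query request. */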
static int
ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

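/* Read a device attribute with a READ ATTRIBUTE query request. */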
static int
ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t *value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
                return (ENXIO);
        }

        *value = status.cpl.response_upiu.query_response_upiu.value_64;

        return (0);
}

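/* Write a device attribute with a WRITE ATTRIBUTE query request. */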
static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}

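/*
 * Start device initialization: set the fDeviceInit flag, then poll it with
 * exponential back-off until the device clears it or the timeout expires.
 */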
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "device init did not become %d "
                            "within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

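/* Reset the UFS device endpoint and rerun device initialization. */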
int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}

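/* Tell the device which reference clock frequency the host supplies. */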
int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        index = 0;    /* bRefClkFreq is a device-level attribute */
        selector = 0; /* bRefClkFreq is a device-level attribute */

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}

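/*
 * Apply UniPro (PA layer) quirk handling, such as lengthening the peer's
 * PA_TActivate on controllers that require it.
 */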
int
ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
{
        uint32_t pa_granularity, peer_pa_granularity;
        uint32_t t_activate, peer_t_activate;

        if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
                /*
                 * PA_Granularity: Granularity for PA_TActivate and
                 * PA_Hibern8Time
                 * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
                 */
                if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity,
                    &pa_granularity))
                        return (ENXIO);
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_pa_granularity))
                        return (ENXIO);

                /*
                 * PA_TActivate: Time to wait before activating a burst in
                 * order to wake up the peer M-RX. UniPro automatically sets
                 * timing information such as PA_TActivate through the
                 * PACP_CAP_EXT1_ind command during the Link Startup
                 * operation.
                 */
                if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
                        return (ENXIO);
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate,
                    &peer_t_activate))
                        return (ENXIO);

                /*
                 * Intel Lakefield UFSHCI has a quirk: 200us must be added to
                 * the peer's PA_TActivate.
                 */
                if (pa_granularity == peer_pa_granularity) {
                        peer_t_activate = t_activate + 2;
                        if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
                            peer_t_activate))
                                return (ENXIO);
                }
        }

        return (0);
}

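/*
 * Negotiate the UIC power mode: program the active lanes, the HS gear,
 * terminations and timeout values, then request fast mode and wait for the
 * power mode change to complete.
 */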
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        uint32_t peer_granularity;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to max */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
                /* Before changing gears, first change the number of lanes. */
                if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode,
                    &ctrlr->tx_rx_power_mode))
                        return (ENXIO);
                if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode,
                    ctrlr->tx_rx_power_mode))
                        return (ENXIO);

                /* Wait for the power mode change to complete. */
                if (ufshci_uic_power_mode_ready(ctrlr)) {
                        ufshci_reg_dump(ctrlr);
                        return (ENXIO);
                }
        }

        /* Set HS-GEAR to max gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set TX/RX PWRMode */
        ctrlr->tx_rx_power_mode = (fast_mode << rx_bit_shift) | fast_mode;
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, ctrlr->tx_rx_power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: we need to wait 1250us
                 * and clear the DME error.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_granularity))
                        return (ENXIO);
        }

        return (0);
}

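/* Program the AHIT register when auto-hibernation is supported. */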
void
ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr)
{
        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        ufshci_mmio_write_4(ctrlr, ahit, ctrlr->ufs_dev.ahit);
}

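/*
 * Check the controller's auto-hibernation capability and, if usable, set the
 * default idle timer value before enabling it.
 */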
void
ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.auto_hibernation_supported =
            UFSHCIV(UFSHCI_CAP_REG_AUTOH8, ctrlr->cap) &&
            !(ctrlr->quirks & UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE);

        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        /* The default value for auto hibernation is 150 ms */
        ctrlr->ufs_dev.ahit = 0;
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_AH8ITV, 150);
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_TS, 3);

        ufshci_dev_enable_auto_hibernate(ctrlr);
}

void
ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.link_state = UFSHCI_UIC_LINK_STATE_ACTIVE;
}

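/*
 * Record whether UFS device power mode control is possible, which requires
 * the "UFS Device" well-known LUN to be present.
 */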
int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.power_mode_supported = false;

        if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
                return (0);

        ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr,
            UFSHCI_WLUN_UFS_DEVICE);
        if (ctrlr->ufs_device_wlun_periph == NULL) {
                ufshci_printf(ctrlr,
                    "Well-known LUN `UFS Device (0x50)` not found\n");
                return (0);
        }

        ctrlr->ufs_dev.power_mode_supported = true;
        ctrlr->ufs_dev.power_mode = UFSHCI_DEV_PWR_ACTIVE;

        return (0);
}

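/*
 * Read the device and geometry descriptors, report basic device information
 * and derive the maximum LUN count and the raw capacity.
 */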
int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * The device density unit is defined in the spec as 512 bytes.
         * qTotalRawDeviceCapacity uses big-endian byte ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
                device_density_unit);

        return (0);
}

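/*
 * Turn on WriteBooster, including buffer flush during hibernate and the
 * regular buffer flush.
 */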
static int
ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Enable WriteBooster */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = true;

        /* Enable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_set_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Enable WriteBooster buffer flush */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = true;

        return (0);
}

static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Disable WriteBooster buffer flush */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = false;

        /* Disable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_clear_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Disable WriteBooster */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = false;

        return (0);
}

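/*
 * Check whether the WriteBooster buffer still has estimated lifetime left,
 * i.e. the lifetime estimate attribute has not reached its exceeded value.
 */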
static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint8_t buffer_lun;
        uint64_t life_time;
        uint32_t error;

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
        if (error)
                return (error);

        *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

        return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        bool is_life_time_left = false;
        uint64_t available_buffer_size, current_buffer_size;
        uint8_t buffer_lun;
        uint32_t error;

        *need_flush = false;

        if (!dev->is_wb_enabled)
                return (0);

        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                return (error);

        if (!is_life_time_left)
                return (ufshci_dev_disable_write_booster(ctrlr));

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
            &available_buffer_size);
        if (error)
                return (error);

        switch (dev->wb_user_space_config_option) {
        case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
                *need_flush = (available_buffer_size <=
                    UFSHCI_ATTR_WB_AVAILABLE_10);
                break;
        case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
                /*
                 * In PRESERVE USER SPACE mode, a flush is needed when the
                 * current buffer size is greater than zero and the available
                 * buffer size has dropped below
                 * write_booster_flush_threshold.
                 */
                error = ufshci_dev_read_attribute(ctrlr,
                    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
                    &current_buffer_size);
                if (error)
                        return (error);

                if (current_buffer_size == 0)
                        return (0);

                *need_flush = (available_buffer_size <
                    dev->write_booster_flush_threshold);
                break;
        default:
                ufshci_printf(ctrlr,
                    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value\n");
                return (EINVAL);
        }

        /*
         * TODO: Need to handle the WRITEBOOSTER_FLUSH_NEEDED exception case
         * from the wExceptionEventStatus attribute.
         */

        return (0);
}

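/*
 * Probe and configure WriteBooster: check feature support, enable it, locate
 * the buffer (shared or LU-dedicated), compute its size and the flush
 * threshold, and verify that buffer lifetime remains. Any failure disables
 * WriteBooster again.
 */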
int
ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint32_t extended_ufs_feature_support;
        uint32_t alloc_units = 0;
        struct ufshci_unit_descriptor unit_desc;
        uint8_t lun;
        bool is_life_time_left;
        uint32_t mega_byte = 1024 * 1024;
        uint32_t error = 0;

        extended_ufs_feature_support = be32toh(
            dev->dev_desc.dExtendedUfsFeaturesSupport);
        if (!(extended_ufs_feature_support &
            UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
                /* This device does not support WriteBooster */
                return (0);
        }

        if (ufshci_dev_enable_write_booster(ctrlr))
                return (0);

        /* Get WriteBooster buffer parameters */
        dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
        dev->wb_user_space_config_option =
            dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;

        /*
         * Find the size of the write buffer.
         * With LU-dedicated (00h), the WriteBooster buffer is assigned
         * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
         * uses a single device-wide buffer shared by multiple LUs.
         */
        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
                alloc_units = be32toh(
                    dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
                ufshci_printf(ctrlr,
                    "WriteBooster buffer type = Shared, alloc_units=%d\n",
                    alloc_units);
        } else if (dev->wb_buffer_type ==
            UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
                ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
                for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
                        /* Find a dedicated buffer using a unit descriptor */
                        if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
                            &unit_desc))
                                continue;

                        alloc_units = be32toh(
                            unit_desc.dLUNumWriteBoosterBufferAllocUnits);
                        if (alloc_units) {
                                dev->wb_dedicated_lu = lun;
                                break;
                        }
                }
        } else {
                ufshci_printf(ctrlr,
                    "Unsupported WriteBooster buffer type: 0x%x\n",
                    dev->wb_buffer_type);
                goto out;
        }

        if (alloc_units == 0) {
                ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
                goto out;
        }

        dev->wb_buffer_size_mb = alloc_units *
            dev->geo_desc.bAllocationUnitSize *
            (be32toh(dev->geo_desc.dSegmentSize)) /
            (mega_byte / UFSHCI_SECTOR_SIZE);

        /* Set to flush when 40% of the available buffer size remains */
        dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;

        /*
         * Check if WriteBooster buffer lifetime is left.
         * The WriteBooster buffer lifetime is the percentage of life used,
         * based on P/E cycles. If "preserve user space" is enabled, writes to
         * normal user space also consume WB life since the area is shared.
         */
        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                goto out;

        if (!is_life_time_left) {
                ufshci_printf(ctrlr,
                    "There is no WriteBooster buffer lifetime left.\n");
                goto out;
        }

        ufshci_printf(ctrlr, "WriteBooster Enabled\n");
        return (0);
out:
        ufshci_dev_disable_write_booster(ctrlr);
        return (error);
}

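/* Read the device's current power mode attribute. */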
int
ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr,
    uint8_t *power_mode)
{
        uint64_t value;
        int err;

        err = ufshci_dev_read_attribute(ctrlr, UFSHCI_ATTR_B_CURRENT_POWER_MODE,
            /*index*/ 0, /*selector*/ 0, &value);
        if (err)
                return (err);

        *power_mode = (uint8_t)value;

        return (0);
}

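/* Enter link hibernation (HIBERN8) and wait for the transition to complete. */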
static int
ufshci_dev_hibernate_enter(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_enter(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

static int
ufshci_dev_hibernate_exit(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_exit(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}

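/*
 * Move the UIC link to the requested state (active, hibernate or off),
 * performing the hibernation enter/exit handshakes as needed.
 */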
int
ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr,
    enum ufshci_uic_link_state target_state)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error = 0;

        if (dev->link_state == target_state)
                return (0);

        switch (target_state) {
        case UFSHCI_UIC_LINK_STATE_OFF:
                error = ufshci_dev_hibernate_enter(ctrlr);
                if (error)
                        break;
                error = ufshci_ctrlr_disable(ctrlr);
                break;
        case UFSHCI_UIC_LINK_STATE_ACTIVE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_HIBERNATE)
                        error = ufshci_dev_hibernate_exit(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_HIBERNATE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_ACTIVE)
                        error = ufshci_dev_hibernate_enter(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_BROKEN:
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error)
                return (error);

        dev->link_state = target_state;

        return (0);
}