/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"
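
/*
 * Read a descriptor from the device: issue a Query UPIU (READ DESCRIPTOR)
 * request, poll for completion, and copy the descriptor payload from the
 * query response into the caller-supplied buffer.
 */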
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr,
                    "Failed to send Read Descriptor query request!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

static int
ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
    struct ufshci_unit_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
            desc, sizeof(struct ufshci_unit_descriptor)));
}
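
/*
 * Read a device flag (e.g. fDeviceInit) with a Query UPIU (READ FLAG)
 * request and return its current value through 'flag'.
 */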
static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}
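
/* Set a device flag to true with a Query UPIU (SET FLAG) request. */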
static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}
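
/* Clear a device flag with a Query UPIU (CLEAR FLAG) request. */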
static int
ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}
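
/*
 * Read a device attribute with a Query UPIU (READ ATTRIBUTE) request and
 * return its value through 'value'.
 */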
static int
ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t *value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
                return (ENXIO);
        }

        *value = status.cpl.response_upiu.query_response_upiu.value_64;

        return (0);
}
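
/* Write a device attribute with a Query UPIU (WRITE ATTRIBUTE) request. */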
static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}
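
/*
 * Start device initialization by setting the fDeviceInit flag, then poll
 * the flag until the device clears it or the device_init_timeout_in_ms
 * deadline expires. The polling interval grows geometrically and is capped
 * at 1 ms.
 */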
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /* Wait for the UFSHCI_FLAG_F_DEVICE_INIT flag to change */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "device init did not become %d "
                            "within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}
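
/*
 * Reset the UFS device with a DME endpoint reset, then re-run device
 * initialization.
 */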
int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}
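
/*
 * Program the device's reference clock frequency by writing ctrlr->ref_clk
 * to the bRefClkFreq attribute.
 */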
int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        index = 0;    /* bRefClkFreq is a device type attribute */
        selector = 0; /* bRefClkFreq is a device type attribute */

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}
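
/*
 * Apply UniPro (PA layer) settings that must be in place after link
 * startup. Currently this only handles the quirk that lengthens the peer's
 * PA_TActivate.
 */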
int
ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
{
        uint32_t pa_granularity, peer_pa_granularity;
        uint32_t t_activate, peer_t_activate;

        if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
                /*
                 * PA_Granularity: granularity for PA_TActivate and
                 * PA_Hibern8Time
                 * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
                 */
                if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity,
                    &pa_granularity))
                        return (ENXIO);
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_pa_granularity))
                        return (ENXIO);

                /*
                 * PA_TActivate: time to wait before activating a burst in
                 * order to wake up the peer M-RX. UniPro automatically sets
                 * timing information such as PA_TActivate through the
                 * PACP_CAP_EXT1_ind command during the Link Startup
                 * operation.
                 */
                if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
                        return (ENXIO);
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate,
                    &peer_t_activate))
                        return (ENXIO);

                /*
                 * Intel Lake-field UFSHCI has a quirk: 200us must be added
                 * to the peer's PA_TActivate.
                 */
                if (pa_granularity == peer_pa_granularity) {
                        peer_t_activate = t_activate + 2;
                        if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
                            peer_t_activate))
                                return (ENXIO);
                }
        }

        return (0);
}
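
/*
 * Negotiate the UIC power mode: activate all available TX/RX lanes, select
 * the highest supported HS gear and the HS series, program the timeout
 * values, then request Fast Mode in both directions and wait for the power
 * mode change to complete.
 */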
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        uint32_t power_mode, peer_granularity;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get the max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to the max */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
                /* Before changing gears, first change the number of lanes. */
                if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
                        return (ENXIO);
                if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                        return (ENXIO);

                /* Wait for the power mode change to complete. */
                if (ufshci_uic_power_mode_ready(ctrlr)) {
                        ufshci_reg_dump(ctrlr);
                        return (ENXIO);
                }
        }

        /* Set HS-GEAR to the max gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set the TX/RX PWRMode */
        power_mode = (fast_mode << rx_bit_shift) | fast_mode;
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lake-field UFSHCI has a quirk: wait 1250us and clear
                 * the DME error.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
                    &peer_granularity))
                        return (ENXIO);
        }

        return (0);
}
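
/*
 * Write the cached AHIT value to the controller to (re)enable auto
 * hibernation, if supported.
 */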
void
ufshci_dev_enable_auto_hibernate(struct ufshci_controller *ctrlr)
{
        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        ufshci_mmio_write_4(ctrlr, ahit, ctrlr->ufs_dev.ahit);
}
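
/*
 * Determine whether auto hibernation is usable (the capability bit is set
 * and no quirk forbids it), build the AHIT value, and enable it.
 */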
void
ufshci_dev_init_auto_hibernate(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.auto_hibernation_supported =
            UFSHCIV(UFSHCI_CAP_REG_AUTOH8, ctrlr->cap) &&
            !(ctrlr->quirks & UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE);

        if (!ctrlr->ufs_dev.auto_hibernation_supported)
                return;

        /* The default value for auto hibernation is 150 ms */
        ctrlr->ufs_dev.ahit = 0;
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_AH8ITV, 150);
        ctrlr->ufs_dev.ahit |= UFSHCIF(UFSHCI_AHIT_REG_TS, 3);

        ufshci_dev_enable_auto_hibernate(ctrlr);
}

void
ufshci_dev_init_uic_link_state(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.link_state = UFSHCI_UIC_LINK_STATE_ACTIVE;
}
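
/*
 * Record whether UFS device power mode changes are possible. This requires
 * the "UFS Device" well-known LUN, looked up via ufshci_sim_find_periph().
 */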
int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        ctrlr->ufs_dev.power_mode_supported = false;

        if (ctrlr->quirks & UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS)
                return (0);

        ctrlr->ufs_device_wlun_periph = ufshci_sim_find_periph(ctrlr,
            UFSHCI_WLUN_UFS_DEVICE);
        if (ctrlr->ufs_device_wlun_periph == NULL) {
                ufshci_printf(ctrlr,
                    "Well-known LUN `UFS Device (0x50)` not found\n");
                return (0);
        }

        ctrlr->ufs_dev.power_mode_supported = true;
        ctrlr->ufs_dev.power_mode = UFSHCI_DEV_PWR_ACTIVE;

        return (0);
}
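
/*
 * Read the Device and Geometry descriptors, print the device's spec
 * version, enabled LUN count, and total capacity, and derive the maximum
 * LUN count from bMaxNumberLU.
 */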
int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * The device density unit (kDeviceDensityUnit) is defined in the
         * spec as 512. qTotalRawDeviceCapacity uses big-endian byte
         * ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
                device_density_unit);

        return (0);
}
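
/*
 * Enable WriteBooster, including buffer flushing both during hibernate and
 * during normal operation.
 */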
static int
ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Enable WriteBooster */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = true;

        /* Enable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_set_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Enable WriteBooster buffer flush */
        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to enable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = true;

        return (0);
}
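
/*
 * Disable WriteBooster buffer flushing and WriteBooster itself, undoing
 * ufshci_dev_enable_write_booster().
 */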
static int
ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error;

        /* Disable WriteBooster buffer flush */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush\n");
                return (error);
        }
        dev->is_wb_flush_enabled = false;

        /* Disable WriteBooster buffer flush during hibernate */
        error = ufshci_dev_clear_flag(ctrlr,
            UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
        if (error) {
                ufshci_printf(ctrlr,
                    "Failed to disable WriteBooster buffer flush during hibernate\n");
                return (error);
        }

        /* Disable WriteBooster */
        error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
        if (error) {
                ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
                return (error);
        }
        dev->is_wb_enabled = false;

        return (0);
}
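
/*
 * Read the WriteBooster buffer lifetime estimation attribute and report
 * whether the buffer still has usable life left.
 */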
static int
ufshci_dev_is_write_booster_buffer_life_time_left(
    struct ufshci_controller *ctrlr, bool *is_life_time_left)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint8_t buffer_lun;
        uint64_t life_time;
        uint32_t error;

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
        if (error)
                return (error);

        *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);

        return (0);
}

/*
 * This function is not yet in use. It will be used when suspend/resume is
 * implemented.
 */
static __unused int
ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
    bool *need_flush)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        bool is_life_time_left = false;
        uint64_t available_buffer_size, current_buffer_size;
        uint8_t buffer_lun;
        uint32_t error;

        *need_flush = false;

        if (!dev->is_wb_enabled)
                return (0);

        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                return (error);

        if (!is_life_time_left)
                return (ufshci_dev_disable_write_booster(ctrlr));

        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
                buffer_lun = dev->wb_dedicated_lu;
        else
                buffer_lun = 0;

        error = ufshci_dev_read_attribute(ctrlr,
            UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
            &available_buffer_size);
        if (error)
                return (error);

        switch (dev->wb_user_space_config_option) {
        case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
                *need_flush = (available_buffer_size <=
                    UFSHCI_ATTR_WB_AVAILABLE_10);
                break;
        case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
                /*
                 * In PRESERVE USER SPACE mode, a flush should be performed
                 * when the current buffer size is greater than 0 and the
                 * available buffer size has fallen below
                 * write_booster_flush_threshold.
                 */
                error = ufshci_dev_read_attribute(ctrlr,
                    UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
                    &current_buffer_size);
                if (error)
                        return (error);

                if (current_buffer_size == 0)
                        return (0);

                *need_flush = (available_buffer_size <
                    dev->write_booster_flush_threshold);
                break;
        default:
                ufshci_printf(ctrlr,
                    "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
                return (EINVAL);
        }

        /*
         * TODO: Handle the WRITEBOOSTER_FLUSH_NEEDED exception case from the
         * wExceptionEventStatus attribute.
         */

        return (0);
}
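
/*
 * Probe and configure WriteBooster: check the feature support bit, enable
 * WriteBooster, locate the backing buffer (shared or LU-dedicated), compute
 * its size, set the flush threshold, and verify that the buffer has
 * lifetime left. On any failure WriteBooster is disabled again.
 */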
int
ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        uint32_t extended_ufs_feature_support;
        uint32_t alloc_units = 0;
        struct ufshci_unit_descriptor unit_desc;
        uint8_t lun;
        bool is_life_time_left;
        uint32_t mega_byte = 1024 * 1024;
        uint32_t error = 0;

        extended_ufs_feature_support = be32toh(
            dev->dev_desc.dExtendedUfsFeaturesSupport);
        if (!(extended_ufs_feature_support &
            UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
                /* This device does not support WriteBooster */
                return (0);
        }

        if (ufshci_dev_enable_write_booster(ctrlr))
                return (0);

        /* Get the WriteBooster buffer parameters */
        dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
        dev->wb_user_space_config_option =
            dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;

        /*
         * Find the size of the write buffer.
         * With LU-dedicated (00h), the WriteBooster buffer is assigned
         * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
         * uses a single device-wide buffer shared by multiple LUs.
         */
        if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
                alloc_units = be32toh(
                    dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
                ufshci_printf(ctrlr,
                    "WriteBooster buffer type = Shared, alloc_units=%d\n",
                    alloc_units);
        } else if (dev->wb_buffer_type ==
            UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
                ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
                for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
                        /* Find the dedicated buffer using a unit descriptor */
                        if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
                            &unit_desc))
                                continue;

                        alloc_units = be32toh(
                            unit_desc.dLUNumWriteBoosterBufferAllocUnits);
                        if (alloc_units) {
                                dev->wb_dedicated_lu = lun;
                                break;
                        }
                }
        } else {
                ufshci_printf(ctrlr,
                    "Unsupported WriteBooster buffer type: 0x%x\n",
                    dev->wb_buffer_type);
                goto out;
        }

        if (alloc_units == 0) {
                ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
                goto out;
        }

        dev->wb_buffer_size_mb = alloc_units *
            dev->geo_desc.bAllocationUnitSize *
            (be32toh(dev->geo_desc.dSegmentSize)) /
            (mega_byte / UFSHCI_SECTOR_SIZE);

        /* Set to flush when 40% of the available buffer size remains */
        dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;

        /*
         * Check whether any WriteBooster buffer lifetime is left. The
         * lifetime is the percentage of life used based on P/E cycles. If
         * "preserve user space" is enabled, writes to normal user space also
         * consume WB life since the area is shared.
         */
        error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
            &is_life_time_left);
        if (error)
                goto out;

        if (!is_life_time_left) {
                ufshci_printf(ctrlr,
                    "There is no WriteBooster buffer life time left.\n");
                goto out;
        }

        ufshci_printf(ctrlr, "WriteBooster Enabled\n");
        return (0);
out:
        ufshci_dev_disable_write_booster(ctrlr);
        return (error);
}
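
/*
 * Read the device's current power mode from the bCurrentPowerMode
 * attribute.
 */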
int
ufshci_dev_get_current_power_mode(struct ufshci_controller *ctrlr,
    uint8_t *power_mode)
{
        uint64_t value;
        int err;

        err = ufshci_dev_read_attribute(ctrlr, UFSHCI_ATTR_B_CURRENT_POWER_MODE,
            /*index*/ 0, /*selector*/ 0, &value);
        if (err)
                return (err);

        *power_mode = (uint8_t)value;

        return (0);
}
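
/*
 * Request hibernate entry with the DME_HIBERNATE_ENTER UIC command and
 * wait until the controller reports the transition complete.
 */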
static int
ufshci_dev_hibernate_enter(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_enter(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}
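
/*
 * Request hibernate exit with the DME_HIBERNATE_EXIT UIC command and wait
 * until the controller reports the transition complete.
 */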
static int
ufshci_dev_hibernate_exit(struct ufshci_controller *ctrlr)
{
        int error;

        error = ufshci_uic_send_dme_hibernate_exit(ctrlr);
        if (error)
                return (error);

        return (ufshci_uic_hibernation_ready(ctrlr));
}
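
/*
 * Transition the UIC link to the requested state. Hibernate entry/exit is
 * only valid from the matching current state; OFF additionally disables
 * the controller.
 */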
int
ufshci_dev_link_state_transition(struct ufshci_controller *ctrlr,
    enum ufshci_uic_link_state target_state)
{
        struct ufshci_device *dev = &ctrlr->ufs_dev;
        int error = 0;

        if (dev->link_state == target_state)
                return (0);

        switch (target_state) {
        case UFSHCI_UIC_LINK_STATE_OFF:
                error = ufshci_dev_hibernate_enter(ctrlr);
                if (error)
                        break;
                error = ufshci_ctrlr_disable(ctrlr);
                break;
        case UFSHCI_UIC_LINK_STATE_ACTIVE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_HIBERNATE)
                        error = ufshci_dev_hibernate_exit(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_HIBERNATE:
                if (dev->link_state == UFSHCI_UIC_LINK_STATE_ACTIVE)
                        error = ufshci_dev_hibernate_enter(ctrlr);
                else
                        error = EINVAL;
                break;
        case UFSHCI_UIC_LINK_STATE_BROKEN:
                break;
        default:
                error = EINVAL;
                break;
        }

        if (error)
                return (error);

        dev->link_state = target_state;

        return (0);
}