/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>

#include "ufshci_private.h"
#include "ufshci_reg.h"

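/*
 * Read a descriptor from the device using a synchronous, polled Query
 * Request UPIU and copy the response payload into the caller's buffer.
 */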
static int
ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
    enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
    void *desc, size_t desc_size)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
        param.type = desc_type;
        param.index = index;
        param.selector = selector;
        param.value = 0;
        param.desc_size = desc_size;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
                return (ENXIO);
        }

        memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
            desc_size);

        return (0);
}

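/* Read the Device descriptor (index 0, selector 0). */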
static int
ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_device_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
            desc, sizeof(struct ufshci_device_descriptor)));
}

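/* Read the Geometry descriptor (index 0, selector 0). */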
static int
ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
    struct ufshci_geometry_descriptor *desc)
{
        return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
            0, desc, sizeof(struct ufshci_geometry_descriptor)));
}

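/* Read a device flag and return its current value through 'flag'. */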
static int
ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type, uint8_t *flag)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
                return (ENXIO);
        }

        *flag = status.cpl.response_upiu.query_response_upiu.flag_value;

        return (0);
}

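/* Set the given device flag via a Set Flag query request. */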
static int
ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
    enum ufshci_flags flag_type)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
        param.type = flag_type;
        param.index = 0;
        param.selector = 0;
        param.value = 0;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
                return (ENXIO);
        }

        return (0);
}

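/* Write a device attribute via a Write Attribute query request. */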
static int
ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
    enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
    uint64_t value)
{
        struct ufshci_completion_poll_status status;
        struct ufshci_query_param param;

        param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
        param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
        param.type = attr_type;
        param.index = index;
        param.selector = selector;
        param.value = value;

        status.done = 0;
        ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
            &status, param);
        ufshci_completion_poll(&status);
        if (status.error) {
                ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
                return (ENXIO);
        }

        return (0);
}

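/*
 * Kick off device initialization by setting the fDeviceInit flag, then
 * poll with exponential backoff (capped at 1 ms) until the device clears
 * the flag or the configured timeout expires.
 */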
int
ufshci_dev_init(struct ufshci_controller *ctrlr)
{
        int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
        sbintime_t delta_t = SBT_1US;
        uint8_t flag;
        int error;
        const uint8_t device_init_completed = 0;

        error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
        if (error)
                return (error);

        /* Wait for the device to clear the UFSHCI_FLAG_F_DEVICE_INIT flag. */
        while (1) {
                error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
                    &flag);
                if (error)
                        return (error);
                if (flag == device_init_completed)
                        break;
                if (timeout - ticks < 0) {
                        ufshci_printf(ctrlr,
                            "device init flag did not become %d "
                            "within %d ms\n",
                            device_init_completed,
                            ctrlr->device_init_timeout_in_ms);
                        return (ENXIO);
                }

                pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
                delta_t = min(SBT_1MS, delta_t * 3 / 2);
        }

        return (0);
}

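/* Reset the UFS device via a DME endpoint reset and re-run device init. */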
int
ufshci_dev_reset(struct ufshci_controller *ctrlr)
{
        if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
                return (ENXIO);

        return (ufshci_dev_init(ctrlr));
}

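/*
 * Program the device's bRefClkFreq attribute with the reference clock
 * frequency configured on the controller.
 */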
int
ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
{
        int error;
        uint8_t index, selector;

        /* bRefClkFreq is a device-level attribute, so index and selector are 0. */
        index = 0;
        selector = 0;

        error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
            index, selector, ctrlr->ref_clk);
        if (error)
                return (error);

        return (0);
}

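/*
 * Exchange UniPro layer parameters with the device: read the local and
 * peer UniPro versions, granularities, and PA_TActivate values, and apply
 * the peer PA_TActivate quirk where required.
 */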
205
206 int
ufshci_dev_init_unipro(struct ufshci_controller * ctrlr)207 ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
208 {
209 uint32_t pa_granularity, peer_pa_granularity;
210 uint32_t t_activate, pear_t_activate;
211
212 /*
213 * Unipro Version:
214 * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
215 * 1 = 1.40, 0 = Reserved
216 */
217 if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
218 &ctrlr->unipro_version))
219 return (ENXIO);
220 if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
221 &ctrlr->ufs_dev.unipro_version))
222 return (ENXIO);
223
224 /*
225 * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time
226 * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
227 */
228 if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
229 return (ENXIO);
230 if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
231 &peer_pa_granularity))
232 return (ENXIO);
233
234 /*
235 * PA_TActivate: Time to wait before activating a burst in order to
236 * wake-up peer M-RX
237 * UniPro automatically sets timing information such as PA_TActivate
238 * through the PACP_CAP_EXT1_ind command during Link Startup operation.
239 */
240 if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
241 return (ENXIO);
242 if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &pear_t_activate))
243 return (ENXIO);
244
245 if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
246 /*
247 * Intel Lake-field UFSHCI has a quirk. We need to add 200us to
248 * the PEER's PA_TActivate.
249 */
250 if (pa_granularity == peer_pa_granularity) {
251 pear_t_activate = t_activate + 2;
252 if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
253 pear_t_activate))
254 return (ENXIO);
255 }
256 }
257
258 return (0);
259 }
260
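/*
 * Negotiate the UIC (UniPro) power mode: select the maximum available
 * lanes and HS gear, program the power mode timeout values, and switch
 * both directions to Fast Mode using HS Series B.
 */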
int
ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
{
        /* HSSeries: A = 1, B = 2 */
        const uint32_t hs_series = 2;
        /*
         * TX/RX PWRMode:
         * - TX[3:0], RX[7:4]
         * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
         */
        const uint32_t fast_mode = 1;
        const uint32_t rx_bit_shift = 4;
        const uint32_t power_mode = (fast_mode << rx_bit_shift) | fast_mode;

        /* Update lanes with available TX/RX lanes */
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
            &ctrlr->max_tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
            &ctrlr->max_rx_lanes))
                return (ENXIO);

        /* Get the max HS-GEAR value */
        if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
            &ctrlr->max_rx_hs_gear))
                return (ENXIO);

        /* Set the data lanes to the maximum */
        ctrlr->tx_lanes = ctrlr->max_tx_lanes;
        ctrlr->rx_lanes = ctrlr->max_rx_lanes;
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
            ctrlr->tx_lanes))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
            ctrlr->rx_lanes))
                return (ENXIO);

        /* Set HS-GEAR to the maximum gear */
        ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
                return (ENXIO);

        /*
         * Set termination
         * - HS-MODE = ON / LS-MODE = OFF
         */
        if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
                return (ENXIO);

        /* Set HSSeries (A = 1, B = 2) */
        if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
                return (ENXIO);

        /* Set timeout values */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
            DL_FC0ProtectionTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
            DL_TC0ReplayTimeOutVal_Default))
                return (ENXIO);
        if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
            DL_AFC0ReqTimeOutVal_Default))
                return (ENXIO);

        /* Set the TX/RX PWRMode */
        if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
                return (ENXIO);

        /* Wait for the power mode change to complete. */
        if (ufshci_uic_power_mode_ready(ctrlr)) {
                ufshci_reg_dump(ctrlr);
                return (ENXIO);
        }

        /* Clear the 'Power Mode completion status' */
        ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS));

        if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
                /*
                 * Intel Lakefield UFSHCI has a quirk: we need to wait 1250us
                 * and clear the DME error.
                 */
                pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));

                /* Test with dme_peer_get to make sure there are no errors. */
                if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity, NULL))
                        return (ENXIO);
        }

        return (0);
}

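/* Placeholder until UFS power mode initialization is implemented. */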
int
ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
{
        /* TODO: Need to implement */

        return (0);
}

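/*
 * Read the Device and Geometry descriptors and derive device limits
 * (maximum LUN count, total raw capacity) from them.
 */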
int
ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
{
        struct ufshci_device *device = &ctrlr->ufs_dev;
        /*
         * The kDeviceDensityUnit is defined in the spec as 512 bytes.
         * qTotalRawDeviceCapacity uses big-endian byte ordering.
         */
        const uint32_t device_density_unit = 512;
        uint32_t ver;
        int error;

        error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
        if (error)
                return (error);

        ver = be16toh(device->dev_desc.wSpecVersion);
        ufshci_printf(ctrlr, "UFS device spec version %u.%u%u\n",
            UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
            UFSHCIV(UFSHCI_VER_REG_VS, ver));
        ufshci_printf(ctrlr, "%u enabled LUNs found\n",
            device->dev_desc.bNumberLU);

        error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
        if (error)
                return (error);

        if (device->geo_desc.bMaxNumberLU == 0) {
                device->max_lun_count = 8;
        } else if (device->geo_desc.bMaxNumberLU == 1) {
                device->max_lun_count = 32;
        } else {
                ufshci_printf(ctrlr,
                    "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
                    device->geo_desc.bMaxNumberLU);
                return (ENXIO);
        }
        ctrlr->max_lun_count = device->max_lun_count;

        ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
            be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
                device_density_unit);

        return (0);
}