/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/kassert.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>

#include "ufshci_private.h"

static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);

static const struct ufshci_qops sdb_utmr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

static const struct ufshci_qops sdb_utr_qops = {
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.enable = ufshci_req_sdb_enable,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

int
ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * The UTP Task Management Request queue only supports the legacy
	 * single doorbell (SDB) mode.
	 */
	req_queue = &ctrlr->task_mgmt_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utmr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
	    /*is_task_mgmt*/ true);

	return (error);
}

void
ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
	    &ctrlr->task_mgmt_req_queue);
}

int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
	    &ctrlr->task_mgmt_req_queue));
}
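
/*
 * The UTP Transfer Request (UTR) queue carries the actual command traffic.
 * Like the task management queue above, it currently runs in single
 * doorbell (SDB) mode; the qops vector keeps the queue-mode specifics out
 * of the common submission/completion paths so an MCQ implementation can
 * be dropped in later.
 */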
int
ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *req_queue;
	int error;

	/*
	 * MCQ mode is not supported yet, so default to single doorbell (SDB)
	 * mode.
	 * TODO: Determine the queue mode by checking the Capability
	 * Registers.
	 */
	req_queue = &ctrlr->transfer_req_queue;
	req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
	req_queue->qops = sdb_utr_qops;

	error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
	    /*is_task_mgmt*/ false);

	return (error);
}

void
ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
	ctrlr->transfer_req_queue.qops.destroy(ctrlr,
	    &ctrlr->transfer_req_queue);
}

int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
	return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
	    &ctrlr->transfer_req_queue));
}

static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
	bool is_error = false;

	/* Check the Overall Command Status (OCS) of the request descriptor. */
	if (ocs != UFSHCI_DESC_SUCCESS) {
		ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
		is_error = true;
	}

	/* Check the response code in the response UPIU header. */
	if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
		ufshci_printf(req_queue->ctrlr,
		    "Invalid response code = 0x%x\n",
		    response->header.response);
		is_error = true;
	}

	return (is_error);
}

static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
	struct ufshci_utp_xfer_req_desc *desc;
	struct ufshci_upiu_header *resp_header;

	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

	/* Fake a completion by filling in the status fields ourselves. */
	resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
	resp_header->response = rc;

	desc = &tr->hwq->utrd[tr->slot_num];
	desc->overall_command_status = ocs;

	ufshci_req_queue_complete_tracker(tr);
}

static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
	struct ufshci_completion cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.response_upiu.header.response = rc;
	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	if (error) {
		ufshci_printf(req_queue->ctrlr,
		    "Manual complete request error: 0x%x\n", error);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl, error);

	ufshci_free_request(req);
}

void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	struct ufshci_req_queue *req_queue;
	struct ufshci_tracker *tr;
	struct ufshci_request *req;
	int i;

	if (!mtx_initialized(&hwq->qlock))
		return;

	mtx_lock(&hwq->qlock);

	req_queue = &ctrlr->transfer_req_queue;

	for (i = 0; i < req_queue->num_entries; i++) {
		tr = hwq->act_tr[i];
		req = tr->req;

		if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_request(req_queue, req,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		} else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
			/*
			 * Do not remove the tracker. The abort_tracker path
			 * will do that for us.
			 */
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_tracker(tr,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		}
	}

	mtx_unlock(&hwq->qlock);
}
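
/*
 * Completion path for a tracker that the controller (or the manual
 * completion helpers above) has finished with: copy out the response UPIU
 * and OCS, decide whether the request should be retried, deliver the
 * caller's callback, and finally recycle the slot.
 */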
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
	struct ufshci_req_queue *req_queue = tr->req_queue;
	struct ufshci_request *req = tr->req;
	struct ufshci_completion cpl;
	uint8_t ocs;
	bool retry, error, retriable;

	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

	/*
	 * Copy the response from the UTP Task Management Request Descriptor
	 * or the UTP Command Descriptor.
	 */
	if (req_queue->is_task_mgmt) {
		cpl.size = tr->response_size;
		memcpy(&cpl.response_upiu,
		    (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
		    cpl.size);

		ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
	} else {
		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cpl.size = tr->response_size;
		memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
		    cpl.size);

		ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
	}

	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	/* TODO: Implement retry. */
	/* retriable = ufshci_completion_is_retry(cpl); */
	retriable = false;
	retry = error && retriable &&
	    req->retries < req_queue->ctrlr->retry_count;
	if (retry)
		tr->hwq->num_retries++;
	if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
		tr->hwq->num_failures++;

	KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
	KASSERT(cpl.response_upiu.header.task_tag ==
	    req->request_upiu.header.task_tag,
	    ("response task_tag does not match request task_tag\n"));

	if (!retry) {
		if (req->payload_valid) {
			bus_dmamap_sync(req_queue->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		/* Deliver the copied response to the caller's callback. */
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, &cpl, error);
	}

	mtx_lock(&tr->hwq->qlock);

	/* Clear the UTRL completion notification register. */
	req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

	if (retry) {
		req->retries++;
		ufshci_req_queue_submit_tracker(req_queue, tr,
		    req->data_direction);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}

		/* Clear the tracker and return the slot to the free pool. */
		ufshci_free_request(req);
		tr->req = NULL;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;
	}

	mtx_unlock(&tr->hwq->qlock);
}

bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
	return (req_queue->qops.process_cpl(req_queue));
}
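
/*
 * bus_dma callback that fills the Physical Region Description Table (PRDT)
 * of the tracker's UTP Command Descriptor. Each PRDT entry describes one
 * DMA segment: the 64-bit segment address split into lower and upper
 * 32-bit halves, plus a zero-based byte count (hence the "ds_len - 1"
 * below).
 */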
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_tracker *tr = arg;
	struct ufshci_prdt_entry *prdt_entry;
	int i;

	/*
	 * If the mapping operation failed, return immediately. The caller
	 * is responsible for detecting the error status and failing the
	 * tracker manually.
	 */
	if (error != 0) {
		ufshci_printf(tr->req_queue->ctrlr,
		    "Failed to map payload %d\n", error);
		return;
	}

	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

	tr->prdt_entry_cnt = nseg;

	for (i = 0; i < nseg; i++) {
		prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
		    0xffffffff;
		prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
		    32;
		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

		++prdt_entry;
	}

	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
	struct ufshci_request *req = tr->req;
	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
	int error;

	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

	/* Fill the PRDT entries with the payload. */
	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		/*
		 * The dmamap operation failed, so we manually fail the
		 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
		 *
		 * ufshci_req_queue_manual_complete_tracker must not be called
		 * with the req_queue lock held.
		 */
		ufshci_printf(tr->req_queue->ctrlr,
		    "bus_dmamap_load_mem returned with error: 0x%x!\n", error);

		mtx_unlock(&tr->hwq->qlock);
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
		mtx_lock(&tr->hwq->qlock);
	}
}

static void
ufshci_req_queue_fill_utmr_descriptor(
    struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
{
	memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;

	memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
}

static void
ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
	uint8_t command_type;
	/* Divisor for converting byte offsets and lengths to dwords. */
	const uint16_t dword_size = 4;

	/*
	 * Set the command type to UFS storage.
	 * The UFS 4.1 spec defines 'UFS Storage' as the only command type.
	 */
	command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;

	memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
	desc->command_type = command_type;
	desc->data_direction = data_direction;
	desc->interrupt = true;
	/* Set the initial value to Invalid. */
	desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
	    0xffffffff);
	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
	    32);

	/* The response UPIU and PRDT fields are expressed in dwords. */
	desc->response_upiu_offset = response_off / dword_size;
	desc->response_upiu_length = response_len / dword_size;
	desc->prdt_offset = prdt_off / dword_size;
	desc->prdt_length = prdt_entry_cnt;
}
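
/*
 * Submit the tracker to the hardware: build the in-memory descriptors,
 * sync the DMA maps, and ring the queue's doorbell. For a transfer
 * request, the UTP Command Descriptor is laid out as the request UPIU at
 * offset 0, the response UPIU at UFSHCI_UTP_XFER_REQ_SIZE, and the PRDT
 * after that, which is where the response_off value below and tr->prdt_off
 * come from.
 */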
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
	struct ufshci_controller *ctrlr = req_queue->ctrlr;
	struct ufshci_request *req = tr->req;
	uint64_t ucd_paddr;
	uint16_t request_len, response_off, response_len;
	uint8_t slot_num = tr->slot_num;

	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

	/* TODO: Check timeout */

	if (req_queue->is_task_mgmt) {
		/* Prepare the UTP Task Management Request Descriptor. */
		ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
		    req);
	} else {
		request_len = req->request_size;
		response_off = UFSHCI_UTP_XFER_REQ_SIZE;
		response_len = req->response_size;

		/* Prepare the UTP Command Descriptor. */
		memcpy(tr->ucd, &req->request_upiu, request_len);
		memset((uint8_t *)tr->ucd + response_off, 0, response_len);

		/* Prepare the PRDT. */
		if (req->payload_valid)
			ufshci_req_queue_prepare_prdt(tr);

		/* Prepare the UTP Transfer Request Descriptor. */
		ucd_paddr = tr->ucd_bus_addr;
		ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
		    data_direction, ucd_paddr, response_off, response_len,
		    tr->prdt_off, tr->prdt_entry_cnt);

		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

	/* Ring the doorbell. */
	req_queue->qops.ring_doorbell(ctrlr, tr);
}

static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
	struct ufshci_tracker *tr = NULL;
	int error;

	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

	error = req_queue->qops.reserve_slot(req_queue, &tr);
	if (error != 0) {
		ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
		return (error);
	}
	KASSERT(tr, ("There is no tracker allocated."));

	if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
	    tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
		return (EBUSY);

	/* Set the task_tag value to slot_num for traceability. */
	req->request_upiu.header.task_tag = tr->slot_num;

	tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
	tr->response_size = req->response_size;
	tr->deadline = SBT_MAX;
	tr->req = req;

	ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

	return (0);
}

int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin)
{
	struct ufshci_hw_queue *hwq;
	uint32_t error;

	/* TODO: MCQs should use a separate admin queue. */

	hwq = req_queue->qops.get_hw_queue(req_queue);
	KASSERT(hwq, ("There is no HW queue allocated."));

	mtx_lock(&hwq->qlock);
	error = _ufshci_req_queue_submit_request(req_queue, req);
	mtx_unlock(&hwq->qlock);

	return (error);
}