/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>

#include <sys/kassert.h>
#include "ufshci_private.h"

static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);

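/*
 * Queue operations for the legacy Single Doorbell (SDB) register interface.
 * Both the UTP Transfer Request queue and the UTP Task Management Request
 * queue currently use this implementation.
 */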
static const struct ufshci_qops sdb_qops = {
        .construct = ufshci_req_sdb_construct,
        .destroy = ufshci_req_sdb_destroy,
        .get_hw_queue = ufshci_req_sdb_get_hw_queue,
        .enable = ufshci_req_sdb_enable,
        .reserve_slot = ufshci_req_sdb_reserve_slot,
        .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
        .ring_doorbell = ufshci_req_sdb_ring_doorbell,
        .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
        .process_cpl = ufshci_req_sdb_process_cpl,
        .get_inflight_io = ufshci_req_sdb_get_inflight_io,
};

int
ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
{
        struct ufshci_req_queue *req_queue;
        int error;

        /*
         * The UTP Task Management Request queue only supports the legacy
         * Single Doorbell (SDB) mode.
         */
        req_queue = &ctrlr->task_mgmt_req_queue;
        req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
        req_queue->qops = sdb_qops;

        error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
            /*is_task_mgmt*/ true);

        return (error);
}

void
ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
{
        ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
            &ctrlr->task_mgmt_req_queue);
}

int
ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
{
        return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
            &ctrlr->task_mgmt_req_queue));
}

int
ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
{
        struct ufshci_req_queue *req_queue;
        int error;

        /*
         * MCQ mode is not supported yet, so the UTP Transfer Request queue
         * defaults to legacy Single Doorbell (SDB) mode.
         * TODO: Determine the queue mode from the controller's Capability
         * Registers once MCQ support is added.
         */
        req_queue = &ctrlr->transfer_req_queue;
        req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
        req_queue->qops = sdb_qops;

        error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
            /*is_task_mgmt*/ false);

        return (error);
}

void
ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
{
        ctrlr->transfer_req_queue.qops.destroy(ctrlr,
            &ctrlr->transfer_req_queue);
}

int
ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
{
        return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
            &ctrlr->transfer_req_queue));
}

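/*
 * Check a completed command for errors by inspecting both the request
 * descriptor's Overall Command Status (OCS) and the response code in the
 * Response UPIU header. Returns true if either indicates a failure.
 */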
static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
        bool is_error = false;

        /* Check request descriptor */
        if (ocs != UFSHCI_DESC_SUCCESS) {
                ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
                is_error = true;
        }

        /* Check response UPIU header */
        if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
                ufshci_printf(req_queue->ctrlr,
                    "Invalid response code = 0x%x\n",
                    response->header.response);
                is_error = true;
        }

        return (is_error);
}

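/*
 * Complete a tracker without hardware involvement: stamp the given OCS and
 * response code into the descriptor and Response UPIU, then run the normal
 * completion path. Used to fail or abort commands from software.
 */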
static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
        struct ufshci_utp_xfer_req_desc *desc;
        struct ufshci_upiu_header *resp_header;

        mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

        resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
        resp_header->response = rc;

        desc = &tr->hwq->utrd[tr->slot_num];
        desc->overall_command_status = ocs;

        ufshci_req_queue_complete_tracker(tr);
}

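/*
 * Complete a request that never reached the hardware. A synthetic completion
 * is built from the given OCS and response code, the callback (if any) is
 * invoked with it, and the request is freed.
 */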
static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
        struct ufshci_completion cpl;
        bool error;

        memset(&cpl, 0, sizeof(cpl));
        cpl.response_upiu.header.response = rc;
        error = ufshci_req_queue_response_is_error(req_queue, ocs,
            &cpl.response_upiu);

        if (error) {
                ufshci_printf(req_queue->ctrlr,
                    "Manual complete request error: 0x%x\n", error);
        }

        if (req->cb_fn)
                req->cb_fn(req->cb_arg, &cpl, error);

        ufshci_free_request(req);
}

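/*
 * Fail all outstanding commands on the given hardware queue. Reserved slots
 * are completed as requests, scheduled slots as trackers; the queue lock is
 * dropped around each manual completion because the completion path must not
 * be entered with it held.
 */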
void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
        struct ufshci_req_queue *req_queue;
        struct ufshci_tracker *tr;
        struct ufshci_request *req;
        int i;

        if (!mtx_initialized(&hwq->qlock))
                return;

        mtx_lock(&hwq->qlock);

        req_queue = &ctrlr->transfer_req_queue;

        for (i = 0; i < req_queue->num_entries; i++) {
                tr = hwq->act_tr[i];
                req = tr->req;

                if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
                        mtx_unlock(&hwq->qlock);
                        ufshci_req_queue_manual_complete_request(req_queue, req,
                            UFSHCI_DESC_ABORTED,
                            UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                        mtx_lock(&hwq->qlock);
                } else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
                        /*
                         * Do not remove the tracker. The abort_tracker path
                         * will do that for us.
                         */
                        mtx_unlock(&hwq->qlock);
                        ufshci_req_queue_manual_complete_tracker(tr,
                            UFSHCI_DESC_ABORTED,
                            UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                        mtx_lock(&hwq->qlock);
                }
        }

        mtx_unlock(&hwq->qlock);
}

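/*
 * Handle completion of a single tracker: sync the command descriptor, copy
 * out the Response UPIU, check OCS and the response code for errors, invoke
 * the request callback, and release the slot. Retries are not implemented
 * yet, so every completion currently finishes the request.
 */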
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
        struct ufshci_req_queue *req_queue = tr->req_queue;
        struct ufshci_request *req = tr->req;
        struct ufshci_completion cpl;
        struct ufshci_utp_xfer_req_desc *desc;
        uint8_t ocs;
        bool retry, error, retriable;

        mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

        bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        cpl.size = tr->response_size;
        memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);

        desc = &tr->hwq->utrd[tr->slot_num];
        ocs = desc->overall_command_status;

        error = ufshci_req_queue_response_is_error(req_queue, ocs,
            &cpl.response_upiu);

        /* TODO: Implement retry */
        // retriable = ufshci_completion_is_retry(cpl);
        retriable = false;
        retry = error && retriable &&
            req->retries < req_queue->ctrlr->retry_count;
        if (retry)
                tr->hwq->num_retries++;
        if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
                tr->hwq->num_failures++;

        KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
        KASSERT(cpl.response_upiu.header.task_tag ==
                req->request_upiu.header.task_tag,
            ("response task_tag does not match request task_tag\n"));

        if (!retry) {
                if (req->payload_valid) {
                        bus_dmamap_sync(req_queue->dma_tag_payload,
                            tr->payload_dma_map,
                            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                }
                /* Copy response from the command descriptor */
                if (req->cb_fn)
                        req->cb_fn(req->cb_arg, &cpl, error);
        }

        mtx_lock(&tr->hwq->qlock);

        /* Clear the UTRL Completion Notification register */
        req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

        if (retry) {
                req->retries++;
                ufshci_req_queue_submit_tracker(req_queue, tr,
                    req->data_direction);
        } else {
                if (req->payload_valid) {
                        bus_dmamap_unload(req_queue->dma_tag_payload,
                            tr->payload_dma_map);
                }

                /* Clear tracker */
                ufshci_free_request(req);
                tr->req = NULL;
                tr->slot_state = UFSHCI_SLOT_STATE_FREE;
        }

        mtx_unlock(&tr->hwq->qlock);
}

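/*
 * Dispatch completion processing to the queue-mode-specific handler
 * (SDB today; MCQ once it is supported).
 */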
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
        return (req_queue->qops.process_cpl(req_queue));
}

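/*
 * bus_dma callback that fills the PRDT with the DMA segments of the payload.
 * Each segment becomes one PRDT entry holding the 64-bit bus address (split
 * into lower and upper halves) and the segment length minus one in the data
 * byte count field.
 */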
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
        struct ufshci_tracker *tr = arg;
        struct ufshci_prdt_entry *prdt_entry;
        int i;

        /*
         * If the mapping operation failed, return immediately. The caller
         * is responsible for detecting the error status and failing the
         * tracker manually.
         */
        if (error != 0) {
                ufshci_printf(tr->req_queue->ctrlr,
                    "Failed to map payload %d\n", error);
                return;
        }

        prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

        tr->prdt_entry_cnt = nseg;

        for (i = 0; i < nseg; i++) {
                prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
                    0xffffffff;
                prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
                    32;
                prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

                ++prdt_entry;
        }

        bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

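/*
 * Build the PRDT for a request carrying a data payload. The payload is
 * loaded into the tracker's DMA map; on failure the tracker is completed
 * manually with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES so the request does not
 * hang.
 */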
static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
        struct ufshci_request *req = tr->req;
        struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
        int error;

        tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

        memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

        /* Fill the PRDT entries with the payload */
        error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
            tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
            BUS_DMA_NOWAIT);
        if (error != 0) {
                /*
                 * The dmamap operation failed, so we manually fail the
                 * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
                 *
                 * ufshci_req_queue_manual_complete_tracker must not be called
                 * with the req_queue lock held.
                 */
                ufshci_printf(tr->req_queue->ctrlr,
                    "bus_dmamap_load_mem returned with error: 0x%x!\n", error);

                mtx_unlock(&tr->hwq->qlock);
                ufshci_req_queue_manual_complete_tracker(tr,
                    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
                    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
                mtx_lock(&tr->hwq->qlock);
        }
}

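/*
 * Fill in a UTP Transfer Request Descriptor. The Response UPIU offset and
 * length and the PRDT offset are converted from bytes to dwords; the PRDT
 * length field holds the number of PRDT entries.
 */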
static void
ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
        uint8_t command_type;
        /* Value to convert bytes to dwords */
        const uint16_t dword_size = 4;

        /*
         * Set command type to UFS storage.
         * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
         */
        command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;

        memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
        desc->command_type = command_type;
        desc->data_direction = data_direction;
        desc->interrupt = true;
        /* Set the initial value to Invalid. */
        desc->overall_command_status = UFSHCI_OCS_INVALID;
        desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
            0xffffffff);
        desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
            32);

        desc->response_upiu_offset = response_off / dword_size;
        desc->response_upiu_length = response_len / dword_size;
        desc->prdt_offset = prdt_off / dword_size;
        desc->prdt_length = prdt_entry_cnt;
}

/*
 * Submit the tracker to the hardware.
 */
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
        struct ufshci_controller *ctrlr = req_queue->ctrlr;
        struct ufshci_request *req = tr->req;
        uint64_t ucd_paddr;
        uint16_t request_len, response_off, response_len;
        uint8_t slot_num = tr->slot_num;

        mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

        /* TODO: Check timeout */

        request_len = req->request_size;
        response_off = UFSHCI_UTP_XFER_REQ_SIZE;
        response_len = req->response_size;

        /* Prepare UTP Command Descriptor */
        memcpy(tr->ucd, &req->request_upiu, request_len);
        memset((uint8_t *)tr->ucd + response_off, 0, response_len);

        /* Prepare PRDT */
        if (req->payload_valid)
                ufshci_req_queue_prepare_prdt(tr);

        bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Prepare UTP Transfer Request Descriptor. */
        ucd_paddr = tr->ucd_bus_addr;
        ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
            data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
            tr->prdt_entry_cnt);

        bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

        /* Ring the doorbell */
        req_queue->qops.ring_doorbell(ctrlr, tr);
}

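/*
 * Reserve a slot, attach the request to its tracker, and hand it to the
 * hardware. Called with the hardware queue lock held.
 */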
static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
        struct ufshci_tracker *tr = NULL;
        int error;

        mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

        error = req_queue->qops.reserve_slot(req_queue, &tr);
        if (error != 0) {
                ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
                return (error);
        }
        KASSERT(tr, ("There is no tracker allocated."));

        if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
            tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
                return (EBUSY);

        /* Set the task_tag value to slot_num for traceability. */
        req->request_upiu.header.task_tag = tr->slot_num;

        tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
        tr->response_size = req->response_size;
        tr->deadline = SBT_MAX;
        tr->req = req;

        ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

        return (0);
}

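/*
 * Public entry point for submitting a request: look up the hardware queue,
 * take its lock, and submit. The is_admin flag is not used yet; admin
 * requests are expected to move to a dedicated queue once MCQ support is
 * added (see the TODO below).
 */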
int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, bool is_admin)
{
        struct ufshci_hw_queue *hwq;
        uint32_t error;

        /* TODO: MCQs should use a separate Admin queue. */

        hwq = req_queue->qops.get_hw_queue(req_queue);
        KASSERT(hwq, ("There is no HW queue allocated."));

        mtx_lock(&hwq->qlock);
        error = _ufshci_req_queue_submit_request(req_queue, req);
        mtx_unlock(&hwq->qlock);

        return (error);
}