// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <linux/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP			GENMASK(7, 0)
#define QCFGPTR				GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES		2
#define UFS_MCQ_MIN_READ_QUEUES		0
#define UFS_MCQ_MIN_POLL_QUEUES		0
#define QUEUE_EN_OFFSET			31
#define QUEUE_ID_OFFSET			16

#define MCQ_CFG_MAC_MASK		GENMASK(16, 8)
#define MCQ_ENTRY_SIZE_IN_DWORD		8
#define CQE_UCD_BA			GENMASK_ULL(63, 7)

/* Max MCQ register polling time in microseconds */
#define MCQ_POLL_US			500000

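/*
 * The *_queue_count_set() callbacks below validate writes to the queue count
 * module parameters, restricting them to the range
 * [type-specific minimum, num_possible_cpus()].
 */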
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");

/**
 * ufshcd_mcq_config_mac - Set the Max Active Commands (MAC).
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than the max_active_cmds to the device at
 * any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					   struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}

/**
 * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
 * registers.
 * @hba: per adapter instance
 *
 * Return: Start address of the MCQ Queue Config Registers in the HCI.
 */
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
{
	return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue depth on success; a negative error code on failure.
 *
 * MAC - Max Active Commands of the Host Controller (HC).
 * The HC won't send more commands than this to the device.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and the UFS device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	if (!hba->vops || !hba->vops->get_hba_mac) {
		/*
		 * Extract the maximum number of active transfer tasks value
		 * from the host controller capabilities register. This value
		 * is 0-based.
		 */
		hba->capabilities =
			ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
		mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
		mac++;
	} else {
		mac = hba->vops->get_hba_mac(hba);
	}
	if (mac < 0)
		goto err;

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for the UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);

err:
	dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
	return mac;
}

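/*
 * Distribute the hardware queues advertised by the controller among the
 * default (rw), read and poll blk-mq hctx types, honouring the module
 * parameters above. Returns -EOPNOTSUPP if the requested split does not fit.
 */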
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is a 0-based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	/*
	 * Device should support at least one I/O queue to handle device
	 * commands via hba->dev_cmd_queue.
	 */
	if (hba_maxq == poll_queues) {
		dev_err(hba->dev, "At least one non-poll queue required\n");
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

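/*
 * Allocate the SQE and CQE rings for every hardware queue. The rings are
 * device-managed (dmam_alloc_coherent()), so no explicit free is needed.
 */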
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_dma_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_dma_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

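/* Base address of the operation-and-runtime register group @n for queue @i */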
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
				  enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

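/* Read the Completion Queue Interrupt Status (CQIS) register of queue @i */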
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

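/* Write @val to the Completion Queue Interrupt Status register of queue @i */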
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

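/* Complete the command addressed by the CQE at the current head slot, if valid */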
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, cqe);

	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the CQE, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

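/*
 * Under the CQ lock, walk every slot of the completion queue, complete any
 * valid entry found, and resync the driver's head slot with the tail slot.
 */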
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

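/*
 * Under the CQ lock, process all new completion entries between the head and
 * the current hardware tail, then publish the new head to the controller.
 *
 * Return: the number of requests completed in this pass.
 */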
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

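/*
 * Program each hardware queue: SQ/CQ base addresses, doorbell and interrupt
 * status offsets, per-queue interrupt enable for non-poll queues, and finally
 * the enable bits in the SQ/CQ attribute registers.
 */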
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u16 qsize;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			       ufshcd_mcq_cfg_offset(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			       ufshcd_mcq_cfg_offset(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
			       ufshcd_mcq_cfg_offset(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
			       ufshcd_mcq_cfg_offset(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			       ufshcd_mcq_cfg_offset(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			       ufshcd_mcq_cfg_offset(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
			       ufshcd_mcq_cfg_offset(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
			       ufshcd_mcq_cfg_offset(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      ufshcd_mcq_cfg_offset(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      ufshcd_mcq_cfg_offset(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

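/* Select MCQ mode in REG_UFS_MEM_CFG and record that MCQ is active */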
void ufshcd_mcq_enable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);

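/* Deselect MCQ mode and record that MCQ is no longer active */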
void ufshcd_mcq_disable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
	hba->mcq_enabled = false;
}

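/* Set the ESI (Event Specific Interrupt) enable bit in REG_UFS_MEM_CFG */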
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

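/* Program the ESI base address registers from the MSI message address */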
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

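/*
 * Set up MCQ before the queues are brought up: decide the queue layout, let
 * the vendor ops map the MCQ resources, allocate the per-queue state, and
 * reserve the first hardware queue for device commands.
 */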
int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}
	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	/* The very first HW queue serves device commands */
	hba->dev_cmd_queue = &hba->uhq[0];

	host->host_tagset = 1;
	return 0;
}

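/* Ask the controller to stop fetching from this SQ and wait for it to stop */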
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

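/* Resume SQ fetching and wait for the stopped status bit to clear */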
static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
		if (!cmd)
			return -EINVAL;
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
		if (!hwq)
			return 0;
	} else {
		hwq = hba->dev_cmd_queue;
	}

	id = hwq->id;

	mutex_lock(&hwq->sq_mutex);

	/* stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		goto unlock;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
	nexus = lrbp->lun << 8 | task_tag;
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* Initiate Cleanup */
	writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
	       opr_sqd_base + REG_SQRTC);

	/* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n",
			__func__, id, task_tag, err);
	else
		dev_info(hba->dev,
			 "%s, hwq %d: cleanup return code (RTC) %ld\n",
			 __func__, id,
			 FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

unlock:
	mutex_unlock(&hwq->sq_mutex);
	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	struct utp_transfer_req_desc *utrd;
	__le64 cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr + sq_head_slot;
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error codes
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct ufs_hw_queue *hwq;
	unsigned long flags;
	int err;

	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
		dev_err(hba->dev,
			"%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		return FAILED;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		return FAILED;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure: the command should not have been sitting in the SQ
		 * for so long that it had to be aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		return FAILED;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		return FAILED;
	}

	spin_lock_irqsave(&hwq->cq_lock, flags);
	if (ufshcd_cmd_inflight(lrbp->cmd))
		ufshcd_release_scsi_cmd(hba, lrbp);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return SUCCESS;
}