// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <quic_asutoshd@quicinc.com>
 *	Can Guo <quic_cang@quicinc.com>
 */

#include <linux/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP GENMASK(7, 0)
#define QCFGPTR GENMASK(23, 16)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16

#define MCQ_CFG_MAC_MASK	GENMASK(16, 8)
#define MCQ_ENTRY_SIZE_IN_DWORD	8
#define CQE_UCD_BA GENMASK_ULL(63, 7)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)

/* Max MCQ register polling time in microseconds */
#define MCQ_POLL_US 500000

static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops rw_queue_count_ops = {
	.set = rw_queue_count_set,
	.get = param_get_uint,
};

static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
		 "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops read_queue_count_ops = {
	.set = read_queue_count_set,
	.get = param_get_uint,
};

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
		 "Number of interrupt driven read queues used for read. Default value is 0");

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
				     num_possible_cpus());
}

static const struct kernel_param_ops poll_queue_count_ops = {
	.set = poll_queue_count_set,
	.get = param_get_uint,
};

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
		 "Number of poll queues used for r/w. Default value is 1");
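
/*
 * These parameters are consumed by ufshcd_mcq_config_nr_queues() during
 * controller initialization, so they are typically set at module load or on
 * the kernel command line. Illustrative only; the module name ufshcd_core is
 * an assumption here:
 *
 *	ufshcd_core.rw_queues=8 ufshcd_core.read_queues=2 ufshcd_core.poll_queues=2
 */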

/**
 * ufshcd_mcq_config_mac - Set the #Max Active Cmds.
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than the max_active_cmds to the device at
 * any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
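
/*
 * Example with an illustrative value: for max_active_cmds == 32, the 0-based
 * value 31 is programmed into the MAC field (MCQ_CFG_MAC_MASK, bits 16:8) of
 * REG_UFS_MCQ_CFG.
 */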

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					 struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}

/**
 * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
 * Registers.
 * @hba: per adapter instance
 *
 * Return: Start address of MCQ Queue Config Registers in HCI
 */
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
{
	return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
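
/*
 * Example with a hypothetical value: QCFGPTR is expressed in 512-byte units,
 * so a QCFGPTR of 2 places the MCQ Queue Config Registers at offset
 * 2 * 0x200 = 0x400 from the HCI register base.
 */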

/**
 * ufshcd_get_hba_mac - Maximum number of commands supported by the host
 *	controller.
 * @hba: per adapter instance
 *
 * Return: queue depth on success; negative upon error.
 *
 * MAC = Maximum number of Active Commands supported by the Host Controller.
 */
int ufshcd_get_hba_mac(struct ufs_hba *hba)
{
	int mac;

	if (!hba->vops || !hba->vops->get_hba_mac) {
		/*
		 * Extract the maximum number of active transfer tasks value
		 * from the host controller capabilities register. This value is
		 * 0-based.
		 */
		hba->capabilities =
			ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
		mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
		mac++;
	} else {
		mac = hba->vops->get_hba_mac(hba);
	}
	if (mac < 0)
		dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
	return mac;
}

static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
	int i;
	u32 hba_maxq, rem, tot_queues;
	struct Scsi_Host *host = hba->host;

	/* maxq is a 0-based value */
	hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;

	tot_queues = read_queues + poll_queues + rw_queues;

	if (hba_maxq < tot_queues) {
		dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
			tot_queues, hba_maxq);
		return -EOPNOTSUPP;
	}

	/*
	 * Device should support at least one I/O queue to handle device
	 * commands via hba->dev_cmd_queue.
	 */
	if (hba_maxq == poll_queues) {
		dev_err(hba->dev, "At least one non-poll queue required\n");
		return -EOPNOTSUPP;
	}

	rem = hba_maxq;

	if (rw_queues) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
		rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
	} else {
		rw_queues = num_possible_cpus();
	}

	if (poll_queues) {
		hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
		rem -= hba->nr_queues[HCTX_TYPE_POLL];
	}

	if (read_queues) {
		hba->nr_queues[HCTX_TYPE_READ] = read_queues;
		rem -= hba->nr_queues[HCTX_TYPE_READ];
	}

	if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
		hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
							 num_possible_cpus());

	for (i = 0; i < HCTX_MAX_TYPES; i++)
		host->nr_hw_queues += hba->nr_queues[i];

	hba->nr_hw_queues = host->nr_hw_queues;
	return 0;
}

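/*
 * Allocate DMA-coherent rings for every hardware queue: one Submission Queue
 * (UTP Transfer Request Descriptors) and one Completion Queue (CQ entries),
 * each sized for hwq->max_entries. Returns 0 on success or -ENOMEM if any
 * allocation fails; the device-managed allocations are released together
 * with the hba device.
 */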
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	size_t utrdl_size, cqe_size;
	int i;

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];

		utrdl_size = sizeof(struct utp_transfer_req_desc) *
			     hwq->max_entries;
		hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
							 &hwq->sqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->sqe_base_addr) {
			dev_err(hba->dev, "SQE allocation failed\n");
			return -ENOMEM;
		}

		cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
		hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
							 &hwq->cqe_dma_addr,
							 GFP_KERNEL);
		if (!hwq->cqe_base_addr) {
			dev_err(hba->dev, "CQE allocation failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

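/*
 * Return the base address of the MCQ Operation and Runtime register group
 * @n for hardware queue @i; each group is laid out at a fixed per-queue
 * stride.
 */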
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
					 enum ufshcd_mcq_opr n, int i)
{
	struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];

	return opr->base + opr->stride * i;
}

u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
	return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);

/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}

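/*
 * Worked example for ufshcd_mcq_get_tag(), with made-up numbers: a UCD base
 * address lying 0x1000 bytes above hba->ucdl_dma_addr, with a UCD size of
 * 0x400, corresponds to task tag 0x1000 / 0x400 = 4.
 */
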
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);

	if (cqe->command_desc_base_addr) {
		int tag = ufshcd_mcq_get_tag(hba, cqe);

		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processing the cqe, mark it as an empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}

void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{
	unsigned long flags;
	u32 entries = hwq->max_entries;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	while (entries > 0) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		entries--;
	}

	ufshcd_mcq_update_cq_tail_slot(hwq);
	hwq->cq_head_slot = hwq->cq_tail_slot;
	spin_unlock_irqrestore(&hwq->cq_lock, flags);
}

unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{
	unsigned long completed_reqs = 0;
	unsigned long flags;

	spin_lock_irqsave(&hwq->cq_lock, flags);
	ufshcd_mcq_update_cq_tail_slot(hwq);
	while (!ufshcd_mcq_is_cq_empty(hwq)) {
		ufshcd_mcq_process_cqe(hba, hwq);
		ufshcd_mcq_inc_cq_head_slot(hwq);
		completed_reqs++;
	}

	if (completed_reqs)
		ufshcd_mcq_update_cq_head(hwq);
	spin_unlock_irqrestore(&hwq->cq_lock, flags);

	return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);

void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	u32 intrs;
	u16 qsize;
	int i;

	/* Enable required interrupts */
	intrs = UFSHCD_ENABLE_MCQ_INTRS;
	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;
	ufshcd_enable_intr(hba, intrs);

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->id = i;
		qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;

		/* Submission Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
			      ufshcd_mcq_cfg_offset(REG_SQLBA, i));
		/* Submission Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
			      ufshcd_mcq_cfg_offset(REG_SQUBA, i));
		/* Submission Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),
			      ufshcd_mcq_cfg_offset(REG_SQDAO, i));
		/* Submission Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),
			      ufshcd_mcq_cfg_offset(REG_SQISAO, i));

		/* Completion Queue Lower Base Address */
		ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
			      ufshcd_mcq_cfg_offset(REG_CQLBA, i));
		/* Completion Queue Upper Base Address */
		ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
			      ufshcd_mcq_cfg_offset(REG_CQUBA, i));
		/* Completion Queue Doorbell Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),
			      ufshcd_mcq_cfg_offset(REG_CQDAO, i));
		/* Completion Queue Interrupt Status Address Offset */
		ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),
			      ufshcd_mcq_cfg_offset(REG_CQISAO, i));

		/* Save the base addresses for quicker access */
		hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
		hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
		hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
		hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;

		/* Reinitializing is needed upon HC reset */
		hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;

		/* Enable Tail Entry Push Status interrupt only for non-poll queues */
		if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
			writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);

		/* Completion Queue Enable|Size to Completion Queue Attribute */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
			      ufshcd_mcq_cfg_offset(REG_CQATTR, i));

		/*
		 * Submission Queue Enable|Size|Completion Queue ID to
		 * Submission Queue Attribute
		 */
		ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
			      (i << QUEUE_ID_OFFSET),
			      ufshcd_mcq_cfg_offset(REG_SQATTR, i));
	}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);

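/* Switch the host controller from legacy (single doorbell) mode to MCQ mode. */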
void ufshcd_mcq_enable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, MCQ_MODE_SELECT, REG_UFS_MEM_CFG);
	hba->mcq_enabled = true;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable);

void ufshcd_mcq_disable(struct ufs_hba *hba)
{
	ufshcd_rmwl(hba, MCQ_MODE_SELECT, 0, REG_UFS_MEM_CFG);
	hba->mcq_enabled = false;
}

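/* Enable generation of Event Specific Interrupts (ESI) by the controller. */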
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
		      REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);

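/* Program the ESI base address registers from the provided MSI message. */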
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);

int ufshcd_mcq_init(struct ufs_hba *hba)
{
	struct Scsi_Host *host = hba->host;
	struct ufs_hw_queue *hwq;
	int ret, i;

	ret = ufshcd_mcq_config_nr_queues(hba);
	if (ret)
		return ret;

	ret = ufshcd_vops_mcq_config_resource(hba);
	if (ret)
		return ret;

	ret = ufshcd_mcq_vops_op_runtime_config(hba);
	if (ret) {
		dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
			ret);
		return ret;
	}
	hba->uhq = devm_kzalloc(hba->dev,
				hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
				GFP_KERNEL);
	if (!hba->uhq) {
		dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hba->nr_hw_queues; i++) {
		hwq = &hba->uhq[i];
		hwq->max_entries = hba->nutrs + 1;
		spin_lock_init(&hwq->sq_lock);
		spin_lock_init(&hwq->cq_lock);
		mutex_init(&hwq->sq_mutex);
	}

	host->host_tagset = 1;
	return 0;
}

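/* Stop SQ fetching: set SQ_STOP in SQRTC and poll SQRTS until SQ_STS is set. */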
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_STS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

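/* Restart SQ fetching: set SQ_START in SQRTC and poll SQRTS until SQ_STS clears. */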
static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
	void __iomem *reg;
	u32 id = hwq->id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
	reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
	err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
			__func__, id, err);
	return err;
}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct ufs_hw_queue *hwq;
	void __iomem *reg, *opr_sqd_base;
	u32 nexus, id, val;
	int err;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return -ETIMEDOUT;

	if (!cmd)
		return -EINVAL;

	hwq = ufshcd_mcq_req_to_hwq(hba, rq);
	if (!hwq)
		return 0;

	id = hwq->id;

	guard(mutex)(&hwq->sq_mutex);

	/* stop the SQ fetching before working on it */
	err = ufshcd_mcq_sq_stop(hba, hwq);
	if (err)
		return err;

	/* SQCTI = EXT_IID, IID, LUN, Task Tag */
	nexus = lrbp->lun << 8 | task_tag;
	opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
	writel(nexus, opr_sqd_base + REG_SQCTI);

	/* Initiate Cleanup */
	writel(readl(opr_sqd_base + REG_SQRTC) | SQ_ICU,
		opr_sqd_base + REG_SQRTC);

	/* Wait until SQRTSy.CUS = 1. Report SQRTSy.RTC. */
	reg = opr_sqd_base + REG_SQRTS;
	err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
				MCQ_POLL_US, false, reg);
	if (err)
		dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%d\n",
			__func__, id, task_tag, err);
	else
		dev_info(hba->dev,
			 "%s, hwq %d: cleanup return code (RTC) %ld\n",
			 __func__, id,
			 FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));

	if (ufshcd_mcq_sq_start(hba, hwq))
		err = -ETIMEDOUT;

	return err;
}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue.
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{
	struct scsi_cmnd *cmd = ufshcd_tag_to_cmd(hba, task_tag);
	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
	struct utp_transfer_req_desc *utrd;
	__le64  cmd_desc_base_addr;
	bool ret = false;
	u64 addr, match;
	u32 sq_head_slot;

	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
		return true;

	mutex_lock(&hwq->sq_mutex);

	ufshcd_mcq_sq_stop(hba, hwq);
	sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
	if (sq_head_slot == hwq->sq_tail_slot)
		goto out;

	cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
	addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;

	while (sq_head_slot != hwq->sq_tail_slot) {
		utrd = hwq->sqe_base_addr + sq_head_slot;
		match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
		if (addr == match) {
			ufshcd_mcq_nullify_sqe(utrd);
			ret = true;
			goto out;
		}

		sq_head_slot++;
		if (sq_head_slot == hwq->max_entries)
			sq_head_slot = 0;
	}

out:
	ufshcd_mcq_sq_start(hba, hwq);
	mutex_unlock(&hwq->sq_mutex);
	return ret;
}

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error codes
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
	struct ufs_hw_queue *hwq;
	int err;

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
			__func__, tag);
		return FAILED;
	}

	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
	if (!hwq) {
		dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
			__func__, tag);
		return FAILED;
	}

	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
		/*
		 * Failure. The command should not have been "stuck" in the SQ
		 * for so long that it ended up being aborted.
		 */
		dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
			__func__, hwq->id, tag);
		return FAILED;
	}

	/*
	 * The command is not in the submission queue, and it is not
	 * in the completion queue either. Query the device to see if
	 * the command is being processed in the device.
	 */
	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
		lrbp->req_abort_skip = true;
		return FAILED;
	}

	return SUCCESS;
}