/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
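
/*
 * Worked example (illustrative): the first IOCB carries one DSD and
 * each Continuation Type 1 IOCB carries up to five more. For
 * dsds = 12: iocbs = 1 + (11 / 5) = 3, and since 11 % 5 != 0 one
 * more continuation is added, for 4 IOCB entries in total.
 */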

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static inline uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return first;
}
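
/*
 * Illustrative note: the loop re-reads the register until two
 * consecutive reads agree, so a value sampled mid-update by the
 * hardware is simply read again rather than returned.
 */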

static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* Clean up the previously allocated DSD pool. */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}
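
/*
 * Illustrative example: shadow_disc_state keeps a rolling history of
 * recent states, four bits per entry. If the shadow currently holds
 * 0x21 (state 1 preceded by state 2) and the new state is 3, the
 * cmpxchg above stores (0x21 << 4) | 3 = 0x213. The low nibble is
 * always the current state; older states age out through the high
 * bits.
 */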

static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}
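
/*
 * Summary of the switch above (illustrative): the
 * ql2xenablehba_err_chk module parameter gates HBA-side protection
 * checking by operation type:
 *   >= 1:   READ_STRIP / WRITE_INSERT (guard handled at one end)
 *   >= 2:   READ_PASS / WRITE_PASS (guard carried end to end)
 *   always: READ_INSERT / WRITE_STRIP
 */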

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	/* ref: INIT - normal flow */
	kref_init(&sp->cmd_kref);
	INIT_LIST_HEAD(&sp->elem);
}

static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_qpair *qpair;

	if (unlikely(qla_vha_mark_busy(vha)))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}
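
/*
 * Illustrative note: this is the interrupt half of the mailbox
 * handshake. The sleeping mailbox command sets MBX_INTR_WAIT and
 * blocks on mbx_intr_comp; when the ISR observes the mailbox
 * interrupt status, it latches MBX_INTERRUPT and wakes the waiter
 * via complete().
 */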

static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for the I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing if a retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	       "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	       fcport->port_name, sts_qual, qual * 100);
}
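
/*
 * Worked example (illustrative): sts_qual = 0x8014 decodes to
 * scope = (0x8014 & 0xc000) >> 14 = 2 (I-T nexus) and
 * qual = 0x8014 & 0x3fff = 20 units of 100ms, so I/O to this port
 * is held off for roughly two seconds from now.
 */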

static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data = ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}
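
/*
 * Illustrative note: the NVRAM flag byte encodes the FCP vs. NVMe
 * preference in bit 6, so (data >> 6) & BIT_0 isolates that bit;
 * a set bit selects FCP priority and a clear bit selects NVMe.
 */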

enum {
	RESOURCE_NONE,
	RESOURCE_IOCB = BIT_0,
	RESOURCE_EXCH = BIT_1,  /* exchange */
	RESOURCE_FORCE = BIT_2,
};

static inline int
qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	u16 exch_used;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}
	if (iores->res_type & RESOURCE_FORCE)
		goto force;

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
		/* No need to acquire the qpair lock; this is a rough calculation. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
		}

		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
force:
	qp->fwres.iocbs_used += iores->iocb_cnt;
	qp->fwres.exch_used += iores->exch_cnt;
	return 0;
}
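
/*
 * Illustrative example: with iocbs_qp_limit = 256 and 250 IOCBs
 * already consumed on this qpair, a request for 8 more trips the
 * per-queue check; usage is then summed across all queue pairs and
 * the request only fails with -ENOSPC if the adapter-wide
 * iocbs_limit would also be exceeded. RESOURCE_FORCE skips both
 * checks but still charges the usage counters.
 */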

static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
	if (iores->res_type & RESOURCE_IOCB) {
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
	}

	if (iores->res_type & RESOURCE_EXCH) {
		if (qp->fwres.exch_used >= iores->exch_cnt) {
			qp->fwres.exch_used -= iores->exch_cnt;
		} else {
			/* should not happen */
			qp->fwres.exch_used = 0;
		}
	}
	iores->res_type = RESOURCE_NONE;
}

#define ISP_REG_DISCONNECT 0xffffffffU
/**************************************************************************
 * qla2x00_isp_reg_stat
 *
 * Description:
 *        Read the host status register of the ISP before aborting the
 *        command.
 *
 * Input:
 *       ha = pointer to host adapter structure.
 *
 * Returns:
 *       true if the register reads back as all ones (register
 *       disconnect), false otherwise.
 **************************************************************************/
static inline
uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;

	if (IS_P3P_TYPE(ha))
		return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
	else
		return ((rd_reg_dword(&reg->host_status)) ==
			ISP_REG_DISCONNECT);
}

static inline
bool qla_pci_disconnected(struct scsi_qla_host *vha,
			  struct device_reg_24xx __iomem *reg)
{
	uint32_t stat;
	bool ret = false;

	stat = rd_reg_dword(&reg->host_status);
	if (stat == 0xffffffff) {
		ql_log(ql_log_info, vha, 0x8041,
		       "detected PCI disconnect.\n");
		qla_schedule_eeh_work(vha);
		ret = true;
	}
	return ret;
}

static inline bool
fcport_is_smaller(fc_port_t *fcport)
{
	return wwn_to_u64(fcport->port_name) <
	       wwn_to_u64(fcport->vha->port_name);
}

static inline bool
fcport_is_bigger(fc_port_t *fcport)
{
	return !fcport_is_smaller(fcport);
}
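
/*
 * Illustrative note: these helpers order the remote port's WWPN
 * against the local port's, so exactly one side of a link sees
 * itself as "bigger". The driver can use that ordering (for example
 * in point-to-point login) to decide which side initiates.
 */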

static inline struct qla_qpair *
qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
{
	int cpuid = smp_processor_id();

	if (qpair->cpuid != cpuid &&
	    ha->qp_cpu_map[cpuid]) {
		qpair = ha->qp_cpu_map[cpuid];
	}
	return qpair;
}

static inline void
qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
			 struct qla_msix_entry *msix,
			 struct qla_qpair *qpair)
{
	const struct cpumask *mask;
	unsigned int cpu;

	if (!ha->qp_cpu_map)
		return;
	mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
	qpair->cpuid = cpumask_first(mask);
	for_each_cpu(cpu, mask) {
		ha->qp_cpu_map[cpu] = qpair;
	}
	msix->cpuid = qpair->cpuid;
}
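
/*
 * Illustrative example: if this qpair's MSI-X vector has an affinity
 * mask covering CPUs 4-7, qp_cpu_map[4..7] all point at this qpair
 * and qpair->cpuid becomes 4 (the first CPU in the mask).
 * qla_mapq_nvme_select_qpair() can then steer a command submitted on
 * CPU 6 to this qpair, keeping submission and completion on CPUs
 * that share the same interrupt affinity.
 */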

static inline void
qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
{
	if (ha->qp_cpu_map) {
		kfree(ha->qp_cpu_map);
		ha->qp_cpu_map = NULL;
	}
}

static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	if (!ha->qp_cpu_map) {
		ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
					 GFP_KERNEL);
		if (!ha->qp_cpu_map) {
			ql_log(ql_log_fatal, vha, 0x0180,
			       "Unable to allocate memory for qp_cpu_map ptrs.\n");
			return -1;
		}
	}
	return 0;
}
573