xref: /linux/drivers/scsi/qla2xxx/qla_iocb.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
static void qla2x00_isp_cmd(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine the control_flags data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cflags = CF_WRITE;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cflags = CF_READ;
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

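/*
 * Editorial note (not part of the driver): a worked example of the
 * arithmetic above.  A Command Type 2 IOCB holds 3 DSDs and each
 * Continuation Type 0 IOCB holds 7 more, so for dsds = 17:
 *
 *	iocbs = 1 + (17 - 3) / 7 = 3;	two full continuations
 *	(17 - 3) % 7 == 0		no partial continuation needed
 *
 * qla2x00_calc_iocbs_32(17) therefore returns 3: the command IOCB plus
 * two full Continuation Type 0 IOCBs.
 */
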
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

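/*
 * Editorial note (not part of the driver): the same arithmetic with the
 * 64-bit geometry.  A Command Type 3 IOCB holds 2 DSDs and each
 * Continuation Type 1 IOCB holds 5 more, so for dsds = 13:
 *
 *	iocbs = 1 + (13 - 2) / 5 = 3;	two full continuations
 *	(13 - 2) % 5 == 1		one partial continuation needed
 *
 * qla2x00_calc_iocbs_64(13) therefore returns 4.
 */
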
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

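/*
 * Editorial sketch (hypothetical helper, not in this driver): both
 * qla2x00_prep_cont_type0_iocb() and qla2x00_prep_cont_type1_iocb()
 * open-code the same circular-ring advance.  In isolation:
 *
 *	static inline request_t *qla_advance_req_ring(scsi_qla_host_t *ha)
 *	{
 *		if (++ha->req_ring_index == ha->request_q_length) {
 *			ha->req_ring_index = 0;		// wrap to ring start
 *			ha->request_ring_ptr = ha->request_ring;
 *		} else {
 *			ha->request_ring_ptr++;		// next slot
 *		}
 *		return ha->request_ring_ptr;
 *	}
 *
 * The returned ring slot is then cast to the continuation type in use
 * and stamped with CONTINUE_TYPE or CONTINUE_A64_TYPE.
 */
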
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t	*cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

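/*
 * Editorial note (not part of the driver): for a 12-segment
 * scatter/gather transfer, the loop above distributes DSDs as follows:
 *
 *	Command Type 2 IOCB:		segments 1-3	(3 DSD slots)
 *	Continuation Type 0 IOCB #1:	segments 4-10	(7 DSD slots)
 *	Continuation Type 0 IOCB #2:	segments 11-12
 *
 * matching the qla2x00_calc_iocbs_32(12) == 3 request entries reserved
 * by qla2x00_start_scsi().
 */
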
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

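/*
 * Editorial note (not part of the driver): LSD() and MSD() split a
 * 64-bit DMA address into low and high 32-bit halves, so each 64-bit
 * DSD occupies three little-endian dwords versus two in the 32-bit
 * format.  A minimal sketch, assuming a 64-bit dma_addr_t:
 *
 *	dma_addr_t sle_dma = 0x0000000123456780ULL;
 *	*cur_dsd++ = cpu_to_le32(LSD(sle_dma));	// 0x23456780, low dword
 *	*cur_dsd++ = cpu_to_le32(MSD(sle_dma));	// 0x00000001, high dword
 *	*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));	// segment length
 */
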
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long   flags;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	struct scatterlist *sg;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t	req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

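/*
 * Editorial note (not part of the driver): the free-entry calculation
 * above treats the request ring as a circular buffer bounded by the
 * driver's in-pointer (req_ring_index) and the chip's out-pointer
 * (cnt).  With request_q_length = 128, for example:
 *
 *	req_ring_index = 20,  cnt = 100  ->  req_q_cnt = 100 - 20 = 80
 *	req_ring_index = 100, cnt = 20   ->  req_q_cnt = 128 - (100 - 20) = 48
 *
 * The "req_cnt + 2" cushion keeps the in-pointer from ever catching up
 * to the out-pointer.
 */
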
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context; the caller must
 * hold the hardware lock (use the qla2x00_marker() wrapper below to
 * acquire it).
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(ha);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}

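/*
 * Editorial note (not part of the driver): the two entry points above
 * follow the usual __-prefix locking convention.  Hypothetical callers:
 *
 *	// No lock held: the wrapper acquires hardware_lock itself.
 *	qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
 *
 *	// Already inside a hardware_lock critical section:
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	__qla2x00_marker(ha, loop_id, lun, MK_SYNC_ID_LUN);
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */
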
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL on failure, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

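/*
 * Editorial note (not part of the driver): the ISP24xx Command Type 7
 * IOCB carries a single DSD and each Continuation Type 1 IOCB carries
 * 5 more, so for dsds = 11:
 *
 *	iocbs = 1 + (11 - 1) / 5 = 3;	two full continuations
 *	(11 - 1) % 5 == 0		no partial continuation needed
 *
 * qla24xx_calc_iocbs(11) therefore returns 3.
 */
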
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long   flags;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	struct scatterlist *sg;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t      req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
				(ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
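
/*
 * Editorial note (not part of the driver): int_to_scsilun() formats the
 * LUN in SAM (big-endian) byte order, and host_to_fcp_swap() then
 * byte-swaps each 32-bit word into the layout the firmware expects.
 * A worked example for lun = 5, assuming a little-endian host:
 *
 *	int_to_scsilun(5, &cmd_pkt->lun);
 *		-> bytes { 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
 *	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 8);
 *		-> bytes { 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00 }
 *
 * The same swap is applied to the CDB above for the same reason.
 */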