/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cflags = CF_WRITE;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cflags = CF_READ;
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
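
/*
 * Worked example (editor's note): a Command Type 2 IOCB carries up to 3
 * DSDs and each Continuation Type 0 IOCB carries up to 7 more.  For
 * dsds = 10 this gives 1 + (10 - 3) / 7 = 2 entries with no remainder;
 * dsds = 11 leaves a remainder of 1 and needs a third entry.
 */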

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
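
/*
 * Worked example (editor's note): here the command IOCB holds 2 DSDs and
 * each Continuation Type 1 IOCB holds 5.  For dsds = 8: 1 + (8 - 2) / 5 = 2
 * entries, plus one more for the remainder of 1, so 3 in total.
 */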

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else {
		ha->request_ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
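
/*
 * Editor's note: both prep helpers above advance req_ring_index and
 * request_ring_ptr without touching req_q_cnt; the caller (see
 * qla2x00_start_scsi() below) has already debited the full req_cnt worth
 * of ring entries before it starts building the command.
 */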

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			cont_entry_t	*cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Seven DSDs are available in the Continuation
				 * Type 0 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
				avail_dsds = 7;
			}

			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
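
/*
 * Editor's note: each 32-bit DSD written above is an {address, length}
 * pair of le32 words.  With tot_dsds = 5, for instance, the first 3 pairs
 * land in the Command Type 2 IOCB and the remaining 2 in a single
 * Continuation Type 0 IOCB.
 */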

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}
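
/*
 * Editor's note: a 64-bit DSD is an {address-low, address-high, length}
 * triple; LSD()/MSD() split the dma_addr_t into its low and high 32 bits.
 * For example, sle_dma = 0x123456780 would be emitted as LSD = 0x23456780
 * and MSD = 0x00000001, followed by the segment length.
 */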

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long   flags;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	struct scatterlist *sg;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t	req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
			    (ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number. */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	sp->state = SRB_ACTIVE_STATE;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
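
/*
 * Editor's note: the handle search in qla2x00_start_scsi() is a circular
 * scan of outstanding_cmds[] starting just past current_outstanding_cmd.
 * Handle 0 is never assigned (the index wraps from
 * MAX_OUTSTANDING_COMMANDS back to 1), so a zero entry always means the
 * slot is free.
 */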

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, ha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(ha);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ret = __qla2x00_marker(ha, loop_id, lun, type);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (ret);
}
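
/*
 * Editor's note: this is the usual locked/unlocked pairing --
 * __qla2x00_marker() expects the caller to hold hardware_lock already,
 * while qla2x00_marker() takes and releases it around the same work.
 * A typical lock-free caller (compare qla2x00_start_scsi() above) does:
 *
 *	if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
 *		return (QLA_FUNCTION_FAILED);
 */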

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL on failure, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait for a slot -- up to HZ polls, 2us apart. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= ha->req_q_cnt) {
			/* Calculate number of free request entries. */
			if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt = ha->request_q_length -
				    (ha->req_ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < ha->req_q_cnt) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!ha->marker_needed)
			qla2x00_poll(ha);

		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
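
/*
 * Worked example (editor's note) for the free-entry math above: with
 * request_q_length = 128, req_ring_index = 100 and a hardware out pointer
 * cnt = 20, the in pointer has wrapped past out, so
 * req_q_cnt = 128 - (100 - 20) = 48 entries remain.  With cnt = 110
 * instead, req_q_cnt = 110 - 100 = 10.
 */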

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
	device_reg_t __iomem *reg = ha->iobase;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);	/* PCI Posting. */
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));	/* PCI Posting. */
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
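
/*
 * Worked example (editor's note): a Command Type 7 IOCB carries a single
 * DSD, with 5 more per Continuation Type 1 IOCB.  For dsds = 6:
 * 1 + (6 - 1) / 5 = 2 entries with no remainder; dsds = 7 needs a third.
 */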

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	ha = sp->ha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	if (cmd->use_sg != 0) {
		struct	scatterlist *cur_seg;
		struct	scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t	sle_dma;
			cont_a64_entry_t *cont_pkt;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				/*
				 * Five DSDs are available in the Continuation
				 * Type 1 IOCB.
				 */
				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
				avail_dsds = 5;
			}

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_seg++;
		}
	} else {
		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long   flags;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	struct scatterlist *sg;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			return QLA_FUNCTION_FAILED;
		}
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t      req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
		    cmd->request_bufflen, cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	/* Calculate the number of request entries needed. */
	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
				(ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	sp->state = SRB_ACTIVE_STATE;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
		    cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
		    cmd->request_bufflen, cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
867