xref: /freebsd/sys/dev/ocs_fc/ocs_unsol.c (revision 62cfcf62f627e5093fb37026a6d8c98e4d2ef04c)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * Code to handle unsolicited received FC frames.
37  */
38 
39 /*!
40  * @defgroup unsol Unsolicited Frame Handling
41  */
42 
43 #include "ocs.h"
44 #include "ocs_els.h"
45 #include "ocs_fabric.h"
46 #include "ocs_device.h"
47 
48 
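/*
 * Debug logging helper for unsolicited frames: prefixes the message with the
 * frame's D_ID, S_ID, R_CTL, OX_ID and RX_ID taken from the given FC header.
 */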
49 #define frame_printf(ocs, hdr, fmt, ...) \
50 	do { \
51 		char s_id_text[16]; \
52 		ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \
53 		ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \
54 			(hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \
55 	} while(0)
56 
57 static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq);
58 static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq);
59 static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq);
60 static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq);
61 static int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
62 static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
63 static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg);
64 static ocs_hw_sequence_t *ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock);
65 static uint8_t ocs_node_frames_held(void *arg);
66 static uint8_t ocs_domain_frames_held(void *arg);
67 static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock);
68 static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq);
69 
70 #define OCS_MAX_FRAMES_BEFORE_YIELDING 10000
71 
72 /**
73  * @brief Process the RQ circular buffer and process the incoming frames.
74  *
75  * @param mythread Pointer to thread object.
76  *
77  * @return Returns 0 on success, or a non-zero value on failure.
78  */
79 int32_t
80 ocs_unsol_rq_thread(ocs_thread_t *mythread)
81 {
82 	ocs_xport_rq_thread_info_t *thread_data = mythread->arg;
83 	ocs_t *ocs = thread_data->ocs;
84 	ocs_hw_sequence_t *seq;
85 	uint32_t yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
86 
87 	ocs_log_debug(ocs, "%s running\n", mythread->name);
88 	while (!ocs_thread_terminate_requested(mythread)) {
89 		seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000);
90 		if (seq == NULL) {
91 			/* Prevent soft lockups by yielding the CPU */
92 			ocs_thread_yield(&thread_data->thread);
93 			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
94 			continue;
95 		}
96 		/* Note: Always returns 0 */
97 		ocs_unsol_process((ocs_t*)seq->hw->os, seq);
98 
99 		/* We have to prevent CPU soft lockups, so just yield the CPU after x frames. */
100 		if (--yield_count == 0) {
101 			ocs_thread_yield(&thread_data->thread);
102 			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
103 		}
104 	}
105 	ocs_log_debug(ocs, "%s exiting\n", mythread->name);
106 	thread_data->thread_started = FALSE;
107 	return 0;
108 }
109 
110 /**
111  * @brief Callback function invoked when aborting port-owned XRI
112  * exchanges.
113  * exchanges.
114  *
115  * @return Returns 0.
116  */
117 static int32_t
118 ocs_unsol_abort_cb (ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg)
119 {
120 	ocs_t *ocs = arg;
121 	ocs_assert(hio, -1);
122 	ocs_assert(arg, -1);
123 	ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag);
124 	ocs_hw_io_free(&ocs->hw, hio);
125 	return 0;
126 }
127 
128 
129 /**
130  * @ingroup unsol
131  * @brief Abort a port-owned (RQ Pair auto XFER RDY) XRI.
132  * @return Returns None.
133  */
134 static void
135 ocs_port_owned_abort(ocs_t *ocs, ocs_hw_io_t *hio)
136 {
137 	ocs_hw_rtn_e hw_rc;
138 	hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE,
139 				  ocs_unsol_abort_cb, ocs);
140 	if((hw_rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) ||
141 	   (hw_rc == OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED)) {
142 		ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator);
143 	} else if(hw_rc != OCS_HW_RTN_SUCCESS) {
144 		ocs_log_debug(ocs, "Error aborting XRI 0x%x status %d\n",
145 			      hio->indicator, hw_rc);
146 	}
147 }
148 
149 /**
150  * @ingroup unsol
151  * @brief Handle unsolicited FC frames.
152  *
153  * <h3 class="desc">Description</h3>
154  * This function is called from the HW with unsolicited FC frames (FCP, ELS, BLS, etc.).
155  *
156  * @param arg Application-specified callback data.
157  * @param seq Header/payload sequence buffers.
158  *
159  * @return Returns 0 on success, or a negative error value on failure.
160  */
161 
162 int32_t
163 ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq)
164 {
165 	ocs_t *ocs = arg;
166 	ocs_xport_t *xport = ocs->xport;
167 	int32_t rc;
168 
169 	CPUTRACE("");
170 
171 	if (ocs->rq_threads == 0) {
172 		rc = ocs_unsol_process(ocs, seq);
173 	} else {
174 		/* use the ox_id to dispatch this IO to a thread */
175 		fc_header_t *hdr = seq->header->dma.virt;
176 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
177 		uint32_t thr_index = ox_id % ocs->rq_threads;
178 
179 		rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq);
180 	}
181 
182 	if (rc) {
183 		ocs_hw_sequence_free(&ocs->hw, seq);
184 	}
185 
186 	return 0;
187 }
188 
189 /**
190  * @ingroup unsol
191  * @brief Handle unsolicited FC frames.
192  *
193  * <h3 class="desc">Description</h3>
194  * This function is called either from ocs_unsolicited_cb() or ocs_unsol_rq_thread().
195  *
196  * @param ocs Pointer to the ocs structure.
197  * @param seq Header/payload sequence buffers.
198  *
199  * @return Returns 0 on success, or a negative error value on failure.
200  */
201 static int32_t
202 ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq)
203 {
204 	ocs_xport_fcfi_t *xport_fcfi = NULL;
205 	ocs_domain_t *domain;
206 	uint8_t seq_fcfi = seq->fcfi;
207 
208 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
209 	if (ocs->hw.workaround.override_fcfi) {
210 		if (ocs->hw.first_domain_idx > -1) {
211 			seq_fcfi = ocs->hw.first_domain_idx;
212 		}
213 	}
214 
215 	/* Range check seq->fcfi */
216 	if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) {
217 		xport_fcfi = &ocs->xport->fcfi[seq_fcfi];
218 	}
219 
220 	/* If the transport FCFI entry is NULL, then drop the frame */
221 	if (xport_fcfi == NULL) {
222 		ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi);
223 		if (seq->hio != NULL) {
224 			ocs_port_owned_abort(ocs, seq->hio);
225 		}
226 
227 		ocs_hw_sequence_free(&ocs->hw, seq);
228 		return 0;
229 	}
230 	domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi);
231 
232 	/*
233 	 * If we are holding frames, the domain is not yet registered, or
234 	 * there are already frames on the pending list,
235 	 * then add the new frame to the pending list
236 	 */
237 	if (domain == NULL ||
238 	    xport_fcfi->hold_frames ||
239 	    !ocs_list_empty(&xport_fcfi->pend_frames)) {
240 		ocs_lock(&xport_fcfi->pend_frames_lock);
241 			ocs_list_add_tail(&xport_fcfi->pend_frames, seq);
242 		ocs_unlock(&xport_fcfi->pend_frames_lock);
243 
244 		if (domain != NULL) {
245 			/* immediately process pending frames */
246 			ocs_domain_process_pending(domain);
247 		}
248 	} else {
249 		/*
250 		 * We are not holding frames and the pending list is empty, so just process the frame.
251 		 * A non-zero return means the frame was not handled - so clean up
252 		 */
253 		if (ocs_domain_dispatch_frame(domain, seq)) {
254 			if (seq->hio != NULL) {
255 				ocs_port_owned_abort(ocs, seq->hio);
256 			}
257 			ocs_hw_sequence_free(&ocs->hw, seq);
258 		}
259 	}
260 	return 0;
261 }
262 
263 /**
264  * @ingroup unsol
265  * @brief Process pending frames queued to the given node.
266  *
267  * <h3 class="desc">Description</h3>
268  * Frames that are queued for the \c node are dispatched and returned
269  * to the RQ.
270  *
271  * @param node Node of the queued frames that are to be dispatched.
272  *
273  * @return Returns 0 on success, or a negative error value on failure.
274  */
275 
276 int32_t
277 ocs_process_node_pending(ocs_node_t *node)
278 {
279 	ocs_t *ocs = node->ocs;
280 	ocs_hw_sequence_t *seq = NULL;
281 	uint32_t pend_frames_processed = 0;
282 
283 	for (;;) {
284 		/* need to check for hold frames condition after each frame processed
285 		 * because any given frame could cause a transition to a state that
286 		 * holds frames
287 		 */
288 		if (ocs_node_frames_held(node)) {
289 			break;
290 		}
291 
292 		/* Get next frame/sequence */
293 		ocs_lock(&node->pend_frames_lock);
294 			seq = ocs_list_remove_head(&node->pend_frames);
295 			if (seq == NULL) {
296 				pend_frames_processed = node->pend_frames_processed;
297 				node->pend_frames_processed = 0;
298 				ocs_unlock(&node->pend_frames_lock);
299 				break;
300 			}
301 			node->pend_frames_processed++;
302 		ocs_unlock(&node->pend_frames_lock);
303 
304 		/* now dispatch frame(s) to dispatch function */
305 		if (ocs_node_dispatch_frame(node, seq)) {
306 			if (seq->hio != NULL) {
307 				ocs_port_owned_abort(ocs, seq->hio);
308 			}
309 			ocs_hw_sequence_free(&ocs->hw, seq);
310 		}
311 	}
312 
313 	if (pend_frames_processed != 0) {
314 		ocs_log_debug(ocs, "%u node frames held and processed\n", pend_frames_processed);
315 	}
316 
317 	return 0;
318 }
319 
320 /**
321  * @ingroup unsol
322  * @brief Process pending frames queued to the given domain.
323  *
324  * <h3 class="desc">Description</h3>
325  * Frames that are queued for the \c domain are dispatched and
326  * returned to the RQ.
327  *
328  * @param domain Domain of the queued frames that are to be
329  *		 dispatched.
330  *
331  * @return Returns 0 on success, or a negative error value on failure.
332  */
333 
334 int32_t
335 ocs_domain_process_pending(ocs_domain_t *domain)
336 {
337 	ocs_t *ocs = domain->ocs;
338 	ocs_xport_fcfi_t *xport_fcfi;
339 	ocs_hw_sequence_t *seq = NULL;
340 	uint32_t pend_frames_processed = 0;
341 
342 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
343 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
344 
345 	for (;;) {
346 		/* need to check for hold frames condition after each frame processed
347 		 * because any given frame could cause a transition to a state that
348 		 * holds frames
349 		 */
350 		if (ocs_domain_frames_held(domain)) {
351 			break;
352 		}
353 
354 		/* Get next frame/sequence */
355 		ocs_lock(&xport_fcfi->pend_frames_lock);
356 			seq = ocs_list_remove_head(&xport_fcfi->pend_frames);
357 			if (seq == NULL) {
358 				pend_frames_processed = xport_fcfi->pend_frames_processed;
359 				xport_fcfi->pend_frames_processed = 0;
360 				ocs_unlock(&xport_fcfi->pend_frames_lock);
361 				break;
362 			}
363 			xport_fcfi->pend_frames_processed++;
364 		ocs_unlock(&xport_fcfi->pend_frames_lock);
365 
366 		/* now dispatch frame(s) to dispatch function */
367 		if (ocs_domain_dispatch_frame(domain, seq)) {
368 			if (seq->hio != NULL) {
369 				ocs_port_owned_abort(ocs, seq->hio);
370 			}
371 			ocs_hw_sequence_free(&ocs->hw, seq);
372 		}
373 	}
374 	if (pend_frames_processed != 0) {
375 		ocs_log_debug(ocs, "%u domain frames held and processed\n", pend_frames_processed);
376 	}
377 	return 0;
378 }
379 
380 /**
381  * @ingroup unsol
382  * @brief Purge given pending list
383  *
384  * <h3 class="desc">Description</h3>
385  * Frames that are queued on the given pending list are
386  * discarded and returned to the RQ.
387  *
388  * @param ocs Pointer to ocs object.
389  * @param pend_list Pending list to be purged.
390  * @param list_lock Lock that protects pending list.
391  *
392  * @return Returns 0 on success, or a negative error value on failure.
393  */
394 
395 static int32_t
396 ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock)
397 {
398 	ocs_hw_sequence_t *frame;
399 
400 	for (;;) {
401 		frame = ocs_frame_next(pend_list, list_lock);
402 		if (frame == NULL) {
403 			break;
404 		}
405 
406 		frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n");
407 		if (frame->hio != NULL) {
408 			ocs_port_owned_abort(ocs, frame->hio);
409 		}
410 		ocs_hw_sequence_free(&ocs->hw, frame);
411 	}
412 
413 	return 0;
414 }
415 
416 /**
417  * @ingroup unsol
418  * @brief Purge node's pending (queued) frames.
419  *
420  * <h3 class="desc">Description</h3>
421  * Frames that are queued for the \c node are discarded and returned
422  * to the RQ.
423  *
424  * @param node Node of the queued frames that are to be discarded.
425  *
426  * @return Returns 0 on success, or a negative error value on failure.
427  */
428 
429 int32_t
430 ocs_node_purge_pending(ocs_node_t *node)
431 {
432 	return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock);
433 }
434 
435 /**
436  * @ingroup unsol
437  * @brief Purge the domain's pending (queued) frames.
438  *
439  * <h3 class="desc">Description</h3>
440  * Frames that are queued for the \c domain are discarded and
441  * returned to the RQ.
442  *
443  * @param domain Pointer to domain object.
444  *
445  * @return Returns 0 on success, or a negative error value on failure.
446  */
447 
448 int32_t
449 ocs_domain_purge_pending(ocs_domain_t *domain)
450 {
451 	ocs_t *ocs = domain->ocs;
452 	ocs_xport_fcfi_t *xport_fcfi;
453 
454 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
455 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
456 	return ocs_purge_pending(domain->ocs,
457 				 &xport_fcfi->pend_frames,
458 				 &xport_fcfi->pend_frames_lock);
459 }
460 
461 /**
462  * @ingroup unsol
463  * @brief Check if node's pending frames are held.
464  *
465  * @param arg Node for which the pending frame hold condition is
466  * checked.
467  *
468  * @return Returns 1 if node is holding pending frames, or 0
469  * if not.
470  */
471 
472 static uint8_t
473 ocs_node_frames_held(void *arg)
474 {
475 	ocs_node_t *node = (ocs_node_t *)arg;
476 	return node->hold_frames;
477 }
478 
479 /**
480  * @ingroup unsol
481  * @brief Check if domain's pending frames are held.
482  *
483  * @param arg Domain for which the pending frame hold condition is
484  * checked.
485  *
486  * @return Returns 1 if domain is holding pending frames, or 0
487  * if not.
488  */
489 
490 static uint8_t
491 ocs_domain_frames_held(void *arg)
492 {
493 	ocs_domain_t *domain = (ocs_domain_t *)arg;
494 	ocs_t *ocs = domain->ocs;
495 	ocs_xport_fcfi_t *xport_fcfi;
496 
497 	ocs_assert(domain != NULL, 1);
498 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1);
499 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
500 	return xport_fcfi->hold_frames;
501 }
502 
503 /**
504  * @ingroup unsol
505  * @brief Hold unsolicited frames for the domain.
506  *
507  * <h3 class="desc">Description</h3>
508  * This function places a hold on processing unsolicited FC
509  * frames queued to the domain pending list.
510  *
511  * @param domain Pointer to domain object.
512  *
513  * @return Returns None.
514  */
515 
516 void
517 ocs_domain_hold_frames(ocs_domain_t *domain)
518 {
519 	ocs_t *ocs = domain->ocs;
520 	ocs_xport_fcfi_t *xport_fcfi;
521 
522 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
523 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
524 	if (!xport_fcfi->hold_frames) {
525 		ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n",
526 			      domain->fcf_indicator);
527 		xport_fcfi->hold_frames = 1;
528 	}
529 }
530 
531 /**
532  * @ingroup unsol
533  * @brief Clear hold on unsolicited frames.
534  *
535  * <h3 class="desc">Description</h3>
536  * This function clears the hold on processing unsolicited FC
537  * frames queued to the domain pending list.
538  *
539  * @param domain Pointer to domain object.
540  *
541  * @return Returns None.
542  */
543 
544 void
545 ocs_domain_accept_frames(ocs_domain_t *domain)
546 {
547 	ocs_t *ocs = domain->ocs;
548 	ocs_xport_fcfi_t *xport_fcfi;
549 
550 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
551 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
552 	if (xport_fcfi->hold_frames == 1) {
553 		ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n",
554 			      domain->fcf_indicator);
555 	}
556 	xport_fcfi->hold_frames = 0;
557 	ocs_domain_process_pending(domain);
558 }
559 
560 
561 /**
562  * @ingroup unsol
563  * @brief Dispatch unsolicited FC frame.
564  *
565  * <h3 class="desc">Description</h3>
566  * This function processes an unsolicited FC frame queued at the
567  * domain level.
568  *
569  * @param arg Pointer to ocs object.
570  * @param seq Header/payload sequence buffers.
571  *
572  * @return Returns 0 if frame processed and RX buffers cleaned
573  * up appropriately, -1 if frame not handled.
574  */
575 
576 static __inline int32_t
577 ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
578 {
579 	ocs_domain_t *domain = (ocs_domain_t *)arg;
580 	ocs_t *ocs = domain->ocs;
581 	fc_header_t *hdr;
582 	uint32_t s_id;
583 	uint32_t d_id;
584 	ocs_node_t *node = NULL;
585 	ocs_sport_t *sport = NULL;
586 
587 	ocs_assert(seq->header, -1);
588 	ocs_assert(seq->header->dma.virt, -1);
589 	ocs_assert(seq->payload->dma.virt, -1);
590 
591 	hdr = seq->header->dma.virt;
592 
593 	/* extract the s_id and d_id */
594 	s_id = fc_be24toh(hdr->s_id);
595 	d_id = fc_be24toh(hdr->d_id);
596 
597 	sport = domain->sport;
598 	if (sport == NULL) {
599 		frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id);
600 		return -1;
601 	}
602 
603 	if (sport->fc_id != d_id) {
604 		/* Not a physical port IO; look up the sport associated with the NPIV port */
605 		sport = ocs_sport_find(domain, d_id); /* Look up without lock */
606 		if (sport == NULL) {
607 			if (hdr->type == FC_TYPE_FCP) {
608 				/* Drop frame */
609 				ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n",
610 					     d_id);
611 				return -1;
612 			} else {
613 				/* p2p will use this case */
614 				sport = domain->sport;
615 			}
616 		}
617 	}
618 
619 	/* Lookup the node given the remote s_id */
620 	node = ocs_node_find(sport, s_id);
621 
622 	/* If not found, then create a new node */
623 	if (node == NULL) {
624 		/* If this is solicited data or control based on R_CTL and there is no node context,
625 		 * then we can drop the frame
626 		 */
627 		if ((hdr->r_ctl == FC_RCTL_FC4_DATA) && (
628 		    (hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) {
629 			ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n");
630 			return -1;
631 		}
632 		node = ocs_node_alloc(sport, s_id, FALSE, FALSE);
633 		if (node == NULL) {
634 			ocs_log_err(ocs, "ocs_node_alloc() failed\n");
635 			return -1;
636 		}
637 		/* don't send PLOGI on ocs_d_init entry */
638 		ocs_node_init_device(node, FALSE);
639 	}
640 
641 	if (node->hold_frames || !ocs_list_empty((&node->pend_frames))) {
642 		/* TODO: info log level
643 		frame_printf(ocs, hdr, "Holding frame\n");
644 		*/
645 		/* add frame to node's pending list */
646 		ocs_lock(&node->pend_frames_lock);
647 			ocs_list_add_tail(&node->pend_frames, seq);
648 		ocs_unlock(&node->pend_frames_lock);
649 
650 		return 0;
651 	}
652 
653 	/* now dispatch frame to the node frame handler */
654 	return ocs_node_dispatch_frame(node, seq);
655 }
656 
657 /**
658  * @ingroup unsol
659  * @brief Dispatch a frame.
660  *
661  * <h3 class="desc">Description</h3>
662  * A frame is dispatched from the \c node to the handler.
663  *
664  * @param arg Node that originated the frame.
665  * @param seq Header/payload sequence buffers.
666  *
667  * @return Returns 0 if frame processed and RX buffers cleaned
668  * up appropriately, -1 if frame not handled.
669  */
670 static int32_t
671 ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
672 {
673 
674 	fc_header_t *hdr = seq->header->dma.virt;
675 	uint32_t port_id;
676 	ocs_node_t *node = (ocs_node_t *)arg;
677 	int32_t rc = -1;
678 	int32_t sit_set = 0;
679 
680 	port_id = fc_be24toh(hdr->s_id);
681 	ocs_assert(port_id == node->rnode.fc_id, -1);
682 
683 	if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) {
684 		/* if SIT is set */
685 		if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) {
686 			sit_set = 1;
687 		}
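		/* Route the frame by R_CTL: ELS, BLS (ABTS) or FC-4 data (FCP/CT) */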
688 		switch (hdr->r_ctl) {
689 		case FC_RCTL_ELS:
690 			if (sit_set) {
691 				rc = ocs_node_recv_els_frame(node, seq);
692 			}
693 			break;
694 
695 		case FC_RCTL_BLS:
696 			if (sit_set) {
697 				rc = ocs_node_recv_abts_frame(node, seq);
698 			} else {
699 				rc = ocs_node_recv_bls_no_sit(node, seq);
700 			}
701 			break;
702 
703 		case FC_RCTL_FC4_DATA:
704 			switch (hdr->type) {
705 			case FC_TYPE_FCP:
706 				if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) {
707 					if (node->fcp_enabled) {
708 						if (sit_set) {
709 							rc = ocs_dispatch_fcp_cmd(node, seq);
710 						} else {
711 							/* send the auto xfer ready command */
712 							rc = ocs_dispatch_fcp_cmd_auto_xfer_rdy(node, seq);
713 						}
714 					} else {
715 						rc = ocs_node_recv_fcp_cmd(node, seq);
716 					}
717 				} else if (hdr->info == FC_RCTL_INFO_SOL_DATA) {
718 					if (sit_set) {
719 						rc = ocs_dispatch_fcp_data(node, seq);
720 					}
721 				}
722 				break;
723 			case FC_TYPE_GS:
724 				if (sit_set) {
725 					rc = ocs_node_recv_ct_frame(node, seq);
726 				}
727 				break;
728 			default:
729 				break;
730 			}
731 			break;
732 		}
733 	} else {
734 		node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
735 			    ocs_htobe32(((uint32_t *)hdr)[0]),
736 			    ocs_htobe32(((uint32_t *)hdr)[1]),
737 			    ocs_htobe32(((uint32_t *)hdr)[2]),
738 			    ocs_htobe32(((uint32_t *)hdr)[3]),
739 			    ocs_htobe32(((uint32_t *)hdr)[4]),
740 			    ocs_htobe32(((uint32_t *)hdr)[5]));
741 	}
742 	return rc;
743 }
744 
745 /**
746  * @ingroup unsol
747  * @brief Dispatch an unsolicited FCP task management request.
748  *
749  * <h3 class="desc">Description</h3>
750  * Dispatch the task management function carried in an unsolicited FCP_CMND frame to the SCSI back end.
751  *
752  * @param io Pointer to the IO context.
753  * @param task_management_flags Task management flags from the FCP_CMND frame.
754  * @param node Node that originated the frame.
755  * @param lun 64-bit LUN from the FCP_CMND frame.
756  *
757  * @return Returns None.
758  */
759 
760 static void
761 ocs_dispatch_unsolicited_tmf(ocs_io_t *io, uint8_t task_management_flags, ocs_node_t *node, uint64_t lun)
762 {
763 	uint32_t i;
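	/* Map each FCP task management flag bit to the corresponding SCSI TMF
	 * command; the first matching bit is dispatched to the back end.
	 */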
764 	struct {
765 		uint32_t mask;
766 		ocs_scsi_tmf_cmd_e cmd;
767 	} tmflist[] = {
768 		{FCP_QUERY_TASK_SET,		OCS_SCSI_TMF_QUERY_TASK_SET},
769 		{FCP_ABORT_TASK_SET,		OCS_SCSI_TMF_ABORT_TASK_SET},
770 		{FCP_CLEAR_TASK_SET,		OCS_SCSI_TMF_CLEAR_TASK_SET},
771 		{FCP_QUERY_ASYNCHRONOUS_EVENT,	OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT},
772 		{FCP_LOGICAL_UNIT_RESET,	OCS_SCSI_TMF_LOGICAL_UNIT_RESET},
773 		{FCP_TARGET_RESET,		OCS_SCSI_TMF_TARGET_RESET},
774 		{FCP_CLEAR_ACA,			OCS_SCSI_TMF_CLEAR_ACA}};
775 
776 	io->exp_xfer_len = 0; /* BUG 32235 */
777 
778 	for (i = 0; i < ARRAY_SIZE(tmflist); i ++) {
779 		if (tmflist[i].mask & task_management_flags) {
780 			io->tmf_cmd = tmflist[i].cmd;
781 			ocs_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
782 			break;
783 		}
784 	}
785 	if (i == ARRAY_SIZE(tmflist)) {
786 		/* Not handled */
787 		node_printf(node, "TMF x%x rejected\n", task_management_flags);
788 		ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_REJECTED, NULL, ocs_fc_tmf_rejected_cb, NULL);
789 	}
790 }
791 
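/**
 * @ingroup unsol
 * @brief Validate an unsolicited FCP_CMND payload.
 *
 * <h3 class="desc">Description</h3>
 * Checks that the received payload is large enough to hold the FCP_CMND IU,
 * accounting for the additional CDB length; shorter frames are treated as
 * corrupted and dropped by the caller.
 *
 * @param ocs Pointer to the ocs object.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns 0 if the FCP_CMND is acceptable, or -1 if it should be dropped.
 */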
792 static int32_t
793 ocs_validate_fcp_cmd(ocs_t *ocs, ocs_hw_sequence_t *seq)
794 {
795 	size_t		exp_payload_len = 0;
796 	fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt;
797 	exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length;
798 
799 	/*
800 	 * If we received less than FCP_CMND_IU bytes, assume that the frame is
801 	 * corrupted in some way and drop it. This was seen when jamming the FCTL
802 	 * fill bytes field.
803 	 */
804 	if (seq->payload->dma.len < exp_payload_len) {
805 		fc_header_t	*fchdr = seq->header->dma.virt;
806 		ocs_log_debug(ocs, "dropping ox_id %04x with payload length (%zd) less than expected (%zd)\n",
807 			      ocs_be16toh(fchdr->ox_id), seq->payload->dma.len,
808 			      exp_payload_len);
809 		return -1;
810 	}
811 	return 0;
812 
813 }
814 
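/**
 * @ingroup unsol
 * @brief Populate an IO from an unsolicited FCP_CMND frame.
 *
 * <h3 class="desc">Description</h3>
 * Fills in the initiator task tag, expected transfer length (FCP_DL),
 * CS_CTL priority, and sequence initiative state of the IO from the
 * FCP_CMND payload and FC header.
 *
 * @param io Pointer to the IO context.
 * @param cmnd Pointer to the FCP_CMND IU.
 * @param fchdr Pointer to the FC header.
 * @param sit Sequence initiative state to record in the IO.
 *
 * @return Returns None.
 */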
815 static void
816 ocs_populate_io_fcp_cmd(ocs_io_t *io, fcp_cmnd_iu_t *cmnd, fc_header_t *fchdr, uint8_t sit)
817 {
818 	uint32_t	*fcp_dl;
819 	io->init_task_tag = ocs_be16toh(fchdr->ox_id);
820 	/* note, tgt_task_tag, hw_tag  set when HW io is allocated */
821 	fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));
822 	fcp_dl += cmnd->additional_fcp_cdb_length;
823 	io->exp_xfer_len = ocs_be32toh(*fcp_dl);
824 	io->transferred = 0;
825 
826 	/* The upper 7 bits of CS_CTL are the frame priority through the SAN.
827 	 * Our assumption here is that the priority given to the frame containing
828 	 * the FCP command should be the priority given to ALL frames contained
829 	 * in that IO. Thus we need to save the incoming CS_CTL here.
830 	 */
831 	if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) {
832 		io->cs_ctl = fchdr->cs_ctl;
833 	} else {
834 		io->cs_ctl = 0;
835 	}
836 	io->seq_init = sit;
837 }
838 
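/**
 * @ingroup unsol
 * @brief Convert FCP_CMND task attribute and direction bits into SCSI command flags.
 *
 * @param cmnd Pointer to the FCP_CMND IU.
 *
 * @return Returns the OCS_SCSI_CMD_* flags for the command.
 */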
839 static uint32_t
840 ocs_get_flags_fcp_cmd(fcp_cmnd_iu_t *cmnd)
841 {
842 	uint32_t flags = 0;
843 	switch (cmnd->task_attribute) {
844 	case FCP_TASK_ATTR_SIMPLE:
845 		flags |= OCS_SCSI_CMD_SIMPLE;
846 		break;
847 	case FCP_TASK_ATTR_HEAD_OF_QUEUE:
848 		flags |= OCS_SCSI_CMD_HEAD_OF_QUEUE;
849 		break;
850 	case FCP_TASK_ATTR_ORDERED:
851 		flags |= OCS_SCSI_CMD_ORDERED;
852 		break;
853 	case FCP_TASK_ATTR_ACA:
854 		flags |= OCS_SCSI_CMD_ACA;
855 		break;
856 	case FCP_TASK_ATTR_UNTAGGED:
857 		flags |= OCS_SCSI_CMD_UNTAGGED;
858 		break;
859 	}
860 	if (cmnd->wrdata)
861 		flags |= OCS_SCSI_CMD_DIR_IN;
862 	if (cmnd->rddata)
863 		flags |= OCS_SCSI_CMD_DIR_OUT;
864 
865 	return flags;
866 }
867 
868 /**
869  * @ingroup unsol
870  * @brief Dispatch unsolicited FCP_CMND frame.
871  *
872  * <h3 class="desc">Description</h3>
873  * Dispatch an unsolicited FCP_CMND frame (RQ Pair mode). This path is always
874  * used in RQ Pair mode, since first burst is not supported.
875  *
876  * @param node Node that originated the frame.
877  * @param seq Header/payload sequence buffers.
878  *
879  * @return Returns 0 if frame processed and RX buffers cleaned
880  * up appropriately, -1 if frame not handled and RX buffers need
881  * to be returned.
882  */
883 static int32_t
884 ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
885 {
886 	ocs_t *ocs = node->ocs;
887 	fc_header_t	*fchdr = seq->header->dma.virt;
888 	fcp_cmnd_iu_t	*cmnd = NULL;
889 	ocs_io_t	*io = NULL;
890 	fc_vm_header_t 	*vhdr;
891 	uint8_t 	df_ctl;
892 	uint64_t	lun = UINT64_MAX;
893 	int32_t		rc = 0;
894 
895 	ocs_assert(seq->payload, -1);
896 	cmnd = seq->payload->dma.virt;
897 
898 	/* perform FCP_CMND validation check(s) */
899 	if (ocs_validate_fcp_cmd(ocs, seq)) {
900 		return -1;
901 	}
902 
903 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
904 	if (lun == UINT64_MAX) {
905 		return -1;
906 	}
907 
908 	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
909 	if (io == NULL) {
910 		uint32_t send_frame_capable;
911 
912 		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
913 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
914 		if ((rc == 0) && send_frame_capable) {
915 			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
916 			if (rc) {
917 				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
918 			}
919 			return rc;
920 		}
921 
922 		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
923 		return -1;
924 	}
925 	io->hw_priv = seq->hw_priv;
926 
927 	/* Check if the CMD has vmheader. */
928 	io->app_id = 0;
929 	df_ctl = fchdr->df_ctl;
930 	if (df_ctl & FC_DFCTL_DEVICE_HDR_16_MASK) {
931 		uint32_t vmhdr_offset = 0;
932 		/* Presence of VMID. Get the vm header offset. */
933 		if (df_ctl & FC_DFCTL_ESP_HDR_MASK) {
934 			vmhdr_offset += FC_DFCTL_ESP_HDR_SIZE;
935 			ocs_log_err(ocs, "ESP Header present. Fix ESP Size.\n");
936 		}
937 
938 		if (df_ctl & FC_DFCTL_NETWORK_HDR_MASK) {
939 			vmhdr_offset += FC_DFCTL_NETWORK_HDR_SIZE;
940 		}
941 		vhdr = (fc_vm_header_t *) ((char *)fchdr + sizeof(fc_header_t) + vmhdr_offset);
942 		io->app_id = ocs_be32toh(vhdr->src_vmid);
943 	}
944 
945 	/* RQ pair, if we got here, SIT=1 */
946 	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, TRUE);
947 
948 	if (cmnd->task_management_flags) {
949 		ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun);
950 	} else {
951 		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
952 
953 		/* can return failure for things like task set full and UAs,
954 		 * no need to treat as a dropped frame if rc != 0
955 		 */
956 		ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb,
957 				  sizeof(cmnd->fcp_cdb) +
958 				  (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
959 				  flags);
960 	}
961 
962 	/* successfully processed, now return RX buffer to the chip */
963 	ocs_hw_sequence_free(&ocs->hw, seq);
964 	return 0;
965 }
966 
967 /**
968  * @ingroup unsol
969  * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy).
970  *
971  * <h3 class="desc">Description</h3>
972  * Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready.
973  *
974  * @param node Node that originated the frame.
975  * @param seq Header/payload sequence buffers.
976  *
977  * @return Returns 0 if frame processed and RX buffers cleaned
978  * up appropriately, -1 if frame not handled and RX buffers need
979  * to be returned.
980  */
981 static int32_t
982 ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq)
983 {
984 	ocs_t *ocs = node->ocs;
985 	fc_header_t	*fchdr = seq->header->dma.virt;
986 	fcp_cmnd_iu_t	*cmnd = NULL;
987 	ocs_io_t	*io = NULL;
988 	uint64_t	lun = UINT64_MAX;
989 	int32_t		rc = 0;
990 
991 	ocs_assert(seq->payload, -1);
992 	cmnd = seq->payload->dma.virt;
993 
994 	/* perform FCP_CMND validation check(s) */
995 	if (ocs_validate_fcp_cmd(ocs, seq)) {
996 		return -1;
997 	}
998 
999 	/* make sure first burst or auto xfer_rdy is enabled */
1000 	if (!seq->auto_xrdy) {
1001 		node_printf(node, "IO is not Auto Xfr Rdy assisted, dropping FCP_CMND\n");
1002 		return -1;
1003 	}
1004 
1005 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
1006 
1007 	/* TODO should there be a check here for an error? Why do any of the
1008 	 * below if the LUN decode failed? */
1009 	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
1010 	if (io == NULL) {
1011 		uint32_t send_frame_capable;
1012 
1013 		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
1014 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
1015 		if ((rc == 0) && send_frame_capable) {
1016 			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
1017 			if (rc) {
1018 				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
1019 			}
1020 			return rc;
1021 		}
1022 
1023 		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
1024 		return -1;
1025 	}
1026 	io->hw_priv = seq->hw_priv;
1027 
1028 	/* RQ pair, if we got here, SIT=0 */
1029 	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, FALSE);
1030 
1031 	if (cmnd->task_management_flags) {
1032 		/* first burst command better not be a TMF */
1033 		ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags);
1034 		ocs_scsi_io_free(io);
1035 		return -1;
1036 	} else {
1037 		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
1038 
1039 		/* activate HW IO */
1040 		ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio);
1041 		io->hio = seq->hio;
1042 		seq->hio->ul_io = io;
1043 		io->tgt_task_tag = seq->hio->indicator;
1044 
1045 		/* Note: Data buffers are received in another call */
1046 		ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb,
1047 					      sizeof(cmnd->fcp_cdb) +
1048 					      (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
1049 					      flags, NULL, 0);
1050 	}
1051 
1052 	/* FCP_CMND processed, return RX buffer to the chip */
1053 	ocs_hw_sequence_free(&ocs->hw, seq);
1054 	return 0;
1055 }
1056 
1057 /**
1058  * @ingroup unsol
1059  * @brief Dispatch FCP data frames for auto xfer ready.
1060  *
1061  * <h3 class="desc">Description</h3>
1062  * Dispatch unsolicited FCP data frames (auto xfer ready)
1063  * with the sequence initiative transferred (SIT=1).
1064  *
1065  * @param node Node that originated the frame.
1066  * @param seq Header/payload sequence buffers.
1067  *
1068  * @return Returns 0 if frame processed and RX buffers cleaned
1069  * up appropriately, -1 if frame not handled.
1070  */
1071 
1072 static int32_t
1073 ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq)
1074 {
1075 	ocs_t *ocs = node->ocs;
1076 	ocs_hw_t *hw = &ocs->hw;
1077 	ocs_hw_io_t *hio = seq->hio;
1078 	ocs_io_t	*io;
1079 	ocs_dma_t fburst[1];
1080 
1081 	ocs_assert(seq->payload, -1);
1082 	ocs_assert(hio, -1);
1083 
1084 	io = hio->ul_io;
1085 	if (io == NULL) {
1086 		ocs_log_err(ocs, "data received for NULL io, xri=0x%x\n",
1087 			    hio->indicator);
1088 		return -1;
1089 	}
1090 
1091 	/*
1092 	 * We only support data completions for auto xfer ready. Make sure
1093 	 * this is a port owned XRI.
1094 	 */
1095 	if (!ocs_hw_is_io_port_owned(hw, seq->hio)) {
1096 		ocs_log_err(ocs, "data received for host owned XRI, xri=0x%x\n",
1097 			    hio->indicator);
1098 		return -1;
1099 	}
1100 
1101 	/* For error statuses, pass the error to the target back end */
1102 	if (seq->status != OCS_HW_UNSOL_SUCCESS) {
1103 		ocs_log_err(ocs, "data with status 0x%x received, xri=0x%x\n",
1104 			    seq->status, hio->indicator);
1105 
1106 		/*
1107 		 * In this case, there is an existing, in-use HW IO that
1108 		 * first may need to be aborted. Then, the backend will be
1109 		 * notified of the error while waiting for the data.
1110 		 */
1111 		ocs_port_owned_abort(ocs, seq->hio);
1112 
1113 		/*
1114 		 * HW IO has already been allocated and is waiting for data.
1115 		 * Need to tell backend that an error has occurred.
1116 		 */
1117 		ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, OCS_SCSI_FIRST_BURST_ERR, NULL, 0);
1118 		return -1;
1119 	}
1120 
1121 	/* sequence initiative has been transferred */
1122 	io->seq_init = 1;
1123 
1124 	/* convert the array of pointers to the correct type, to send to backend */
1125 	fburst[0] = seq->payload->dma;
1126 
1127 	/* the amount of first burst data was saved as the "accumulated sequence length" */
1128 	io->transferred = seq->payload->dma.len;
1129 
1130 	if (ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, 0,
1131 					  fburst, io->transferred)) {
1132 		ocs_log_err(ocs, "error passing first burst, xri=0x%x, oxid=0x%x\n",
1133 			    hio->indicator, io->init_task_tag);
1134 	}
1135 
1136 	/* Free the header and all the accumulated payload buffers */
1137 	ocs_hw_sequence_free(&ocs->hw, seq);
1138 	return 0;
1139 }
1140 
1141 
1142 /**
1143  * @ingroup unsol
1144  * @brief Handle the callback for the TMF FUNCTION_REJECTED response.
1145  *
1146  * <h3 class="desc">Description</h3>
1147  * Handle the callback of a send TMF FUNCTION_REJECTED response request.
1148  *
1149  * @param io Pointer to the IO context.
1150  * @param scsi_status Status of the response.
1151  * @param flags Callback flags.
1152  * @param arg Callback argument.
1153  *
1154  * @return Returns 0 on success, or a negative error value on failure.
1155  */
1156 
1157 static int32_t
1158 ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg)
1159 {
1160 	ocs_scsi_io_free(io);
1161 	return 0;
1162 }
1163 
1164 /**
1165  * @brief Return the next FC frame on the given pending list
1166  *
1167  * The next FC frame on the pending list is returned, or NULL
1168  * if the list is empty.
1169  *
1170  * @param pend_list Pending list from which to take the next frame.
1171  * @param list_lock Lock that protects pending list.
1172  *
1173  * @return Returns pointer to the next FC frame, or NULL if the pending frame list
1174  * is empty.
1175  */
1176 static ocs_hw_sequence_t *
1177 ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock)
1178 {
1179 	ocs_hw_sequence_t *frame = NULL;
1180 
1181 	ocs_lock(list_lock);
1182 		frame = ocs_list_remove_head(pend_list);
1183 	ocs_unlock(list_lock);
1184 	return frame;
1185 }
1186 
1187 /**
1188  * @brief Process SEND_FRAME completion callback.
1189  *
1190  * The function is called when a SEND_FRAME request posted by ocs_sframe_common_send()
1191  * has completed. Regardless of the outcome, the request tag and sequence are freed.
1192  *
1193  * @param arg Pointer to originator frame sequence.
1194  * @param cqe Pointer to completion queue entry.
1195  * @param status Status of operation.
1196  *
1197  * @return None.
1198  */
1199 static void
1200 ocs_sframe_common_send_cb(void *arg, uint8_t *cqe, int32_t status)
1201 {
1202 	ocs_hw_send_frame_context_t *ctx = arg;
1203 	ocs_hw_t *hw = ctx->hw;
1204 
1205 	/* Free WQ completion callback */
1206 	ocs_hw_reqtag_free(hw, ctx->wqcb);
1207 
1208 	/* Free sequence */
1209 	ocs_hw_sequence_free(hw, ctx->seq);
1210 }
1211 
1212 /**
1213  * @brief Send a frame, common code
1214  *
1215  * A frame is sent using SEND_FRAME. The R_CTL/F_CTL/TYPE values may be specified, and the
1216  * payload is sent as a single frame.
1217  *
1218  * Memory resources are allocated from RQ buffers contained in the passed-in sequence data.
1219  *
1220  * @param node Pointer to node object.
1221  * @param seq Pointer to sequence object.
1222  * @param r_ctl R_CTL value to place in FC header.
1223  * @param info INFO value to place in FC header.
1224  * @param f_ctl F_CTL value to place in FC header.
1225  * @param type TYPE value to place in FC header.
1226  * @param payload Pointer to payload data
1227  * @param payload_len Length of payload in bytes.
1228  *
1229  * @return Returns 0 on success, or a negative error code value on failure.
1230  */
1231 static int32_t
1232 ocs_sframe_common_send(ocs_node_t *node, ocs_hw_sequence_t *seq, uint8_t r_ctl, uint8_t info, uint32_t f_ctl,
1233 		       uint8_t type, void *payload, uint32_t payload_len)
1234 {
1235 	ocs_t *ocs = node->ocs;
1236 	ocs_hw_t *hw = &ocs->hw;
1237 	ocs_hw_rtn_e rc = 0;
1238 	fc_header_t *behdr = seq->header->dma.virt;
1239 	fc_header_le_t hdr;
1240 	uint32_t s_id = fc_be24toh(behdr->s_id);
1241 	uint32_t d_id = fc_be24toh(behdr->d_id);
1242 	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
1243 	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
1244 	ocs_hw_send_frame_context_t *ctx;
1245 
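	/* The payload RQ buffer of the received sequence is reused as a scratch
	 * heap for the send frame context and the outgoing payload copy.
	 */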
1246 	uint32_t heap_size = seq->payload->dma.size;
1247 	uintptr_t heap_phys_base = seq->payload->dma.phys;
1248 	uint8_t *heap_virt_base = seq->payload->dma.virt;
1249 	uint32_t heap_offset = 0;
1250 
1251 	/* Build the FC header reusing the RQ header DMA buffer */
1252 	/* Build the FC header for the outgoing frame */
1253 	hdr.d_id = s_id;			/* send it back to whoever sent it to us */
1254 	hdr.r_ctl = r_ctl;
1255 	hdr.info = info;
1256 	hdr.s_id = d_id;
1257 	hdr.cs_ctl = 0;
1258 	hdr.f_ctl = f_ctl;
1259 	hdr.type = type;
1260 	hdr.seq_cnt = 0;
1261 	hdr.df_ctl = 0;
1262 
1263 	/*
1264 	 * send_frame_seq_id is an atomic counter; we just let it increment, storing only
1265 	 * the low 8 bits in hdr.seq_id.
1266 	 */
1267 	hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1);
1268 
1269 	hdr.rx_id = rx_id;
1270 	hdr.ox_id = ox_id;
1271 	hdr.parameter = 0;
1272 
1273 	/* Allocate and fill in the send frame request context */
1274 	ctx = (void*)(heap_virt_base + heap_offset);
1275 	heap_offset += sizeof(*ctx);
1276 	ocs_assert(heap_offset < heap_size, -1);
1277 	ocs_memset(ctx, 0, sizeof(*ctx));
1278 
1279 	/* Save sequence */
1280 	ctx->seq = seq;
1281 
1282 	/* Allocate a response payload DMA buffer from the heap */
1283 	ctx->payload.phys = heap_phys_base + heap_offset;
1284 	ctx->payload.virt = heap_virt_base + heap_offset;
1285 	ctx->payload.size = payload_len;
1286 	ctx->payload.len = payload_len;
1287 	heap_offset += payload_len;
1288 	ocs_assert(heap_offset <= heap_size, -1);
1289 
1290 	/* Copy the payload in */
1291 	ocs_memcpy(ctx->payload.virt, payload, payload_len);
1292 
1293 	/* Send */
1294 	rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx,
1295 				ocs_sframe_common_send_cb, ctx);
1296 	if (rc) {
1297 		ocs_log_test(ocs, "ocs_hw_send_frame failed: %d\n", rc);
1298 	}
1299 
1300 	return rc ? -1 : 0;
1301 }
1302 
1303 /**
1304  * @brief Send FCP response using SEND_FRAME
1305  *
1306  * The FCP response is sent using the SEND_FRAME function.
1307  *
1308  * @param node Pointer to node object.
1309  * @param seq Pointer to inbound sequence.
1310  * @param rsp Pointer to response data.
1311  * @param rsp_len Length of response data, in bytes.
1312  *
1313  * @return Returns 0 on success, or a negative error code value on failure.
1314  */
1315 static int32_t
1316 ocs_sframe_send_fcp_rsp(ocs_node_t *node, ocs_hw_sequence_t *seq, void *rsp, uint32_t rsp_len)
1317 {
1318 	return ocs_sframe_common_send(node, seq,
1319 				      FC_RCTL_FC4_DATA,
1320 				      FC_RCTL_INFO_CMD_STATUS,
1321 				      FC_FCTL_EXCHANGE_RESPONDER |
1322 					      FC_FCTL_LAST_SEQUENCE |
1323 					      FC_FCTL_END_SEQUENCE |
1324 					      FC_FCTL_SEQUENCE_INITIATIVE,
1325 				      FC_TYPE_FCP,
1326 				      rsp, rsp_len);
1327 }
1328 
1329 /**
1330  * @brief Send task set full response
1331  *
1332  * Return a task set full or busy response using send frame.
1333  *
1334  * @param node Pointer to node object.
1335  * @param seq Pointer to originator frame sequence.
1336  *
1337  * @return Returns 0 on success, or a negative error code value on failure.
1338  */
1339 static int32_t
1340 ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq)
1341 {
1342 	fcp_rsp_iu_t fcprsp;
1343 	fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt;
1344 	uint32_t *fcp_dl_ptr;
1345 	uint32_t fcp_dl;
1346 	int32_t rc = 0;
1347 
1348 	/* extract FCP_DL from the FCP command */
1349 	fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl));
1350 	fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length;
1351 	fcp_dl = ocs_be32toh(*fcp_dl_ptr);
1352 
1353 	/* construct task set full or busy response */
1354 	ocs_memset(&fcprsp, 0, sizeof(fcprsp));
1355 	ocs_lock(&node->active_ios_lock);
1356 		fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL;
1357 	ocs_unlock(&node->active_ios_lock);
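	/* no data was transferred for this command, so report the full FCP_DL as the residual */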
1358 	*((uint32_t*)&fcprsp.fcp_resid) = fcp_dl;
1359 
1360 	/* send it using send_frame */
1361 	rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data));
1362 	if (rc) {
1363 		ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc);
1364 	}
1365 	return rc;
1366 }
1367 
1368 /**
1369  * @brief Send BA_ACC using SEND_FRAME
1370  *
1371  * A BA_ACC is sent using SEND_FRAME
1372  *
1373  * @param node Pointer to node object.
1374  * @param seq Pointer to originator frame sequence.
1375  *
1376  * @return Returns 0 on success, or a negative error code value on failure.
1377  */
1378 int32_t
1379 ocs_sframe_send_bls_acc(ocs_node_t *node, ocs_hw_sequence_t *seq)
1380 {
1381 	fc_header_t *behdr = seq->header->dma.virt;
1382 	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
1383 	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
1384 	fc_ba_acc_payload_t acc = {0};
1385 
1386 	acc.ox_id = ocs_htobe16(ox_id);
1387 	acc.rx_id = ocs_htobe16(rx_id);
1388 	acc.low_seq_cnt = UINT16_MAX;
1389 	acc.high_seq_cnt = UINT16_MAX;
1390 
1391 	return ocs_sframe_common_send(node, seq,
1392 				      FC_RCTL_BLS,
1393 				      FC_RCTL_INFO_UNSOL_DATA,
1394 				      FC_FCTL_EXCHANGE_RESPONDER |
1395 					      FC_FCTL_LAST_SEQUENCE |
1396 					      FC_FCTL_END_SEQUENCE,
1397 				      FC_TYPE_BASIC_LINK,
1398 				      &acc, sizeof(acc));
1399 }
1400