xref: /freebsd/sys/dev/ocs_fc/ocs_unsol.c (revision d4eeb02986980bf33dd56c41ceb9fc5f180c0d47)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * Code to handle unsolicited received FC frames.
37  */
38 
39 /*!
40  * @defgroup unsol Unsolicited Frame Handling
41  */
42 
43 #include "ocs.h"
44 #include "ocs_els.h"
45 #include "ocs_fabric.h"
46 #include "ocs_device.h"
47 
48 #define frame_printf(ocs, hdr, fmt, ...) \
49 	do { \
50 		char s_id_text[16]; \
51 		ocs_node_fcid_display(fc_be24toh((hdr)->s_id), s_id_text, sizeof(s_id_text)); \
52 		ocs_log_debug(ocs, "[%06x.%s] %02x/%04x/%04x: " fmt, fc_be24toh((hdr)->d_id), s_id_text, \
53 			(hdr)->r_ctl, ocs_be16toh((hdr)->ox_id), ocs_be16toh((hdr)->rx_id), ##__VA_ARGS__); \
54 	} while(0)
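
/*
 * Example (illustrative): given a received sequence "seq", the FC header can
 * be logged along with its S_ID/D_ID and exchange IDs as follows:
 *
 *   fc_header_t *hdr = seq->header->dma.virt;
 *   frame_printf(ocs, hdr, "Holding frame\n");
 */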
55 
56 static int32_t ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq);
57 static int32_t ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq);
58 static int32_t ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq);
59 static int32_t ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq);
60 static int32_t ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
61 static int32_t ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq);
62 static int32_t ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg);
63 static ocs_hw_sequence_t *ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock);
64 static uint8_t ocs_node_frames_held(void *arg);
65 static uint8_t ocs_domain_frames_held(void *arg);
66 static int32_t ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock);
67 static int32_t ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq);
68 
69 #define OCS_MAX_FRAMES_BEFORE_YIELDING 10000
70 
71 /**
72  * @brief Process the RQ circular buffer and process the incoming frames.
73  *
74  * @param mythread Pointer to thread object.
75  *
76  * @return Returns 0 on success, or a non-zero value on failure.
77  */
78 int32_t
79 ocs_unsol_rq_thread(ocs_thread_t *mythread)
80 {
81 	ocs_xport_rq_thread_info_t *thread_data = mythread->arg;
82 	ocs_t *ocs = thread_data->ocs;
83 	ocs_hw_sequence_t *seq;
84 	uint32_t yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
85 
86 	ocs_log_debug(ocs, "%s running\n", mythread->name);
87 	while (!ocs_thread_terminate_requested(mythread)) {
88 		seq = ocs_cbuf_get(thread_data->seq_cbuf, 100000);
89 		if (seq == NULL) {
90 			/* Prevent soft lockups by yielding the CPU */
91 			ocs_thread_yield(&thread_data->thread);
92 			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
93 			continue;
94 		}
95 		/* Note: Always returns 0 */
96 		ocs_unsol_process((ocs_t*)seq->hw->os, seq);
97 
98 		/* We have to prevent CPU soft lockups, so just yield the CPU after x frames. */
99 		if (--yield_count == 0) {
100 			ocs_thread_yield(&thread_data->thread);
101 			yield_count = OCS_MAX_FRAMES_BEFORE_YIELDING;
102 		}
103 	}
104 	ocs_log_debug(ocs, "%s exiting\n", mythread->name);
105 	thread_data->thread_started = FALSE;
106 	return 0;
107 }
108 
109 /**
110  * @ingroup unsol
111  * @brief Callback function invoked when aborting a port-owned
112  * XRI exchange.
113  *
114  * @return Returns 0.
115  */
116 static int32_t
117 ocs_unsol_abort_cb (ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len, int32_t status, uint32_t ext, void *arg)
118 {
119 	ocs_t *ocs = arg;
120 	ocs_assert(hio, -1);
121 	ocs_assert(arg, -1);
122 	ocs_log_debug(ocs, "xri=0x%x tag=0x%x\n", hio->indicator, hio->reqtag);
123 	ocs_hw_io_free(&ocs->hw, hio);
124 	return 0;
125 }
126 
127 /**
128  * @ingroup unsol
129  * @brief Abort a port-owned (RQ Pair auto XFER RDY) XRI.
130  * @return Returns None.
131  */
132 static void
133 ocs_port_owned_abort(ocs_t *ocs, ocs_hw_io_t *hio)
134 {
135 	ocs_hw_rtn_e hw_rc;
136 	hw_rc = ocs_hw_io_abort(&ocs->hw, hio, FALSE,
137 				  ocs_unsol_abort_cb, ocs);
138 	if ((hw_rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) ||
139 	   (hw_rc == OCS_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED)) {
140 		ocs_log_debug(ocs, "already aborted XRI 0x%x\n", hio->indicator);
141 	} else if (hw_rc != OCS_HW_RTN_SUCCESS) {
142 		ocs_log_debug(ocs, "Error aborting XRI 0x%x status %d\n",
143 			      hio->indicator, hw_rc);
144 	}
145 }
146 
147 /**
148  * @ingroup unsol
149  * @brief Handle unsolicited FC frames.
150  *
151  * <h3 class="desc">Description</h3>
152  * This function is called from the HW with unsolicited FC frames (FCP, ELS, BLS, etc.).
153  *
154  * @param arg Application-specified callback data.
155  * @param seq Header/payload sequence buffers.
156  *
157  * @return Returns 0 on success, or a negative error value on failure.
158  */
159 
160 int32_t
161 ocs_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq)
162 {
163 	ocs_t *ocs = arg;
164 	ocs_xport_t *xport = ocs->xport;
165 	int32_t rc;
166 
167 	CPUTRACE("");
168 
169 	if (ocs->rq_threads == 0) {
170 		rc = ocs_unsol_process(ocs, seq);
171 	} else {
172 		/* use the ox_id to dispatch this IO to a thread */
173 		fc_header_t *hdr = seq->header->dma.virt;
174 		uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
175 		uint32_t thr_index = ox_id % ocs->rq_threads;
176 
177 		rc = ocs_cbuf_put(xport->rq_thread_info[thr_index].seq_cbuf, seq);
178 	}
179 
180 	if (rc) {
181 		ocs_hw_sequence_free(&ocs->hw, seq);
182 	}
183 
184 	return 0;
185 }
186 
187 /**
188  * @ingroup unsol
189  * @brief Handle unsolicited FC frames.
190  *
191  * <h3 class="desc">Description</h3>
192  * This function is called either from ocs_unsolicited_cb() or ocs_unsol_rq_thread().
193  *
194  * @param ocs Pointer to the ocs structure.
195  * @param seq Header/payload sequence buffers.
196  *
197  * @return Returns 0 on success, or a negative error value on failure.
198  */
199 static int32_t
200 ocs_unsol_process(ocs_t *ocs, ocs_hw_sequence_t *seq)
201 {
202 	ocs_xport_fcfi_t *xport_fcfi = NULL;
203 	ocs_domain_t *domain;
204 	uint8_t seq_fcfi = seq->fcfi;
205 
206 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
207 	if (ocs->hw.workaround.override_fcfi) {
208 		if (ocs->hw.first_domain_idx > -1) {
209 			seq_fcfi = ocs->hw.first_domain_idx;
210 		}
211 	}
212 
213 	/* Range check seq->fcfi */
214 	if (seq_fcfi < ARRAY_SIZE(ocs->xport->fcfi)) {
215 		xport_fcfi = &ocs->xport->fcfi[seq_fcfi];
216 	}
217 
218 	/* If the transport FCFI entry is NULL, then drop the frame */
219 	if (xport_fcfi == NULL) {
220 		ocs_log_test(ocs, "FCFI %d is not valid, dropping frame\n", seq->fcfi);
221 		if (seq->hio != NULL) {
222 			ocs_port_owned_abort(ocs, seq->hio);
223 		}
224 
225 		ocs_hw_sequence_free(&ocs->hw, seq);
226 		return 0;
227 	}
228 	domain = ocs_hw_domain_get(&ocs->hw, seq_fcfi);
229 
230 	/*
231 	 * If we are holding frames or the domain is not yet registered or
232 	 * there are already frames on the pending list,
233 	 * then add the new frame to the pending list
234 	 */
235 	if (domain == NULL ||
236 	    xport_fcfi->hold_frames ||
237 	    !ocs_list_empty(&xport_fcfi->pend_frames)) {
238 		ocs_lock(&xport_fcfi->pend_frames_lock);
239 			ocs_list_add_tail(&xport_fcfi->pend_frames, seq);
240 		ocs_unlock(&xport_fcfi->pend_frames_lock);
241 
242 		if (domain != NULL) {
243 			/* immediately process pending frames */
244 			ocs_domain_process_pending(domain);
245 		}
246 	} else {
247 		/*
248 		 * We are not holding frames and the pending list is empty, so just process the frame.
249 		 * A non-zero return means the frame was not handled, so clean up.
250 		 */
251 		if (ocs_domain_dispatch_frame(domain, seq)) {
252 			if (seq->hio != NULL) {
253 				ocs_port_owned_abort(ocs, seq->hio);
254 			}
255 			ocs_hw_sequence_free(&ocs->hw, seq);
256 		}
257 	}
258 	return 0;
259 }
260 
261 /**
262  * @ingroup unsol
263  * @brief Process pending frames queued to the given node.
264  *
265  * <h3 class="desc">Description</h3>
266  * Frames that are queued for the \c node are dispatched and returned
267  * to the RQ.
268  *
269  * @param node Node of the queued frames that are to be dispatched.
270  *
271  * @return Returns 0 on success, or a negative error value on failure.
272  */
273 
274 int32_t
275 ocs_process_node_pending(ocs_node_t *node)
276 {
277 	ocs_t *ocs = node->ocs;
278 	ocs_hw_sequence_t *seq = NULL;
279 	uint32_t pend_frames_processed = 0;
280 
281 	for (;;) {
282 		/* need to check for the hold frames condition after each frame is processed
283 		 * because any given frame could cause a transition to a state that
284 		 * holds frames
285 		 */
286 		if (ocs_node_frames_held(node)) {
287 			break;
288 		}
289 
290 		/* Get next frame/sequence */
291 		ocs_lock(&node->pend_frames_lock);
292 			seq = ocs_list_remove_head(&node->pend_frames);
293 			if (seq == NULL) {
294 				pend_frames_processed = node->pend_frames_processed;
295 				node->pend_frames_processed = 0;
296 				ocs_unlock(&node->pend_frames_lock);
297 				break;
298 			}
299 			node->pend_frames_processed++;
300 		ocs_unlock(&node->pend_frames_lock);
301 
302 		/* now dispatch frame(s) to dispatch function */
303 		if (ocs_node_dispatch_frame(node, seq)) {
304 			if (seq->hio != NULL) {
305 				ocs_port_owned_abort(ocs, seq->hio);
306 			}
307 			ocs_hw_sequence_free(&ocs->hw, seq);
308 		}
309 	}
310 
311 	if (pend_frames_processed != 0) {
312 		ocs_log_debug(ocs, "%u node frames held and processed\n", pend_frames_processed);
313 	}
314 
315 	return 0;
316 }
317 
318 /**
319  * @ingroup unsol
320  * @brief Process pending frames queued to the given domain.
321  *
322  * <h3 class="desc">Description</h3>
323  * Frames that are queued for the \c domain are dispatched and
324  * returned to the RQ.
325  *
326  * @param domain Domain of the queued frames that are to be
327  *		 dispatched.
328  *
329  * @return Returns 0 on success, or a negative error value on failure.
330  */
331 
332 int32_t
333 ocs_domain_process_pending(ocs_domain_t *domain)
334 {
335 	ocs_t *ocs = domain->ocs;
336 	ocs_xport_fcfi_t *xport_fcfi;
337 	ocs_hw_sequence_t *seq = NULL;
338 	uint32_t pend_frames_processed = 0;
339 
340 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
341 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
342 
343 	for (;;) {
344 		/* need to check for the hold frames condition after each frame is processed
345 		 * because any given frame could cause a transition to a state that
346 		 * holds frames
347 		 */
348 		if (ocs_domain_frames_held(domain)) {
349 			break;
350 		}
351 
352 		/* Get next frame/sequence */
353 		ocs_lock(&xport_fcfi->pend_frames_lock);
354 			seq = ocs_list_remove_head(&xport_fcfi->pend_frames);
355 			if (seq == NULL) {
356 				pend_frames_processed = xport_fcfi->pend_frames_processed;
357 				xport_fcfi->pend_frames_processed = 0;
358 				ocs_unlock(&xport_fcfi->pend_frames_lock);
359 				break;
360 			}
361 			xport_fcfi->pend_frames_processed++;
362 		ocs_unlock(&xport_fcfi->pend_frames_lock);
363 
364 		/* now dispatch frame(s) to dispatch function */
365 		if (ocs_domain_dispatch_frame(domain, seq)) {
366 			if (seq->hio != NULL) {
367 				ocs_port_owned_abort(ocs, seq->hio);
368 			}
369 			ocs_hw_sequence_free(&ocs->hw, seq);
370 		}
371 	}
372 	if (pend_frames_processed != 0) {
373 		ocs_log_debug(ocs, "%u domain frames held and processed\n", pend_frames_processed);
374 	}
375 	return 0;
376 }
377 
378 /**
379  * @ingroup unsol
380  * @brief Purge given pending list
381  *
382  * <h3 class="desc">Description</h3>
383  * Frames that are queued on the given pending list are
384  * discarded and returned to the RQ.
385  *
386  * @param ocs Pointer to ocs object.
387  * @param pend_list Pending list to be purged.
388  * @param list_lock Lock that protects pending list.
389  *
390  * @return Returns 0 on success, or a negative error value on failure.
391  */
392 
393 static int32_t
394 ocs_purge_pending(ocs_t *ocs, ocs_list_t *pend_list, ocs_lock_t *list_lock)
395 {
396 	ocs_hw_sequence_t *frame;
397 
398 	for (;;) {
399 		frame = ocs_frame_next(pend_list, list_lock);
400 		if (frame == NULL) {
401 			break;
402 		}
403 
404 		frame_printf(ocs, (fc_header_t*) frame->header->dma.virt, "Discarding held frame\n");
405 		if (frame->hio != NULL) {
406 			ocs_port_owned_abort(ocs, frame->hio);
407 		}
408 		ocs_hw_sequence_free(&ocs->hw, frame);
409 	}
410 
411 	return 0;
412 }
413 
414 /**
415  * @ingroup unsol
416  * @brief Purge node's pending (queued) frames.
417  *
418  * <h3 class="desc">Description</h3>
419  * Frames that are queued for the \c node are discarded and returned
420  * to the RQ.
421  *
422  * @param node Node of the queued frames that are to be discarded.
423  *
424  * @return Returns 0 on success, or a negative error value on failure.
425  */
426 
427 int32_t
428 ocs_node_purge_pending(ocs_node_t *node)
429 {
430 	return ocs_purge_pending(node->ocs, &node->pend_frames, &node->pend_frames_lock);
431 }
432 
433 /**
434  * @ingroup unsol
435  * @brief Purge xport's pending (queued) frames.
436  *
437  * <h3 class="desc">Description</h3>
438  * Frames that are queued for the \c xport are discarded and
439  * returned to the RQ.
440  *
441  * @param domain Pointer to domain object.
442  *
443  * @return Returns 0 on success, or a negative error value on failure.
444  */
445 
446 int32_t
447 ocs_domain_purge_pending(ocs_domain_t *domain)
448 {
449 	ocs_t *ocs = domain->ocs;
450 	ocs_xport_fcfi_t *xport_fcfi;
451 
452 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, -1);
453 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
454 	return ocs_purge_pending(domain->ocs,
455 				 &xport_fcfi->pend_frames,
456 				 &xport_fcfi->pend_frames_lock);
457 }
458 
459 /**
460  * @ingroup unsol
461  * @brief Check if node's pending frames are held.
462  *
463  * @param arg Node for which the pending frame hold condition is
464  * checked.
465  *
466  * @return Returns 1 if node is holding pending frames, or 0
467  * if not.
468  */
469 
470 static uint8_t
471 ocs_node_frames_held(void *arg)
472 {
473 	ocs_node_t *node = (ocs_node_t *)arg;
474 	return node->hold_frames;
475 }
476 
477 /**
478  * @ingroup unsol
479  * @brief Check if domain's pending frames are held.
480  *
481  * @param arg Domain for which the pending frame hold condition is
482  * checked.
483  *
484  * @return Returns 1 if domain is holding pending frames, or 0
485  * if not.
486  */
487 
488 static uint8_t
489 ocs_domain_frames_held(void *arg)
490 {
491 	ocs_domain_t *domain = (ocs_domain_t *)arg;
492 	ocs_t *ocs = domain->ocs;
493 	ocs_xport_fcfi_t *xport_fcfi;
494 
495 	ocs_assert(domain != NULL, 1);
496 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI, 1);
497 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
498 	return xport_fcfi->hold_frames;
499 }
500 
501 /**
502  * @ingroup unsol
503  * @brief Globally (at xport level) hold unsolicited frames.
504  *
505  * <h3 class="desc">Description</h3>
506  * This function places a hold on processing unsolicited FC
507  * frames queued to the xport pending list.
508  *
509  * @param domain Pointer to domain object.
510  *
511  * @return Returns None.
512  */
513 
514 void
515 ocs_domain_hold_frames(ocs_domain_t *domain)
516 {
517 	ocs_t *ocs = domain->ocs;
518 	ocs_xport_fcfi_t *xport_fcfi;
519 
520 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
521 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
522 	if (!xport_fcfi->hold_frames) {
523 		ocs_log_debug(domain->ocs, "hold frames set for FCFI %d\n",
524 			      domain->fcf_indicator);
525 		xport_fcfi->hold_frames = 1;
526 	}
527 }
528 
529 /**
530  * @ingroup unsol
531  * @brief Clear hold on unsolicited frames.
532  *
533  * <h3 class="desc">Description</h3>
534  * This function clears the hold on processing unsolicited FC
535  * frames queued to the domain pending list.
536  *
537  * @param domain Pointer to domain object.
538  *
539  * @return Returns None.
540  */
541 
542 void
543 ocs_domain_accept_frames(ocs_domain_t *domain)
544 {
545 	ocs_t *ocs = domain->ocs;
546 	ocs_xport_fcfi_t *xport_fcfi;
547 
548 	ocs_assert(domain->fcf_indicator < SLI4_MAX_FCFI);
549 	xport_fcfi = &ocs->xport->fcfi[domain->fcf_indicator];
550 	if (xport_fcfi->hold_frames == 1) {
551 		ocs_log_debug(domain->ocs, "hold frames cleared for FCFI %d\n",
552 			      domain->fcf_indicator);
553 	}
554 	xport_fcfi->hold_frames = 0;
555 	ocs_domain_process_pending(domain);
556 }
557 
558 /**
559  * @ingroup unsol
560  * @brief Dispatch unsolicited FC frame.
561  *
562  * <h3 class="desc">Description</h3>
563  * This function processes an unsolicited FC frame queued at the
564  * domain level.
565  *
566  * @param arg Pointer to ocs object.
567  * @param seq Header/payload sequence buffers.
568  *
569  * @return Returns 0 if frame processed and RX buffers cleaned
570  * up appropriately, -1 if frame not handled.
571  */
572 
573 static __inline int32_t
574 ocs_domain_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
575 {
576 	ocs_domain_t *domain = (ocs_domain_t *)arg;
577 	ocs_t *ocs = domain->ocs;
578 	fc_header_t *hdr;
579 	uint32_t s_id;
580 	uint32_t d_id;
581 	ocs_node_t *node = NULL;
582 	ocs_sport_t *sport = NULL;
583 
584 	ocs_assert(seq->header, -1);
585 	ocs_assert(seq->header->dma.virt, -1);
586 	ocs_assert(seq->payload->dma.virt, -1);
587 
588 	hdr = seq->header->dma.virt;
589 
590 	/* extract the s_id and d_id */
591 	s_id = fc_be24toh(hdr->s_id);
592 	d_id = fc_be24toh(hdr->d_id);
593 
594 	sport = domain->sport;
595 	if (sport == NULL) {
596 		frame_printf(ocs, hdr, "phy sport for FC ID 0x%06x is NULL, dropping frame\n", d_id);
597 		return -1;
598 	}
599 
600 	if (sport->fc_id != d_id) {
601 		/* Not a physical port IO; look up the sport associated with the NPIV port */
602 		sport = ocs_sport_find(domain, d_id); /* Look up without lock */
603 		if (sport == NULL) {
604 			if (hdr->type == FC_TYPE_FCP) {
605 				/* Drop frame */
606 				ocs_log_warn(ocs, "unsolicited FCP frame with invalid d_id x%x, dropping\n",
607 					     d_id);
608 				return -1;
609 			} else {
610 				/* p2p will use this case */
611 				sport = domain->sport;
612 			}
613 		}
614 	}
615 
616 	/* Lookup the node given the remote s_id */
617 	node = ocs_node_find(sport, s_id);
618 
619 	/* If not found, then create a new node */
620 	if (node == NULL) {
621 		/* If this is solicited data or control based on R_CTL and there is no node context,
622 		 * then we can drop the frame
623 		 */
624 		if ((hdr->r_ctl == FC_RCTL_FC4_DATA) && (
625 		    (hdr->info == FC_RCTL_INFO_SOL_DATA) || (hdr->info == FC_RCTL_INFO_SOL_CTRL))) {
626 			ocs_log_debug(ocs, "solicited data/ctrl frame without node, dropping\n");
627 			return -1;
628 		}
629 		node = ocs_node_alloc(sport, s_id, FALSE, FALSE);
630 		if (node == NULL) {
631 			ocs_log_err(ocs, "ocs_node_alloc() failed\n");
632 			return -1;
633 		}
634 		/* don't send PLOGI on ocs_d_init entry */
635 		ocs_node_init_device(node, FALSE);
636 	}
637 
638 	if (node->hold_frames || !ocs_list_empty((&node->pend_frames))) {
639 		/* TODO: info log level
640 		frame_printf(ocs, hdr, "Holding frame\n");
641 		*/
642 		/* add frame to node's pending list */
643 		ocs_lock(&node->pend_frames_lock);
644 			ocs_list_add_tail(&node->pend_frames, seq);
645 		ocs_unlock(&node->pend_frames_lock);
646 
647 		return 0;
648 	}
649 
650 	/* now dispatch frame to the node frame handler */
651 	return ocs_node_dispatch_frame(node, seq);
652 }
653 
654 /**
655  * @ingroup unsol
656  * @brief Dispatch a frame.
657  *
658  * <h3 class="desc">Description</h3>
659  * A frame is dispatched from the \c node to the handler.
660  *
661  * @param arg Node that originated the frame.
662  * @param seq Header/payload sequence buffers.
663  *
664  * @return Returns 0 if frame processed and RX buffers cleaned
665  * up appropriately, -1 if frame not handled.
666  */
667 static int32_t
668 ocs_node_dispatch_frame(void *arg, ocs_hw_sequence_t *seq)
669 {
670 
671 	fc_header_t *hdr = seq->header->dma.virt;
672 	uint32_t port_id;
673 	ocs_node_t *node = (ocs_node_t *)arg;
674 	int32_t rc = -1;
675 	int32_t sit_set = 0;
676 
677 	port_id = fc_be24toh(hdr->s_id);
678 	ocs_assert(port_id == node->rnode.fc_id, -1);
679 
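	/*
	 * Only the final frame of a sequence (END_SEQUENCE set) is dispatched;
	 * anything else is logged and dropped below. SIT (sequence initiative
	 * transferred) indicates that sequence initiative now rests with us.
	 */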
680 	if (fc_be24toh(hdr->f_ctl) & FC_FCTL_END_SEQUENCE) {
681 		/* if SIT is set */
682 		if (fc_be24toh(hdr->f_ctl) & FC_FCTL_SEQUENCE_INITIATIVE) {
683 			sit_set = 1;
684 		}
685 		switch (hdr->r_ctl) {
686 		case FC_RCTL_ELS:
687 			if (sit_set) {
688 				rc = ocs_node_recv_els_frame(node, seq);
689 			}
690 			break;
691 
692 		case FC_RCTL_BLS:
693 			if ((sit_set) && (hdr->info == FC_INFO_ABTS)) {
694 				rc = ocs_node_recv_abts_frame(node, seq);
695 			} else {
696 				rc = ocs_node_recv_bls_no_sit(node, seq);
697 			}
698 			break;
699 
700 		case FC_RCTL_FC4_DATA:
701 			switch (hdr->type) {
702 			case FC_TYPE_FCP:
703 				if (hdr->info == FC_RCTL_INFO_UNSOL_CMD) {
704 					if (node->fcp_enabled) {
705 						if (sit_set) {
706 							rc = ocs_dispatch_fcp_cmd(node, seq);
707 						} else {
708 							/* send the auto xfer ready command */
709 							rc = ocs_dispatch_fcp_cmd_auto_xfer_rdy(node, seq);
710 						}
711 					} else {
712 						rc = ocs_node_recv_fcp_cmd(node, seq);
713 					}
714 				} else if (hdr->info == FC_RCTL_INFO_SOL_DATA) {
715 					if (sit_set) {
716 						rc = ocs_dispatch_fcp_data(node, seq);
717 					}
718 				}
719 				break;
720 			case FC_TYPE_GS:
721 				if (sit_set) {
722 					rc = ocs_node_recv_ct_frame(node, seq);
723 				}
724 				break;
725 			default:
726 				break;
727 			}
728 			break;
729 		}
730 	} else {
731 		node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
732 			    ocs_htobe32(((uint32_t *)hdr)[0]),
733 			    ocs_htobe32(((uint32_t *)hdr)[1]),
734 			    ocs_htobe32(((uint32_t *)hdr)[2]),
735 			    ocs_htobe32(((uint32_t *)hdr)[3]),
736 			    ocs_htobe32(((uint32_t *)hdr)[4]),
737 			    ocs_htobe32(((uint32_t *)hdr)[5]));
738 	}
739 	return rc;
740 }
741 
742 /**
743  * @ingroup unsol
744  * @brief Dispatch an unsolicited FCP_CMND task management request (RQ Pair).
745  *
746  * <h3 class="desc">Description</h3>
747  * Dispatch an unsolicited FCP_CMND task management function (called from the device node state machine).
748  *
749  * @param io Pointer to the IO context.
750  * @param task_management_flags Task management flags from the FCP_CMND frame.
751  * @param node Node that originated the frame.
752  * @param lun 32-bit LUN from FCP_CMND frame.
753  *
754  * @return Returns None.
755  */
756 
757 static void
758 ocs_dispatch_unsolicited_tmf(ocs_io_t *io, uint8_t task_management_flags, ocs_node_t *node, uint64_t lun)
759 {
760 	uint32_t i;
761 	struct {
762 		uint32_t mask;
763 		ocs_scsi_tmf_cmd_e cmd;
764 	} tmflist[] = {
765 		{FCP_QUERY_TASK_SET,		OCS_SCSI_TMF_QUERY_TASK_SET},
766 		{FCP_ABORT_TASK_SET,		OCS_SCSI_TMF_ABORT_TASK_SET},
767 		{FCP_CLEAR_TASK_SET,		OCS_SCSI_TMF_CLEAR_TASK_SET},
768 		{FCP_QUERY_ASYNCHRONOUS_EVENT,	OCS_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT},
769 		{FCP_LOGICAL_UNIT_RESET,	OCS_SCSI_TMF_LOGICAL_UNIT_RESET},
770 		{FCP_TARGET_RESET,		OCS_SCSI_TMF_TARGET_RESET},
771 		{FCP_CLEAR_ACA,			OCS_SCSI_TMF_CLEAR_ACA}};
772 
773 	io->exp_xfer_len = 0; /* BUG 32235 */
774 
775 	for (i = 0; i < ARRAY_SIZE(tmflist); i ++) {
776 		if (tmflist[i].mask & task_management_flags) {
777 			io->tmf_cmd = tmflist[i].cmd;
778 			ocs_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
779 			break;
780 		}
781 	}
782 	if (i == ARRAY_SIZE(tmflist)) {
783 		/* Not handled */
784 		node_printf(node, "TMF x%x rejected\n", task_management_flags);
785 		ocs_scsi_send_tmf_resp(io, OCS_SCSI_TMF_FUNCTION_REJECTED, NULL, ocs_fc_tmf_rejected_cb, NULL);
786 	}
787 }
788 
789 static int32_t
790 ocs_validate_fcp_cmd(ocs_t *ocs, ocs_hw_sequence_t *seq)
791 {
792 	size_t		exp_payload_len = 0;
793 	fcp_cmnd_iu_t *cmnd = seq->payload->dma.virt;
794 	exp_payload_len = sizeof(fcp_cmnd_iu_t) - 16 + cmnd->additional_fcp_cdb_length;
795 
796 	/*
797 	 * If we received less than FCP_CMND_IU bytes, assume that the frame is
798 	 * corrupted in some way and drop it. This was seen when jamming the FCTL
799 	 * fill bytes field.
800 	 */
801 	if (seq->payload->dma.len < exp_payload_len) {
802 		fc_header_t	*fchdr = seq->header->dma.virt;
803 		ocs_log_debug(ocs, "dropping ox_id %04x with payload length (%zd) less than expected (%zd)\n",
804 			      ocs_be16toh(fchdr->ox_id), seq->payload->dma.len,
805 			      exp_payload_len);
806 		return -1;
807 	}
808 	return 0;
809 
810 }
811 
812 static void
813 ocs_populate_io_fcp_cmd(ocs_io_t *io, fcp_cmnd_iu_t *cmnd, fc_header_t *fchdr, uint8_t sit)
814 {
815 	uint32_t	*fcp_dl;
816 	io->init_task_tag = ocs_be16toh(fchdr->ox_id);
817 	/* Note: tgt_task_tag and hw_tag are set when the HW IO is allocated */
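	/*
	 * FCP_DL follows the CDB plus any additional CDB bytes. Since fcp_dl is
	 * a uint32_t pointer, adding additional_fcp_cdb_length (specified in
	 * 4-byte words) steps past the additional CDB bytes to the FCP_DL field.
	 */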
818 	fcp_dl = (uint32_t*)(&(cmnd->fcp_cdb_and_dl));
819 	fcp_dl += cmnd->additional_fcp_cdb_length;
820 	io->exp_xfer_len = ocs_be32toh(*fcp_dl);
821 	io->transferred = 0;
822 
823 	/* The upper 7 bits of CS_CTL are the frame priority through the SAN.
824 	 * Our assertion here is that the priority given to a frame containing
825 	 * the FCP cmd should be the priority given to ALL frames contained
826 	 * in that IO. Thus we need to save the incoming CS_CTL here.
827 	 */
828 	if (fc_be24toh(fchdr->f_ctl) & FC_FCTL_PRIORITY_ENABLE) {
829 		io->cs_ctl = fchdr->cs_ctl;
830 	} else {
831 		io->cs_ctl = 0;
832 	}
833 	io->seq_init = sit;
834 }
835 
836 static uint32_t
837 ocs_get_flags_fcp_cmd(fcp_cmnd_iu_t *cmnd)
838 {
839 	uint32_t flags = 0;
840 	switch (cmnd->task_attribute) {
841 	case FCP_TASK_ATTR_SIMPLE:
842 		flags |= OCS_SCSI_CMD_SIMPLE;
843 		break;
844 	case FCP_TASK_ATTR_HEAD_OF_QUEUE:
845 		flags |= OCS_SCSI_CMD_HEAD_OF_QUEUE;
846 		break;
847 	case FCP_TASK_ATTR_ORDERED:
848 		flags |= OCS_SCSI_CMD_ORDERED;
849 		break;
850 	case FCP_TASK_ATTR_ACA:
851 		flags |= OCS_SCSI_CMD_ACA;
852 		break;
853 	case FCP_TASK_ATTR_UNTAGGED:
854 		flags |= OCS_SCSI_CMD_UNTAGGED;
855 		break;
856 	}
857 	flags |= (uint32_t)cmnd->command_priority << OCS_SCSI_PRIORITY_SHIFT;
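	/*
	 * Direction flags are from the responder's (target's) point of view:
	 * initiator write data arrives here (DIR_IN), initiator read data is
	 * sent from here (DIR_OUT).
	 */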
858 	if (cmnd->wrdata)
859 		flags |= OCS_SCSI_CMD_DIR_IN;
860 	if (cmnd->rddata)
861 		flags |= OCS_SCSI_CMD_DIR_OUT;
862 
863 	return flags;
864 }
865 
866 /**
867  * @ingroup unsol
868  * @brief Dispatch unsolicited FCP_CMND frame.
869  *
870  * <h3 class="desc">Description</h3>
871  * Dispatch an unsolicited FCP_CMND frame. This path is always used in
872  * RQ Pair mode, since first burst is not supported.
873  *
874  * @param node Node that originated the frame.
875  * @param seq Header/payload sequence buffers.
876  *
877  * @return Returns 0 if frame processed and RX buffers cleaned
878  * up appropriately, -1 if frame not handled and RX buffers need
879  * to be returned.
880  */
881 static int32_t
882 ocs_dispatch_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
883 {
884 	ocs_t *ocs = node->ocs;
885 	fc_header_t	*fchdr = seq->header->dma.virt;
886 	fcp_cmnd_iu_t	*cmnd = NULL;
887 	ocs_io_t	*io = NULL;
888 	fc_vm_header_t 	*vhdr;
889 	uint8_t 	df_ctl;
890 	uint64_t	lun = UINT64_MAX;
891 	int32_t		rc = 0;
892 
893 	ocs_assert(seq->payload, -1);
894 	cmnd = seq->payload->dma.virt;
895 
896 	/* perform FCP_CMND validation check(s) */
897 	if (ocs_validate_fcp_cmd(ocs, seq)) {
898 		return -1;
899 	}
900 
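	/*
	 * Decode the 8-byte FCP LUN from its big-endian wire format and
	 * swizzle it into the LUN representation expected by CAM.
	 */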
901 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
902 	if (lun == UINT64_MAX) {
903 		return -1;
904 	}
905 
906 	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
907 	if (io == NULL) {
908 		uint32_t send_frame_capable;
909 
910 		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
911 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
912 		if ((rc == 0) && send_frame_capable) {
913 			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
914 			if (rc) {
915 				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
916 			}
917 			return rc;
918 		}
919 
920 		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
921 		return -1;
922 	}
923 	io->hw_priv = seq->hw_priv;
924 
925 	/* Check if the CMD has vmheader. */
926 	io->app_id = 0;
927 	df_ctl = fchdr->df_ctl;
928 	if (df_ctl & FC_DFCTL_DEVICE_HDR_16_MASK) {
929 		uint32_t vmhdr_offset = 0;
930 		/* Presence of VMID. Get the vm header offset. */
931 		if (df_ctl & FC_DFCTL_ESP_HDR_MASK) {
932 			vmhdr_offset += FC_DFCTL_ESP_HDR_SIZE;
933 			ocs_log_err(ocs, "ESP Header present. Fix ESP Size.\n");
934 		}
935 
936 		if (df_ctl & FC_DFCTL_NETWORK_HDR_MASK) {
937 			vmhdr_offset += FC_DFCTL_NETWORK_HDR_SIZE;
938 		}
939 		vhdr = (fc_vm_header_t *) ((char *)fchdr + sizeof(fc_header_t) + vmhdr_offset);
940 		io->app_id = ocs_be32toh(vhdr->src_vmid);
941 	}
942 
943 	/* RQ pair, if we got here, SIT=1 */
944 	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, TRUE);
945 
946 	if (cmnd->task_management_flags) {
947 		ocs_dispatch_unsolicited_tmf(io, cmnd->task_management_flags, node, lun);
948 	} else {
949 		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
950 
951 		/* can return failure for things like task set full and UAs,
952 		 * no need to treat as a dropped frame if rc != 0
953 		 */
954 		ocs_scsi_recv_cmd(io, lun, cmnd->fcp_cdb,
955 				  sizeof(cmnd->fcp_cdb) +
956 				  (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
957 				  flags);
958 	}
959 
960 	/* successfully processed, now return RX buffer to the chip */
961 	ocs_hw_sequence_free(&ocs->hw, seq);
962 	return 0;
963 }
964 
965 /**
966  * @ingroup unsol
967  * @brief Dispatch unsolicited FCP_CMND frame (auto xfer rdy).
968  *
969  * <h3 class="desc">Description</h3>
970  * Dispatch unsolicited FCP_CMND frame that is assisted with auto xfer ready.
971  *
972  * @param node Node that originated the frame.
973  * @param seq Header/payload sequence buffers.
974  *
975  * @return Returns 0 if frame processed and RX buffers cleaned
976  * up appropriately, -1 if frame not handled and RX buffers need
977  * to be returned.
978  */
979 static int32_t
980 ocs_dispatch_fcp_cmd_auto_xfer_rdy(ocs_node_t *node, ocs_hw_sequence_t *seq)
981 {
982 	ocs_t *ocs = node->ocs;
983 	fc_header_t	*fchdr = seq->header->dma.virt;
984 	fcp_cmnd_iu_t	*cmnd = NULL;
985 	ocs_io_t	*io = NULL;
986 	uint64_t	lun = UINT64_MAX;
987 	int32_t		rc = 0;
988 
989 	ocs_assert(seq->payload, -1);
990 	cmnd = seq->payload->dma.virt;
991 
992 	/* perform FCP_CMND validation check(s) */
993 	if (ocs_validate_fcp_cmd(ocs, seq)) {
994 		return -1;
995 	}
996 
997 	/* make sure first burst or auto xfer_rdy is enabled */
998 	if (!seq->auto_xrdy) {
999 		node_printf(node, "IO is not Auto Xfr Rdy assisted, dropping FCP_CMND\n");
1000 		return -1;
1001 	}
1002 
1003 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(cmnd->fcp_lun));
1004 
1005 	/* TODO should there be a check here for an error? Why do any of the
1006 	 * below if the LUN decode failed? */
1007 	io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
1008 	if (io == NULL) {
1009 		uint32_t send_frame_capable;
1010 
1011 		/* If we have SEND_FRAME capability, then use it to send task set full or busy */
1012 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
1013 		if ((rc == 0) && send_frame_capable) {
1014 			rc = ocs_sframe_send_task_set_full_or_busy(node, seq);
1015 			if (rc) {
1016 				ocs_log_test(ocs, "ocs_sframe_send_task_set_full_or_busy failed: %d\n", rc);
1017 			}
1018 			return rc;
1019 		}
1020 
1021 		ocs_log_err(ocs, "IO allocation failed ox_id %04x\n", ocs_be16toh(fchdr->ox_id));
1022 		return -1;
1023 	}
1024 	io->hw_priv = seq->hw_priv;
1025 
1026 	/* RQ pair, if we got here, SIT=0 */
1027 	ocs_populate_io_fcp_cmd(io, cmnd, fchdr, FALSE);
1028 
1029 	if (cmnd->task_management_flags) {
1030 		/* first burst command better not be a TMF */
1031 		ocs_log_err(ocs, "TMF flags set 0x%x\n", cmnd->task_management_flags);
1032 		ocs_scsi_io_free(io);
1033 		return -1;
1034 	} else {
1035 		uint32_t flags = ocs_get_flags_fcp_cmd(cmnd);
1036 
1037 		/* activate HW IO */
1038 		ocs_hw_io_activate_port_owned(&ocs->hw, seq->hio);
1039 		io->hio = seq->hio;
1040 		seq->hio->ul_io = io;
1041 		io->tgt_task_tag = seq->hio->indicator;
1042 
1043 		/* Note: Data buffers are received in another call */
1044 		ocs_scsi_recv_cmd_first_burst(io, lun, cmnd->fcp_cdb,
1045 					      sizeof(cmnd->fcp_cdb) +
1046 					      (cmnd->additional_fcp_cdb_length * sizeof(uint32_t)),
1047 					      flags, NULL, 0);
1048 	}
1049 
1050 	/* FCP_CMND processed, return RX buffer to the chip */
1051 	ocs_hw_sequence_free(&ocs->hw, seq);
1052 	return 0;
1053 }
1054 
1055 /**
1056  * @ingroup unsol
1057  * @brief Dispatch FCP data frames for auto xfer ready.
1058  *
1059  * <h3 class="desc">Description</h3>
1060  * Dispatch unsolicited FCP data frames (auto xfer ready)
1061  * containing sequence initiative transferred (SIT=1).
1062  *
1063  * @param node Node that originated the frame.
1064  * @param seq Header/payload sequence buffers.
1065  *
1066  * @return Returns 0 if frame processed and RX buffers cleaned
1067  * up appropriately, -1 if frame not handled.
1068  */
1069 
1070 static int32_t
1071 ocs_dispatch_fcp_data(ocs_node_t *node, ocs_hw_sequence_t *seq)
1072 {
1073 	ocs_t *ocs = node->ocs;
1074 	ocs_hw_t *hw = &ocs->hw;
1075 	ocs_hw_io_t *hio = seq->hio;
1076 	ocs_io_t	*io;
1077 	ocs_dma_t fburst[1];
1078 
1079 	ocs_assert(seq->payload, -1);
1080 	ocs_assert(hio, -1);
1081 
1082 	io = hio->ul_io;
1083 	if (io == NULL) {
1084 		ocs_log_err(ocs, "data received for NULL io, xri=0x%x\n",
1085 			    hio->indicator);
1086 		return -1;
1087 	}
1088 
1089 	/*
1090 	 * We only support data completions for auto xfer ready. Make sure
1091 	 * this is a port owned XRI.
1092 	 */
1093 	if (!ocs_hw_is_io_port_owned(hw, seq->hio)) {
1094 		ocs_log_err(ocs, "data received for host owned XRI, xri=0x%x\n",
1095 			    hio->indicator);
1096 		return -1;
1097 	}
1098 
1099 	/* For error statuses, pass the error to the target back end */
1100 	if (seq->status != OCS_HW_UNSOL_SUCCESS) {
1101 		ocs_log_err(ocs, "data with status 0x%x received, xri=0x%x\n",
1102 			    seq->status, hio->indicator);
1103 
1104 		/*
1105 		 * In this case, there is an existing, in-use HW IO that
1106 		 * first may need to be aborted. Then, the backend will be
1107 		 * notified of the error while waiting for the data.
1108 		 */
1109 		ocs_port_owned_abort(ocs, seq->hio);
1110 
1111 		/*
1112 		 * HW IO has already been allocated and is waiting for data.
1113 		 * Need to tell backend that an error has occurred.
1114 		 */
1115 		ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, OCS_SCSI_FIRST_BURST_ERR, NULL, 0);
1116 		return -1;
1117 	}
1118 
1119 	/* sequence initiative has been transferred */
1120 	io->seq_init = 1;
1121 
1122 	/* convert the array of pointers to the correct type, to send to backend */
1123 	fburst[0] = seq->payload->dma;
1124 
1125 	/* the amount of first burst data was saved as "accumulated sequence length" */
1126 	io->transferred = seq->payload->dma.len;
1127 
1128 	if (ocs_scsi_recv_cmd_first_burst(io, 0, NULL, 0, 0,
1129 					  fburst, io->transferred)) {
1130 		ocs_log_err(ocs, "error passing first burst, xri=0x%x, oxid=0x%x\n",
1131 			    hio->indicator, io->init_task_tag);
1132 	}
1133 
1134 	/* Free the header and all the accumulated payload buffers */
1135 	ocs_hw_sequence_free(&ocs->hw, seq);
1136 	return 0;
1137 }
1138 
1139 /**
1140  * @ingroup unsol
1141  * @brief Handle the callback for the TMF FUNCTION_REJECTED response.
1142  *
1143  * <h3 class="desc">Description</h3>
1144  * Handle the callback of a send TMF FUNCTION_REJECTED response request.
1145  *
1146  * @param io Pointer to the IO context.
1147  * @param scsi_status Status of the response.
1148  * @param flags Callback flags.
1149  * @param arg Callback argument.
1150  *
1151  * @return Returns 0 on success, or a negative error value on failure.
1152  */
1153 
1154 static int32_t
1155 ocs_fc_tmf_rejected_cb(ocs_io_t *io, ocs_scsi_io_status_e scsi_status, uint32_t flags, void *arg)
1156 {
1157 	ocs_scsi_io_free(io);
1158 	return 0;
1159 }
1160 
1161 /**
1162  * @brief Return the next FC frame on the given pending list
1163  *
1164  * The next FC frame on the pending list is returned, or NULL
1165  * if the list is empty.
1166  *
1167  * @param pend_list Pending list from which to take the next frame.
1168  * @param list_lock Lock that protects the pending list.
1169  *
1170  * @return Returns pointer to the next FC frame, or NULL if the pending frame list
1171  * is empty.
1172  */
1173 static ocs_hw_sequence_t *
1174 ocs_frame_next(ocs_list_t *pend_list, ocs_lock_t *list_lock)
1175 {
1176 	ocs_hw_sequence_t *frame = NULL;
1177 
1178 	ocs_lock(list_lock);
1179 		frame = ocs_list_remove_head(pend_list);
1180 	ocs_unlock(list_lock);
1181 	return frame;
1182 }
1183 
1184 /**
1185  * @brief Process send fcp response frame callback
1186  *
1187  * The function is called when the send FCP response posting has completed. Regardless
1188  * of the outcome, the sequence is freed.
1189  *
1190  * @param arg Pointer to originator frame sequence.
1191  * @param cqe Pointer to completion queue entry.
1192  * @param status Status of operation.
1193  *
1194  * @return None.
1195  */
1196 static void
1197 ocs_sframe_common_send_cb(void *arg, uint8_t *cqe, int32_t status)
1198 {
1199 	ocs_hw_send_frame_context_t *ctx = arg;
1200 	ocs_hw_t *hw = ctx->hw;
1201 
1202 	/* Free WQ completion callback */
1203 	ocs_hw_reqtag_free(hw, ctx->wqcb);
1204 
1205 	/* Free sequence */
1206 	ocs_hw_sequence_free(hw, ctx->seq);
1207 }
1208 
1209 /**
1210  * @brief Send a frame, common code
1211  *
1212  * A frame is sent using SEND_FRAME. The R_CTL/F_CTL/TYPE may be specified, and the payload is
1213  * sent as a single frame.
1214  *
1215  * Memory resources are allocated from RQ buffers contained in the passed in sequence data.
1216  *
1217  * @param node Pointer to node object.
1218  * @param seq Pointer to sequence object.
1219  * @param r_ctl R_CTL value to place in FC header.
1220  * @param info INFO value to place in FC header.
1221  * @param f_ctl F_CTL value to place in FC header.
1222  * @param type TYPE value to place in FC header.
1223  * @param payload Pointer to payload data
1224  * @param payload_len Length of payload in bytes.
1225  *
1226  * @return Returns 0 on success, or a negative error code value on failure.
1227  */
1228 static int32_t
1229 ocs_sframe_common_send(ocs_node_t *node, ocs_hw_sequence_t *seq, uint8_t r_ctl, uint8_t info, uint32_t f_ctl,
1230 		       uint8_t type, void *payload, uint32_t payload_len)
1231 {
1232 	ocs_t *ocs = node->ocs;
1233 	ocs_hw_t *hw = &ocs->hw;
1234 	ocs_hw_rtn_e rc = 0;
1235 	fc_header_t *behdr = seq->header->dma.virt;
1236 	fc_header_le_t hdr;
1237 	uint32_t s_id = fc_be24toh(behdr->s_id);
1238 	uint32_t d_id = fc_be24toh(behdr->d_id);
1239 	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
1240 	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
1241 	ocs_hw_send_frame_context_t *ctx;
1242 
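	/*
	 * The inbound RQ payload buffer is reused as a small scratch heap: the
	 * send frame request context is carved out first, followed by the
	 * response payload to be transmitted.
	 */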
1243 	uint32_t heap_size = seq->payload->dma.size;
1244 	uintptr_t heap_phys_base = seq->payload->dma.phys;
1245 	uint8_t *heap_virt_base = seq->payload->dma.virt;
1246 	uint32_t heap_offset = 0;
1247 
1248 	/* Build the FC header reusing the RQ header DMA buffer */
1249 	ocs_memset(&hdr, 0, sizeof(hdr));
1250 	hdr.d_id = s_id;			/* send it back to whomever sent it to us */
1251 	hdr.r_ctl = r_ctl;
1252 	hdr.info = info;
1253 	hdr.s_id = d_id;
1254 	hdr.cs_ctl = 0;
1255 	hdr.f_ctl = f_ctl;
1256 	hdr.type = type;
1257 	hdr.seq_cnt = 0;
1258 	hdr.df_ctl = 0;
1259 
1260 	/*
1261 	 * send_frame_seq_id is an atomic; we just let it increment, storing only
1262 	 * the low 8 bits in hdr.seq_id
1263 	 */
1264 	hdr.seq_id = (uint8_t) ocs_atomic_add_return(&hw->send_frame_seq_id, 1);
1265 
1266 	hdr.rx_id = rx_id;
1267 	hdr.ox_id = ox_id;
1268 	hdr.parameter = 0;
1269 
1270 	/* Allocate and fill in the send frame request context */
1271 	ctx = (void*)(heap_virt_base + heap_offset);
1272 	heap_offset += sizeof(*ctx);
1273 	ocs_assert(heap_offset < heap_size, -1);
1274 	ocs_memset(ctx, 0, sizeof(*ctx));
1275 
1276 	/* Save sequence */
1277 	ctx->seq = seq;
1278 
1279 	/* Allocate a response payload DMA buffer from the heap */
1280 	ctx->payload.phys = heap_phys_base + heap_offset;
1281 	ctx->payload.virt = heap_virt_base + heap_offset;
1282 	ctx->payload.size = payload_len;
1283 	ctx->payload.len = payload_len;
1284 	heap_offset += payload_len;
1285 	ocs_assert(heap_offset <= heap_size, -1);
1286 
1287 	/* Copy the payload in */
1288 	ocs_memcpy(ctx->payload.virt, payload, payload_len);
1289 
1290 	/* Send */
1291 	rc = ocs_hw_send_frame(&ocs->hw, (void*)&hdr, FC_SOFI3, FC_EOFT, &ctx->payload, ctx,
1292 				ocs_sframe_common_send_cb, ctx);
1293 	if (rc) {
1294 		ocs_log_test(ocs, "ocs_hw_send_frame failed: %d\n", rc);
1295 	}
1296 
1297 	return rc ? -1 : 0;
1298 }
1299 
1300 /**
1301  * @brief Send FCP response using SEND_FRAME
1302  *
1303  * The FCP response is sent using the SEND_FRAME function.
1304  *
1305  * @param node Pointer to node object.
1306  * @param seq Pointer to inbound sequence.
1307  * @param rsp Pointer to response data.
1308  * @param rsp_len Length of response data, in bytes.
1309  *
1310  * @return Returns 0 on success, or a negative error code value on failure.
1311  */
1312 static int32_t
1313 ocs_sframe_send_fcp_rsp(ocs_node_t *node, ocs_hw_sequence_t *seq, void *rsp, uint32_t rsp_len)
1314 {
1315 	return ocs_sframe_common_send(node, seq,
1316 				      FC_RCTL_FC4_DATA,
1317 				      FC_RCTL_INFO_CMD_STATUS,
1318 				      FC_FCTL_EXCHANGE_RESPONDER |
1319 					      FC_FCTL_LAST_SEQUENCE |
1320 					      FC_FCTL_END_SEQUENCE |
1321 					      FC_FCTL_SEQUENCE_INITIATIVE,
1322 				      FC_TYPE_FCP,
1323 				      rsp, rsp_len);
1324 }
1325 
1326 /**
1327  * @brief Send task set full response
1328  *
1329  * Return a task set full or busy response using send frame.
1330  *
1331  * @param node Pointer to node object.
1332  * @param seq Pointer to originator frame sequence.
1333  *
1334  * @return Returns 0 on success, or a negative error code value on failure.
1335  */
1336 static int32_t
1337 ocs_sframe_send_task_set_full_or_busy(ocs_node_t *node, ocs_hw_sequence_t *seq)
1338 {
1339 	fcp_rsp_iu_t fcprsp;
1340 	fcp_cmnd_iu_t *fcpcmd = seq->payload->dma.virt;
1341 	uint32_t *fcp_dl_ptr;
1342 	uint32_t fcp_dl;
1343 	int32_t rc = 0;
1344 
1345 	/* extract FCP_DL from FCP command */
1346 	fcp_dl_ptr = (uint32_t*)(&(fcpcmd->fcp_cdb_and_dl));
1347 	fcp_dl_ptr += fcpcmd->additional_fcp_cdb_length;
1348 	fcp_dl = ocs_be32toh(*fcp_dl_ptr);
1349 
1350 	/* construct task set full or busy response */
1351 	ocs_memset(&fcprsp, 0, sizeof(fcprsp));
1352 	ocs_lock(&node->active_ios_lock);
1353 		fcprsp.scsi_status = ocs_list_empty(&node->active_ios) ? SCSI_STATUS_BUSY : SCSI_STATUS_TASK_SET_FULL;
1354 	ocs_unlock(&node->active_ios_lock);
1355 	*((uint32_t*)&fcprsp.fcp_resid) = fcp_dl;
1356 
1357 	/* send it using send_frame */
1358 	rc = ocs_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp) - sizeof(fcprsp.data));
1359 	if (rc) {
1360 		ocs_log_test(node->ocs, "ocs_sframe_send_fcp_rsp failed: %d\n", rc);
1361 	}
1362 	return rc;
1363 }
1364 
1365 /**
1366  * @brief Send BA_ACC using SEND_FRAME
1367  *
1368  * A BA_ACC is sent using SEND_FRAME.
1369  *
1370  * @param node Pointer to node object.
1371  * @param seq Pointer to originator frame sequence.
1372  *
1373  * @return Returns 0 on success, or a negative error code value on failure.
1374  */
1375 int32_t
1376 ocs_sframe_send_bls_acc(ocs_node_t *node, ocs_hw_sequence_t *seq)
1377 {
1378 	fc_header_t *behdr = seq->header->dma.virt;
1379 	uint16_t ox_id = ocs_be16toh(behdr->ox_id);
1380 	uint16_t rx_id = ocs_be16toh(behdr->rx_id);
1381 	fc_ba_acc_payload_t acc = {0};
1382 
1383 	acc.ox_id = ocs_htobe16(ox_id);
1384 	acc.rx_id = ocs_htobe16(rx_id);
1385 	acc.low_seq_cnt = UINT16_MAX;
1386 	acc.high_seq_cnt = UINT16_MAX;
1387 
1388 	return ocs_sframe_common_send(node, seq,
1389 				      FC_RCTL_BLS,
1390 				      FC_RCTL_INFO_UNSOL_DATA,
1391 				      FC_FCTL_EXCHANGE_RESPONDER |
1392 					      FC_FCTL_LAST_SEQUENCE |
1393 					      FC_FCTL_END_SEQUENCE,
1394 				      FC_TYPE_BASIC_LINK,
1395 				      &acc, sizeof(acc));
1396 }
1397