xref: /linux/drivers/scsi/libfc/fc_exch.c (revision da155d5b40587815a4397e1a69382fe2366d940b)
1 /*
2  * Copyright(c) 2007 Intel Corporation. All rights reserved.
3  * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
4  * Copyright(c) 2008 Mike Christie
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Maintained at www.Open-FCoE.org
20  */
21 
22 /*
23  * Fibre Channel exchange and sequence handling.
24  */
25 
26 #include <linux/timer.h>
27 #include <linux/slab.h>
28 #include <linux/err.h>
29 
30 #include <scsi/fc/fc_fc2.h>
31 
32 #include <scsi/libfc.h>
33 #include <scsi/fc_encode.h>
34 
35 #include "fc_libfc.h"
36 
37 u16	fc_cpu_mask;		/* cpu mask for possible cpus */
38 EXPORT_SYMBOL(fc_cpu_mask);
39 static u16	fc_cpu_order;	/* power of 2 needed to represent total possible cpus */
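/*
 * Note: exchange IDs are striped across the per-cpu pools using these two
 * values: the low fc_cpu_order bits of an XID select the owning CPU's pool
 * (see fc_exch_em_alloc() and fc_exch_find()), so fc_cpu_mask is expected
 * to be (1 << fc_cpu_order) - 1.
 */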
40 static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
41 static struct workqueue_struct *fc_exch_workqueue;
42 
43 /*
44  * Structure and function definitions for managing Fibre Channel Exchanges
45  * and Sequences.
46  *
47  * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
48  *
49  * fc_exch_mgr holds the exchange state for an N port
50  *
51  * fc_exch holds state for one exchange and links to its active sequence.
52  *
53  * fc_seq holds the state for an individual sequence.
54  */
55 
56 /**
57  * struct fc_exch_pool - Per cpu exchange pool
58  * @next_index:	  Next possible free exchange index
59  * @total_exches: Total allocated exchanges
60  * @lock:	  Exch pool lock
61  * @ex_list:	  List of exchanges
62  *
63  * This structure manages per-cpu exchanges via an array of exchange pointers.
64  * The array is allocated immediately after the struct fc_exch_pool memory
65  * and covers the range of exchanges assigned to this per-cpu pool.
66  */
67 struct fc_exch_pool {
68 	spinlock_t	 lock;
69 	struct list_head ex_list;
70 	u16		 next_index;
71 	u16		 total_exches;
72 
73 	/* two-entry cache of free slots in the exch array */
74 	u16		 left;
75 	u16		 right;
76 } ____cacheline_aligned_in_smp;
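/*
 * Memory layout sketch (see fc_exch_ptr_get()/fc_exch_ptr_set()): each
 * per-cpu allocation is a struct fc_exch_pool immediately followed by the
 * array of exchange pointers it manages:
 *
 *	[ struct fc_exch_pool | struct fc_exch *[0] | struct fc_exch *[1] | ... ]
 */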
77 
78 /**
79  * struct fc_exch_mgr - The Exchange Manager (EM).
80  * @class:	    Default class for new sequences
81  * @kref:	    Reference counter
82  * @min_xid:	    Minimum exchange ID
83  * @max_xid:	    Maximum exchange ID
84  * @ep_pool:	    Reserved exchange pointers
85  * @pool_max_index: Max exch array index in exch pool
86  * @pool:	    Per cpu exch pool
87  * @stats:	    Statistics structure
88  *
89  * This structure is the center for creating exchanges and sequences.
90  * It manages the allocation of exchange IDs.
91  */
92 struct fc_exch_mgr {
93 	struct fc_exch_pool *pool;
94 	mempool_t	*ep_pool;
95 	enum fc_class	class;
96 	struct kref	kref;
97 	u16		min_xid;
98 	u16		max_xid;
99 	u16		pool_max_index;
100 
101 	/*
102 	 * Currently exchange mgr stats are updated but not used.
103 	 * They could either be exposed via sysfs or removed
104 	 * altogether if they remain unused. XXX
105 	 */
106 	struct {
107 		atomic_t no_free_exch;
108 		atomic_t no_free_exch_xid;
109 		atomic_t xid_not_found;
110 		atomic_t xid_busy;
111 		atomic_t seq_not_found;
112 		atomic_t non_bls_resp;
113 	} stats;
114 };
115 
116 /**
117  * struct fc_exch_mgr_anchor - primary structure for list of EMs
118  * @ema_list: Exchange Manager Anchor list
119  * @mp:	      Exchange Manager associated with this anchor
120  * @match:    Routine to determine if this anchor's EM should be used
121  *
122  * When walking the list of anchors, the match routine is called for each
123  * anchor to determine whether that EM should be used. The last anchor in
124  * the list always matches, so it handles any exchanges not handled by
125  * other EMs. Non-default EMs are added to the anchor list by hardware
126  * that provides FCoE offloads.
127  */
128 struct fc_exch_mgr_anchor {
129 	struct list_head ema_list;
130 	struct fc_exch_mgr *mp;
131 	bool (*match)(struct fc_frame *);
132 };
133 
134 static void fc_exch_rrq(struct fc_exch *);
135 static void fc_seq_ls_acc(struct fc_frame *);
136 static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
137 			  enum fc_els_rjt_explan);
138 static void fc_exch_els_rec(struct fc_frame *);
139 static void fc_exch_els_rrq(struct fc_frame *);
140 
141 /*
142  * Internal implementation notes.
143  *
144  * There is one exchange manager by default in libfc, but an LLD may choose
145  * to have one per CPU. There is one sequence manager per exchange manager,
146  * and the two are currently never separated.
147  *
148  * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
149  * assigned by the Sequence Initiator that shall be unique for a specific
150  * D_ID and S_ID pair while the Sequence is open."   Note that it isn't
151  * qualified by exchange ID, which one might think it would be.
152  * In practice this limits the number of open sequences and exchanges to 256
153  * per session.	 For most targets we could treat this limit as per exchange.
154  *
155  * The exchange and its sequence are freed when the last sequence is received.
156  * It's possible for the remote port to leave an exchange open without
157  * sending any sequences.
158  *
159  * Notes on reference counts:
160  *
161  * Exchanges are reference counted and an exchange is freed when its
162  * reference count becomes zero.
163  *
164  * Timeouts:
165  * Sequences are timed out for E_D_TOV and R_A_TOV.
166  *
167  * Sequence event handling:
168  *
169  * The following events may occur on initiator sequences:
170  *
171  *	Send.
172  *	    For now, the whole thing is sent.
173  *	Receive ACK
174  *	    This applies only to class F.
175  *	    The sequence is marked complete.
176  *	ULP completion.
177  *	    The upper layer calls fc_exch_done() when done
178  *	    with exchange and sequence tuple.
179  *	RX-inferred completion.
180  *	    When we receive the next sequence on the same exchange, we can
181  *	    retire the previous sequence ID.  (XXX not implemented).
182  *	Timeout.
183  *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
184  *	    E_D_TOV causes abort and calls upper layer response handler
185  *	    with FC_EX_TIMEOUT error.
186  *	Receive RJT
187  *	    XXX defer.
188  *	Send ABTS
189  *	    On timeout.
190  *
191  * The following events may occur on recipient sequences:
192  *
193  *	Receive
194  *	    Allocate sequence for first frame received.
195  *	    Hold during receive handler.
196  *	    Release when final frame received.
197  *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
198  *	Receive ABTS
199  *	    Deallocate sequence
200  *	Send RJT
201  *	    Deallocate
202  *
203  * For now, we neglect conditions where only part of a sequence was
204  * received or transmitted, or where out-of-order receipt is detected.
205  */
206 
207 /*
208  * Locking notes:
209  *
210  * The EM code runs in a per-CPU worker thread.
211  *
212  * To protect against concurrency between worker thread code and timers,
213  * sequence allocation and deallocation must be locked.
214  *  - exchange refcnt can be updated atomically without locks.
215  *  - sequence allocation must be locked by exch lock.
216  *  - If the EM pool lock and ex_lock must be taken at the same time, then the
217  *    EM pool lock must be taken before the ex_lock.
218  */
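/*
 * Illustrative ordering, as in fc_exch_em_alloc(): the pool lock is taken
 * first, the new exchange's ex_lock is then taken while the pool lock is
 * still held, and the pool lock is dropped before the exchange is handed
 * to the caller with only its ex_lock held.
 */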
219 
220 /*
221  * opcode names for debugging.
222  */
223 static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
224 
225 /**
226  * fc_exch_name_lookup() - Lookup name by opcode
227  * @op:	       Opcode to be looked up
228  * @table:     Opcode/name table
229  * @max_index: Index not to be exceeded
230  *
231  * This routine is used to determine a human-readable string identifying
232  * an R_CTL opcode.
233  */
234 static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
235 					      unsigned int max_index)
236 {
237 	const char *name = NULL;
238 
239 	if (op < max_index)
240 		name = table[op];
241 	if (!name)
242 		name = "unknown";
243 	return name;
244 }
245 
246 /**
247  * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
248  * @op: The opcode to be looked up
249  */
250 static const char *fc_exch_rctl_name(unsigned int op)
251 {
252 	return fc_exch_name_lookup(op, fc_exch_rctl_names,
253 				   ARRAY_SIZE(fc_exch_rctl_names));
254 }
255 
256 /**
257  * fc_exch_hold() - Increment an exchange's reference count
258  * @ep: Exchange to be held
259  */
260 static inline void fc_exch_hold(struct fc_exch *ep)
261 {
262 	atomic_inc(&ep->ex_refcnt);
263 }
264 
265 /**
266  * fc_exch_setup_hdr() - Initialize an FC header by initializing some fields
267  *			 and determining SOF and EOF.
268  * @ep:	   The exchange that will use the header
269  * @fp:	   The frame whose header is to be modified
270  * @f_ctl: F_CTL bits that will be used for the frame header
271  *
272  * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
273  * fh_seq_id, fh_seq_cnt and the SOF and EOF.
274  */
275 static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
276 			      u32 f_ctl)
277 {
278 	struct fc_frame_header *fh = fc_frame_header_get(fp);
279 	u16 fill;
280 
281 	fr_sof(fp) = ep->class;
282 	if (ep->seq.cnt)
283 		fr_sof(fp) = fc_sof_normal(ep->class);
284 
285 	if (f_ctl & FC_FC_END_SEQ) {
286 		fr_eof(fp) = FC_EOF_T;
287 		if (fc_sof_needs_ack(ep->class))
288 			fr_eof(fp) = FC_EOF_N;
289 		/*
290 		 * From F_CTL.
291 		 * The number of fill bytes to make the length a 4-byte
292 		 * multiple is the low order 2-bits of the f_ctl.
293 		 * The fill itself will have been cleared by the frame
294 		 * allocation.
295 		 * After this, the length will be even, as expected by
296 		 * the transport.
297 		 */
298 		fill = fr_len(fp) & 3;
299 		if (fill) {
300 			fill = 4 - fill;
301 			/* TODO, this may be a problem with fragmented skb */
302 			skb_put(fp_skb(fp), fill);
303 			hton24(fh->fh_f_ctl, f_ctl | fill);
304 		}
305 	} else {
306 		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on non-last frames */
307 		fr_eof(fp) = FC_EOF_N;
308 	}
309 
310 	/*
311 	 * Initialize remaining fh fields
312 	 * from fc_fill_fc_hdr
313 	 */
314 	fh->fh_ox_id = htons(ep->oxid);
315 	fh->fh_rx_id = htons(ep->rxid);
316 	fh->fh_seq_id = ep->seq.id;
317 	fh->fh_seq_cnt = htons(ep->seq.cnt);
318 }
319 
320 /**
321  * fc_exch_release() - Decrement an exchange's reference count
322  * @ep: Exchange to be released
323  *
324  * If the reference count reaches zero and the exchange is complete,
325  * it is freed.
326  */
327 static void fc_exch_release(struct fc_exch *ep)
328 {
329 	struct fc_exch_mgr *mp;
330 
331 	if (atomic_dec_and_test(&ep->ex_refcnt)) {
332 		mp = ep->em;
333 		if (ep->destructor)
334 			ep->destructor(&ep->seq, ep->arg);
335 		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
336 		mempool_free(ep, mp->ep_pool);
337 	}
338 }
339 
340 /**
341  * fc_exch_done_locked() - Complete an exchange with the exchange lock held
342  * @ep: The exchange that is complete
343  */
344 static int fc_exch_done_locked(struct fc_exch *ep)
345 {
346 	int rc = 1;
347 
348 	/*
349 	 * We must check for completion in case there are two threads
350 	 * trying to complete this. But the rrq code will reuse the
351 	 * ep, and in that case we only clear the resp and set it as
352 	 * complete, so it can be reused by the timer to send the rrq.
353 	 */
354 	ep->resp = NULL;
355 	if (ep->state & FC_EX_DONE)
356 		return rc;
357 	ep->esb_stat |= ESB_ST_COMPLETE;
358 
359 	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
360 		ep->state |= FC_EX_DONE;
361 		if (cancel_delayed_work(&ep->timeout_work))
362 			atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
363 		rc = 0;
364 	}
365 	return rc;
366 }
367 
368 /**
369  * fc_exch_ptr_get() - Return an exchange from an exchange pool
370  * @pool:  Exchange Pool to get an exchange from
371  * @index: Index of the exchange within the pool
372  *
373  * Use the index to get an exchange from within an exchange pool. exches
374  * will point to an array of exchange pointers. The index will select
375  * the exchange within the array.
376  */
377 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
378 					      u16 index)
379 {
380 	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
381 	return exches[index];
382 }
383 
384 /**
385  * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
386  * @pool:  The pool to assign the exchange to
387  * @index: The index in the pool where the exchange will be assigned
388  * @ep:	   The exchange to assign to the pool
389  */
390 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
391 				   struct fc_exch *ep)
392 {
393 	((struct fc_exch **)(pool + 1))[index] = ep;
394 }
395 
396 /**
397  * fc_exch_delete() - Delete an exchange
398  * @ep: The exchange to be deleted
399  */
400 static void fc_exch_delete(struct fc_exch *ep)
401 {
402 	struct fc_exch_pool *pool;
403 	u16 index;
404 
405 	pool = ep->pool;
406 	spin_lock_bh(&pool->lock);
407 	WARN_ON(pool->total_exches <= 0);
408 	pool->total_exches--;
409 
410 	/* update the cache of free slots */
411 	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
412 	if (pool->left == FC_XID_UNKNOWN)
413 		pool->left = index;
414 	else if (pool->right == FC_XID_UNKNOWN)
415 		pool->right = index;
416 	else
417 		pool->next_index = index;
418 
419 	fc_exch_ptr_set(pool, index, NULL);
420 	list_del(&ep->ex_list);
421 	spin_unlock_bh(&pool->lock);
422 	fc_exch_release(ep);	/* drop hold for exch in mp */
423 }
424 
425 /**
426  * fc_exch_timer_set_locked() - Start a timer for an exchange with
427  *				the exchange lock held
428  * @ep:		The exchange whose timer will start
429  * @timer_msec: The timeout period
430  *
431  * Used by upper-level protocols to time out the exchange.
432  * The timer is cancelled when it fires or when the exchange completes.
433  */
434 static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
435 					    unsigned int timer_msec)
436 {
437 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
438 		return;
439 
440 	FC_EXCH_DBG(ep, "Exchange timer armed\n");
441 
442 	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
443 			       msecs_to_jiffies(timer_msec)))
444 		fc_exch_hold(ep);		/* hold for timer */
445 }
446 
447 /**
448  * fc_exch_timer_set() - Lock the exchange and set the timer
449  * @ep:		The exchange whose timer will start
450  * @timer_msec: The timeout period
451  */
452 static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
453 {
454 	spin_lock_bh(&ep->ex_lock);
455 	fc_exch_timer_set_locked(ep, timer_msec);
456 	spin_unlock_bh(&ep->ex_lock);
457 }
458 
459 /**
460  * fc_seq_send() - Send a frame using existing sequence/exchange pair
461  * @lport: The local port that the exchange will be sent on
462  * @sp:	   The sequence to be sent
463  * @fp:	   The frame to be sent on the exchange
464  */
465 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
466 		       struct fc_frame *fp)
467 {
468 	struct fc_exch *ep;
469 	struct fc_frame_header *fh = fc_frame_header_get(fp);
470 	int error;
471 	u32 f_ctl;
472 
473 	ep = fc_seq_exch(sp);
474 	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
475 
476 	f_ctl = ntoh24(fh->fh_f_ctl);
477 	fc_exch_setup_hdr(ep, fp, f_ctl);
478 	fr_encaps(fp) = ep->encaps;
479 
480 	/*
481 	 * Update the sequence count if this frame is carrying
482 	 * multiple FC frames when sequence offload is enabled
483 	 * by the LLD.
484 	 */
485 	if (fr_max_payload(fp))
486 		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
487 					fr_max_payload(fp));
488 	else
489 		sp->cnt++;
490 
491 	/*
492 	 * Send the frame.
493 	 */
494 	error = lport->tt.frame_send(lport, fp);
495 
496 	if (fh->fh_type == FC_TYPE_BLS)
497 		return error;
498 
499 	/*
500 	 * Update the exchange and sequence flags,
501 	 * assuming all frames for the sequence have been sent.
502 	 * We can only be called to send once for each sequence.
503 	 */
504 	spin_lock_bh(&ep->ex_lock);
505 	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
506 	if (f_ctl & FC_FC_SEQ_INIT)
507 		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
508 	spin_unlock_bh(&ep->ex_lock);
509 	return error;
510 }
511 
512 /**
513  * fc_seq_alloc() - Allocate a sequence for a given exchange
514  * @ep:	    The exchange to allocate a new sequence for
515  * @seq_id: The sequence ID to be used
516  *
517  * We don't support multiple originated sequences on the same exchange.
518  * By implication, any previously originated sequence on this exchange
519  * is complete, and we reallocate the same sequence.
520  */
521 static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
522 {
523 	struct fc_seq *sp;
524 
525 	sp = &ep->seq;
526 	sp->ssb_stat = 0;
527 	sp->cnt = 0;
528 	sp->id = seq_id;
529 	return sp;
530 }
531 
532 /**
533  * fc_seq_start_next_locked() - Allocate a new sequence on the same
534  *				exchange as the supplied sequence
535  * @sp: The sequence/exchange to get a new sequence for
536  */
537 static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
538 {
539 	struct fc_exch *ep = fc_seq_exch(sp);
540 
541 	sp = fc_seq_alloc(ep, ep->seq_id++);
542 	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
543 		    ep->f_ctl, sp->id);
544 	return sp;
545 }
546 
547 /**
548  * fc_seq_start_next() - Lock the exchange and get a new sequence
549  *			 for a given sequence/exchange pair
550  * @sp: The sequence/exchange to get a new sequence for
551  */
552 static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
553 {
554 	struct fc_exch *ep = fc_seq_exch(sp);
555 
556 	spin_lock_bh(&ep->ex_lock);
557 	sp = fc_seq_start_next_locked(sp);
558 	spin_unlock_bh(&ep->ex_lock);
559 
560 	return sp;
561 }
562 
563 /*
564  * Set the response handler for the exchange associated with a sequence.
565  */
566 static void fc_seq_set_resp(struct fc_seq *sp,
567 			    void (*resp)(struct fc_seq *, struct fc_frame *,
568 					 void *),
569 			    void *arg)
570 {
571 	struct fc_exch *ep = fc_seq_exch(sp);
572 
573 	spin_lock_bh(&ep->ex_lock);
574 	ep->resp = resp;
575 	ep->arg = arg;
576 	spin_unlock_bh(&ep->ex_lock);
577 }
578 
579 /**
580  * fc_exch_abort_locked() - Abort an exchange
581  * @ep:	The exchange to be aborted
582  * @timer_msec: The period of time to wait before aborting
583  *
584  * Locking notes:  Called with exch lock held
585  *
586  * Return value: 0 on success else error code
587  */
588 static int fc_exch_abort_locked(struct fc_exch *ep,
589 				unsigned int timer_msec)
590 {
591 	struct fc_seq *sp;
592 	struct fc_frame *fp;
593 	int error;
594 
595 	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
596 	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
597 		return -ENXIO;
598 
599 	/*
600 	 * Send the abort on a new sequence if possible.
601 	 */
602 	sp = fc_seq_start_next_locked(&ep->seq);
603 	if (!sp)
604 		return -ENOMEM;
605 
606 	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
607 	if (timer_msec)
608 		fc_exch_timer_set_locked(ep, timer_msec);
609 
610 	/*
611 	 * If not logged into the fabric, don't send ABTS but leave
612 	 * sequence active until next timeout.
613 	 */
614 	if (!ep->sid)
615 		return 0;
616 
617 	/*
618 	 * Send an abort for the sequence that timed out.
619 	 */
620 	fp = fc_frame_alloc(ep->lp, 0);
621 	if (fp) {
622 		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
623 			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
624 		error = fc_seq_send(ep->lp, sp, fp);
625 	} else
626 		error = -ENOBUFS;
627 	return error;
628 }
629 
630 /**
631  * fc_seq_exch_abort() - Abort an exchange and sequence
632  * @req_sp:	The sequence to be aborted
633  * @timer_msec: The period of time to wait before aborting
634  *
635  * Generally called because of a timeout or an abort from the upper layer.
636  *
637  * Return value: 0 on success else error code
638  */
639 static int fc_seq_exch_abort(const struct fc_seq *req_sp,
640 			     unsigned int timer_msec)
641 {
642 	struct fc_exch *ep;
643 	int error;
644 
645 	ep = fc_seq_exch(req_sp);
646 	spin_lock_bh(&ep->ex_lock);
647 	error = fc_exch_abort_locked(ep, timer_msec);
648 	spin_unlock_bh(&ep->ex_lock);
649 	return error;
650 }
651 
652 /**
653  * fc_exch_timeout() - Handle exchange timer expiration
654  * @work: The work_struct identifying the exchange that timed out
655  */
656 static void fc_exch_timeout(struct work_struct *work)
657 {
658 	struct fc_exch *ep = container_of(work, struct fc_exch,
659 					  timeout_work.work);
660 	struct fc_seq *sp = &ep->seq;
661 	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
662 	void *arg;
663 	u32 e_stat;
664 	int rc = 1;
665 
666 	FC_EXCH_DBG(ep, "Exchange timed out\n");
667 
668 	spin_lock_bh(&ep->ex_lock);
669 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
670 		goto unlock;
671 
672 	e_stat = ep->esb_stat;
673 	if (e_stat & ESB_ST_COMPLETE) {
674 		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
675 		spin_unlock_bh(&ep->ex_lock);
676 		if (e_stat & ESB_ST_REC_QUAL)
677 			fc_exch_rrq(ep);
678 		goto done;
679 	} else {
680 		resp = ep->resp;
681 		arg = ep->arg;
682 		ep->resp = NULL;
683 		if (e_stat & ESB_ST_ABNORMAL)
684 			rc = fc_exch_done_locked(ep);
685 		spin_unlock_bh(&ep->ex_lock);
686 		if (!rc)
687 			fc_exch_delete(ep);
688 		if (resp)
689 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
690 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
691 		goto done;
692 	}
693 unlock:
694 	spin_unlock_bh(&ep->ex_lock);
695 done:
696 	/*
697 	 * This release matches the hold taken when the timer was set.
698 	 */
699 	fc_exch_release(ep);
700 }
701 
702 /**
703  * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
704  * @lport: The local port that the exchange is for
705  * @mp:	   The exchange manager that will allocate the exchange
706  *
707  * Returns pointer to allocated fc_exch with exch lock held.
708  */
709 static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
710 					struct fc_exch_mgr *mp)
711 {
712 	struct fc_exch *ep;
713 	unsigned int cpu;
714 	u16 index;
715 	struct fc_exch_pool *pool;
716 
717 	/* allocate memory for exchange */
718 	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
719 	if (!ep) {
720 		atomic_inc(&mp->stats.no_free_exch);
721 		goto out;
722 	}
723 	memset(ep, 0, sizeof(*ep));
724 
725 	cpu = get_cpu();
726 	pool = per_cpu_ptr(mp->pool, cpu);
727 	spin_lock_bh(&pool->lock);
728 	put_cpu();
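	/*
	 * Note: the CPU is sampled only to pick a pool and to encode the
	 * owning CPU in the XID below; the thread may migrate after
	 * put_cpu(), but 'pool' and 'cpu' still refer to the sampled CPU.
	 */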
729 
730 	/* peek cache of free slot */
731 	if (pool->left != FC_XID_UNKNOWN) {
732 		index = pool->left;
733 		pool->left = FC_XID_UNKNOWN;
734 		goto hit;
735 	}
736 	if (pool->right != FC_XID_UNKNOWN) {
737 		index = pool->right;
738 		pool->right = FC_XID_UNKNOWN;
739 		goto hit;
740 	}
741 
742 	index = pool->next_index;
743 	/* allocate new exch from pool */
744 	while (fc_exch_ptr_get(pool, index)) {
745 		index = index == mp->pool_max_index ? 0 : index + 1;
746 		if (index == pool->next_index)
747 			goto err;
748 	}
749 	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
750 hit:
751 	fc_exch_hold(ep);	/* hold for exch in mp */
752 	spin_lock_init(&ep->ex_lock);
753 	/*
754 	 * Hold exch lock for caller to prevent fc_exch_reset()
755 	 * from releasing exch while fc_exch_alloc() caller is
756 	 * still working on exch.
757 	 */
758 	spin_lock_bh(&ep->ex_lock);
759 
760 	fc_exch_ptr_set(pool, index, ep);
761 	list_add_tail(&ep->ex_list, &pool->ex_list);
762 	fc_seq_alloc(ep, ep->seq_id++);
763 	pool->total_exches++;
764 	spin_unlock_bh(&pool->lock);
765 
766 	/*
767 	 *  update exchange
768 	 */
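	/*
	 * The XID is built from the pool array index shifted above the low
	 * fc_cpu_order bits, OR'd with the owning CPU, plus the EM's
	 * min_xid; fc_exch_find() recovers the pool with 'xid & fc_cpu_mask'
	 * and the index with '(xid - min_xid) >> fc_cpu_order'.
	 */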
769 	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
770 	ep->em = mp;
771 	ep->pool = pool;
772 	ep->lp = lport;
773 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
774 	ep->rxid = FC_XID_UNKNOWN;
775 	ep->class = mp->class;
776 	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
777 out:
778 	return ep;
779 err:
780 	spin_unlock_bh(&pool->lock);
781 	atomic_inc(&mp->stats.no_free_exch_xid);
782 	mempool_free(ep, mp->ep_pool);
783 	return NULL;
784 }
785 
786 /**
787  * fc_exch_alloc() - Allocate an exchange from an EM on a
788  *		     local port's list of EMs.
789  * @lport: The local port that will own the exchange
790  * @fp:	   The FC frame that the exchange will be for
791  *
792  * This function walks the list of exchange manager (EM)
793  * anchors to select an EM for a new exchange allocation. The
794  * EM is selected when a NULL match function pointer is encountered
795  * or when a call to a match function returns true.
796  */
797 static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
798 					    struct fc_frame *fp)
799 {
800 	struct fc_exch_mgr_anchor *ema;
801 
802 	list_for_each_entry(ema, &lport->ema_list, ema_list)
803 		if (!ema->match || ema->match(fp))
804 			return fc_exch_em_alloc(lport, ema->mp);
805 	return NULL;
806 }
807 
808 /**
809  * fc_exch_find() - Lookup and hold an exchange
810  * @mp:	 The exchange manager to lookup the exchange from
811  * @xid: The XID of the exchange to look up
812  */
813 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
814 {
815 	struct fc_exch_pool *pool;
816 	struct fc_exch *ep = NULL;
817 
818 	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
819 		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
820 		spin_lock_bh(&pool->lock);
821 		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
822 		if (ep && ep->xid == xid)
823 			fc_exch_hold(ep);
824 		spin_unlock_bh(&pool->lock);
825 	}
826 	return ep;
827 }
828 
829 
830 /**
831  * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
832  *		    the memory allocated for the related objects may be freed.
833  * @sp: The sequence that has completed
834  */
835 static void fc_exch_done(struct fc_seq *sp)
836 {
837 	struct fc_exch *ep = fc_seq_exch(sp);
838 	int rc;
839 
840 	spin_lock_bh(&ep->ex_lock);
841 	rc = fc_exch_done_locked(ep);
842 	spin_unlock_bh(&ep->ex_lock);
843 	if (!rc)
844 		fc_exch_delete(ep);
845 }
846 
847 /**
848  * fc_exch_resp() - Allocate a new exchange for a response frame
849  * @lport: The local port that the exchange was for
850  * @mp:	   The exchange manager to allocate the exchange from
851  * @fp:	   The response frame
852  *
853  * Sets the responder ID in the frame header.
854  */
855 static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
856 				    struct fc_exch_mgr *mp,
857 				    struct fc_frame *fp)
858 {
859 	struct fc_exch *ep;
860 	struct fc_frame_header *fh;
861 
862 	ep = fc_exch_alloc(lport, fp);
863 	if (ep) {
864 		ep->class = fc_frame_class(fp);
865 
866 		/*
867 		 * Set EX_CTX indicating we're responding on this exchange.
868 		 */
869 		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
870 		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
871 		fh = fc_frame_header_get(fp);
872 		ep->sid = ntoh24(fh->fh_d_id);
873 		ep->did = ntoh24(fh->fh_s_id);
874 		ep->oid = ep->did;
875 
876 		/*
877 		 * Allocated exchange has placed the XID in the
878 		 * originator field. Move it to the responder field,
879 		 * and set the originator XID from the frame.
880 		 */
881 		ep->rxid = ep->xid;
882 		ep->oxid = ntohs(fh->fh_ox_id);
883 		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
884 		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
885 			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
886 
887 		fc_exch_hold(ep);	/* hold for caller */
888 		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
889 	}
890 	return ep;
891 }
892 
893 /**
894  * fc_seq_lookup_recip() - Find a sequence where the other end
895  *			   originated the sequence
896  * @lport: The local port that the frame was sent to
897  * @mp:	   The Exchange Manager to lookup the exchange from
898  * @fp:	   The frame associated with the sequence we're looking for
899  *
900  * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
901  * on the ep that should be released by the caller.
902  */
903 static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
904 						 struct fc_exch_mgr *mp,
905 						 struct fc_frame *fp)
906 {
907 	struct fc_frame_header *fh = fc_frame_header_get(fp);
908 	struct fc_exch *ep = NULL;
909 	struct fc_seq *sp = NULL;
910 	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
911 	u32 f_ctl;
912 	u16 xid;
913 
914 	f_ctl = ntoh24(fh->fh_f_ctl);
915 	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
916 
917 	/*
918 	 * Lookup or create the exchange if we will be creating the sequence.
919 	 */
920 	if (f_ctl & FC_FC_EX_CTX) {
921 		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
922 		ep = fc_exch_find(mp, xid);
923 		if (!ep) {
924 			atomic_inc(&mp->stats.xid_not_found);
925 			reject = FC_RJT_OX_ID;
926 			goto out;
927 		}
928 		if (ep->rxid == FC_XID_UNKNOWN)
929 			ep->rxid = ntohs(fh->fh_rx_id);
930 		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
931 			reject = FC_RJT_OX_ID;
932 			goto rel;
933 		}
934 	} else {
935 		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
936 
937 		/*
938 		 * Special case for MDS issuing an ELS TEST with a
939 		 * bad rxid of 0.
940 		 * XXX take this out once we do the proper reject.
941 		 */
942 		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
943 		    fc_frame_payload_op(fp) == ELS_TEST) {
944 			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
945 			xid = FC_XID_UNKNOWN;
946 		}
947 
948 		/*
949 		 * new sequence - find the exchange
950 		 */
951 		ep = fc_exch_find(mp, xid);
952 		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
953 			if (ep) {
954 				atomic_inc(&mp->stats.xid_busy);
955 				reject = FC_RJT_RX_ID;
956 				goto rel;
957 			}
958 			ep = fc_exch_resp(lport, mp, fp);
959 			if (!ep) {
960 				reject = FC_RJT_EXCH_EST;	/* XXX */
961 				goto out;
962 			}
963 			xid = ep->xid;	/* get our XID */
964 		} else if (!ep) {
965 			atomic_inc(&mp->stats.xid_not_found);
966 			reject = FC_RJT_RX_ID;	/* XID not found */
967 			goto out;
968 		}
969 	}
970 
971 	/*
972 	 * At this point, we have the exchange held.
973 	 * Find or create the sequence.
974 	 */
975 	if (fc_sof_is_init(fr_sof(fp))) {
976 		sp = &ep->seq;
977 		sp->ssb_stat |= SSB_ST_RESP;
978 		sp->id = fh->fh_seq_id;
979 	} else {
980 		sp = &ep->seq;
981 		if (sp->id != fh->fh_seq_id) {
982 			atomic_inc(&mp->stats.seq_not_found);
983 			if (f_ctl & FC_FC_END_SEQ) {
984 				/*
985 				 * Update the sequence ID from the incoming
986 				 * last frame of the sequence. This is needed
987 				 * for an FCoE target using DDP, where the
988 				 * stack is only notified of the last frame's
989 				 * header: that frame's seq_id was allocated
990 				 * by the initiator and differs from the
991 				 * seq_id allocated when the target sent
992 				 * XFER_RDY. Without this update the mismatch
993 				 * would be treated as a missing sequence,
994 				 * the RSP would not be sent, and the write
995 				 * request on the other end would never
996 				 * finish.
997 				 */
998 				spin_lock_bh(&ep->ex_lock);
999 				sp->ssb_stat |= SSB_ST_RESP;
1000 				sp->id = fh->fh_seq_id;
1001 				spin_unlock_bh(&ep->ex_lock);
1002 			} else {
1003 				/* sequence/exch should exist */
1004 				reject = FC_RJT_SEQ_ID;
1005 				goto rel;
1006 			}
1007 		}
1008 	}
1009 	WARN_ON(ep != fc_seq_exch(sp));
1010 
1011 	if (f_ctl & FC_FC_SEQ_INIT)
1012 		ep->esb_stat |= ESB_ST_SEQ_INIT;
1013 
1014 	fr_seq(fp) = sp;
1015 out:
1016 	return reject;
1017 rel:
1018 	fc_exch_done(&ep->seq);
1019 	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
1020 	return reject;
1021 }
1022 
1023 /**
1024  * fc_seq_lookup_orig() - Find a sequence where this end
1025  *			  originated the sequence
1026  * @mp:	   The Exchange Manager to lookup the exchange from
1027  * @fp:	   The frame associated with the sequence we're looking for
1028  *
1029  * Does not hold the sequence for the caller.
1030  */
1031 static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1032 					 struct fc_frame *fp)
1033 {
1034 	struct fc_frame_header *fh = fc_frame_header_get(fp);
1035 	struct fc_exch *ep;
1036 	struct fc_seq *sp = NULL;
1037 	u32 f_ctl;
1038 	u16 xid;
1039 
1040 	f_ctl = ntoh24(fh->fh_f_ctl);
1041 	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1042 	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1043 	ep = fc_exch_find(mp, xid);
1044 	if (!ep)
1045 		return NULL;
1046 	if (ep->seq.id == fh->fh_seq_id) {
1047 		/*
1048 		 * Save the RX_ID if we didn't previously know it.
1049 		 */
1050 		sp = &ep->seq;
1051 		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1052 		    ep->rxid == FC_XID_UNKNOWN) {
1053 			ep->rxid = ntohs(fh->fh_rx_id);
1054 		}
1055 	}
1056 	fc_exch_release(ep);
1057 	return sp;
1058 }
1059 
1060 /**
1061  * fc_exch_set_addr() - Set the source and destination IDs for an exchange
1062  * @ep:	     The exchange to set the addresses for
1063  * @orig_id: The originator's ID
1064  * @resp_id: The responder's ID
1065  *
1066  * Note this must be done before the first sequence of the exchange is sent.
1067  */
1068 static void fc_exch_set_addr(struct fc_exch *ep,
1069 			     u32 orig_id, u32 resp_id)
1070 {
1071 	ep->oid = orig_id;
1072 	if (ep->esb_stat & ESB_ST_RESP) {
1073 		ep->sid = resp_id;
1074 		ep->did = orig_id;
1075 	} else {
1076 		ep->sid = orig_id;
1077 		ep->did = resp_id;
1078 	}
1079 }
1080 
1081 /**
1082  * fc_seq_els_rsp_send() - Send an ELS response using information from
1083  *			   the existing sequence/exchange.
1084  * @fp:	      The received frame
1085  * @els_cmd:  The ELS command to be sent
1086  * @els_data: The ELS data to be sent
1087  *
1088  * The received frame is not freed.
1089  */
1090 static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1091 				struct fc_seq_els_data *els_data)
1092 {
1093 	switch (els_cmd) {
1094 	case ELS_LS_RJT:
1095 		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1096 		break;
1097 	case ELS_LS_ACC:
1098 		fc_seq_ls_acc(fp);
1099 		break;
1100 	case ELS_RRQ:
1101 		fc_exch_els_rrq(fp);
1102 		break;
1103 	case ELS_REC:
1104 		fc_exch_els_rec(fp);
1105 		break;
1106 	default:
1107 		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1108 	}
1109 }
1110 
1111 /**
1112  * fc_seq_send_last() - Send a sequence that is the last in the exchange
1113  * @sp:	     The sequence that is to be sent
1114  * @fp:	     The frame that will be sent on the sequence
1115  * @rctl:    The R_CTL information to be sent
1116  * @fh_type: The frame header type
1117  */
1118 static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1119 			     enum fc_rctl rctl, enum fc_fh_type fh_type)
1120 {
1121 	u32 f_ctl;
1122 	struct fc_exch *ep = fc_seq_exch(sp);
1123 
1124 	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1125 	f_ctl |= ep->f_ctl;
1126 	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1127 	fc_seq_send(ep->lp, sp, fp);
1128 }
1129 
1130 /**
1131  * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
1132  * @sp:	   The sequence to send the ACK on
1133  * @rx_fp: The received frame that is being acknowledged
1134  *
1135  * Send ACK_1 (or equiv.) indicating we received something.
1136  */
1137 static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1138 {
1139 	struct fc_frame *fp;
1140 	struct fc_frame_header *rx_fh;
1141 	struct fc_frame_header *fh;
1142 	struct fc_exch *ep = fc_seq_exch(sp);
1143 	struct fc_lport *lport = ep->lp;
1144 	unsigned int f_ctl;
1145 
1146 	/*
1147 	 * Don't send ACKs for class 3.
1148 	 */
1149 	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1150 		fp = fc_frame_alloc(lport, 0);
1151 		if (!fp)
1152 			return;
1153 
1154 		fh = fc_frame_header_get(fp);
1155 		fh->fh_r_ctl = FC_RCTL_ACK_1;
1156 		fh->fh_type = FC_TYPE_BLS;
1157 
1158 		/*
1159 		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1160 		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1161 		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1162 		 * Last ACK uses bits 7-6 (continue sequence),
1163 		 * bits 5-4 are meaningful (what kind of ACK to use).
1164 		 */
1165 		rx_fh = fc_frame_header_get(rx_fp);
1166 		f_ctl = ntoh24(rx_fh->fh_f_ctl);
1167 		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1168 			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1169 			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1170 			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1171 		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1172 		hton24(fh->fh_f_ctl, f_ctl);
1173 
1174 		fc_exch_setup_hdr(ep, fp, f_ctl);
1175 		fh->fh_seq_id = rx_fh->fh_seq_id;
1176 		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1177 		fh->fh_parm_offset = htonl(1);	/* ack single frame */
1178 
1179 		fr_sof(fp) = fr_sof(rx_fp);
1180 		if (f_ctl & FC_FC_END_SEQ)
1181 			fr_eof(fp) = FC_EOF_T;
1182 		else
1183 			fr_eof(fp) = FC_EOF_N;
1184 
1185 		lport->tt.frame_send(lport, fp);
1186 	}
1187 }
1188 
1189 /**
1190  * fc_exch_send_ba_rjt() - Send BLS Reject
1191  * @rx_fp:  The frame being rejected
1192  * @reason: The reason the frame is being rejected
1193  * @explan: The explanation for the rejection
1194  *
1195  * This is for rejecting BA_ABTS only.
1196  */
1197 static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1198 				enum fc_ba_rjt_reason reason,
1199 				enum fc_ba_rjt_explan explan)
1200 {
1201 	struct fc_frame *fp;
1202 	struct fc_frame_header *rx_fh;
1203 	struct fc_frame_header *fh;
1204 	struct fc_ba_rjt *rp;
1205 	struct fc_lport *lport;
1206 	unsigned int f_ctl;
1207 
1208 	lport = fr_dev(rx_fp);
1209 	fp = fc_frame_alloc(lport, sizeof(*rp));
1210 	if (!fp)
1211 		return;
1212 	fh = fc_frame_header_get(fp);
1213 	rx_fh = fc_frame_header_get(rx_fp);
1214 
1215 	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1216 
1217 	rp = fc_frame_payload_get(fp, sizeof(*rp));
1218 	rp->br_reason = reason;
1219 	rp->br_explan = explan;
1220 
1221 	/*
1222 	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1223 	 */
1224 	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1225 	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1226 	fh->fh_ox_id = rx_fh->fh_ox_id;
1227 	fh->fh_rx_id = rx_fh->fh_rx_id;
1228 	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1229 	fh->fh_r_ctl = FC_RCTL_BA_RJT;
1230 	fh->fh_type = FC_TYPE_BLS;
1231 
1232 	/*
1233 	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1234 	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1235 	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1236 	 * Last ACK uses bits 7-6 (continue sequence),
1237 	 * bits 5-4 are meaningful (what kind of ACK to use).
1238 	 * Always set LAST_SEQ, END_SEQ.
1239 	 */
1240 	f_ctl = ntoh24(rx_fh->fh_f_ctl);
1241 	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1242 		FC_FC_END_CONN | FC_FC_SEQ_INIT |
1243 		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1244 	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1245 	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1246 	f_ctl &= ~FC_FC_FIRST_SEQ;
1247 	hton24(fh->fh_f_ctl, f_ctl);
1248 
1249 	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1250 	fr_eof(fp) = FC_EOF_T;
1251 	if (fc_sof_needs_ack(fr_sof(fp)))
1252 		fr_eof(fp) = FC_EOF_N;
1253 
1254 	lport->tt.frame_send(lport, fp);
1255 }
1256 
1257 /**
1258  * fc_exch_recv_abts() - Handle an incoming ABTS
1259  * @ep:	   The exchange the abort was on
1260  * @rx_fp: The ABTS frame
1261  *
1262  * This would be for target mode usually, but could be due to lost
1263  * FCP transfer ready, confirm or RRQ. We always handle this as an
1264  * exchange abort, ignoring the parameter.
1265  */
1266 static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1267 {
1268 	struct fc_frame *fp;
1269 	struct fc_ba_acc *ap;
1270 	struct fc_frame_header *fh;
1271 	struct fc_seq *sp;
1272 
1273 	if (!ep)
1274 		goto reject;
1275 	spin_lock_bh(&ep->ex_lock);
1276 	if (ep->esb_stat & ESB_ST_COMPLETE) {
1277 		spin_unlock_bh(&ep->ex_lock);
1278 		goto reject;
1279 	}
1280 	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1281 		fc_exch_hold(ep);		/* hold for REC_QUAL */
1282 	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1283 	fc_exch_timer_set_locked(ep, ep->r_a_tov);
1284 
1285 	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1286 	if (!fp) {
1287 		spin_unlock_bh(&ep->ex_lock);
1288 		goto free;
1289 	}
1290 	fh = fc_frame_header_get(fp);
1291 	ap = fc_frame_payload_get(fp, sizeof(*ap));
1292 	memset(ap, 0, sizeof(*ap));
1293 	sp = &ep->seq;
1294 	ap->ba_high_seq_cnt = htons(0xffff);
1295 	if (sp->ssb_stat & SSB_ST_RESP) {
1296 		ap->ba_seq_id = sp->id;
1297 		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1298 		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1299 		ap->ba_low_seq_cnt = htons(sp->cnt);
1300 	}
1301 	sp = fc_seq_start_next_locked(sp);
1302 	spin_unlock_bh(&ep->ex_lock);
1303 	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1304 	fc_frame_free(rx_fp);
1305 	return;
1306 
1307 reject:
1308 	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1309 free:
1310 	fc_frame_free(rx_fp);
1311 }
1312 
1313 /**
1314  * fc_seq_assign() - Assign exchange and sequence for incoming request
1315  * @lport: The local port that received the request
1316  * @fp:    The request frame
1317  *
1318  * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1319  * A reference will be held on the exchange/sequence for the caller, which
1320  * must call fc_seq_release().
1321  */
1322 static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1323 {
1324 	struct fc_exch_mgr_anchor *ema;
1325 
1326 	WARN_ON(lport != fr_dev(fp));
1327 	WARN_ON(fr_seq(fp));
1328 	fr_seq(fp) = NULL;
1329 
1330 	list_for_each_entry(ema, &lport->ema_list, ema_list)
1331 		if ((!ema->match || ema->match(fp)) &&
1332 		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1333 			break;
1334 	return fr_seq(fp);
1335 }
1336 
1337 /**
1338  * fc_seq_release() - Release the hold
1339  * @sp:    The sequence.
1340  */
1341 static void fc_seq_release(struct fc_seq *sp)
1342 {
1343 	fc_exch_release(fc_seq_exch(sp));
1344 }
1345 
1346 /**
1347  * fc_exch_recv_req() - Handler for an incoming request
1348  * @lport: The local port that received the request
1349  * @mp:	   The EM that the exchange is on
1350  * @fp:	   The request frame
1351  *
1352  * This is used when the other end is originating the exchange
1353  * and the sequence.
1354  */
1355 static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1356 			     struct fc_frame *fp)
1357 {
1358 	struct fc_frame_header *fh = fc_frame_header_get(fp);
1359 	struct fc_seq *sp = NULL;
1360 	struct fc_exch *ep = NULL;
1361 	enum fc_pf_rjt_reason reject;
1362 
1363 	/* We can have the wrong fc_lport at this point with NPIV, which is a
1364 	 * problem now that we know a new exchange needs to be allocated
1365 	 */
1366 	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1367 	if (!lport) {
1368 		fc_frame_free(fp);
1369 		return;
1370 	}
1371 	fr_dev(fp) = lport;
1372 
1373 	BUG_ON(fr_seq(fp));		/* XXX remove later */
1374 
1375 	/*
1376 	 * If the RX_ID is 0xffff, don't allocate an exchange.
1377 	 * The upper-level protocol may request one later, if needed.
1378 	 */
1379 	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1380 		return lport->tt.lport_recv(lport, fp);
1381 
1382 	reject = fc_seq_lookup_recip(lport, mp, fp);
1383 	if (reject == FC_RJT_NONE) {
1384 		sp = fr_seq(fp);	/* sequence will be held */
1385 		ep = fc_seq_exch(sp);
1386 		fc_seq_send_ack(sp, fp);
1387 		ep->encaps = fr_encaps(fp);
1388 
1389 		/*
1390 		 * Call the receive function.
1391 		 *
1392 		 * The receive function may allocate a new sequence
1393 		 * over the old one, so we shouldn't change the
1394 		 * sequence after this.
1395 		 *
1396 		 * The frame will be freed by the receive function.
1397 		 * If new exch resp handler is valid then call that
1398 		 * first.
1399 		 */
1400 		if (ep->resp)
1401 			ep->resp(sp, fp, ep->arg);
1402 		else
1403 			lport->tt.lport_recv(lport, fp);
1404 		fc_exch_release(ep);	/* release from lookup */
1405 	} else {
1406 		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1407 			     reject);
1408 		fc_frame_free(fp);
1409 	}
1410 }
1411 
1412 /**
1413  * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1414  *			     end is the originator of the sequence that is a
1415  *			     response to our initial exchange
1416  * @mp: The EM that the exchange is on
1417  * @fp: The response frame
1418  */
1419 static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1420 {
1421 	struct fc_frame_header *fh = fc_frame_header_get(fp);
1422 	struct fc_seq *sp;
1423 	struct fc_exch *ep;
1424 	enum fc_sof sof;
1425 	u32 f_ctl;
1426 	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1427 	void *ex_resp_arg;
1428 	int rc;
1429 
1430 	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1431 	if (!ep) {
1432 		atomic_inc(&mp->stats.xid_not_found);
1433 		goto out;
1434 	}
1435 	if (ep->esb_stat & ESB_ST_COMPLETE) {
1436 		atomic_inc(&mp->stats.xid_not_found);
1437 		goto rel;
1438 	}
1439 	if (ep->rxid == FC_XID_UNKNOWN)
1440 		ep->rxid = ntohs(fh->fh_rx_id);
1441 	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1442 		atomic_inc(&mp->stats.xid_not_found);
1443 		goto rel;
1444 	}
1445 	if (ep->did != ntoh24(fh->fh_s_id) &&
1446 	    ep->did != FC_FID_FLOGI) {
1447 		atomic_inc(&mp->stats.xid_not_found);
1448 		goto rel;
1449 	}
1450 	sof = fr_sof(fp);
1451 	sp = &ep->seq;
1452 	if (fc_sof_is_init(sof)) {
1453 		sp->ssb_stat |= SSB_ST_RESP;
1454 		sp->id = fh->fh_seq_id;
1455 	} else if (sp->id != fh->fh_seq_id) {
1456 		atomic_inc(&mp->stats.seq_not_found);
1457 		goto rel;
1458 	}
1459 
1460 	f_ctl = ntoh24(fh->fh_f_ctl);
1461 	fr_seq(fp) = sp;
1462 	if (f_ctl & FC_FC_SEQ_INIT)
1463 		ep->esb_stat |= ESB_ST_SEQ_INIT;
1464 
1465 	if (fc_sof_needs_ack(sof))
1466 		fc_seq_send_ack(sp, fp);
1467 	resp = ep->resp;
1468 	ex_resp_arg = ep->arg;
1469 
1470 	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1471 	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1472 	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1473 		spin_lock_bh(&ep->ex_lock);
1474 		resp = ep->resp;
1475 		rc = fc_exch_done_locked(ep);
1476 		WARN_ON(fc_seq_exch(sp) != ep);
1477 		spin_unlock_bh(&ep->ex_lock);
1478 		if (!rc)
1479 			fc_exch_delete(ep);
1480 	}
1481 
1482 	/*
1483 	 * Call the receive function.
1484 	 * The sequence is held (has a refcnt) for us,
1485 	 * but not for the receive function.
1486 	 *
1487 	 * The receive function may allocate a new sequence
1488 	 * over the old one, so we shouldn't change the
1489 	 * sequence after this.
1490 	 *
1491 	 * The frame will be freed by the receive function.
1492 	 * If new exch resp handler is valid then call that
1493 	 * first.
1494 	 */
1495 	if (resp)
1496 		resp(sp, fp, ex_resp_arg);
1497 	else
1498 		fc_frame_free(fp);
1499 	fc_exch_release(ep);
1500 	return;
1501 rel:
1502 	fc_exch_release(ep);
1503 out:
1504 	fc_frame_free(fp);
1505 }
1506 
1507 /**
1508  * fc_exch_recv_resp() - Handler for a sequence where other end is
1509  *			 responding to our sequence
1510  * @mp: The EM that the exchange is on
1511  * @fp: The response frame
1512  */
1513 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1514 {
1515 	struct fc_seq *sp;
1516 
1517 	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
1518 
1519 	if (!sp)
1520 		atomic_inc(&mp->stats.xid_not_found);
1521 	else
1522 		atomic_inc(&mp->stats.non_bls_resp);
1523 
1524 	fc_frame_free(fp);
1525 }
1526 
1527 /**
1528  * fc_exch_abts_resp() - Handler for a response to an ABTS
1529  * @ep: The exchange that the frame is on
1530  * @fp: The response frame
1531  *
1532  * This response would be to an ABTS cancelling an exchange or sequence.
1533  * The response can be either BA_ACC or BA_RJT
1534  */
1535 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1536 {
1537 	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1538 	void *ex_resp_arg;
1539 	struct fc_frame_header *fh;
1540 	struct fc_ba_acc *ap;
1541 	struct fc_seq *sp;
1542 	u16 low;
1543 	u16 high;
1544 	int rc = 1, has_rec = 0;
1545 
1546 	fh = fc_frame_header_get(fp);
1547 	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1548 		    fc_exch_rctl_name(fh->fh_r_ctl));
1549 
1550 	if (cancel_delayed_work_sync(&ep->timeout_work))
1551 		fc_exch_release(ep);	/* release from pending timer hold */
1552 
1553 	spin_lock_bh(&ep->ex_lock);
1554 	switch (fh->fh_r_ctl) {
1555 	case FC_RCTL_BA_ACC:
1556 		ap = fc_frame_payload_get(fp, sizeof(*ap));
1557 		if (!ap)
1558 			break;
1559 
1560 		/*
1561 		 * Decide whether to establish a Recovery Qualifier.
1562 		 * We do this if there is a non-empty SEQ_CNT range and
1563 		 * SEQ_ID is the same as the one we aborted.
1564 		 */
1565 		low = ntohs(ap->ba_low_seq_cnt);
1566 		high = ntohs(ap->ba_high_seq_cnt);
1567 		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1568 		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1569 		     ap->ba_seq_id == ep->seq_id) && low != high) {
1570 			ep->esb_stat |= ESB_ST_REC_QUAL;
1571 			fc_exch_hold(ep);  /* hold for recovery qualifier */
1572 			has_rec = 1;
1573 		}
1574 		break;
1575 	case FC_RCTL_BA_RJT:
1576 		break;
1577 	default:
1578 		break;
1579 	}
1580 
1581 	resp = ep->resp;
1582 	ex_resp_arg = ep->arg;
1583 
1584 	/* Do we need to do some other checks here? Can we reuse more of
1585 	 * fc_exch_recv_seq_resp()?
1586 	 */
1587 	sp = &ep->seq;
1588 	/*
1589 	 * do we want to check END_SEQ as well as LAST_SEQ here?
1590 	 */
1591 	if (ep->fh_type != FC_TYPE_FCP &&
1592 	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1593 		rc = fc_exch_done_locked(ep);
1594 	spin_unlock_bh(&ep->ex_lock);
1595 	if (!rc)
1596 		fc_exch_delete(ep);
1597 
1598 	if (resp)
1599 		resp(sp, fp, ex_resp_arg);
1600 	else
1601 		fc_frame_free(fp);
1602 
1603 	if (has_rec)
1604 		fc_exch_timer_set(ep, ep->r_a_tov);
1605 
1606 }
1607 
1608 /**
1609  * fc_exch_recv_bls() - Handler for a BLS sequence
1610  * @mp: The EM that the exchange is on
1611  * @fp: The request frame
1612  *
1613  * The BLS frame is always a sequence initiated by the remote side.
1614  * We may be either the originator or recipient of the exchange.
1615  */
1616 static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1617 {
1618 	struct fc_frame_header *fh;
1619 	struct fc_exch *ep;
1620 	u32 f_ctl;
1621 
1622 	fh = fc_frame_header_get(fp);
1623 	f_ctl = ntoh24(fh->fh_f_ctl);
1624 	fr_seq(fp) = NULL;
1625 
1626 	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1627 			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1628 	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1629 		spin_lock_bh(&ep->ex_lock);
1630 		ep->esb_stat |= ESB_ST_SEQ_INIT;
1631 		spin_unlock_bh(&ep->ex_lock);
1632 	}
1633 	if (f_ctl & FC_FC_SEQ_CTX) {
1634 		/*
1635 		 * A response to a sequence we initiated.
1636 		 * This should only be ACKs for class 2 or F.
1637 		 */
1638 		switch (fh->fh_r_ctl) {
1639 		case FC_RCTL_ACK_1:
1640 		case FC_RCTL_ACK_0:
1641 			break;
1642 		default:
1643 			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1644 				    fh->fh_r_ctl,
1645 				    fc_exch_rctl_name(fh->fh_r_ctl));
1646 			break;
1647 		}
1648 		fc_frame_free(fp);
1649 	} else {
1650 		switch (fh->fh_r_ctl) {
1651 		case FC_RCTL_BA_RJT:
1652 		case FC_RCTL_BA_ACC:
1653 			if (ep)
1654 				fc_exch_abts_resp(ep, fp);
1655 			else
1656 				fc_frame_free(fp);
1657 			break;
1658 		case FC_RCTL_BA_ABTS:
1659 			fc_exch_recv_abts(ep, fp);
1660 			break;
1661 		default:			/* ignore junk */
1662 			fc_frame_free(fp);
1663 			break;
1664 		}
1665 	}
1666 	if (ep)
1667 		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
1668 }
1669 
1670 /**
1671  * fc_seq_ls_acc() - Accept sequence with LS_ACC
1672  * @rx_fp: The received frame, not freed here.
1673  *
1674  * If this fails due to allocation or transmit congestion, assume the
1675  * originator will repeat the sequence.
1676  */
1677 static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1678 {
1679 	struct fc_lport *lport;
1680 	struct fc_els_ls_acc *acc;
1681 	struct fc_frame *fp;
1682 
1683 	lport = fr_dev(rx_fp);
1684 	fp = fc_frame_alloc(lport, sizeof(*acc));
1685 	if (!fp)
1686 		return;
1687 	acc = fc_frame_payload_get(fp, sizeof(*acc));
1688 	memset(acc, 0, sizeof(*acc));
1689 	acc->la_cmd = ELS_LS_ACC;
1690 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1691 	lport->tt.frame_send(lport, fp);
1692 }
1693 
1694 /**
1695  * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1696  * @rx_fp: The received frame, not freed here.
1697  * @reason: The reason the sequence is being rejected
1698  * @explan: The explanation for the rejection
1699  *
1700  * If this fails due to allocation or transmit congestion, assume the
1701  * originator will repeat the sequence.
1702  */
1703 static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1704 			  enum fc_els_rjt_explan explan)
1705 {
1706 	struct fc_lport *lport;
1707 	struct fc_els_ls_rjt *rjt;
1708 	struct fc_frame *fp;
1709 
1710 	lport = fr_dev(rx_fp);
1711 	fp = fc_frame_alloc(lport, sizeof(*rjt));
1712 	if (!fp)
1713 		return;
1714 	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1715 	memset(rjt, 0, sizeof(*rjt));
1716 	rjt->er_cmd = ELS_LS_RJT;
1717 	rjt->er_reason = reason;
1718 	rjt->er_explan = explan;
1719 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1720 	lport->tt.frame_send(lport, fp);
1721 }
1722 
1723 /**
1724  * fc_exch_reset() - Reset an exchange
1725  * @ep: The exchange to be reset
1726  */
1727 static void fc_exch_reset(struct fc_exch *ep)
1728 {
1729 	struct fc_seq *sp;
1730 	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1731 	void *arg;
1732 	int rc = 1;
1733 
1734 	spin_lock_bh(&ep->ex_lock);
1735 	fc_exch_abort_locked(ep, 0);
1736 	ep->state |= FC_EX_RST_CLEANUP;
1737 	if (cancel_delayed_work(&ep->timeout_work))
1738 		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
1739 	resp = ep->resp;
1740 	ep->resp = NULL;
1741 	if (ep->esb_stat & ESB_ST_REC_QUAL)
1742 		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
1743 	ep->esb_stat &= ~ESB_ST_REC_QUAL;
1744 	arg = ep->arg;
1745 	sp = &ep->seq;
1746 	rc = fc_exch_done_locked(ep);
1747 	spin_unlock_bh(&ep->ex_lock);
1748 	if (!rc)
1749 		fc_exch_delete(ep);
1750 
1751 	if (resp)
1752 		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1753 }
1754 
1755 /**
1756  * fc_exch_pool_reset() - Reset a per cpu exchange pool
1757  * @lport: The local port that the exchange pool is on
1758  * @pool:  The exchange pool to be reset
1759  * @sid:   The source ID
1760  * @did:   The destination ID
1761  *
1762  * Resets a per-cpu exchange pool, releasing all of its sequences
1763  * and exchanges. If sid is non-zero then reset only exchanges
1764  * we sourced from the local port's FID. If did is non-zero then
1765  * only reset exchanges destined for the local port's FID.
1766  */
1767 static void fc_exch_pool_reset(struct fc_lport *lport,
1768 			       struct fc_exch_pool *pool,
1769 			       u32 sid, u32 did)
1770 {
1771 	struct fc_exch *ep;
1772 	struct fc_exch *next;
1773 
1774 	spin_lock_bh(&pool->lock);
1775 restart:
1776 	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1777 		if ((lport == ep->lp) &&
1778 		    (sid == 0 || sid == ep->sid) &&
1779 		    (did == 0 || did == ep->did)) {
1780 			fc_exch_hold(ep);
1781 			spin_unlock_bh(&pool->lock);
1782 
1783 			fc_exch_reset(ep);
1784 
1785 			fc_exch_release(ep);
1786 			spin_lock_bh(&pool->lock);
1787 
1788 			/*
1789 			 * must restart the loop in case multiple eps
1790 			 * were released while the lock was dropped.
1791 			 */
1792 			goto restart;
1793 		}
1794 	}
1795 	spin_unlock_bh(&pool->lock);
1796 }
1797 
1798 /**
1799  * fc_exch_mgr_reset() - Reset all EMs of a local port
1800  * @lport: The local port whose EMs are to be reset
1801  * @sid:   The source ID
1802  * @did:   The destination ID
1803  *
1804  * Reset all EMs associated with a given local port. Release all
1805  * sequences and exchanges. If sid is non-zero then reset only the
1806  * exchanges sent from the local port's FID. If did is non-zero then
1807  * reset only exchanges destined for the local port's FID.
1808  */
1809 void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1810 {
1811 	struct fc_exch_mgr_anchor *ema;
1812 	unsigned int cpu;
1813 
1814 	list_for_each_entry(ema, &lport->ema_list, ema_list) {
1815 		for_each_possible_cpu(cpu)
1816 			fc_exch_pool_reset(lport,
1817 					   per_cpu_ptr(ema->mp->pool, cpu),
1818 					   sid, did);
1819 	}
1820 }
1821 EXPORT_SYMBOL(fc_exch_mgr_reset);
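
/*
 * Illustrative use only: a caller that wants to drop every outstanding
 * exchange on a local port, regardless of source or destination ID,
 * passes zero for both filters, roughly as the lport reset path does
 * through the transport template:
 *
 *	lport->tt.exch_mgr_reset(lport, 0, 0);
 *
 * A non-zero @sid or @did restricts the reset to matching exchanges.
 */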
1822 
1823 /**
1824  * fc_exch_lookup() - Find an exchange by its exchange ID (XID)
1825  * @lport: The local port
1826  * @xid: The exchange ID
1827  *
1828  * Returns exchange pointer with hold for caller, or NULL if not found.
1829  */
1830 static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
1831 {
1832 	struct fc_exch_mgr_anchor *ema;
1833 
1834 	list_for_each_entry(ema, &lport->ema_list, ema_list)
1835 		if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
1836 			return fc_exch_find(ema->mp, xid);
1837 	return NULL;
1838 }
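
/*
 * Illustrative sketch only: fc_exch_lookup() returns the exchange with a
 * hold taken on the caller's behalf, so every successful lookup must be
 * paired with fc_exch_release(), as the REC and RRQ handlers below do:
 *
 *	ep = fc_exch_lookup(lport, xid);
 *	if (ep) {
 *		... use ep ...
 *		fc_exch_release(ep);
 *	}
 */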
1839 
1840 /**
1841  * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
1842  * @rfp: The REC frame, not freed here.
1843  *
1844  * Note that the requesting port may be different from the S_ID in the request.
1845  */
1846 static void fc_exch_els_rec(struct fc_frame *rfp)
1847 {
1848 	struct fc_lport *lport;
1849 	struct fc_frame *fp;
1850 	struct fc_exch *ep;
1851 	struct fc_els_rec *rp;
1852 	struct fc_els_rec_acc *acc;
1853 	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1854 	enum fc_els_rjt_explan explan;
1855 	u32 sid;
1856 	u16 rxid;
1857 	u16 oxid;
1858 
1859 	lport = fr_dev(rfp);
1860 	rp = fc_frame_payload_get(rfp, sizeof(*rp));
1861 	explan = ELS_EXPL_INV_LEN;
1862 	if (!rp)
1863 		goto reject;
1864 	sid = ntoh24(rp->rec_s_id);
1865 	rxid = ntohs(rp->rec_rx_id);
1866 	oxid = ntohs(rp->rec_ox_id);
1867 
1868 	ep = fc_exch_lookup(lport,
1869 			    sid == fc_host_port_id(lport->host) ? oxid : rxid);
1870 	explan = ELS_EXPL_OXID_RXID;
1871 	if (!ep)
1872 		goto reject;
1873 	if (ep->oid != sid || oxid != ep->oxid)
1874 		goto rel;
1875 	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
1876 		goto rel;
1877 	fp = fc_frame_alloc(lport, sizeof(*acc));
1878 	if (!fp)
1879 		goto out;
1880 
1881 	acc = fc_frame_payload_get(fp, sizeof(*acc));
1882 	memset(acc, 0, sizeof(*acc));
1883 	acc->reca_cmd = ELS_LS_ACC;
1884 	acc->reca_ox_id = rp->rec_ox_id;
1885 	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1886 	acc->reca_rx_id = htons(ep->rxid);
1887 	if (ep->sid == ep->oid)
1888 		hton24(acc->reca_rfid, ep->did);
1889 	else
1890 		hton24(acc->reca_rfid, ep->sid);
1891 	acc->reca_fc4value = htonl(ep->seq.rec_data);
1892 	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1893 						 ESB_ST_SEQ_INIT |
1894 						 ESB_ST_COMPLETE));
1895 	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
1896 	lport->tt.frame_send(lport, fp);
1897 out:
1898 	fc_exch_release(ep);
1899 	return;
1900 
1901 rel:
1902 	fc_exch_release(ep);
1903 reject:
1904 	fc_seq_ls_rjt(rfp, reason, explan);
1905 }
1906 
1907 /**
1908  * fc_exch_rrq_resp() - Handler for RRQ responses
1909  * @sp:	 The sequence that the RRQ is on
1910  * @fp:	 The RRQ frame
1911  * @arg: The exchange that the RRQ is on
1912  *
1913  * TODO: fix error handler.
1914  */
1915 static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1916 {
1917 	struct fc_exch *aborted_ep = arg;
1918 	unsigned int op;
1919 
1920 	if (IS_ERR(fp)) {
1921 		int err = PTR_ERR(fp);
1922 
1923 		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
1924 			goto cleanup;
1925 		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1926 			    "frame error %d\n", err);
1927 		return;
1928 	}
1929 
1930 	op = fc_frame_payload_op(fp);
1931 	fc_frame_free(fp);
1932 
1933 	switch (op) {
1934 	case ELS_LS_RJT:
1935 		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
1936 		/* fall through */
1937 	case ELS_LS_ACC:
1938 		goto cleanup;
1939 	default:
1940 		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1941 			    "for RRQ\n", op);
1942 		return;
1943 	}
1944 
1945 cleanup:
1946 	fc_exch_done(&aborted_ep->seq);
1947 	/* drop hold for rec qual */
1948 	fc_exch_release(aborted_ep);
1949 }
1950 
1951 
1952 /**
1953  * fc_exch_seq_send() - Send a frame using a new exchange and sequence
1954  * @lport:	The local port to send the frame on
1955  * @fp:		The frame to be sent
1956  * @resp:	The response handler for this request
1957  * @destructor: The destructor for the exchange
1958  * @arg:	The argument to be passed to the response handler
1959  * @timer_msec: The timeout period for the exchange
1960  *
1961  * Some of the frame header's fields must be filled in before
1962  * calling this routine; those fields are:
1963  *
1964  * - routing control
1965  * - FC port did
1966  * - FC port sid
1967  * - FC header type
1968  * - frame control
1969  * - parameter or relative offset
1970  */
1971 static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
1972 				       struct fc_frame *fp,
1973 				       void (*resp)(struct fc_seq *,
1974 						    struct fc_frame *fp,
1975 						    void *arg),
1976 				       void (*destructor)(struct fc_seq *,
1977 							  void *),
1978 				       void *arg, u32 timer_msec)
1979 {
1980 	struct fc_exch *ep;
1981 	struct fc_seq *sp = NULL;
1982 	struct fc_frame_header *fh;
1983 	struct fc_fcp_pkt *fsp = NULL;
1984 	int rc = 1;
1985 
1986 	ep = fc_exch_alloc(lport, fp);
1987 	if (!ep) {
1988 		fc_frame_free(fp);
1989 		return NULL;
1990 	}
1991 	ep->esb_stat |= ESB_ST_SEQ_INIT;
1992 	fh = fc_frame_header_get(fp);
1993 	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1994 	ep->resp = resp;
1995 	ep->destructor = destructor;
1996 	ep->arg = arg;
1997 	ep->r_a_tov = FC_DEF_R_A_TOV;
1998 	ep->lp = lport;
1999 	sp = &ep->seq;
2000 
2001 	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
2002 	ep->f_ctl = ntoh24(fh->fh_f_ctl);
2003 	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2004 	sp->cnt++;
2005 
2006 	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2007 		fsp = fr_fsp(fp);
2008 		fc_fcp_ddp_setup(fsp, ep->xid);
2009 	}
2010 
2011 	if (unlikely(lport->tt.frame_send(lport, fp)))
2012 		goto err;
2013 
2014 	if (timer_msec)
2015 		fc_exch_timer_set_locked(ep, timer_msec);
2016 	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
2017 
2018 	if (ep->f_ctl & FC_FC_SEQ_INIT)
2019 		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2020 	spin_unlock_bh(&ep->ex_lock);
2021 	return sp;
2022 err:
2023 	if (fsp)
2024 		fc_fcp_ddp_done(fsp);
2025 	rc = fc_exch_done_locked(ep);
2026 	spin_unlock_bh(&ep->ex_lock);
2027 	if (!rc)
2028 		fc_exch_delete(ep);
2029 	return NULL;
2030 }
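
/*
 * Illustrative use only (it mirrors fc_exch_rrq() below): the caller
 * allocates a frame, fills in the payload, sets the header fields listed
 * above with fc_fill_fc_hdr(), and then hands the frame off:
 *
 *	fp = fc_frame_alloc(lport, payload_len);
 *	if (!fp)
 *		return;
 *	... fill the payload ...
 *	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id,
 *		       FC_TYPE_ELS,
 *		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 *	if (!fc_exch_seq_send(lport, fp, my_resp, NULL, my_arg,
 *			      lport->e_d_tov))
 *		... handle the send failure ...
 *
 * Here payload_len, my_resp and my_arg are placeholders, not names
 * defined in this file.
 */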
2031 
2032 /**
2033  * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
2034  * @ep: The exchange to send the RRQ on
2035  *
2036  * This tells the remote port to stop blocking the use of
2037  * the exchange and the seq_cnt range.
2038  */
2039 static void fc_exch_rrq(struct fc_exch *ep)
2040 {
2041 	struct fc_lport *lport;
2042 	struct fc_els_rrq *rrq;
2043 	struct fc_frame *fp;
2044 	u32 did;
2045 
2046 	lport = ep->lp;
2047 
2048 	fp = fc_frame_alloc(lport, sizeof(*rrq));
2049 	if (!fp)
2050 		goto retry;
2051 
2052 	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
2053 	memset(rrq, 0, sizeof(*rrq));
2054 	rrq->rrq_cmd = ELS_RRQ;
2055 	hton24(rrq->rrq_s_id, ep->sid);
2056 	rrq->rrq_ox_id = htons(ep->oxid);
2057 	rrq->rrq_rx_id = htons(ep->rxid);
2058 
2059 	did = ep->did;
2060 	if (ep->esb_stat & ESB_ST_RESP)
2061 		did = ep->sid;
2062 
2063 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
2064 		       lport->port_id, FC_TYPE_ELS,
2065 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
2066 
2067 	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
2068 			     lport->e_d_tov))
2069 		return;
2070 
2071 retry:
2072 	spin_lock_bh(&ep->ex_lock);
2073 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
2074 		spin_unlock_bh(&ep->ex_lock);
2075 		/* drop hold for rec qual */
2076 		fc_exch_release(ep);
2077 		return;
2078 	}
2079 	ep->esb_stat |= ESB_ST_REC_QUAL;
2080 	fc_exch_timer_set_locked(ep, ep->r_a_tov);
2081 	spin_unlock_bh(&ep->ex_lock);
2082 }
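
/*
 * Protocol flow, paraphrasing the code above and fc_exch_rrq_resp():
 * after an exchange is aborted, its recovery qualifier keeps the XID and
 * SEQ_CNT range blocked at the remote port. fc_exch_rrq() sends the RRQ
 * ELS to lift that block; on LS_ACC or LS_RJT the response handler
 * completes the aborted exchange and drops the recovery-qualifier hold.
 * If the RRQ cannot be sent, the recovery qualifier is kept and the
 * exchange timer is re-armed for R_A_TOV so the qualifier can be dealt
 * with later.
 */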
2083 
2084 /**
2085  * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
2086  * @fp: The RRQ frame, not freed here.
2087  */
2088 static void fc_exch_els_rrq(struct fc_frame *fp)
2089 {
2090 	struct fc_lport *lport;
2091 	struct fc_exch *ep = NULL;	/* request or subject exchange */
2092 	struct fc_els_rrq *rp;
2093 	u32 sid;
2094 	u16 xid;
2095 	enum fc_els_rjt_explan explan;
2096 
2097 	lport = fr_dev(fp);
2098 	rp = fc_frame_payload_get(fp, sizeof(*rp));
2099 	explan = ELS_EXPL_INV_LEN;
2100 	if (!rp)
2101 		goto reject;
2102 
2103 	/*
2104 	 * lookup subject exchange.
2105 	 */
2106 	sid = ntoh24(rp->rrq_s_id);		/* subject source */
2107 	xid = fc_host_port_id(lport->host) == sid ?
2108 			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
2109 	ep = fc_exch_lookup(lport, xid);
2110 	explan = ELS_EXPL_OXID_RXID;
2111 	if (!ep)
2112 		goto reject;
2113 	spin_lock_bh(&ep->ex_lock);
2114 	if (ep->oxid != ntohs(rp->rrq_ox_id))
2115 		goto unlock_reject;
2116 	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
2117 	    ep->rxid != FC_XID_UNKNOWN)
2118 		goto unlock_reject;
2119 	explan = ELS_EXPL_SID;
2120 	if (ep->sid != sid)
2121 		goto unlock_reject;
2122 
2123 	/*
2124 	 * Clear Recovery Qualifier state, and cancel timer if complete.
2125 	 */
2126 	if (ep->esb_stat & ESB_ST_REC_QUAL) {
2127 		ep->esb_stat &= ~ESB_ST_REC_QUAL;
2128 		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
2129 	}
2130 	if (ep->esb_stat & ESB_ST_COMPLETE) {
2131 		if (cancel_delayed_work(&ep->timeout_work))
2132 			atomic_dec(&ep->ex_refcnt);	/* drop timer hold */
2133 	}
2134 
2135 	spin_unlock_bh(&ep->ex_lock);
2136 
2137 	/*
2138 	 * Send LS_ACC.
2139 	 */
2140 	fc_seq_ls_acc(fp);
2141 	goto out;
2142 
2143 unlock_reject:
2144 	spin_unlock_bh(&ep->ex_lock);
2145 reject:
2146 	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2147 out:
2148 	if (ep)
2149 		fc_exch_release(ep);	/* drop hold from fc_exch_find */
2150 }
2151 
2152 /**
2153  * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
2154  * @lport: The local port to add the exchange manager to
2155  * @mp:	   The exchange manager to be added to the local port
2156  * @match: The match routine that indicates when this EM should be used
2157  */
2158 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2159 					   struct fc_exch_mgr *mp,
2160 					   bool (*match)(struct fc_frame *))
2161 {
2162 	struct fc_exch_mgr_anchor *ema;
2163 
2164 	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2165 	if (!ema)
2166 		return ema;
2167 
2168 	ema->mp = mp;
2169 	ema->match = match;
2170 	/* add EM anchor to EM anchors list */
2171 	list_add_tail(&ema->ema_list, &lport->ema_list);
2172 	kref_get(&mp->kref);
2173 	return ema;
2174 }
2175 EXPORT_SYMBOL(fc_exch_mgr_add);
2176 
2177 /**
2178  * fc_exch_mgr_destroy() - Destroy an exchange manager
2179  * @kref: The reference to the EM to be destroyed
2180  */
2181 static void fc_exch_mgr_destroy(struct kref *kref)
2182 {
2183 	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2184 
2185 	mempool_destroy(mp->ep_pool);
2186 	free_percpu(mp->pool);
2187 	kfree(mp);
2188 }
2189 
2190 /**
2191  * fc_exch_mgr_del() - Delete an EM from a local port's list
2192  * @ema: The exchange manager anchor identifying the EM to be deleted
2193  */
2194 void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2195 {
2196 	/* remove EM anchor from EM anchors list */
2197 	list_del(&ema->ema_list);
2198 	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2199 	kfree(ema);
2200 }
2201 EXPORT_SYMBOL(fc_exch_mgr_del);
2202 
2203 /**
2204  * fc_exch_mgr_list_clone() - Share all exchange manager objects
2205  * @src: Source lport to clone exchange managers from
2206  * @dst: New lport that takes references to all the exchange managers
2207  */
2208 int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2209 {
2210 	struct fc_exch_mgr_anchor *ema, *tmp;
2211 
2212 	list_for_each_entry(ema, &src->ema_list, ema_list) {
2213 		if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2214 			goto err;
2215 	}
2216 	return 0;
2217 err:
2218 	list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2219 		fc_exch_mgr_del(ema);
2220 	return -ENOMEM;
2221 }
2222 EXPORT_SYMBOL(fc_exch_mgr_list_clone);
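
/*
 * Illustrative only: sharing exchange managers is intended for cases
 * such as NPIV, where a newly created VN_Port reuses its parent
 * N_Port's exchange managers instead of allocating its own, e.g.:
 *
 *	if (fc_exch_mgr_list_clone(n_port, vn_port))
 *		... fail the vport creation ...
 *
 * n_port and vn_port are placeholder lport pointers. On failure, every
 * anchor already added to the new port is removed and -ENOMEM is
 * returned.
 */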
2223 
2224 /**
2225  * fc_exch_mgr_alloc() - Allocate an exchange manager
2226  * @lport:   The local port that the new EM will be associated with
2227  * @class:   The default FC class for new exchanges
2228  * @min_xid: The minimum XID for exchanges from the new EM
2229  * @max_xid: The maximum XID for exchanges from the new EM
2230  * @match:   The match routine for the new EM
2231  */
2232 struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2233 				      enum fc_class class,
2234 				      u16 min_xid, u16 max_xid,
2235 				      bool (*match)(struct fc_frame *))
2236 {
2237 	struct fc_exch_mgr *mp;
2238 	u16 pool_exch_range;
2239 	size_t pool_size;
2240 	unsigned int cpu;
2241 	struct fc_exch_pool *pool;
2242 
2243 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
2244 	    (min_xid & fc_cpu_mask) != 0) {
2245 		FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
2246 			     min_xid, max_xid);
2247 		return NULL;
2248 	}
2249 
2250 	/*
2251 	 * allocate memory for EM
2252 	 */
2253 	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2254 	if (!mp)
2255 		return NULL;
2256 
2257 	mp->class = class;
2258 	/* adjust em exch xid range for offload */
2259 	mp->min_xid = min_xid;
2260 	mp->max_xid = max_xid;
2261 
2262 	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2263 	if (!mp->ep_pool)
2264 		goto free_mp;
2265 
2266 	/*
2267 	 * Set up the per-cpu exchange pools with the exchange ID range
2268 	 * divided equally across all CPUs. Each pool is allocated together
2269 	 * with the exchange pointer array for its share of the range.
2270 	 */
2271 	pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
2272 	mp->pool_max_index = pool_exch_range - 1;
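
	/*
	 * Worked example (illustrative numbers only): with four possible
	 * CPUs, fc_cpu_mask is 3, so an XID range of 0x0000..0x0fff gives
	 * pool_exch_range = 0x1000 / 4 = 1024 exchanges per pool and
	 * pool_max_index = 1023. min_xid must have its fc_cpu_mask bits
	 * clear (checked above) so that the low bits of an XID select the
	 * pool and the remaining bits index into it.
	 */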
2273 
2274 	/*
2275 	 * Allocate and initialize per cpu exch pool
2276 	 */
2277 	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2278 	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2279 	if (!mp->pool)
2280 		goto free_mempool;
2281 	for_each_possible_cpu(cpu) {
2282 		pool = per_cpu_ptr(mp->pool, cpu);
2283 		pool->left = FC_XID_UNKNOWN;
2284 		pool->right = FC_XID_UNKNOWN;
2285 		spin_lock_init(&pool->lock);
2286 		INIT_LIST_HEAD(&pool->ex_list);
2287 	}
2288 
2289 	kref_init(&mp->kref);
2290 	if (!fc_exch_mgr_add(lport, mp, match)) {
2291 		free_percpu(mp->pool);
2292 		goto free_mempool;
2293 	}
2294 
2295 	/*
2296 	 * The kref_init() above sets mp->kref to 1 and the call to
2297 	 * fc_exch_mgr_add() incremented it again, so drop that
2298 	 * extra reference here.
2299 	 */
2300 	kref_put(&mp->kref, fc_exch_mgr_destroy);
2301 	return mp;
2302 
2303 free_mempool:
2304 	mempool_destroy(mp->ep_pool);
2305 free_mp:
2306 	kfree(mp);
2307 	return NULL;
2308 }
2309 EXPORT_SYMBOL(fc_exch_mgr_alloc);
2310 
2311 /**
2312  * fc_exch_mgr_free() - Free all exchange managers on a local port
2313  * @lport: The local port whose EMs are to be freed
2314  */
2315 void fc_exch_mgr_free(struct fc_lport *lport)
2316 {
2317 	struct fc_exch_mgr_anchor *ema, *next;
2318 
2319 	flush_workqueue(fc_exch_workqueue);
2320 	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2321 		fc_exch_mgr_del(ema);
2322 }
2323 EXPORT_SYMBOL(fc_exch_mgr_free);
2324 
2325 /**
2326  * fc_find_ema() - Lookup and return the appropriate Exchange Manager
2327  *		    Anchor based on the frame's exchange ID
2328  * @f_ctl: The F_CTL field from the received frame header
2329  * @lport: The local port the frame was received on
2330  * @fh: The received frame header
2331  */
2332 static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2333 					      struct fc_lport *lport,
2334 					      struct fc_frame_header *fh)
2335 {
2336 	struct fc_exch_mgr_anchor *ema;
2337 	u16 xid;
2338 
2339 	if (f_ctl & FC_FC_EX_CTX)
2340 		xid = ntohs(fh->fh_ox_id);
2341 	else {
2342 		xid = ntohs(fh->fh_rx_id);
2343 		if (xid == FC_XID_UNKNOWN)
2344 			return list_entry(lport->ema_list.prev,
2345 					  typeof(*ema), ema_list);
2346 	}
2347 
2348 	list_for_each_entry(ema, &lport->ema_list, ema_list) {
2349 		if ((xid >= ema->mp->min_xid) &&
2350 		    (xid <= ema->mp->max_xid))
2351 			return ema;
2352 	}
2353 	return NULL;
2354 }
2355 /**
2356  * fc_exch_recv() - Handler for received frames
2357  * @lport: The local port the frame was received on
2358  * @fp:	The received frame
2359  */
2360 void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2361 {
2362 	struct fc_frame_header *fh = fc_frame_header_get(fp);
2363 	struct fc_exch_mgr_anchor *ema;
2364 	u32 f_ctl;
2365 
2366 	/* lport lock ? */
2367 	if (!lport || lport->state == LPORT_ST_DISABLED) {
2368 		FC_LPORT_DBG(lport, "Receiving frames for an lport that "
2369 			     "has not been initialized correctly\n");
2370 		fc_frame_free(fp);
2371 		return;
2372 	}
2373 
2374 	f_ctl = ntoh24(fh->fh_f_ctl);
2375 	ema = fc_find_ema(f_ctl, lport, fh);
2376 	if (!ema) {
2377 		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
2378 				    "f_ctl <0x%x>, xid <0x%x>\n",
2379 				     f_ctl,
2380 				     (f_ctl & FC_FC_EX_CTX) ?
2381 				     ntohs(fh->fh_ox_id) :
2382 				     ntohs(fh->fh_rx_id));
2383 		fc_frame_free(fp);
2384 		return;
2385 	}
2386 
2387 	/*
2388 	 * Dispatch the frame by its EOF type; frames with an invalid EOF are dropped.
2389 	 */
2390 	switch (fr_eof(fp)) {
2391 	case FC_EOF_T:
2392 		if (f_ctl & FC_FC_END_SEQ)
2393 			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2394 		/* fall through */
2395 	case FC_EOF_N:
2396 		if (fh->fh_type == FC_TYPE_BLS)
2397 			fc_exch_recv_bls(ema->mp, fp);
2398 		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2399 			 FC_FC_EX_CTX)
2400 			fc_exch_recv_seq_resp(ema->mp, fp);
2401 		else if (f_ctl & FC_FC_SEQ_CTX)
2402 			fc_exch_recv_resp(ema->mp, fp);
2403 		else	/* no EX_CTX and no SEQ_CTX */
2404 			fc_exch_recv_req(lport, ema->mp, fp);
2405 		break;
2406 	default:
2407 		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
2408 			     fr_eof(fp));
2409 		fc_frame_free(fp);
2410 	}
2411 }
2412 EXPORT_SYMBOL(fc_exch_recv);
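
/*
 * Illustrative only: a lower-level driver's receive path is expected to
 * attach the local port to the frame and then pass it up, roughly:
 *
 *	fr_dev(fp) = lport;
 *	fc_exch_recv(lport, fp);
 *
 * The handler demultiplexes on the F_CTL context bits: EX_CTX set with
 * SEQ_CTX clear means a response starting a new sequence, SEQ_CTX set
 * means a response to an existing sequence, and neither bit set means a
 * new request.
 */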
2413 
2414 /**
2415  * fc_exch_init() - Initialize the exchange layer for a local port
2416  * @lport: The local port to initialize the exchange layer for
2417  */
2418 int fc_exch_init(struct fc_lport *lport)
2419 {
2420 	if (!lport->tt.seq_start_next)
2421 		lport->tt.seq_start_next = fc_seq_start_next;
2422 
2423 	if (!lport->tt.seq_set_resp)
2424 		lport->tt.seq_set_resp = fc_seq_set_resp;
2425 
2426 	if (!lport->tt.exch_seq_send)
2427 		lport->tt.exch_seq_send = fc_exch_seq_send;
2428 
2429 	if (!lport->tt.seq_send)
2430 		lport->tt.seq_send = fc_seq_send;
2431 
2432 	if (!lport->tt.seq_els_rsp_send)
2433 		lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
2434 
2435 	if (!lport->tt.exch_done)
2436 		lport->tt.exch_done = fc_exch_done;
2437 
2438 	if (!lport->tt.exch_mgr_reset)
2439 		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2440 
2441 	if (!lport->tt.seq_exch_abort)
2442 		lport->tt.seq_exch_abort = fc_seq_exch_abort;
2443 
2444 	if (!lport->tt.seq_assign)
2445 		lport->tt.seq_assign = fc_seq_assign;
2446 
2447 	if (!lport->tt.seq_release)
2448 		lport->tt.seq_release = fc_seq_release;
2449 
2450 	return 0;
2451 }
2452 EXPORT_SYMBOL(fc_exch_init);
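
/*
 * Illustrative only: fc_exch_init() fills in just the transport template
 * operations that the lower-level driver left NULL, so a driver wanting
 * to override one of them assigns it before calling in:
 *
 *	lport->tt.exch_mgr_reset = my_exch_mgr_reset;
 *	fc_exch_init(lport);
 *
 * my_exch_mgr_reset is a placeholder name, not a function in libfc.
 */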
2453 
2454 /**
2455  * fc_setup_exch_mgr() - Set up the exchange layer's module-wide resources
2456  */
2457 int fc_setup_exch_mgr(void)
2458 {
2459 	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2460 					 0, SLAB_HWCACHE_ALIGN, NULL);
2461 	if (!fc_em_cachep)
2462 		return -ENOMEM;
2463 
2464 	/*
2465 	 * Initialize fc_cpu_mask and fc_cpu_order. nr_cpu_ids is
2466 	 * rounded up to the next power of two; fc_cpu_mask is that
2467 	 * value minus one and fc_cpu_order is its base-2 logarithm.
2468 	 * Both are used later to map between an exchange ID and an
2469 	 * index into the per-cpu exchange pool's array of exchange
2470 	 * pointers.
2471 	 *
2472 	 * The round-up aligns fc_cpu_mask with the low-order bits of
2473 	 * the exchange ID, so that every incoming frame of an
2474 	 * exchange is delivered to the CPU on which the exchange
2475 	 * originated, using a simple bitwise AND of the exchange ID
2476 	 * and fc_cpu_mask.
2477 	 */
2478 	fc_cpu_mask = 1;
2479 	fc_cpu_order = 0;
2480 	while (fc_cpu_mask < nr_cpu_ids) {
2481 		fc_cpu_mask <<= 1;
2482 		fc_cpu_order++;
2483 	}
2484 	fc_cpu_mask--;
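
	/*
	 * Example (illustrative only): with nr_cpu_ids == 6 the loop above
	 * stops once fc_cpu_mask reaches 8, so fc_cpu_order is 3 and the
	 * final fc_cpu_mask is 7. An incoming exchange ID then selects its
	 * per-cpu pool with (xid & fc_cpu_mask), and fc_cpu_order is used
	 * to derive the slot within that pool's exchange array.
	 */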
2485 
2486 	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2487 	if (!fc_exch_workqueue)
2488 		goto err;
2489 	return 0;
2490 err:
2491 	kmem_cache_destroy(fc_em_cachep);
2492 	return -ENOMEM;
2493 }
2494 
2495 /**
2496  * fc_destroy_exch_mgr() - Tear down the exchange layer's module-wide resources
2497  */
2498 void fc_destroy_exch_mgr(void)
2499 {
2500 	destroy_workqueue(fc_exch_workqueue);
2501 	kmem_cache_destroy(fc_em_cachep);
2502 }
2503