xref: /linux/drivers/scsi/libfc/fc_exch.c (revision 163b099146b85d1b05bd2eaa045acbeee25c29e4)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright(c) 2007 Intel Corporation. All rights reserved.
4   * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
5   * Copyright(c) 2008 Mike Christie
6   *
7   * Maintained at www.Open-FCoE.org
8   */
9  
10  /*
11   * Fibre Channel exchange and sequence handling.
12   */
13  
14  #include <linux/timer.h>
15  #include <linux/slab.h>
16  #include <linux/err.h>
17  #include <linux/export.h>
18  #include <linux/log2.h>
19  
20  #include <scsi/fc/fc_fc2.h>
21  
22  #include <scsi/libfc.h>
23  
24  #include "fc_libfc.h"
25  
26  u16	fc_cpu_mask;		/* cpu mask for possible cpus */
27  EXPORT_SYMBOL(fc_cpu_mask);
28  static u16	fc_cpu_order;	/* log2 of possible cpus, rounded up */
29  static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
30  static struct workqueue_struct *fc_exch_workqueue;
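/*
 * Exchange IDs are striped across CPUs: fc_exch_em_alloc() builds an XID
 * as (index << fc_cpu_order | cpu) + min_xid, and fc_exch_find() recovers
 * the CPU with (xid & fc_cpu_mask) and the pool index with
 * (xid - min_xid) >> fc_cpu_order.
 */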
31  
32  /*
33   * Structure and function definitions for managing Fibre Channel Exchanges
34   * and Sequences.
35   *
36   * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
37   *
38   * fc_exch_mgr holds the exchange state for an N port
39   *
40   * fc_exch holds state for one exchange and links to its active sequence.
41   *
42   * fc_seq holds the state for an individual sequence.
43   */
44  
45  /**
46   * struct fc_exch_pool - Per cpu exchange pool
47   * @next_index:	  Next possible free exchange index
48   * @total_exches: Total allocated exchanges
49   * @lock:	  Exch pool lock
50   * @ex_list:	  List of exchanges
51   * @left:	  Cache of free slot in exch array
52   * @right:	  Cache of free slot in exch array
53   *
54   * This structure manages per-CPU exchanges via an array of exchange
55   * pointers. The array is allocated immediately after the struct
56   * fc_exch_pool memory, for the range of exchanges assigned to this CPU's pool.
57   */
58  struct fc_exch_pool {
59  	spinlock_t	 lock;
60  	struct list_head ex_list;
61  	u16		 next_index;
62  	u16		 total_exches;
63  
64  	u16		 left;
65  	u16		 right;
66  } ____cacheline_aligned_in_smp;
67  
68  /**
69   * struct fc_exch_mgr - The Exchange Manager (EM).
70   * @class:	    Default class for new sequences
71   * @kref:	    Reference counter
72   * @min_xid:	    Minimum exchange ID
73   * @max_xid:	    Maximum exchange ID
74   * @ep_pool:	    Reserved exchange pointers
75   * @pool_max_index: Max exch array index in exch pool
76   * @pool:	    Per cpu exch pool
77   * @lport:	    Local exchange port
78   * @stats:	    Statistics structure
79   *
80   * This structure is the center for creating exchanges and sequences.
81   * It manages the allocation of exchange IDs.
82   */
83  struct fc_exch_mgr {
84  	struct fc_exch_pool __percpu *pool;
85  	mempool_t	*ep_pool;
86  	struct fc_lport	*lport;
87  	enum fc_class	class;
88  	struct kref	kref;
89  	u16		min_xid;
90  	u16		max_xid;
91  	u16		pool_max_index;
92  
93  	struct {
94  		atomic_t no_free_exch;
95  		atomic_t no_free_exch_xid;
96  		atomic_t xid_not_found;
97  		atomic_t xid_busy;
98  		atomic_t seq_not_found;
99  		atomic_t non_bls_resp;
100  	} stats;
101  };
102  
103  /**
104   * struct fc_exch_mgr_anchor - primary structure for list of EMs
105   * @ema_list: Exchange Manager Anchor list
106   * @mp:	      Exchange Manager associated with this anchor
107   * @match:    Routine to determine if this anchor's EM should be used
108   *
109   * When walking the list of anchors the match routine will be called
110   * for each anchor to determine if that EM should be used. The last
111   * anchor in the list will always match, to handle any exchanges not
112   * handled by other EMs. Non-default EMs are added to the
113   * anchor list by hardware that provides offloads.
114   */
115  struct fc_exch_mgr_anchor {
116  	struct list_head ema_list;
117  	struct fc_exch_mgr *mp;
118  	bool (*match)(struct fc_frame *);
119  };
120  
121  static void fc_exch_rrq(struct fc_exch *);
122  static void fc_seq_ls_acc(struct fc_frame *);
123  static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
124  			  enum fc_els_rjt_explan);
125  static void fc_exch_els_rec(struct fc_frame *);
126  static void fc_exch_els_rrq(struct fc_frame *);
127  
128  /*
129   * Internal implementation notes.
130   *
131   * There is one exchange manager by default in libfc, but an LLD may
132   * choose to have one per CPU. There is one sequence manager per exchange
133   * manager and the two are currently never separated.
134   *
135   * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
136   * assigned by the Sequence Initiator that shall be unique for a specific
137   * D_ID and S_ID pair while the Sequence is open."   Note that it isn't
138   * qualified by exchange ID, which one might think it would be.
139   * In practice this limits the number of open sequences and exchanges to 256
140   * per session.	 For most targets we could treat this limit as per exchange.
141   *
142   * The exchange and its sequence are freed when the last sequence is received.
143   * It's possible for the remote port to leave an exchange open without
144   * sending any sequences.
145   *
146   * Notes on reference counts:
147   *
148   * Exchanges are reference counted and an exchange is freed when its
149   * reference count reaches zero.
150   *
151   * Timeouts:
152   * Sequences are timed out for E_D_TOV and R_A_TOV.
153   *
154   * Sequence event handling:
155   *
156   * The following events may occur on initiator sequences:
157   *
158   *	Send.
159   *	    For now, the whole thing is sent.
160   *	Receive ACK
161   *	    This applies only to class F.
162   *	    The sequence is marked complete.
163   *	ULP completion.
164   *	    The upper layer calls fc_exch_done() when done
165   *	    with exchange and sequence tuple.
166   *	RX-inferred completion.
167   *	    When we receive the next sequence on the same exchange, we can
168   *	    retire the previous sequence ID.  (XXX not implemented).
169   *	Timeout.
170   *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
171   *	    E_D_TOV causes abort and calls upper layer response handler
172   *	    with FC_EX_TIMEOUT error.
173   *	Receive RJT
174   *	    XXX defer.
175   *	Send ABTS
176   *	    On timeout.
177   *
178   * The following events may occur on recipient sequences:
179   *
180   *	Receive
181   *	    Allocate sequence for first frame received.
182   *	    Hold during receive handler.
183   *	    Release when final frame received.
184   *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
185   *	Receive ABTS
186   *	    Deallocate sequence
187   *	Send RJT
188   *	    Deallocate
189   *
190   * For now, we neglect conditions where only part of a sequence was
191   * received or transmitted, or where out-of-order receipt is detected.
192   */
193  
194  /*
195   * Locking notes:
196   *
197   * The EM code runs in a per-CPU worker thread.
198   *
199   * To protect against concurrency between worker-thread code and timers,
200   * sequence allocation and deallocation must be locked.
201   *  - exchange refcnt manipulation can be done atomically without locks.
202   *  - sequence allocation must be locked by exch lock.
203   *  - If the EM pool lock and ex_lock must be taken at the same time, then the
204   *    EM pool lock must be taken before the ex_lock.
205   */
206  
207  /*
208   * opcode names for debugging.
209   */
210  static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
211  
212  /**
213   * fc_exch_name_lookup() - Lookup name by opcode
214   * @op:	       Opcode to be looked up
215   * @table:     Opcode/name table
216   * @max_index: Index not to be exceeded
217   *
218   * This routine is used to determine a human-readable string identifying
219   * an R_CTL opcode.
220   */
221  static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
222  					      unsigned int max_index)
223  {
224  	const char *name = NULL;
225  
226  	if (op < max_index)
227  		name = table[op];
228  	if (!name)
229  		name = "unknown";
230  	return name;
231  }
232  
233  /**
234   * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
235   * @op: The opcode to be looked up
236   */
237  static const char *fc_exch_rctl_name(unsigned int op)
238  {
239  	return fc_exch_name_lookup(op, fc_exch_rctl_names,
240  				   ARRAY_SIZE(fc_exch_rctl_names));
241  }
242  
243  /**
244   * fc_exch_hold() - Increment an exchange's reference count
245   * @ep: Exchange to be held
246   */
247  static inline void fc_exch_hold(struct fc_exch *ep)
248  {
249  	atomic_inc(&ep->ex_refcnt);
250  }
251  
252  /**
253   * fc_exch_setup_hdr() - Initialize an FC header by initializing some fields
254   *			 and determining the SOF and EOF.
255   * @ep:	   The exchange that will use the header
256   * @fp:	   The frame whose header is to be modified
257   * @f_ctl: F_CTL bits that will be used for the frame header
258   *
259   * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
260   * fh_seq_id, fh_seq_cnt and the SOF and EOF.
261   */
262  static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
263  			      u32 f_ctl)
264  {
265  	struct fc_frame_header *fh = fc_frame_header_get(fp);
266  	u16 fill;
267  
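	/*
	 * The first frame of a sequence uses the class's initiating SOF;
	 * subsequent frames (seq.cnt != 0) use the normal SOF for that
	 * class.
	 */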
268  	fr_sof(fp) = ep->class;
269  	if (ep->seq.cnt)
270  		fr_sof(fp) = fc_sof_normal(ep->class);
271  
272  	if (f_ctl & FC_FC_END_SEQ) {
273  		fr_eof(fp) = FC_EOF_T;
274  		if (fc_sof_needs_ack((enum fc_sof)ep->class))
275  			fr_eof(fp) = FC_EOF_N;
276  		/*
277  		 * From F_CTL.
278  		 * The number of fill bytes to make the length a 4-byte
279  		 * multiple is the low order 2-bits of the f_ctl.
280  		 * The fill itself will have been cleared by the frame
281  		 * allocation.
282  		 * After this, the length will be even, as expected by
283  		 * the transport.
284  		 */
285  		fill = fr_len(fp) & 3;
286  		if (fill) {
287  			fill = 4 - fill;
288  			/* TODO, this may be a problem with fragmented skb */
289  			skb_put(fp_skb(fp), fill);
290  			hton24(fh->fh_f_ctl, f_ctl | fill);
291  		}
292  	} else {
293  		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non last frame */
294  		fr_eof(fp) = FC_EOF_N;
295  	}
296  
297  	/* Initialize remaining fh fields from fc_fill_fc_hdr */
298  	fh->fh_ox_id = htons(ep->oxid);
299  	fh->fh_rx_id = htons(ep->rxid);
300  	fh->fh_seq_id = ep->seq.id;
301  	fh->fh_seq_cnt = htons(ep->seq.cnt);
302  }
303  
304  /**
305   * fc_exch_release() - Decrement an exchange's reference count
306   * @ep: Exchange to be released
307   *
308   * If the reference count reaches zero and the exchange is complete,
309   * it is freed.
310   */
311  static void fc_exch_release(struct fc_exch *ep)
312  {
313  	struct fc_exch_mgr *mp;
314  
315  	if (atomic_dec_and_test(&ep->ex_refcnt)) {
316  		mp = ep->em;
317  		if (ep->destructor)
318  			ep->destructor(&ep->seq, ep->arg);
319  		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
320  		mempool_free(ep, mp->ep_pool);
321  	}
322  }
323  
324  /**
325   * fc_exch_timer_cancel() - cancel exch timer
326   * @ep:		The exchange whose timer is to be canceled
327   */
328  static inline void fc_exch_timer_cancel(struct fc_exch *ep)
329  {
330  	if (cancel_delayed_work(&ep->timeout_work)) {
331  		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
332  		atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
333  	}
334  }
335  
336  /**
337   * fc_exch_timer_set_locked() - Start a timer for an exchange with
338   *				the exchange lock held
339   * @ep:		The exchange whose timer will start
340   * @timer_msec: The timeout period
341   *
342   * Used for upper level protocols to time out the exchange.
343   * The timer is cancelled when it fires or when the exchange completes.
344   */
345  static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
346  					    unsigned int timer_msec)
347  {
348  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
349  		return;
350  
351  	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
352  
353  	fc_exch_hold(ep);		/* hold for timer */
354  	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
355  				msecs_to_jiffies(timer_msec))) {
356  		FC_EXCH_DBG(ep, "Exchange already queued\n");
357  		fc_exch_release(ep);
358  	}
359  }
360  
361  /**
362   * fc_exch_timer_set() - Lock the exchange and set the timer
363   * @ep:		The exchange whose timer will start
364   * @timer_msec: The timeout period
365   */
366  static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
367  {
368  	spin_lock_bh(&ep->ex_lock);
369  	fc_exch_timer_set_locked(ep, timer_msec);
370  	spin_unlock_bh(&ep->ex_lock);
371  }
372  
373  /**
374   * fc_exch_done_locked() - Complete an exchange with the exchange lock held
375   * @ep: The exchange that is complete
376   *
377   * Note: May sleep if invoked from outside a response handler.
378   */
379  static int fc_exch_done_locked(struct fc_exch *ep)
380  {
381  	int rc = 1;
382  
383  	/*
384  	 * We must check for completion in case there are two threads
385  	 * trying to complete this. But the rrq code will reuse the
386  	 * ep, and in that case we only clear the resp and set it as
387  	 * complete, so it can be reused by the timer to send the rrq.
388  	 */
389  	if (ep->state & FC_EX_DONE)
390  		return rc;
391  	ep->esb_stat |= ESB_ST_COMPLETE;
392  
393  	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
394  		ep->state |= FC_EX_DONE;
395  		fc_exch_timer_cancel(ep);
396  		rc = 0;
397  	}
398  	return rc;
399  }
400  
401  static struct fc_exch fc_quarantine_exch;
402  
403  /**
404   * fc_exch_ptr_get() - Return an exchange from an exchange pool
405   * @pool:  Exchange Pool to get an exchange from
406   * @index: Index of the exchange within the pool
407   *
408   * Use the index to get an exchange from within an exchange pool. exches
409   * will point to an array of exchange pointers. The index will select
410   * the exchange within the array.
411   */
412  static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
413  					      u16 index)
414  {
415  	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
416  	return exches[index];
417  }
418  
419  /**
420   * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
421   * @pool:  The pool to assign the exchange to
422   * @index: The index in the pool where the exchange will be assigned
423   * @ep:	   The exchange to assign to the pool
424   */
425  static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
426  				   struct fc_exch *ep)
427  {
428  	((struct fc_exch **)(pool + 1))[index] = ep;
429  }
430  
431  /**
432   * fc_exch_delete() - Delete an exchange
433   * @ep: The exchange to be deleted
434   */
435  static void fc_exch_delete(struct fc_exch *ep)
436  {
437  	struct fc_exch_pool *pool;
438  	u16 index;
439  
440  	pool = ep->pool;
441  	spin_lock_bh(&pool->lock);
442  	WARN_ON(pool->total_exches <= 0);
443  	pool->total_exches--;
444  
445  	/* update cache of free slot */
446  	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
447  	if (!(ep->state & FC_EX_QUARANTINE)) {
448  		if (pool->left == FC_XID_UNKNOWN)
449  			pool->left = index;
450  		else if (pool->right == FC_XID_UNKNOWN)
451  			pool->right = index;
452  		else
453  			pool->next_index = index;
454  		fc_exch_ptr_set(pool, index, NULL);
455  	} else {
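		/*
		 * A quarantined exchange keeps its slot pinned to the
		 * sentinel entry so the XID is not reused; fc_exch_find()
		 * treats a lookup of such a slot as a miss.
		 */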
456  		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
457  	}
458  	list_del(&ep->ex_list);
459  	spin_unlock_bh(&pool->lock);
460  	fc_exch_release(ep);	/* drop hold for exch in mp */
461  }
462  
463  static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
464  			      struct fc_frame *fp)
465  {
466  	struct fc_exch *ep;
467  	struct fc_frame_header *fh = fc_frame_header_get(fp);
468  	int error = -ENXIO;
469  	u32 f_ctl;
470  	u8 fh_type = fh->fh_type;
471  
472  	ep = fc_seq_exch(sp);
473  
474  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
475  		fc_frame_free(fp);
476  		goto out;
477  	}
478  
479  	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
480  
481  	f_ctl = ntoh24(fh->fh_f_ctl);
482  	fc_exch_setup_hdr(ep, fp, f_ctl);
483  	fr_encaps(fp) = ep->encaps;
484  
485  	/*
486  	 * Update the sequence count if this frame carries
487  	 * multiple FC frames, i.e. when sequence offload is
488  	 * enabled by the LLD.
489  	 */
490  	if (fr_max_payload(fp))
491  		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
492  					fr_max_payload(fp));
493  	else
494  		sp->cnt++;
495  
496  	/*
497  	 * Send the frame.
498  	 */
499  	error = lport->tt.frame_send(lport, fp);
500  
501  	if (fh_type == FC_TYPE_BLS)
502  		goto out;
503  
504  	/*
505  	 * Update the exchange and sequence flags,
506  	 * assuming all frames for the sequence have been sent.
507  	 * We can only be called to send once for each sequence.
508  	 */
509  	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
510  	if (f_ctl & FC_FC_SEQ_INIT)
511  		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
512  out:
513  	return error;
514  }
515  
516  /**
517   * fc_seq_send() - Send a frame using existing sequence/exchange pair
518   * @lport: The local port that the exchange will be sent on
519   * @sp:	   The sequence to be sent
520   * @fp:	   The frame to be sent on the exchange
521   *
522   * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
523   * or indirectly by calling libfc_function_template.frame_send().
524   */
525  int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
526  {
527  	struct fc_exch *ep;
528  	int error;
529  	ep = fc_seq_exch(sp);
530  	spin_lock_bh(&ep->ex_lock);
531  	error = fc_seq_send_locked(lport, sp, fp);
532  	spin_unlock_bh(&ep->ex_lock);
533  	return error;
534  }
535  EXPORT_SYMBOL(fc_seq_send);
536  
537  /**
538   * fc_seq_alloc() - Allocate a sequence for a given exchange
539   * @ep:	    The exchange to allocate a new sequence for
540   * @seq_id: The sequence ID to be used
541   *
542   * We don't support multiple originated sequences on the same exchange.
543   * By implication, any previously originated sequence on this exchange
544   * is complete, and we reallocate the same sequence.
545   */
546  static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
547  {
548  	struct fc_seq *sp;
549  
550  	sp = &ep->seq;
551  	sp->ssb_stat = 0;
552  	sp->cnt = 0;
553  	sp->id = seq_id;
554  	return sp;
555  }
556  
557  /**
558   * fc_seq_start_next_locked() - Allocate a new sequence on the same
559   *				exchange as the supplied sequence
560   * @sp: The sequence/exchange to get a new sequence for
561   */
562  static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
563  {
564  	struct fc_exch *ep = fc_seq_exch(sp);
565  
566  	sp = fc_seq_alloc(ep, ep->seq_id++);
567  	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
568  		    ep->f_ctl, sp->id);
569  	return sp;
570  }
571  
572  /**
573   * fc_seq_start_next() - Lock the exchange and get a new sequence
574   *			 for a given sequence/exchange pair
575   * @sp: The sequence/exchange to get a new sequence for
576   */
577  struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
578  {
579  	struct fc_exch *ep = fc_seq_exch(sp);
580  
581  	spin_lock_bh(&ep->ex_lock);
582  	sp = fc_seq_start_next_locked(sp);
583  	spin_unlock_bh(&ep->ex_lock);
584  
585  	return sp;
586  }
587  EXPORT_SYMBOL(fc_seq_start_next);
588  
589  /*
590   * Set the response handler for the exchange associated with a sequence.
591   *
592   * Note: May sleep if invoked from outside a response handler.
593   */
594  void fc_seq_set_resp(struct fc_seq *sp,
595  		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
596  		     void *arg)
597  {
598  	struct fc_exch *ep = fc_seq_exch(sp);
599  	DEFINE_WAIT(wait);
600  
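	/*
	 * Wait until no response handler is running on another task, so
	 * that ep->resp and ep->arg are never changed underneath an
	 * active handler (see fc_invoke_resp()).
	 */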
601  	spin_lock_bh(&ep->ex_lock);
602  	while (ep->resp_active && ep->resp_task != current) {
603  		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
604  		spin_unlock_bh(&ep->ex_lock);
605  
606  		schedule();
607  
608  		spin_lock_bh(&ep->ex_lock);
609  	}
610  	finish_wait(&ep->resp_wq, &wait);
611  	ep->resp = resp;
612  	ep->arg = arg;
613  	spin_unlock_bh(&ep->ex_lock);
614  }
615  EXPORT_SYMBOL(fc_seq_set_resp);
616  
617  /**
618   * fc_exch_abort_locked() - Abort an exchange
619   * @ep:	The exchange to be aborted
620   * @timer_msec: The period of time to wait before aborting
621   *
622   * Abort an exchange and sequence. Generally called because of an
623   * exchange timeout or an abort from the upper layer.
624   *
625   * A timer_msec value can be specified for the abort timeout; if it is
626   * non-zero, the exchange's response handler will be called with a
627   * timeout error if no response to the abort is received.
628   *
629   * Locking notes:  Called with exch lock held
630   *
631   * Return value: 0 on success else error code
632   */
633  static int fc_exch_abort_locked(struct fc_exch *ep,
634  				unsigned int timer_msec)
635  {
636  	struct fc_seq *sp;
637  	struct fc_frame *fp;
638  	int error;
639  
640  	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
641  	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
642  	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
643  		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
644  			    ep->esb_stat, ep->state);
645  		return -ENXIO;
646  	}
647  
648  	/*
649  	 * Send the abort on a new sequence if possible.
650  	 */
651  	sp = fc_seq_start_next_locked(&ep->seq);
652  	if (!sp)
653  		return -ENOMEM;
654  
655  	if (timer_msec)
656  		fc_exch_timer_set_locked(ep, timer_msec);
657  
658  	if (ep->sid) {
659  		/*
660  		 * Send an abort for the sequence that timed out.
661  		 */
662  		fp = fc_frame_alloc(ep->lp, 0);
663  		if (fp) {
664  			ep->esb_stat |= ESB_ST_SEQ_INIT;
665  			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
666  				       FC_TYPE_BLS, FC_FC_END_SEQ |
667  				       FC_FC_SEQ_INIT, 0);
668  			error = fc_seq_send_locked(ep->lp, sp, fp);
669  		} else {
670  			error = -ENOBUFS;
671  		}
672  	} else {
673  		/*
674  		 * If not logged into the fabric, don't send ABTS but leave
675  		 * sequence active until next timeout.
676  		 */
677  		error = 0;
678  	}
679  	ep->esb_stat |= ESB_ST_ABNORMAL;
680  	return error;
681  }
682  
683  /**
684   * fc_seq_exch_abort() - Abort an exchange and sequence
685   * @req_sp:	The sequence to be aborted
686   * @timer_msec: The period of time to wait before aborting
687   *
688   * Generally called because of a timeout or an abort from the upper layer.
689   *
690   * Return value: 0 on success else error code
691   */
692  int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
693  {
694  	struct fc_exch *ep;
695  	int error;
696  
697  	ep = fc_seq_exch(req_sp);
698  	spin_lock_bh(&ep->ex_lock);
699  	error = fc_exch_abort_locked(ep, timer_msec);
700  	spin_unlock_bh(&ep->ex_lock);
701  	return error;
702  }
703  
704  /**
705   * fc_invoke_resp() - invoke ep->resp()
706   * @ep:	   The exchange to be operated on
707   * @fp:	   The frame pointer to pass through to ->resp()
708   * @sp:	   The sequence pointer to pass through to ->resp()
709   *
710   * Notes:
711   * It is assumed that after initialization has finished (that is, after the
712   * first unlock of ex_lock following fc_exch_alloc()) ep->resp and ep->arg are
713   * modified only via fc_seq_set_resp(). This guarantees that neither of these
714   * two variables changes while ep->resp_active > 0.
715   *
716   * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
717   * this function is invoked, the first spin_lock_bh() call in this function
718   * will wait until fc_seq_set_resp() has finished modifying these variables.
719   *
720   * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that
721   * ep->resp() won't be invoked after fc_exch_done() has returned.
722   *
723   * The response handler itself may invoke fc_exch_done(), which will clear the
724   * ep->resp pointer.
725   *
726   * Return value:
727   * Returns true if and only if ep->resp has been invoked.
728   */
729  static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
730  			   struct fc_frame *fp)
731  {
732  	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
733  	void *arg;
734  	bool res = false;
735  
736  	spin_lock_bh(&ep->ex_lock);
737  	ep->resp_active++;
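	/*
	 * Track which task is running response handlers so that
	 * fc_seq_set_resp() called from inside a handler on this task does
	 * not block on resp_active; if handlers run from more than one
	 * task, resp_task is cleared and fc_seq_set_resp() callers wait
	 * for all of them to finish.
	 */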
738  	if (ep->resp_task != current)
739  		ep->resp_task = !ep->resp_task ? current : NULL;
740  	resp = ep->resp;
741  	arg = ep->arg;
742  	spin_unlock_bh(&ep->ex_lock);
743  
744  	if (resp) {
745  		resp(sp, fp, arg);
746  		res = true;
747  	}
748  
749  	spin_lock_bh(&ep->ex_lock);
750  	if (--ep->resp_active == 0)
751  		ep->resp_task = NULL;
752  	spin_unlock_bh(&ep->ex_lock);
753  
754  	if (ep->resp_active == 0)
755  		wake_up(&ep->resp_wq);
756  
757  	return res;
758  }
759  
760  /**
761   * fc_exch_timeout() - Handle exchange timer expiration
762   * @work: The work_struct identifying the exchange that timed out
763   */
764  static void fc_exch_timeout(struct work_struct *work)
765  {
766  	struct fc_exch *ep = container_of(work, struct fc_exch,
767  					  timeout_work.work);
768  	struct fc_seq *sp = &ep->seq;
769  	u32 e_stat;
770  	int rc = 1;
771  
772  	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
773  
774  	spin_lock_bh(&ep->ex_lock);
775  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
776  		goto unlock;
777  
778  	e_stat = ep->esb_stat;
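	/*
	 * If the exchange has already completed but still holds a recovery
	 * qualifier, this timeout only needs to send the RRQ that releases
	 * it.  Otherwise the exchange itself timed out: report
	 * FC_EX_TIMEOUT to the response handler and abort the exchange.
	 */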
779  	if (e_stat & ESB_ST_COMPLETE) {
780  		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
781  		spin_unlock_bh(&ep->ex_lock);
782  		if (e_stat & ESB_ST_REC_QUAL)
783  			fc_exch_rrq(ep);
784  		goto done;
785  	} else {
786  		if (e_stat & ESB_ST_ABNORMAL)
787  			rc = fc_exch_done_locked(ep);
788  		spin_unlock_bh(&ep->ex_lock);
789  		if (!rc)
790  			fc_exch_delete(ep);
791  		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
792  		fc_seq_set_resp(sp, NULL, ep->arg);
793  		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
794  		goto done;
795  	}
796  unlock:
797  	spin_unlock_bh(&ep->ex_lock);
798  done:
799  	/*
800  	 * This release matches the hold taken when the timer was set.
801  	 */
802  	fc_exch_release(ep);
803  }
804  
805  /**
806   * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
807   * @lport: The local port that the exchange is for
808   * @mp:	   The exchange manager that will allocate the exchange
809   *
810   * Returns pointer to allocated fc_exch with exch lock held.
811   */
812  static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
813  					struct fc_exch_mgr *mp)
814  {
815  	struct fc_exch *ep;
816  	unsigned int cpu;
817  	u16 index;
818  	struct fc_exch_pool *pool;
819  
820  	/* allocate memory for exchange */
821  	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
822  	if (!ep) {
823  		atomic_inc(&mp->stats.no_free_exch);
824  		goto out;
825  	}
826  	memset(ep, 0, sizeof(*ep));
827  
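	/*
	 * The CPU is pinned only long enough to pick a per-CPU pool and
	 * record the CPU number for the XID; everything after that is
	 * protected by the pool lock, so preemption can be re-enabled as
	 * soon as the lock is held.
	 */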
828  	cpu = get_cpu();
829  	pool = per_cpu_ptr(mp->pool, cpu);
830  	spin_lock_bh(&pool->lock);
831  	put_cpu();
832  
833  	/* peek cache of free slot */
834  	if (pool->left != FC_XID_UNKNOWN) {
835  		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
836  			index = pool->left;
837  			pool->left = FC_XID_UNKNOWN;
838  			goto hit;
839  		}
840  	}
841  	if (pool->right != FC_XID_UNKNOWN) {
842  		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
843  			index = pool->right;
844  			pool->right = FC_XID_UNKNOWN;
845  			goto hit;
846  		}
847  	}
848  
849  	index = pool->next_index;
850  	/* allocate new exch from pool */
851  	while (fc_exch_ptr_get(pool, index)) {
852  		index = index == mp->pool_max_index ? 0 : index + 1;
853  		if (index == pool->next_index)
854  			goto err;
855  	}
856  	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
857  hit:
858  	fc_exch_hold(ep);	/* hold for exch in mp */
859  	spin_lock_init(&ep->ex_lock);
860  	/*
861  	 * Hold exch lock for caller to prevent fc_exch_reset()
862  	 * from releasing exch	while fc_exch_alloc() caller is
863  	 * still working on exch.
864  	 */
865  	spin_lock_bh(&ep->ex_lock);
866  
867  	fc_exch_ptr_set(pool, index, ep);
868  	list_add_tail(&ep->ex_list, &pool->ex_list);
869  	fc_seq_alloc(ep, ep->seq_id++);
870  	pool->total_exches++;
871  	spin_unlock_bh(&pool->lock);
872  
873  	/*
874  	 *  update exchange
875  	 */
876  	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
877  	ep->em = mp;
878  	ep->pool = pool;
879  	ep->lp = lport;
880  	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
881  	ep->rxid = FC_XID_UNKNOWN;
882  	ep->class = mp->class;
883  	ep->resp_active = 0;
884  	init_waitqueue_head(&ep->resp_wq);
885  	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
886  out:
887  	return ep;
888  err:
889  	spin_unlock_bh(&pool->lock);
890  	atomic_inc(&mp->stats.no_free_exch_xid);
891  	mempool_free(ep, mp->ep_pool);
892  	return NULL;
893  }
894  
895  /**
896   * fc_exch_alloc() - Allocate an exchange from an EM on a
897   *		     local port's list of EMs.
898   * @lport: The local port that will own the exchange
899   * @fp:	   The FC frame that the exchange will be for
900   *
901   * This function walks the list of exchange manager (EM)
902   * anchors to select an EM for a new exchange allocation. The
903   * EM is selected when a NULL match function pointer is encountered
904   * or when a call to a match function returns true.
905   */
906  static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
907  				     struct fc_frame *fp)
908  {
909  	struct fc_exch_mgr_anchor *ema;
910  	struct fc_exch *ep;
911  
912  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
913  		if (!ema->match || ema->match(fp)) {
914  			ep = fc_exch_em_alloc(lport, ema->mp);
915  			if (ep)
916  				return ep;
917  		}
918  	}
919  	return NULL;
920  }
921  
922  /**
923   * fc_exch_find() - Lookup and hold an exchange
924   * @mp:	 The exchange manager to lookup the exchange from
925   * @xid: The XID of the exchange to look up
926   */
927  static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
928  {
929  	struct fc_lport *lport = mp->lport;
930  	struct fc_exch_pool *pool;
931  	struct fc_exch *ep = NULL;
932  	u16 cpu = xid & fc_cpu_mask;
933  
934  	if (xid == FC_XID_UNKNOWN)
935  		return NULL;
936  
937  	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
938  		pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n",
939  		       lport->host->host_no, lport->port_id, xid, cpu);
940  		return NULL;
941  	}
942  
943  	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
944  		pool = per_cpu_ptr(mp->pool, cpu);
945  		spin_lock_bh(&pool->lock);
946  		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
947  		if (ep == &fc_quarantine_exch) {
948  			FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
949  			ep = NULL;
950  		}
951  		if (ep) {
952  			WARN_ON(ep->xid != xid);
953  			fc_exch_hold(ep);
954  		}
955  		spin_unlock_bh(&pool->lock);
956  	}
957  	return ep;
958  }
959  
960  
961  /**
962   * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
963   *		    the memory allocated for the related objects may be freed.
964   * @sp: The sequence that has completed
965   *
966   * Note: May sleep if invoked from outside a response handler.
967   */
968  void fc_exch_done(struct fc_seq *sp)
969  {
970  	struct fc_exch *ep = fc_seq_exch(sp);
971  	int rc;
972  
973  	spin_lock_bh(&ep->ex_lock);
974  	rc = fc_exch_done_locked(ep);
975  	spin_unlock_bh(&ep->ex_lock);
976  
977  	fc_seq_set_resp(sp, NULL, ep->arg);
978  	if (!rc)
979  		fc_exch_delete(ep);
980  }
981  EXPORT_SYMBOL(fc_exch_done);
982  
983  /**
984   * fc_exch_resp() - Allocate a new exchange for a response frame
985   * @lport: The local port that the exchange was for
986   * @mp:	   The exchange manager to allocate the exchange from
987   * @fp:	   The response frame
988   *
989   * Sets the responder ID in the frame header.
990   */
991  static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
992  				    struct fc_exch_mgr *mp,
993  				    struct fc_frame *fp)
994  {
995  	struct fc_exch *ep;
996  	struct fc_frame_header *fh;
997  
998  	ep = fc_exch_alloc(lport, fp);
999  	if (ep) {
1000  		ep->class = fc_frame_class(fp);
1001  
1002  		/*
1003  		 * Set EX_CTX indicating we're responding on this exchange.
1004  		 */
1005  		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
1006  		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
1007  		fh = fc_frame_header_get(fp);
1008  		ep->sid = ntoh24(fh->fh_d_id);
1009  		ep->did = ntoh24(fh->fh_s_id);
1010  		ep->oid = ep->did;
1011  
1012  		/*
1013  		 * Allocated exchange has placed the XID in the
1014  		 * originator field. Move it to the responder field,
1015  		 * and set the originator XID from the frame.
1016  		 */
1017  		ep->rxid = ep->xid;
1018  		ep->oxid = ntohs(fh->fh_ox_id);
1019  		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
1020  		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
1021  			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1022  
1023  		fc_exch_hold(ep);	/* hold for caller */
1024  		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
1025  	}
1026  	return ep;
1027  }
1028  
1029  /**
1030   * fc_seq_lookup_recip() - Find a sequence where the other end
1031   *			   originated the sequence
1032   * @lport: The local port that the frame was sent to
1033   * @mp:	   The Exchange Manager to lookup the exchange from
1034   * @fp:	   The frame associated with the sequence we're looking for
1035   *
1036   * If the returned fc_pf_rjt_reason is FC_RJT_NONE then this function will
1037   * hold the ep, and the caller must release that hold.
1038   */
1039  static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
1040  						 struct fc_exch_mgr *mp,
1041  						 struct fc_frame *fp)
1042  {
1043  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1044  	struct fc_exch *ep = NULL;
1045  	struct fc_seq *sp = NULL;
1046  	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
1047  	u32 f_ctl;
1048  	u16 xid;
1049  
1050  	f_ctl = ntoh24(fh->fh_f_ctl);
1051  	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
1052  
1053  	/*
1054  	 * Lookup or create the exchange if we will be creating the sequence.
1055  	 */
1056  	if (f_ctl & FC_FC_EX_CTX) {
1057  		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
1058  		ep = fc_exch_find(mp, xid);
1059  		if (!ep) {
1060  			atomic_inc(&mp->stats.xid_not_found);
1061  			reject = FC_RJT_OX_ID;
1062  			goto out;
1063  		}
1064  		if (ep->rxid == FC_XID_UNKNOWN)
1065  			ep->rxid = ntohs(fh->fh_rx_id);
1066  		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
1067  			reject = FC_RJT_OX_ID;
1068  			goto rel;
1069  		}
1070  	} else {
1071  		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
1072  
1073  		/*
1074  		 * Special case for MDS issuing an ELS TEST with a
1075  		 * bad rxid of 0.
1076  		 * XXX take this out once we do the proper reject.
1077  		 */
1078  		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
1079  		    fc_frame_payload_op(fp) == ELS_TEST) {
1080  			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
1081  			xid = FC_XID_UNKNOWN;
1082  		}
1083  
1084  		/*
1085  		 * new sequence - find the exchange
1086  		 */
1087  		ep = fc_exch_find(mp, xid);
1088  		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
1089  			if (ep) {
1090  				atomic_inc(&mp->stats.xid_busy);
1091  				reject = FC_RJT_RX_ID;
1092  				goto rel;
1093  			}
1094  			ep = fc_exch_resp(lport, mp, fp);
1095  			if (!ep) {
1096  				reject = FC_RJT_EXCH_EST;	/* XXX */
1097  				goto out;
1098  			}
1099  			xid = ep->xid;	/* get our XID */
1100  		} else if (!ep) {
1101  			atomic_inc(&mp->stats.xid_not_found);
1102  			reject = FC_RJT_RX_ID;	/* XID not found */
1103  			goto out;
1104  		}
1105  	}
1106  
1107  	spin_lock_bh(&ep->ex_lock);
1108  	/*
1109  	 * At this point, we have the exchange held.
1110  	 * Find or create the sequence.
1111  	 */
1112  	if (fc_sof_is_init(fr_sof(fp))) {
1113  		sp = &ep->seq;
1114  		sp->ssb_stat |= SSB_ST_RESP;
1115  		sp->id = fh->fh_seq_id;
1116  	} else {
1117  		sp = &ep->seq;
1118  		if (sp->id != fh->fh_seq_id) {
1119  			atomic_inc(&mp->stats.seq_not_found);
1120  			if (f_ctl & FC_FC_END_SEQ) {
1121  				/*
1122  				 * Update the sequence ID from the last
1123  				 * frame of the sequence. This is needed for
1124  				 * an FC target using DDP, where the stack is
1125  				 * only told about the header of the last
1126  				 * (payload-bearing) frame. That frame's
1127  				 * seq_id was assigned by the initiator and
1128  				 * differs from the seq_id allocated when the
1129  				 * target sent XFER_RDY. Without this update
1130  				 * the mismatch would look like a missing
1131  				 * sequence, the RSP would never be sent, and
1132  				 * the write request on the other end would
1133  				 * never finish.
1134  				 */
1135  				sp->ssb_stat |= SSB_ST_RESP;
1136  				sp->id = fh->fh_seq_id;
1137  			} else {
1138  				spin_unlock_bh(&ep->ex_lock);
1139  
1140  				/* sequence/exch should exist */
1141  				reject = FC_RJT_SEQ_ID;
1142  				goto rel;
1143  			}
1144  		}
1145  	}
1146  	WARN_ON(ep != fc_seq_exch(sp));
1147  
1148  	if (f_ctl & FC_FC_SEQ_INIT)
1149  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1150  	spin_unlock_bh(&ep->ex_lock);
1151  
1152  	fr_seq(fp) = sp;
1153  out:
1154  	return reject;
1155  rel:
1156  	fc_exch_done(&ep->seq);
1157  	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
1158  	return reject;
1159  }
1160  
1161  /**
1162   * fc_seq_lookup_orig() - Find a sequence where this end
1163   *			  originated the sequence
1164   * @mp:	   The Exchange Manager to lookup the exchange from
1165   * @fp:	   The frame associated with the sequence we're looking for
1166   *
1167   * Does not hold the sequence for the caller.
1168   */
1169  static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1170  					 struct fc_frame *fp)
1171  {
1172  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1173  	struct fc_exch *ep;
1174  	struct fc_seq *sp = NULL;
1175  	u32 f_ctl;
1176  	u16 xid;
1177  
1178  	f_ctl = ntoh24(fh->fh_f_ctl);
1179  	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1180  	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1181  	ep = fc_exch_find(mp, xid);
1182  	if (!ep)
1183  		return NULL;
1184  	if (ep->seq.id == fh->fh_seq_id) {
1185  		/*
1186  		 * Save the RX_ID if we didn't previously know it.
1187  		 */
1188  		sp = &ep->seq;
1189  		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1190  		    ep->rxid == FC_XID_UNKNOWN) {
1191  			ep->rxid = ntohs(fh->fh_rx_id);
1192  		}
1193  	}
1194  	fc_exch_release(ep);
1195  	return sp;
1196  }
1197  
1198  /**
1199   * fc_exch_set_addr() - Set the source and destination IDs for an exchange
1200   * @ep:	     The exchange to set the addresses for
1201   * @orig_id: The originator's ID
1202   * @resp_id: The responder's ID
1203   *
1204   * Note this must be done before the first sequence of the exchange is sent.
1205   */
1206  static void fc_exch_set_addr(struct fc_exch *ep,
1207  			     u32 orig_id, u32 resp_id)
1208  {
1209  	ep->oid = orig_id;
1210  	if (ep->esb_stat & ESB_ST_RESP) {
1211  		ep->sid = resp_id;
1212  		ep->did = orig_id;
1213  	} else {
1214  		ep->sid = orig_id;
1215  		ep->did = resp_id;
1216  	}
1217  }
1218  
1219  /**
1220   * fc_seq_els_rsp_send() - Send an ELS response using information from
1221   *			   the existing sequence/exchange.
1222   * @fp:	      The received frame
1223   * @els_cmd:  The ELS command to be sent
1224   * @els_data: The ELS data to be sent
1225   *
1226   * The received frame is not freed.
1227   */
1228  void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1229  			 struct fc_seq_els_data *els_data)
1230  {
1231  	switch (els_cmd) {
1232  	case ELS_LS_RJT:
1233  		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1234  		break;
1235  	case ELS_LS_ACC:
1236  		fc_seq_ls_acc(fp);
1237  		break;
1238  	case ELS_RRQ:
1239  		fc_exch_els_rrq(fp);
1240  		break;
1241  	case ELS_REC:
1242  		fc_exch_els_rec(fp);
1243  		break;
1244  	default:
1245  		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1246  	}
1247  }
1248  EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1249  
1250  /**
1251   * fc_seq_send_last() - Send a sequence that is the last in the exchange
1252   * @sp:	     The sequence that is to be sent
1253   * @fp:	     The frame that will be sent on the sequence
1254   * @rctl:    The R_CTL information to be sent
1255   * @fh_type: The frame header type
1256   */
1257  static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1258  			     enum fc_rctl rctl, enum fc_fh_type fh_type)
1259  {
1260  	u32 f_ctl;
1261  	struct fc_exch *ep = fc_seq_exch(sp);
1262  
1263  	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1264  	f_ctl |= ep->f_ctl;
1265  	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1266  	fc_seq_send_locked(ep->lp, sp, fp);
1267  }
1268  
1269  /**
1270   * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
1271   * @sp:	   The sequence to send the ACK on
1272   * @rx_fp: The received frame that is being acknowledged
1273   *
1274   * Send ACK_1 (or equiv.) indicating we received something.
1275   */
1276  static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
1277  {
1278  	struct fc_frame *fp;
1279  	struct fc_frame_header *rx_fh;
1280  	struct fc_frame_header *fh;
1281  	struct fc_exch *ep = fc_seq_exch(sp);
1282  	struct fc_lport *lport = ep->lp;
1283  	unsigned int f_ctl;
1284  
1285  	/*
1286  	 * Don't send ACKs for class 3.
1287  	 */
1288  	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
1289  		fp = fc_frame_alloc(lport, 0);
1290  		if (!fp) {
1291  			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
1292  			return;
1293  		}
1294  
1295  		fh = fc_frame_header_get(fp);
1296  		fh->fh_r_ctl = FC_RCTL_ACK_1;
1297  		fh->fh_type = FC_TYPE_BLS;
1298  
1299  		/*
1300  		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1301  		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1302  		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1303  		 * Last ACK uses bits 7-6 (continue sequence),
1304  		 * bits 5-4 are meaningful (what kind of ACK to use).
1305  		 */
1306  		rx_fh = fc_frame_header_get(rx_fp);
1307  		f_ctl = ntoh24(rx_fh->fh_f_ctl);
1308  		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1309  			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
1310  			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
1311  			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1312  		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1313  		hton24(fh->fh_f_ctl, f_ctl);
1314  
1315  		fc_exch_setup_hdr(ep, fp, f_ctl);
1316  		fh->fh_seq_id = rx_fh->fh_seq_id;
1317  		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1318  		fh->fh_parm_offset = htonl(1);	/* ack single frame */
1319  
1320  		fr_sof(fp) = fr_sof(rx_fp);
1321  		if (f_ctl & FC_FC_END_SEQ)
1322  			fr_eof(fp) = FC_EOF_T;
1323  		else
1324  			fr_eof(fp) = FC_EOF_N;
1325  
1326  		lport->tt.frame_send(lport, fp);
1327  	}
1328  }
1329  
1330  /**
1331   * fc_exch_send_ba_rjt() - Send BLS Reject
1332   * @rx_fp:  The frame being rejected
1333   * @reason: The reason the frame is being rejected
1334   * @explan: The explanation for the rejection
1335   *
1336   * This is for rejecting BA_ABTS only.
1337   */
1338  static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
1339  				enum fc_ba_rjt_reason reason,
1340  				enum fc_ba_rjt_explan explan)
1341  {
1342  	struct fc_frame *fp;
1343  	struct fc_frame_header *rx_fh;
1344  	struct fc_frame_header *fh;
1345  	struct fc_ba_rjt *rp;
1346  	struct fc_seq *sp;
1347  	struct fc_lport *lport;
1348  	unsigned int f_ctl;
1349  
1350  	lport = fr_dev(rx_fp);
1351  	sp = fr_seq(rx_fp);
1352  	fp = fc_frame_alloc(lport, sizeof(*rp));
1353  	if (!fp) {
1354  		FC_EXCH_DBG(fc_seq_exch(sp),
1355  			     "Drop BA_RJT request, out of memory\n");
1356  		return;
1357  	}
1358  	fh = fc_frame_header_get(fp);
1359  	rx_fh = fc_frame_header_get(rx_fp);
1360  
1361  	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
1362  
1363  	rp = fc_frame_payload_get(fp, sizeof(*rp));
1364  	rp->br_reason = reason;
1365  	rp->br_explan = explan;
1366  
1367  	/*
1368  	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1369  	 */
1370  	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1371  	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1372  	fh->fh_ox_id = rx_fh->fh_ox_id;
1373  	fh->fh_rx_id = rx_fh->fh_rx_id;
1374  	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1375  	fh->fh_r_ctl = FC_RCTL_BA_RJT;
1376  	fh->fh_type = FC_TYPE_BLS;
1377  
1378  	/*
1379  	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1380  	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1381  	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1382  	 * Last ACK uses bits 7-6 (continue sequence),
1383  	 * bits 5-4 are meaningful (what kind of ACK to use).
1384  	 * Always set LAST_SEQ, END_SEQ.
1385  	 */
1386  	f_ctl = ntoh24(rx_fh->fh_f_ctl);
1387  	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1388  		FC_FC_END_CONN | FC_FC_SEQ_INIT |
1389  		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1390  	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1391  	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1392  	f_ctl &= ~FC_FC_FIRST_SEQ;
1393  	hton24(fh->fh_f_ctl, f_ctl);
1394  
1395  	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1396  	fr_eof(fp) = FC_EOF_T;
1397  	if (fc_sof_needs_ack(fr_sof(fp)))
1398  		fr_eof(fp) = FC_EOF_N;
1399  
1400  	lport->tt.frame_send(lport, fp);
1401  }
1402  
1403  /**
1404   * fc_exch_recv_abts() - Handle an incoming ABTS
1405   * @ep:	   The exchange the abort was on
1406   * @rx_fp: The ABTS frame
1407   *
1408   * This would be for target mode usually, but could be due to lost
1409   * FCP transfer ready, confirm or RRQ. We always handle this as an
1410   * exchange abort, ignoring the parameter.
1411   */
1412  static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1413  {
1414  	struct fc_frame *fp;
1415  	struct fc_ba_acc *ap;
1416  	struct fc_frame_header *fh;
1417  	struct fc_seq *sp;
1418  
1419  	if (!ep)
1420  		goto reject;
1421  
1422  	FC_EXCH_DBG(ep, "exch: ABTS received\n");
1423  	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1424  	if (!fp) {
1425  		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
1426  		goto free;
1427  	}
1428  
1429  	spin_lock_bh(&ep->ex_lock);
1430  	if (ep->esb_stat & ESB_ST_COMPLETE) {
1431  		spin_unlock_bh(&ep->ex_lock);
1432  		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
1433  		fc_frame_free(fp);
1434  		goto reject;
1435  	}
1436  	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
1437  		ep->esb_stat |= ESB_ST_REC_QUAL;
1438  		fc_exch_hold(ep);		/* hold for REC_QUAL */
1439  	}
1440  	fc_exch_timer_set_locked(ep, ep->r_a_tov);
1441  	fh = fc_frame_header_get(fp);
1442  	ap = fc_frame_payload_get(fp, sizeof(*ap));
1443  	memset(ap, 0, sizeof(*ap));
1444  	sp = &ep->seq;
1445  	ap->ba_high_seq_cnt = htons(0xffff);
1446  	if (sp->ssb_stat & SSB_ST_RESP) {
1447  		ap->ba_seq_id = sp->id;
1448  		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1449  		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1450  		ap->ba_low_seq_cnt = htons(sp->cnt);
1451  	}
1452  	sp = fc_seq_start_next_locked(sp);
1453  	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1454  	ep->esb_stat |= ESB_ST_ABNORMAL;
1455  	spin_unlock_bh(&ep->ex_lock);
1456  
1457  free:
1458  	fc_frame_free(rx_fp);
1459  	return;
1460  
1461  reject:
1462  	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1463  	goto free;
1464  }
1465  
1466  /**
1467   * fc_seq_assign() - Assign exchange and sequence for incoming request
1468   * @lport: The local port that received the request
1469   * @fp:    The request frame
1470   *
1471   * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1472   * A reference will be held on the exchange/sequence for the caller, which
1473   * must call fc_seq_release().
1474   */
1475  struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1476  {
1477  	struct fc_exch_mgr_anchor *ema;
1478  
1479  	WARN_ON(lport != fr_dev(fp));
1480  	WARN_ON(fr_seq(fp));
1481  	fr_seq(fp) = NULL;
1482  
1483  	list_for_each_entry(ema, &lport->ema_list, ema_list)
1484  		if ((!ema->match || ema->match(fp)) &&
1485  		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1486  			break;
1487  	return fr_seq(fp);
1488  }
1489  EXPORT_SYMBOL(fc_seq_assign);
1490  
1491  /**
1492   * fc_seq_release() - Release the hold
1493   * @sp:    The sequence.
1494   */
1495  void fc_seq_release(struct fc_seq *sp)
1496  {
1497  	fc_exch_release(fc_seq_exch(sp));
1498  }
1499  EXPORT_SYMBOL(fc_seq_release);
1500  
1501  /**
1502   * fc_exch_recv_req() - Handler for an incoming request
1503   * @lport: The local port that received the request
1504   * @mp:	   The EM that the exchange is on
1505   * @fp:	   The request frame
1506   *
1507   * This is used when the other end is originating the exchange
1508   * and the sequence.
1509   */
1510  static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
1511  			     struct fc_frame *fp)
1512  {
1513  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1514  	struct fc_seq *sp = NULL;
1515  	struct fc_exch *ep = NULL;
1516  	enum fc_pf_rjt_reason reject;
1517  
1518  	/* We can have the wrong fc_lport at this point with NPIV, which is a
1519  	 * problem now that we know a new exchange needs to be allocated
1520  	 */
1521  	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
1522  	if (!lport) {
1523  		fc_frame_free(fp);
1524  		return;
1525  	}
1526  	fr_dev(fp) = lport;
1527  
1528  	BUG_ON(fr_seq(fp));		/* XXX remove later */
1529  
1530  	/*
1531  	 * If the RX_ID is 0xffff, don't allocate an exchange.
1532  	 * The upper-level protocol may request one later, if needed.
1533  	 */
1534  	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
1535  		return fc_lport_recv(lport, fp);
1536  
1537  	reject = fc_seq_lookup_recip(lport, mp, fp);
1538  	if (reject == FC_RJT_NONE) {
1539  		sp = fr_seq(fp);	/* sequence will be held */
1540  		ep = fc_seq_exch(sp);
1541  		fc_seq_send_ack(sp, fp);
1542  		ep->encaps = fr_encaps(fp);
1543  
1544  		/*
1545  		 * Call the receive function.
1546  		 *
1547  		 * The receive function may allocate a new sequence
1548  		 * over the old one, so we shouldn't change the
1549  		 * sequence after this.
1550  		 *
1551  		 * The frame will be freed by the receive function.
1552  		 * If new exch resp handler is valid then call that
1553  		 * first.
1554  		 */
1555  		if (!fc_invoke_resp(ep, sp, fp))
1556  			fc_lport_recv(lport, fp);
1557  		fc_exch_release(ep);	/* release from lookup */
1558  	} else {
1559  		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
1560  			     reject);
1561  		fc_frame_free(fp);
1562  	}
1563  }
1564  
1565  /**
1566   * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1567   *			     end is the originator of the sequence that is a
1568   *			     response to our initial exchange
1569   * @mp: The EM that the exchange is on
1570   * @fp: The response frame
1571   */
1572  static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1573  {
1574  	struct fc_frame_header *fh = fc_frame_header_get(fp);
1575  	struct fc_seq *sp;
1576  	struct fc_exch *ep;
1577  	enum fc_sof sof;
1578  	u32 f_ctl;
1579  	int rc;
1580  
1581  	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1582  	if (!ep) {
1583  		atomic_inc(&mp->stats.xid_not_found);
1584  		goto out;
1585  	}
1586  	if (ep->esb_stat & ESB_ST_COMPLETE) {
1587  		atomic_inc(&mp->stats.xid_not_found);
1588  		goto rel;
1589  	}
1590  	if (ep->rxid == FC_XID_UNKNOWN)
1591  		ep->rxid = ntohs(fh->fh_rx_id);
1592  	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1593  		atomic_inc(&mp->stats.xid_not_found);
1594  		goto rel;
1595  	}
1596  	if (ep->did != ntoh24(fh->fh_s_id) &&
1597  	    ep->did != FC_FID_FLOGI) {
1598  		atomic_inc(&mp->stats.xid_not_found);
1599  		goto rel;
1600  	}
1601  	sof = fr_sof(fp);
1602  	sp = &ep->seq;
1603  	if (fc_sof_is_init(sof)) {
1604  		sp->ssb_stat |= SSB_ST_RESP;
1605  		sp->id = fh->fh_seq_id;
1606  	}
1607  
1608  	f_ctl = ntoh24(fh->fh_f_ctl);
1609  	fr_seq(fp) = sp;
1610  
1611  	spin_lock_bh(&ep->ex_lock);
1612  	if (f_ctl & FC_FC_SEQ_INIT)
1613  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1614  	spin_unlock_bh(&ep->ex_lock);
1615  
1616  	if (fc_sof_needs_ack(sof))
1617  		fc_seq_send_ack(sp, fp);
1618  
1619  	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1620  	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1621  	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1622  		spin_lock_bh(&ep->ex_lock);
1623  		rc = fc_exch_done_locked(ep);
1624  		WARN_ON(fc_seq_exch(sp) != ep);
1625  		spin_unlock_bh(&ep->ex_lock);
1626  		if (!rc) {
1627  			fc_exch_delete(ep);
1628  		} else {
1629  			FC_EXCH_DBG(ep, "ep is completed already, "
1630  					"hence skip calling the resp\n");
1631  			goto skip_resp;
1632  		}
1633  	}
1634  
1635  	/*
1636  	 * Call the receive function.
1637  	 * The sequence is held (has a refcnt) for us,
1638  	 * but not for the receive function.
1639  	 *
1640  	 * The receive function may allocate a new sequence
1641  	 * over the old one, so we shouldn't change the
1642  	 * sequence after this.
1643  	 *
1644  	 * The frame will be freed by the receive function.
1645  	 * If new exch resp handler is valid then call that
1646  	 * first.
1647  	 */
1648  	if (!fc_invoke_resp(ep, sp, fp))
1649  		fc_frame_free(fp);
1650  
1651  skip_resp:
1652  	fc_exch_release(ep);
1653  	return;
1654  rel:
1655  	fc_exch_release(ep);
1656  out:
1657  	fc_frame_free(fp);
1658  }
1659  
1660  /**
1661   * fc_exch_recv_resp() - Handler for a sequence where other end is
1662   *			 responding to our sequence
1663   * @mp: The EM that the exchange is on
1664   * @fp: The response frame
1665   */
1666  static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1667  {
1668  	struct fc_seq *sp;
1669  
1670  	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
1671  
1672  	if (!sp)
1673  		atomic_inc(&mp->stats.xid_not_found);
1674  	else
1675  		atomic_inc(&mp->stats.non_bls_resp);
1676  
1677  	fc_frame_free(fp);
1678  }
1679  
1680  /**
1681   * fc_exch_abts_resp() - Handler for a response to an ABTS
1682   * @ep: The exchange that the frame is on
1683   * @fp: The response frame
1684   *
1685   * This response would be to an ABTS cancelling an exchange or sequence.
1686   * The response can be either BA_ACC or BA_RJT
1687   */
1688  static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1689  {
1690  	struct fc_frame_header *fh;
1691  	struct fc_ba_acc *ap;
1692  	struct fc_seq *sp;
1693  	u16 low;
1694  	u16 high;
1695  	int rc = 1, has_rec = 0;
1696  
1697  	fh = fc_frame_header_get(fp);
1698  	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1699  		    fc_exch_rctl_name(fh->fh_r_ctl));
1700  
1701  	if (cancel_delayed_work_sync(&ep->timeout_work)) {
1702  		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
1703  		fc_exch_release(ep);	/* release from pending timer hold */
1704  	}
1705  
1706  	spin_lock_bh(&ep->ex_lock);
1707  	switch (fh->fh_r_ctl) {
1708  	case FC_RCTL_BA_ACC:
1709  		ap = fc_frame_payload_get(fp, sizeof(*ap));
1710  		if (!ap)
1711  			break;
1712  
1713  		/*
1714  		 * Decide whether to establish a Recovery Qualifier.
1715  		 * We do this if there is a non-empty SEQ_CNT range and
1716  		 * SEQ_ID is the same as the one we aborted.
1717  		 */
1718  		low = ntohs(ap->ba_low_seq_cnt);
1719  		high = ntohs(ap->ba_high_seq_cnt);
1720  		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1721  		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1722  		     ap->ba_seq_id == ep->seq_id) && low != high) {
1723  			ep->esb_stat |= ESB_ST_REC_QUAL;
1724  			fc_exch_hold(ep);  /* hold for recovery qualifier */
1725  			has_rec = 1;
1726  		}
1727  		break;
1728  	case FC_RCTL_BA_RJT:
1729  		break;
1730  	default:
1731  		break;
1732  	}
1733  
1734  	/* do we need to do some other checks here. Can we reuse more of
1735  	 * fc_exch_recv_seq_resp
1736  	 */
1737  	sp = &ep->seq;
1738  	/*
1739  	 * do we want to check END_SEQ as well as LAST_SEQ here?
1740  	 */
1741  	if (ep->fh_type != FC_TYPE_FCP &&
1742  	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1743  		rc = fc_exch_done_locked(ep);
1744  	spin_unlock_bh(&ep->ex_lock);
1745  
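	/* Take a local hold so ep remains valid across a possible
	 * fc_exch_delete() while the response handler is invoked and the
	 * recovery-qualifier timer is armed.
	 */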
1746  	fc_exch_hold(ep);
1747  	if (!rc)
1748  		fc_exch_delete(ep);
1749  	if (!fc_invoke_resp(ep, sp, fp))
1750  		fc_frame_free(fp);
1751  	if (has_rec)
1752  		fc_exch_timer_set(ep, ep->r_a_tov);
1753  	fc_exch_release(ep);
1754  }
1755  
1756  /**
1757   * fc_exch_recv_bls() - Handler for a BLS sequence
1758   * @mp: The EM that the exchange is on
1759   * @fp: The request frame
1760   *
1761   * The BLS frame is always a sequence initiated by the remote side.
1762   * We may be either the originator or recipient of the exchange.
1763   */
1764  static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1765  {
1766  	struct fc_frame_header *fh;
1767  	struct fc_exch *ep;
1768  	u32 f_ctl;
1769  
1770  	fh = fc_frame_header_get(fp);
1771  	f_ctl = ntoh24(fh->fh_f_ctl);
1772  	fr_seq(fp) = NULL;
1773  
1774  	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1775  			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1776  	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1777  		spin_lock_bh(&ep->ex_lock);
1778  		ep->esb_stat |= ESB_ST_SEQ_INIT;
1779  		spin_unlock_bh(&ep->ex_lock);
1780  	}
1781  	if (f_ctl & FC_FC_SEQ_CTX) {
1782  		/*
1783  		 * A response to a sequence we initiated.
1784  		 * This should only be ACKs for class 2 or F.
1785  		 */
1786  		switch (fh->fh_r_ctl) {
1787  		case FC_RCTL_ACK_1:
1788  		case FC_RCTL_ACK_0:
1789  			break;
1790  		default:
1791  			if (ep)
1792  				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
1793  					    fh->fh_r_ctl,
1794  					    fc_exch_rctl_name(fh->fh_r_ctl));
1795  			break;
1796  		}
1797  		fc_frame_free(fp);
1798  	} else {
1799  		switch (fh->fh_r_ctl) {
1800  		case FC_RCTL_BA_RJT:
1801  		case FC_RCTL_BA_ACC:
1802  			if (ep)
1803  				fc_exch_abts_resp(ep, fp);
1804  			else
1805  				fc_frame_free(fp);
1806  			break;
1807  		case FC_RCTL_BA_ABTS:
1808  			if (ep)
1809  				fc_exch_recv_abts(ep, fp);
1810  			else
1811  				fc_frame_free(fp);
1812  			break;
1813  		default:			/* ignore junk */
1814  			fc_frame_free(fp);
1815  			break;
1816  		}
1817  	}
1818  	if (ep)
1819  		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
1820  }
1821  
1822  /**
1823   * fc_seq_ls_acc() - Accept sequence with LS_ACC
1824   * @rx_fp: The received frame, not freed here.
1825   *
1826   * If this fails due to allocation or transmit congestion, assume the
1827   * originator will repeat the sequence.
1828   */
1829  static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1830  {
1831  	struct fc_lport *lport;
1832  	struct fc_els_ls_acc *acc;
1833  	struct fc_frame *fp;
1834  	struct fc_seq *sp;
1835  
1836  	lport = fr_dev(rx_fp);
1837  	sp = fr_seq(rx_fp);
1838  	fp = fc_frame_alloc(lport, sizeof(*acc));
1839  	if (!fp) {
1840  		FC_EXCH_DBG(fc_seq_exch(sp),
1841  			    "exch: drop LS_ACC, out of memory\n");
1842  		return;
1843  	}
1844  	acc = fc_frame_payload_get(fp, sizeof(*acc));
1845  	memset(acc, 0, sizeof(*acc));
1846  	acc->la_cmd = ELS_LS_ACC;
1847  	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1848  	lport->tt.frame_send(lport, fp);
1849  }
1850  
1851  /**
1852   * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1853   * @rx_fp: The received frame, not freed here.
1854   * @reason: The reason the sequence is being rejected
1855   * @explan: The explanation for the rejection
1856   *
1857   * If this fails due to allocation or transmit congestion, assume the
1858   * originator will repeat the sequence.
1859   */
1860  static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1861  			  enum fc_els_rjt_explan explan)
1862  {
1863  	struct fc_lport *lport;
1864  	struct fc_els_ls_rjt *rjt;
1865  	struct fc_frame *fp;
1866  	struct fc_seq *sp;
1867  
1868  	lport = fr_dev(rx_fp);
1869  	sp = fr_seq(rx_fp);
1870  	fp = fc_frame_alloc(lport, sizeof(*rjt));
1871  	if (!fp) {
1872  		FC_EXCH_DBG(fc_seq_exch(sp),
1873  		    "exch: drop LS_RJT, out of memory\n");
1874  		return;
1875  	}
1876  	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1877  	memset(rjt, 0, sizeof(*rjt));
1878  	rjt->er_cmd = ELS_LS_RJT;
1879  	rjt->er_reason = reason;
1880  	rjt->er_explan = explan;
1881  	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1882  	lport->tt.frame_send(lport, fp);
1883  }
1884  
1885  /**
1886   * fc_exch_reset() - Reset an exchange
1887   * @ep: The exchange to be reset
1888   *
1889   * Note: May sleep if invoked from outside a response handler.
1890   */
1891  static void fc_exch_reset(struct fc_exch *ep)
1892  {
1893  	struct fc_seq *sp;
1894  	int rc = 1;
1895  
1896  	spin_lock_bh(&ep->ex_lock);
1897  	ep->state |= FC_EX_RST_CLEANUP;
1898  	fc_exch_timer_cancel(ep);
1899  	if (ep->esb_stat & ESB_ST_REC_QUAL)
1900  		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
1901  	ep->esb_stat &= ~ESB_ST_REC_QUAL;
1902  	sp = &ep->seq;
1903  	rc = fc_exch_done_locked(ep);
1904  	spin_unlock_bh(&ep->ex_lock);
1905  
1906  	fc_exch_hold(ep);
1907  
1908  	if (!rc) {
1909  		fc_exch_delete(ep);
1910  	} else {
1911  		FC_EXCH_DBG(ep, "ep is completed already, "
1912  				"hence skip calling the resp\n");
1913  		goto skip_resp;
1914  	}
1915  
1916  	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
1917  skip_resp:
1918  	fc_seq_set_resp(sp, NULL, ep->arg);
1919  	fc_exch_release(ep);
1920  }
1921  
1922  /**
1923   * fc_exch_pool_reset() - Reset a per cpu exchange pool
1924   * @lport: The local port that the exchange pool is on
1925   * @pool:  The exchange pool to be reset
1926   * @sid:   The source ID
1927   * @did:   The destination ID
1928   *
1929   * Resets a per cpu exchange pool, releasing all of its sequences
1930   * and exchanges. If sid is non-zero then only reset exchanges
1931   * whose source ID matches it. If did is non-zero then only reset
1932   * exchanges whose destination ID matches it.
1933   */
1934  static void fc_exch_pool_reset(struct fc_lport *lport,
1935  			       struct fc_exch_pool *pool,
1936  			       u32 sid, u32 did)
1937  {
1938  	struct fc_exch *ep;
1939  	struct fc_exch *next;
1940  
1941  	spin_lock_bh(&pool->lock);
1942  restart:
1943  	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1944  		if ((lport == ep->lp) &&
1945  		    (sid == 0 || sid == ep->sid) &&
1946  		    (did == 0 || did == ep->did)) {
1947  			fc_exch_hold(ep);
1948  			spin_unlock_bh(&pool->lock);
1949  
1950  			fc_exch_reset(ep);
1951  
1952  			fc_exch_release(ep);
1953  			spin_lock_bh(&pool->lock);
1954  
1955  			/*
1956  			 * must restart the loop in case multiple eps
1957  			 * were released while the lock was dropped.
1958  			 */
1959  			goto restart;
1960  		}
1961  	}
1962  	pool->next_index = 0;
1963  	pool->left = FC_XID_UNKNOWN;
1964  	pool->right = FC_XID_UNKNOWN;
1965  	spin_unlock_bh(&pool->lock);
1966  }
1967  
1968  /**
1969   * fc_exch_mgr_reset() - Reset all EMs of a local port
1970   * @lport: The local port whose EMs are to be reset
1971   * @sid:   The source ID
1972   * @did:   The destination ID
1973   *
1974   * Reset all EMs associated with a given local port. Release all
1975   * sequences and exchanges. If sid is non-zero then only reset
1976   * exchanges whose source ID matches it. If did is non-zero then
1977   * only reset exchanges whose destination ID matches it.
1978   */
1979  void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1980  {
1981  	struct fc_exch_mgr_anchor *ema;
1982  	unsigned int cpu;
1983  
1984  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
1985  		for_each_possible_cpu(cpu)
1986  			fc_exch_pool_reset(lport,
1987  					   per_cpu_ptr(ema->mp->pool, cpu),
1988  					   sid, did);
1989  	}
1990  }
1991  EXPORT_SYMBOL(fc_exch_mgr_reset);
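
/*
 * Usage sketch (illustrative only, not part of this driver): a caller
 * typically resets every exchange of a local port when the link goes
 * down, passing zero for both IDs so no exchange is filtered out.  The
 * function name below is hypothetical; only fc_exch_mgr_reset() itself
 * (or the lport->tt.exch_mgr_reset hook it may be assigned to) is real.
 *
 *	static void example_link_down(struct fc_lport *lport)
 *	{
 *		// Release all sequences and exchanges on this lport.
 *		fc_exch_mgr_reset(lport, 0, 0);
 *	}
 */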
1992  
1993  /**
1994   * fc_exch_lookup() - find an exchange
1995   * @lport: The local port
1996   * @xid: The exchange ID
1997   *
1998   * Returns exchange pointer with hold for caller, or NULL if not found.
1999   */
2000  static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
2001  {
2002  	struct fc_exch_mgr_anchor *ema;
2003  
2004  	list_for_each_entry(ema, &lport->ema_list, ema_list)
2005  		if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
2006  			return fc_exch_find(ema->mp, xid);
2007  	return NULL;
2008  }
2009  
2010  /**
2011   * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
2012   * @rfp: The REC frame, not freed here.
2013   *
2014   * Note that the requesting port may be different from the S_ID in the request.
2015   */
2016  static void fc_exch_els_rec(struct fc_frame *rfp)
2017  {
2018  	struct fc_lport *lport;
2019  	struct fc_frame *fp;
2020  	struct fc_exch *ep;
2021  	struct fc_els_rec *rp;
2022  	struct fc_els_rec_acc *acc;
2023  	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
2024  	enum fc_els_rjt_explan explan;
2025  	u32 sid;
2026  	u16 xid, rxid, oxid;
2027  
2028  	lport = fr_dev(rfp);
2029  	rp = fc_frame_payload_get(rfp, sizeof(*rp));
2030  	explan = ELS_EXPL_INV_LEN;
2031  	if (!rp)
2032  		goto reject;
2033  	sid = ntoh24(rp->rec_s_id);
2034  	rxid = ntohs(rp->rec_rx_id);
2035  	oxid = ntohs(rp->rec_ox_id);
2036  
2037  	explan = ELS_EXPL_OXID_RXID;
2038  	if (sid == fc_host_port_id(lport->host))
2039  		xid = oxid;
2040  	else
2041  		xid = rxid;
2042  	if (xid == FC_XID_UNKNOWN) {
2043  		FC_LPORT_DBG(lport,
2044  			     "REC request from %x: invalid rxid %x oxid %x\n",
2045  			     sid, rxid, oxid);
2046  		goto reject;
2047  	}
2048  	ep = fc_exch_lookup(lport, xid);
2049  	if (!ep) {
2050  		FC_LPORT_DBG(lport,
2051  			     "REC request from %x: rxid %x oxid %x not found\n",
2052  			     sid, rxid, oxid);
2053  		goto reject;
2054  	}
2055  	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
2056  		    sid, rxid, oxid);
2057  	if (ep->oid != sid || oxid != ep->oxid)
2058  		goto rel;
2059  	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
2060  		goto rel;
2061  	fp = fc_frame_alloc(lport, sizeof(*acc));
2062  	if (!fp) {
2063  		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
2064  		goto out;
2065  	}
2066  
2067  	acc = fc_frame_payload_get(fp, sizeof(*acc));
2068  	memset(acc, 0, sizeof(*acc));
2069  	acc->reca_cmd = ELS_LS_ACC;
2070  	acc->reca_ox_id = rp->rec_ox_id;
2071  	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
2072  	acc->reca_rx_id = htons(ep->rxid);
2073  	if (ep->sid == ep->oid)
2074  		hton24(acc->reca_rfid, ep->did);
2075  	else
2076  		hton24(acc->reca_rfid, ep->sid);
2077  	acc->reca_fc4value = htonl(ep->seq.rec_data);
2078  	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
2079  						 ESB_ST_SEQ_INIT |
2080  						 ESB_ST_COMPLETE));
2081  	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
2082  	lport->tt.frame_send(lport, fp);
2083  out:
2084  	fc_exch_release(ep);
2085  	return;
2086  
2087  rel:
2088  	fc_exch_release(ep);
2089  reject:
2090  	fc_seq_ls_rjt(rfp, reason, explan);
2091  }
2092  
2093  /**
2094   * fc_exch_rrq_resp() - Handler for RRQ responses
2095   * @sp:	 The sequence that the RRQ is on
2096   * @fp:	 The RRQ frame
2097   * @arg: The exchange that the RRQ is on
2098   *
2099   * TODO: fix error handler.
2100   */
2101  static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
2102  {
2103  	struct fc_exch *aborted_ep = arg;
2104  	unsigned int op;
2105  
2106  	if (IS_ERR(fp)) {
2107  		int err = PTR_ERR(fp);
2108  
2109  		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
2110  			goto cleanup;
2111  		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
2112  			    "frame error %d\n", err);
2113  		return;
2114  	}
2115  
2116  	op = fc_frame_payload_op(fp);
2117  	fc_frame_free(fp);
2118  
2119  	switch (op) {
2120  	case ELS_LS_RJT:
2121  		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
2122  		fallthrough;
2123  	case ELS_LS_ACC:
2124  		goto cleanup;
2125  	default:
2126  		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
2127  			    op);
2128  		return;
2129  	}
2130  
2131  cleanup:
2132  	fc_exch_done(&aborted_ep->seq);
2133  	/* drop hold for rec qual */
2134  	fc_exch_release(aborted_ep);
2135  }
2136  
2137  
2138  /**
2139   * fc_exch_seq_send() - Send a frame using a new exchange and sequence
2140   * @lport:	The local port to send the frame on
2141   * @fp:		The frame to be sent
2142   * @resp:	The response handler for this request
2143   * @destructor: The destructor for the exchange
2144   * @arg:	The argument to be passed to the response handler
2145   * @timer_msec: The timeout period for the exchange
2146   *
2147   * This routine sets the exchange response handler to the resp()
2148   * function pointer. The handler is called in two scenarios: when a
2149   * timeout occurs or when a response frame is received for the
2150   * exchange. On timeout (or other error) the fc_frame pointer passed
2151   * to the handler is an error pointer, detectable with IS_ERR().
2152   *
2153   * The exchange destructor handler is also set in this routine.
2154   * It is invoked by the EM layer when the exchange is about to be
2155   * freed, so the caller can release its own resources together
2156   * with the exchange.
2157   *
2158   * The arg is passed back to both the response and destructor handlers.
2159   *
2160   * A timeout (in milliseconds) is set on the exchange if a non-zero
2161   * timer_msec argument is specified. The timer is canceled when it
2162   * fires or when the exchange is done. The exchange timeout handler
2163   * is registered by the EM layer.
2164   *
2165   * The following frame header fields must be filled in before
2166   * calling this routine:
2167   *
2168   * - routing control
2169   * - FC port did
2170   * - FC port sid
2171   * - FC header type
2172   * - frame control
2173   * - parameter or relative offset
2174   */
2175  struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
2176  				struct fc_frame *fp,
2177  				void (*resp)(struct fc_seq *,
2178  					     struct fc_frame *fp,
2179  					     void *arg),
2180  				void (*destructor)(struct fc_seq *, void *),
2181  				void *arg, u32 timer_msec)
2182  {
2183  	struct fc_exch *ep;
2184  	struct fc_seq *sp = NULL;
2185  	struct fc_frame_header *fh;
2186  	struct fc_fcp_pkt *fsp = NULL;
2187  	int rc = 1;
2188  
2189  	ep = fc_exch_alloc(lport, fp);
2190  	if (!ep) {
2191  		fc_frame_free(fp);
2192  		return NULL;
2193  	}
2194  	ep->esb_stat |= ESB_ST_SEQ_INIT;
2195  	fh = fc_frame_header_get(fp);
2196  	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
2197  	ep->resp = resp;
2198  	ep->destructor = destructor;
2199  	ep->arg = arg;
2200  	ep->r_a_tov = lport->r_a_tov;
2201  	ep->lp = lport;
2202  	sp = &ep->seq;
2203  
2204  	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
2205  	ep->f_ctl = ntoh24(fh->fh_f_ctl);
2206  	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2207  	sp->cnt++;
2208  
2209  	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2210  		fsp = fr_fsp(fp);
2211  		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2212  	}
2213  
2214  	if (unlikely(lport->tt.frame_send(lport, fp)))
2215  		goto err;
2216  
2217  	if (timer_msec)
2218  		fc_exch_timer_set_locked(ep, timer_msec);
2219  	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
2220  
2221  	if (ep->f_ctl & FC_FC_SEQ_INIT)
2222  		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2223  	spin_unlock_bh(&ep->ex_lock);
2224  	return sp;
2225  err:
2226  	if (fsp)
2227  		fc_fcp_ddp_done(fsp);
2228  	rc = fc_exch_done_locked(ep);
2229  	spin_unlock_bh(&ep->ex_lock);
2230  	if (!rc)
2231  		fc_exch_delete(ep);
2232  	return NULL;
2233  }
2234  EXPORT_SYMBOL(fc_exch_seq_send);
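
/*
 * Usage sketch (illustrative only, not part of this driver): send a
 * request on a new exchange and handle the response.  The function
 * names, the payload handling and the choice of timeout are made up
 * for illustration; the libfc calls mirror the ones used elsewhere in
 * this file.
 *
 *	static void example_resp(struct fc_seq *sp, struct fc_frame *fp,
 *				 void *arg)
 *	{
 *		if (IS_ERR(fp)) {
 *			// -FC_EX_TIMEOUT or -FC_EX_CLOSED; no frame to free.
 *			pr_info("request failed: %ld\n", PTR_ERR(fp));
 *			return;
 *		}
 *		// ... inspect the response payload here ...
 *		fc_frame_free(fp);
 *	}
 *
 *	static int example_send(struct fc_lport *lport, u32 did, void *ctx)
 *	{
 *		struct fc_frame *fp;
 *
 *		fp = fc_frame_alloc(lport, sizeof(struct fc_els_rrq));
 *		if (!fp)
 *			return -ENOMEM;
 *		// ... fill the payload via fc_frame_payload_get() ...
 *		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id,
 *			       FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
 *			       FC_FC_SEQ_INIT, 0);
 *		if (!fc_exch_seq_send(lport, fp, example_resp, NULL, ctx,
 *				      lport->e_d_tov))
 *			return -ENOMEM;	// exchange setup or send failed
 *		return 0;
 *	}
 */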
2235  
2236  /**
2237   * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
2238   * @ep: The exchange to send the RRQ on
2239   *
2240   * This tells the remote port to stop blocking the use of
2241   * the exchange and the seq_cnt range.
2242   */
2243  static void fc_exch_rrq(struct fc_exch *ep)
2244  {
2245  	struct fc_lport *lport;
2246  	struct fc_els_rrq *rrq;
2247  	struct fc_frame *fp;
2248  	u32 did;
2249  
2250  	lport = ep->lp;
2251  
2252  	fp = fc_frame_alloc(lport, sizeof(*rrq));
2253  	if (!fp)
2254  		goto retry;
2255  
2256  	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
2257  	memset(rrq, 0, sizeof(*rrq));
2258  	rrq->rrq_cmd = ELS_RRQ;
2259  	hton24(rrq->rrq_s_id, ep->sid);
2260  	rrq->rrq_ox_id = htons(ep->oxid);
2261  	rrq->rrq_rx_id = htons(ep->rxid);
2262  
2263  	did = ep->did;
2264  	if (ep->esb_stat & ESB_ST_RESP)
2265  		did = ep->sid;
2266  
2267  	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
2268  		       lport->port_id, FC_TYPE_ELS,
2269  		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
2270  
2271  	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
2272  			     lport->e_d_tov))
2273  		return;
2274  
2275  retry:
2276  	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
2277  	spin_lock_bh(&ep->ex_lock);
2278  	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
2279  		spin_unlock_bh(&ep->ex_lock);
2280  		/* drop hold for rec qual */
2281  		fc_exch_release(ep);
2282  		return;
2283  	}
2284  	ep->esb_stat |= ESB_ST_REC_QUAL;
2285  	fc_exch_timer_set_locked(ep, ep->r_a_tov);
2286  	spin_unlock_bh(&ep->ex_lock);
2287  }
2288  
2289  /**
2290   * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
2291   * @fp: The RRQ frame, not freed here.
2292   */
2293  static void fc_exch_els_rrq(struct fc_frame *fp)
2294  {
2295  	struct fc_lport *lport;
2296  	struct fc_exch *ep = NULL;	/* request or subject exchange */
2297  	struct fc_els_rrq *rp;
2298  	u32 sid;
2299  	u16 xid;
2300  	enum fc_els_rjt_explan explan;
2301  
2302  	lport = fr_dev(fp);
2303  	rp = fc_frame_payload_get(fp, sizeof(*rp));
2304  	explan = ELS_EXPL_INV_LEN;
2305  	if (!rp)
2306  		goto reject;
2307  
2308  	/*
2309  	 * lookup subject exchange.
2310  	 */
2311  	sid = ntoh24(rp->rrq_s_id);		/* subject source */
2312  	xid = fc_host_port_id(lport->host) == sid ?
2313  			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
2314  	ep = fc_exch_lookup(lport, xid);
2315  	explan = ELS_EXPL_OXID_RXID;
2316  	if (!ep)
2317  		goto reject;
2318  	spin_lock_bh(&ep->ex_lock);
2319  	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
2320  		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
2321  	if (ep->oxid != ntohs(rp->rrq_ox_id))
2322  		goto unlock_reject;
2323  	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
2324  	    ep->rxid != FC_XID_UNKNOWN)
2325  		goto unlock_reject;
2326  	explan = ELS_EXPL_SID;
2327  	if (ep->sid != sid)
2328  		goto unlock_reject;
2329  
2330  	/*
2331  	 * Clear Recovery Qualifier state, and cancel timer if complete.
2332  	 */
2333  	if (ep->esb_stat & ESB_ST_REC_QUAL) {
2334  		ep->esb_stat &= ~ESB_ST_REC_QUAL;
2335  		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
2336  	}
2337  	if (ep->esb_stat & ESB_ST_COMPLETE)
2338  		fc_exch_timer_cancel(ep);
2339  
2340  	spin_unlock_bh(&ep->ex_lock);
2341  
2342  	/*
2343  	 * Send LS_ACC.
2344  	 */
2345  	fc_seq_ls_acc(fp);
2346  	goto out;
2347  
2348  unlock_reject:
2349  	spin_unlock_bh(&ep->ex_lock);
2350  reject:
2351  	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
2352  out:
2353  	if (ep)
2354  		fc_exch_release(ep);	/* drop hold from fc_exch_find */
2355  }
2356  
2357  /**
2358   * fc_exch_update_stats() - Update exchange manager statistics for a local port
2359   * @lport: The local port whose exchange manager stats are to be updated
2360   */
2361  void fc_exch_update_stats(struct fc_lport *lport)
2362  {
2363  	struct fc_host_statistics *st;
2364  	struct fc_exch_mgr_anchor *ema;
2365  	struct fc_exch_mgr *mp;
2366  
2367  	st = &lport->host_stats;
2368  
2369  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
2370  		mp = ema->mp;
2371  		st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2372  		st->fc_no_free_exch_xid +=
2373  				atomic_read(&mp->stats.no_free_exch_xid);
2374  		st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2375  		st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2376  		st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2377  		st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2378  	}
2379  }
2380  EXPORT_SYMBOL(fc_exch_update_stats);
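
/*
 * Usage sketch (illustrative only, not part of this driver): the sums
 * accumulate into lport->host_stats, so a statistics callback would
 * typically refresh them just before reporting.  The function name
 * below is hypothetical.
 *
 *	static struct fc_host_statistics *example_get_stats(struct fc_lport *lport)
 *	{
 *		fc_exch_update_stats(lport);
 *		return &lport->host_stats;
 *	}
 */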
2381  
2382  /**
2383   * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
2384   * @lport: The local port to add the exchange manager to
2385   * @mp:	   The exchange manager to be added to the local port
2386   * @match: The match routine that indicates when this EM should be used
2387   */
2388  struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2389  					   struct fc_exch_mgr *mp,
2390  					   bool (*match)(struct fc_frame *))
2391  {
2392  	struct fc_exch_mgr_anchor *ema;
2393  
2394  	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2395  	if (!ema)
2396  		return ema;
2397  
2398  	ema->mp = mp;
2399  	ema->match = match;
2400  	/* add EM anchor to EM anchors list */
2401  	list_add_tail(&ema->ema_list, &lport->ema_list);
2402  	kref_get(&mp->kref);
2403  	return ema;
2404  }
2405  EXPORT_SYMBOL(fc_exch_mgr_add);
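
/*
 * Usage sketch (illustrative only, not part of this driver): the match
 * routine lets a driver steer selected traffic to a dedicated EM, for
 * example one whose XID range is offloaded in hardware.  The predicate
 * and variable names below are hypothetical.
 *
 *	static bool example_fcp_match(struct fc_frame *fp)
 *	{
 *		// Use this EM only for FCP frames.
 *		return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
 *	}
 *
 *	// ema = fc_exch_mgr_add(lport, offload_mp, example_fcp_match);
 */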
2406  
2407  /**
2408   * fc_exch_mgr_destroy() - Destroy an exchange manager
2409   * @kref: The reference to the EM to be destroyed
2410   */
2411  static void fc_exch_mgr_destroy(struct kref *kref)
2412  {
2413  	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2414  
2415  	mempool_destroy(mp->ep_pool);
2416  	free_percpu(mp->pool);
2417  	kfree(mp);
2418  }
2419  
2420  /**
2421   * fc_exch_mgr_del() - Delete an EM from a local port's list
2422   * @ema: The exchange manager anchor identifying the EM to be deleted
2423   */
2424  void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2425  {
2426  	/* remove EM anchor from EM anchors list */
2427  	list_del(&ema->ema_list);
2428  	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2429  	kfree(ema);
2430  }
2431  EXPORT_SYMBOL(fc_exch_mgr_del);
2432  
2433  /**
2434   * fc_exch_mgr_list_clone() - Share all exchange manager objects
2435   * @src: Source lport to clone exchange managers from
2436   * @dst: New lport that takes references to all the exchange managers
2437   */
2438  int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2439  {
2440  	struct fc_exch_mgr_anchor *ema, *tmp;
2441  
2442  	list_for_each_entry(ema, &src->ema_list, ema_list) {
2443  		if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2444  			goto err;
2445  	}
2446  	return 0;
2447  err:
2448  	list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2449  		fc_exch_mgr_del(ema);
2450  	return -ENOMEM;
2451  }
2452  EXPORT_SYMBOL(fc_exch_mgr_list_clone);
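
/*
 * Usage sketch (illustrative only, not part of this driver): a new
 * lport that should share its parent's exchange ID space, such as an
 * NPIV VN_Port, can simply take references on all of the parent's EMs.
 * The function and parameter names below are hypothetical.
 *
 *	static int example_share_ems(struct fc_lport *n_port,
 *				     struct fc_lport *vn_port)
 *	{
 *		// Shares every EM of n_port with vn_port; on failure the
 *		// partial list is unwound and -ENOMEM is returned.
 *		return fc_exch_mgr_list_clone(n_port, vn_port);
 *	}
 */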
2453  
2454  /**
2455   * fc_exch_mgr_alloc() - Allocate an exchange manager
2456   * @lport:   The local port that the new EM will be associated with
2457   * @class:   The default FC class for new exchanges
2458   * @min_xid: The minimum XID for exchanges from the new EM
2459   * @max_xid: The maximum XID for exchanges from the new EM
2460   * @match:   The match routine for the new EM
2461   */
2462  struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2463  				      enum fc_class class,
2464  				      u16 min_xid, u16 max_xid,
2465  				      bool (*match)(struct fc_frame *))
2466  {
2467  	struct fc_exch_mgr *mp;
2468  	u16 pool_exch_range;
2469  	size_t pool_size;
2470  	unsigned int cpu;
2471  	struct fc_exch_pool *pool;
2472  
2473  	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
2474  	    (min_xid & fc_cpu_mask) != 0) {
2475  		FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
2476  			     min_xid, max_xid);
2477  		return NULL;
2478  	}
2479  
2480  	/*
2481  	 * allocate memory for EM
2482  	 */
2483  	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
2484  	if (!mp)
2485  		return NULL;
2486  
2487  	mp->class = class;
2488  	mp->lport = lport;
2489  	/* adjust em exch xid range for offload */
2490  	mp->min_xid = min_xid;
2491  
2492  	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
2493  	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2494  		sizeof(struct fc_exch *);
2495  	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
2496  		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2497  			min_xid - 1;
2498  	} else {
2499  		mp->max_xid = max_xid;
2500  		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2501  			(fc_cpu_mask + 1);
2502  	}
2503  
2504  	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2505  	if (!mp->ep_pool)
2506  		goto free_mp;
2507  
2508  	/*
2509  	 * Setup per cpu exch pool with entire exchange id range equally
2510  	 * divided across all cpus. The exch pointers array memory is
2511  	 * allocated for exch range per pool.
2512  	 */
2513  	mp->pool_max_index = pool_exch_range - 1;
2514  
2515  	/*
2516  	 * Allocate and initialize per cpu exch pool
2517  	 */
2518  	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2519  	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2520  	if (!mp->pool)
2521  		goto free_mempool;
2522  	for_each_possible_cpu(cpu) {
2523  		pool = per_cpu_ptr(mp->pool, cpu);
2524  		pool->next_index = 0;
2525  		pool->left = FC_XID_UNKNOWN;
2526  		pool->right = FC_XID_UNKNOWN;
2527  		spin_lock_init(&pool->lock);
2528  		INIT_LIST_HEAD(&pool->ex_list);
2529  	}
2530  
2531  	kref_init(&mp->kref);
2532  	if (!fc_exch_mgr_add(lport, mp, match)) {
2533  		free_percpu(mp->pool);
2534  		goto free_mempool;
2535  	}
2536  
2537  	/*
2538  	 * The kref_init() above set mp->kref to 1 and the call to
2539  	 * fc_exch_mgr_add() incremented it again, so drop that
2540  	 * extra reference here.
2541  	 */
2542  	kref_put(&mp->kref, fc_exch_mgr_destroy);
2543  	return mp;
2544  
2545  free_mempool:
2546  	mempool_destroy(mp->ep_pool);
2547  free_mp:
2548  	kfree(mp);
2549  	return NULL;
2550  }
2551  EXPORT_SYMBOL(fc_exch_mgr_alloc);
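
/*
 * Usage sketch (illustrative only, not part of this driver): an LLD
 * typically allocates its EM(s) while bringing up an lport and frees
 * them all again on teardown.  The class, XID range and function name
 * here are arbitrary examples; min_xid must be aligned to fc_cpu_mask
 * and the range may be shrunk so each per cpu pool fits within
 * PCPU_MIN_UNIT_SIZE.
 *
 *	static int example_em_config(struct fc_lport *lport)
 *	{
 *		if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0000, 0x0fff,
 *				       NULL))
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	// ... and on teardown:
 *	//	fc_exch_mgr_free(lport);
 */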
2552  
2553  /**
2554   * fc_exch_mgr_free() - Free all exchange managers on a local port
2555   * @lport: The local port whose EMs are to be freed
2556   */
2557  void fc_exch_mgr_free(struct fc_lport *lport)
2558  {
2559  	struct fc_exch_mgr_anchor *ema, *next;
2560  
2561  	flush_workqueue(fc_exch_workqueue);
2562  	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2563  		fc_exch_mgr_del(ema);
2564  }
2565  EXPORT_SYMBOL(fc_exch_mgr_free);
2566  
2567  /**
2568   * fc_find_ema() - Look up the appropriate Exchange Manager Anchor for
2569   * a frame's exchange ID
2570   * @f_ctl: The F_CTL field from the received frame header
2571   * @lport: The local port the frame was received on
2572   * @fh: The received frame header
2573   */
2574  static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2575  					      struct fc_lport *lport,
2576  					      struct fc_frame_header *fh)
2577  {
2578  	struct fc_exch_mgr_anchor *ema;
2579  	u16 xid;
2580  
2581  	if (f_ctl & FC_FC_EX_CTX)
2582  		xid = ntohs(fh->fh_ox_id);
2583  	else {
2584  		xid = ntohs(fh->fh_rx_id);
2585  		if (xid == FC_XID_UNKNOWN)
2586  			return list_entry(lport->ema_list.prev,
2587  					  typeof(*ema), ema_list);
2588  	}
2589  
2590  	list_for_each_entry(ema, &lport->ema_list, ema_list) {
2591  		if ((xid >= ema->mp->min_xid) &&
2592  		    (xid <= ema->mp->max_xid))
2593  			return ema;
2594  	}
2595  	return NULL;
2596  }
2597  /**
2598   * fc_exch_recv() - Handler for received frames
2599   * @lport: The local port the frame was received on
2600   * @fp:	The received frame
2601   */
2602  void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2603  {
2604  	struct fc_frame_header *fh = fc_frame_header_get(fp);
2605  	struct fc_exch_mgr_anchor *ema;
2606  	u32 f_ctl;
2607  
2608  	/* lport lock ? */
2609  	if (!lport || lport->state == LPORT_ST_DISABLED) {
2610  		FC_LIBFC_DBG("Receiving frames for an lport that "
2611  			     "has not been initialized correctly\n");
2612  		fc_frame_free(fp);
2613  		return;
2614  	}
2615  
2616  	f_ctl = ntoh24(fh->fh_f_ctl);
2617  	ema = fc_find_ema(f_ctl, lport, fh);
2618  	if (!ema) {
2619  		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
2620  				    "f_ctl <0x%x>, xid <0x%x>\n",
2621  				     f_ctl,
2622  				     (f_ctl & FC_FC_EX_CTX) ?
2623  				     ntohs(fh->fh_ox_id) :
2624  				     ntohs(fh->fh_rx_id));
2625  		fc_frame_free(fp);
2626  		return;
2627  	}
2628  
2629  	/*
2630  	 * If frame is marked invalid, just drop it.
2631  	 */
2632  	switch (fr_eof(fp)) {
2633  	case FC_EOF_T:
2634  		if (f_ctl & FC_FC_END_SEQ)
2635  			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2636  		fallthrough;
2637  	case FC_EOF_N:
2638  		if (fh->fh_type == FC_TYPE_BLS)
2639  			fc_exch_recv_bls(ema->mp, fp);
2640  		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2641  			 FC_FC_EX_CTX)
2642  			fc_exch_recv_seq_resp(ema->mp, fp);
2643  		else if (f_ctl & FC_FC_SEQ_CTX)
2644  			fc_exch_recv_resp(ema->mp, fp);
2645  		else	/* no EX_CTX and no SEQ_CTX */
2646  			fc_exch_recv_req(lport, ema->mp, fp);
2647  		break;
2648  	default:
2649  		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
2650  			     fr_eof(fp));
2651  		fc_frame_free(fp);
2652  	}
2653  }
2654  EXPORT_SYMBOL(fc_exch_recv);
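
/*
 * Usage sketch (illustrative only, not part of this driver): a lower
 * level driver hands each fully received frame to fc_exch_recv() after
 * recording which lport it belongs to, since handlers such as the ELS
 * REC/RRQ code above recover the lport from the frame via fr_dev().
 * The function name below is hypothetical.
 *
 *	static void example_deliver(struct fc_lport *lport, struct fc_frame *fp)
 *	{
 *		fr_dev(fp) = lport;
 *		fc_exch_recv(lport, fp);
 *	}
 */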
2655  
2656  /**
2657   * fc_exch_init() - Initialize the exchange layer for a local port
2658   * @lport: The local port to initialize the exchange layer for
2659   */
2660  int fc_exch_init(struct fc_lport *lport)
2661  {
2662  	if (!lport->tt.exch_mgr_reset)
2663  		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2664  
2665  	return 0;
2666  }
2667  EXPORT_SYMBOL(fc_exch_init);
2668  
2669  /**
2670   * fc_setup_exch_mgr() - Setup an exchange manager
2671   */
2672  int fc_setup_exch_mgr(void)
2673  {
2674  	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2675  					 0, SLAB_HWCACHE_ALIGN, NULL);
2676  	if (!fc_em_cachep)
2677  		return -ENOMEM;
2678  
2679  	/*
2680  	 * Initialize fc_cpu_mask and fc_cpu_order. The
2681  	 * fc_cpu_mask is derived from nr_cpu_ids rounded up
2682  	 * to a power of two, and the corresponding order is
2683  	 * stored in fc_cpu_order since it is needed later to
2684  	 * map an exchange id to an exchange array index in
2685  	 * the per cpu exch pool.
2686  	 *
2687  	 * This round up is required so that fc_cpu_mask lines
2688  	 * up with the exchange id's lower bits; all incoming
2689  	 * frames of an exchange are then delivered to the same
2690  	 * cpu on which the exchange originated, by a simple
2691  	 * bitwise AND of fc_cpu_mask and the exchange id.
2692  	 */
2693  	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2694  	fc_cpu_mask = (1 << fc_cpu_order) - 1;
2695  
2696  	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2697  	if (!fc_exch_workqueue)
2698  		goto err;
2699  	return 0;
2700  err:
2701  	kmem_cache_destroy(fc_em_cachep);
2702  	return -ENOMEM;
2703  }
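
/*
 * Worked example (illustrative only): with nr_cpu_ids == 6,
 * roundup_pow_of_two(6) == 8, so fc_cpu_order == 3 and
 * fc_cpu_mask == 0x7.  Every frame of an exchange is then steered to
 * the CPU selected by the low-order bits of its exchange ID:
 *
 *	cpu = xid & fc_cpu_mask;	// e.g. 0x112a & 0x7 == 2
 *
 * while the remaining upper bits of the XID index the exchange within
 * that CPU's pool.
 */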
2704  
2705  /**
2706   * fc_destroy_exch_mgr() - Destroy an exchange manager
2707   */
2708  void fc_destroy_exch_mgr(void)
2709  {
2710  	destroy_workqueue(fc_exch_workqueue);
2711  	kmem_cache_destroy(fc_em_cachep);
2712  }
2713