xref: /linux/drivers/scsi/libfc/fc_disc.c (revision 5bdef865eb358b6f3760e25e591ae115e9eeddef)
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19 
20 /*
21  * Target Discovery
22  *
23  * This block discovers all FC-4 remote ports, including FCP initiators. It
24  * also handles RSCN events and re-discovery if necessary.
25  */
26 
27 /*
28  * DISC LOCKING
29  *
30  * The disc mutex can be locked when acquiring rport locks, but may not
31  * be held when acquiring the lport lock. Refer to fc_lport.c for more
32  * details.
33  */
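
/*
 * Editorial lock-ordering sketch (not part of the original source): the
 * note above implies an outermost-first ordering of lport, then disc,
 * then rport.  Assuming the usual libfc field names (lp_mutex on the
 * lport, rp_mutex on the rport private data), a caller that needed all
 * three locks would nest them roughly like this:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	mutex_lock(&lport->disc.disc_mutex);
 *	mutex_lock(&rdata->rp_mutex);
 *
 * and must never take lp_mutex while disc_mutex is already held.
 */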
34 
35 #include <linux/timer.h>
36 #include <linux/err.h>
37 #include <asm/unaligned.h>
38 
39 #include <scsi/fc/fc_gs.h>
40 
41 #include <scsi/libfc.h>
42 
43 #define FC_DISC_RETRY_LIMIT	3	/* max retries */
44 #define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */
45 
46 #define	FC_DISC_DELAY		3
47 
48 static void fc_disc_gpn_ft_req(struct fc_disc *);
49 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
50 static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
51 			      struct fc_rport_identifiers *);
52 static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
53 static void fc_disc_done(struct fc_disc *);
54 static void fc_disc_timeout(struct work_struct *);
55 static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
56 static void fc_disc_restart(struct fc_disc *);
57 
58 /**
59  * fc_disc_lookup_rport() - lookup a remote port by port_id
60  * @lport: Fibre Channel host port instance
61  * @port_id: remote port port_id to match
62  */
63 struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
64 				      u32 port_id)
65 {
66 	const struct fc_disc *disc = &lport->disc;
67 	struct fc_rport *rport, *found = NULL;
68 	struct fc_rport_libfc_priv *rdata;
69 	int disc_found = 0;
70 
71 	list_for_each_entry(rdata, &disc->rports, peers) {
72 		rport = PRIV_TO_RPORT(rdata);
73 		if (rport->port_id == port_id) {
74 			disc_found = 1;
75 			found = rport;
76 			break;
77 		}
78 	}
79 
80 	if (!disc_found)
81 		found = NULL;
82 
83 	return found;
84 }
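
/*
 * Illustrative usage sketch (editorial): fc_disc_init() below installs
 * this routine as lport->tt.rport_lookup, so callers resolve an FC_ID
 * to a previously discovered rport with something like:
 *
 *	rport = lport->tt.rport_lookup(lport, port_id);
 *	if (!rport)
 *		... the port has not been discovered yet ...
 *
 * as the RSCN handler in this file does for each affected port.
 */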
85 
86 /**
87  * fc_disc_stop_rports() - delete all the remote ports associated with the lport
88  * @disc: The discovery job to stop rports on
89  *
90  * Locking Note: This function expects that the lport mutex is locked before
91  * calling it.
92  */
93 void fc_disc_stop_rports(struct fc_disc *disc)
94 {
95 	struct fc_lport *lport;
96 	struct fc_rport *rport;
97 	struct fc_rport_libfc_priv *rdata, *next;
98 
99 	lport = disc->lport;
100 
101 	mutex_lock(&disc->disc_mutex);
102 	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
103 		rport = PRIV_TO_RPORT(rdata);
104 		list_del(&rdata->peers);
105 		lport->tt.rport_logoff(rport);
106 	}
107 
108 	list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
109 		rport = PRIV_TO_RPORT(rdata);
110 		lport->tt.rport_logoff(rport);
111 	}
112 
113 	mutex_unlock(&disc->disc_mutex);
114 }
115 
116 /**
117  * fc_disc_rport_callback() - Event handler for rport events
118  * @lport: The lport which is receiving the event
119  * @rport: The rport which the event has occurred on
120  * @event: The event that occurred
121  *
122  * Locking Note: The rport lock should not be held when calling
123  *		 this function.
124  */
125 static void fc_disc_rport_callback(struct fc_lport *lport,
126 				   struct fc_rport *rport,
127 				   enum fc_rport_event event)
128 {
129 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
130 	struct fc_disc *disc = &lport->disc;
131 
132 	FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
133 		    rport->port_id);
134 
135 	switch (event) {
136 	case RPORT_EV_CREATED:
137 		if (disc) {
138 			mutex_lock(&disc->disc_mutex);
139 			list_add_tail(&rdata->peers, &disc->rports);
140 			mutex_unlock(&disc->disc_mutex);
141 		}
142 		break;
143 	case RPORT_EV_LOGO:
144 	case RPORT_EV_FAILED:
145 	case RPORT_EV_STOP:
146 		mutex_lock(&disc->disc_mutex);
147 		mutex_lock(&rdata->rp_mutex);
148 		if (rdata->trans_state == FC_PORTSTATE_ROGUE)
149 			list_del(&rdata->peers);
150 		mutex_unlock(&rdata->rp_mutex);
151 		mutex_unlock(&disc->disc_mutex);
152 		break;
153 	default:
154 		break;
155 	}
156 
157 }
158 
159 /**
160  * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
161  * @sp: Current sequence of the RSCN exchange
162  * @fp: RSCN Frame
163  * @disc: FC discovery context
164  *
165  * Locking Note: This function expects that the disc_mutex is locked
166  *		 before it is called.
167  */
168 static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
169 				  struct fc_disc *disc)
170 {
171 	struct fc_lport *lport;
172 	struct fc_rport *rport;
173 	struct fc_rport_libfc_priv *rdata;
174 	struct fc_els_rscn *rp;
175 	struct fc_els_rscn_page *pp;
176 	struct fc_seq_els_data rjt_data;
177 	unsigned int len;
178 	int redisc = 0;
179 	enum fc_els_rscn_ev_qual ev_qual;
180 	enum fc_els_rscn_addr_fmt fmt;
181 	LIST_HEAD(disc_ports);
182 	struct fc_disc_port *dp, *next;
183 
184 	lport = disc->lport;
185 
186 	FC_DISC_DBG(disc, "Received an RSCN event\n");
187 
188 	/* make sure the frame contains an RSCN message */
189 	rp = fc_frame_payload_get(fp, sizeof(*rp));
190 	if (!rp)
191 		goto reject;
192 	/* make sure the page length is as expected (4 bytes) */
193 	if (rp->rscn_page_len != sizeof(*pp))
194 		goto reject;
195 	/* get the RSCN payload length */
196 	len = ntohs(rp->rscn_plen);
197 	if (len < sizeof(*rp))
198 		goto reject;
199 	/* make sure the frame contains the expected payload */
200 	rp = fc_frame_payload_get(fp, len);
201 	if (!rp)
202 		goto reject;
203 	/* payload must be a multiple of the RSCN page size */
204 	len -= sizeof(*rp);
205 	if (len % sizeof(*pp))
206 		goto reject;
207 
208 	for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
209 		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
210 		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
211 		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
212 		fmt &= ELS_RSCN_ADDR_FMT_MASK;
213 		/*
214 		 * if we get an address format other than port
215 		 * (area, domain, fabric), then do a full discovery
216 		 */
217 		switch (fmt) {
218 		case ELS_ADDR_FMT_PORT:
219 			FC_DISC_DBG(disc, "Port address format for port "
220 				    "(%6x)\n", ntoh24(pp->rscn_fid));
221 			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
222 			if (!dp) {
223 				redisc = 1;
224 				break;
225 			}
226 			dp->lp = lport;
227 			dp->ids.port_id = ntoh24(pp->rscn_fid);
228 			dp->ids.port_name = -1;
229 			dp->ids.node_name = -1;
230 			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
231 			list_add_tail(&dp->peers, &disc_ports);
232 			break;
233 		case ELS_ADDR_FMT_AREA:
234 		case ELS_ADDR_FMT_DOM:
235 		case ELS_ADDR_FMT_FAB:
236 		default:
237 			FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
238 			redisc = 1;
239 			break;
240 		}
241 	}
242 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
243 	if (redisc) {
244 		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
245 		fc_disc_restart(disc);
246 	} else {
247 		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
248 			    "redisc %d state %d in_prog %d\n",
249 			    redisc, lport->state, disc->pending);
250 		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
251 			list_del(&dp->peers);
252 			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
253 			if (rport) {
254 				rdata = rport->dd_data;
255 				list_del(&rdata->peers);
256 				lport->tt.rport_logoff(rport);
257 			}
258 			fc_disc_single(disc, dp);
259 		}
260 	}
261 	fc_frame_free(fp);
262 	return;
263 reject:
264 	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
265 	rjt_data.fp = NULL;
266 	rjt_data.reason = ELS_RJT_LOGIC;
267 	rjt_data.explan = ELS_EXPL_NONE;
268 	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
269 	fc_frame_free(fp);
270 }
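
/*
 * Editorial worked example for the RSCN length checks above, assuming
 * the usual 4-byte struct fc_els_rscn header and 4-byte
 * struct fc_els_rscn_page entries: a single-page RSCN naming one
 * affected port satisfies
 *
 *	rp->rscn_page_len    == sizeof(*pp)                   == 4
 *	ntohs(rp->rscn_plen) == sizeof(*rp) + 1 * sizeof(*pp) == 8
 *
 * and any payload that is not the header plus a whole number of pages
 * is rejected with ELS_RJT_LOGIC.
 */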
271 
272 /**
273  * fc_disc_recv_req() - Handle incoming requests
274  * @sp: Current sequence of the request exchange
275  * @fp: The frame
276  * @lport: The FC local port
277  *
278  * Locking Note: This function is called from the EM and will lock
279  *		 the disc_mutex before calling the handler for the
280  *		 request.
281  */
282 static void fc_disc_recv_req(struct fc_seq *sp, struct fc_frame *fp,
283 			     struct fc_lport *lport)
284 {
285 	u8 op;
286 	struct fc_disc *disc = &lport->disc;
287 
288 	op = fc_frame_payload_op(fp);
289 	switch (op) {
290 	case ELS_RSCN:
291 		mutex_lock(&disc->disc_mutex);
292 		fc_disc_recv_rscn_req(sp, fp, disc);
293 		mutex_unlock(&disc->disc_mutex);
294 		break;
295 	default:
296 		FC_DISC_DBG(disc, "Received an unsupported request, "
297 			    "the opcode is (%x)\n", op);
298 		break;
299 	}
300 }
301 
302 /**
303  * fc_disc_restart() - Restart discovery
304  * @disc: FC discovery context
305  *
306  * Locking Note: This function expects that the disc mutex
307  *		 is already locked.
308  */
309 static void fc_disc_restart(struct fc_disc *disc)
310 {
311 	struct fc_rport *rport;
312 	struct fc_rport_libfc_priv *rdata, *next;
313 	struct fc_lport *lport = disc->lport;
314 
315 	FC_DISC_DBG(disc, "Restarting discovery\n");
316 
317 	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
318 		rport = PRIV_TO_RPORT(rdata);
319 		list_del(&rdata->peers);
320 		lport->tt.rport_logoff(rport);
321 	}
322 
323 	disc->requested = 1;
324 	if (!disc->pending)
325 		fc_disc_gpn_ft_req(disc);
326 }
327 
328 /**
329  * fc_disc_start() - Start Fibre Channel target discovery
330  * @disc_callback: Callback to notify the lport when discovery completes
331  * @lport: FC local port
332  * Completion is reported through @disc_callback; there is no return value.
333  */
334 static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
335 						enum fc_disc_event),
336 			  struct fc_lport *lport)
337 {
338 	struct fc_rport *rport;
339 	struct fc_rport_identifiers ids;
340 	struct fc_disc *disc = &lport->disc;
341 
342 	/*
343 	 * At this point we may have a new disc job or an existing
344 	 * one. Either way, let's lock when we make changes to it
345 	 * and send the GPN_FT request.
346 	 */
347 	mutex_lock(&disc->disc_mutex);
348 
349 	disc->disc_callback = disc_callback;
350 
351 	/*
352 	 * If not ready, or already running discovery, just set request flag.
353 	 */
354 	disc->requested = 1;
355 
356 	if (disc->pending) {
357 		mutex_unlock(&disc->disc_mutex);
358 		return;
359 	}
360 
361 	/*
362 	 * Handle point-to-point mode as a simple discovery
363 	 * of the remote port. Yucky, yucky, yuck, yuck!
364 	 */
365 	rport = disc->lport->ptp_rp;
366 	if (rport) {
367 		ids.port_id = rport->port_id;
368 		ids.port_name = rport->port_name;
369 		ids.node_name = rport->node_name;
370 		ids.roles = FC_RPORT_ROLE_UNKNOWN;
371 		get_device(&rport->dev);
372 
373 		if (!fc_disc_new_target(disc, rport, &ids)) {
374 			disc->event = DISC_EV_SUCCESS;
375 			fc_disc_done(disc);
376 		}
377 		put_device(&rport->dev);
378 	} else {
379 		fc_disc_gpn_ft_req(disc);	/* get ports by FC-4 type */
380 	}
381 
382 	mutex_unlock(&disc->disc_mutex);
383 }
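
/*
 * Illustrative usage sketch (editorial, not part of the original file):
 * fc_disc_init() installs fc_disc_start() as lport->tt.disc_start, so
 * the lport state machine starts discovery roughly like this, where
 * my_disc_callback is a hypothetical completion handler:
 *
 *	static void my_disc_callback(struct fc_lport *lport,
 *				     enum fc_disc_event event)
 *	{
 *		if (event != DISC_EV_SUCCESS)
 *			... schedule a retry or reset ...
 *	}
 *
 *	lport->tt.disc_start(my_disc_callback, lport);
 */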
384 
385 static struct fc_rport_operations fc_disc_rport_ops = {
386 	.event_callback = fc_disc_rport_callback,
387 };
388 
389 /**
390  * fc_disc_new_target() - Handle new target found by discovery
391  * @disc: FC discovery context
392  * @rport: The previous FC remote port (NULL if new remote port)
393  * @ids: Identifiers for the new FC remote port
394  *
395  * Locking Note: This function expects that the disc_mutex is locked
396  *		 before it is called.
397  */
398 static int fc_disc_new_target(struct fc_disc *disc,
399 			      struct fc_rport *rport,
400 			      struct fc_rport_identifiers *ids)
401 {
402 	struct fc_lport *lport = disc->lport;
403 	struct fc_rport_libfc_priv *rdata;
404 	int error = 0;
405 
406 	if (rport && ids->port_name) {
407 		if (rport->port_name == -1) {
408 			/*
409 			 * Set WWN and fall through to notify of create.
410 			 */
411 			fc_rport_set_name(rport, ids->port_name,
412 					  rport->node_name);
413 		} else if (rport->port_name != ids->port_name) {
414 			/*
415 			 * This is a new port with the same FCID as
416 			 * a previously-discovered port.  Presumably the old
417 			 * port logged out and a new port logged in and was
418 			 * assigned the same FCID.  This should be rare.
419 			 * Delete the old one and fall thru to re-create.
420 			 */
421 			fc_disc_del_target(disc, rport);
422 			rport = NULL;
423 		}
424 	}
425 	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
426 	    ids->port_id != fc_host_port_id(lport->host) &&
427 	    ids->port_name != lport->wwpn) {
428 		if (!rport) {
429 			rport = lport->tt.rport_lookup(lport, ids->port_id);
430 			if (!rport) {
431 				struct fc_disc_port dp;
432 				dp.lp = lport;
433 				dp.ids.port_id = ids->port_id;
434 				dp.ids.port_name = ids->port_name;
435 				dp.ids.node_name = ids->node_name;
436 				dp.ids.roles = ids->roles;
437 				rport = lport->tt.rport_create(&dp);
438 			}
439 			if (!rport)
440 				error = -ENOMEM;
441 		}
442 		if (rport) {
443 			rdata = rport->dd_data;
444 			rdata->ops = &fc_disc_rport_ops;
445 			rdata->rp_state = RPORT_ST_INIT;
446 			list_add_tail(&rdata->peers, &disc->rogue_rports);
447 			lport->tt.rport_login(rport);
448 		}
449 	}
450 	return error;
451 }
452 
453 /**
454  * fc_disc_del_target() - Delete a target
455  * @disc: FC discovery context
456  * @rport: The remote port to be removed
457  */
458 static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
459 {
460 	struct fc_lport *lport = disc->lport;
461 	struct fc_rport_libfc_priv *rdata = rport->dd_data;
462 	list_del(&rdata->peers);
463 	lport->tt.rport_logoff(rport);
464 }
465 
466 /**
467  * fc_disc_done() - Discovery has been completed
468  * @disc: FC discovery context
469  * Locking Note: This function expects that the disc mutex is locked before
470  * it is called. The discovery callback is then made with the lock released,
471  * and the lock is re-taken before returning from this function.
472  */
473 static void fc_disc_done(struct fc_disc *disc)
474 {
475 	struct fc_lport *lport = disc->lport;
476 	enum fc_disc_event event;
477 
478 	FC_DISC_DBG(disc, "Discovery complete\n");
479 
480 	event = disc->event;
481 	disc->event = DISC_EV_NONE;
482 
483 	if (disc->requested)
484 		fc_disc_gpn_ft_req(disc);
485 	else
486 		disc->pending = 0;
487 
488 	mutex_unlock(&disc->disc_mutex);
489 	disc->disc_callback(lport, event);
490 	mutex_lock(&disc->disc_mutex);
491 }
492 
493 /**
494  * fc_disc_error() - Handle error on dNS request
495  * @disc: FC discovery context
496  * @fp: The frame pointer
497  */
498 static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
499 {
500 	struct fc_lport *lport = disc->lport;
501 	unsigned long delay = 0;
502 
503 	FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
504 		    PTR_ERR(fp), disc->retry_count,
505 		    FC_DISC_RETRY_LIMIT);
506 
507 	if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
508 		/*
509 		 * Memory allocation failure or exchange timeout;
510 		 * retry after a delay.
511 		 */
512 		if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
513 			/* go ahead and retry */
514 			if (!fp)
515 				delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
516 			else {
517 				delay = msecs_to_jiffies(lport->e_d_tov);
518 
519 				/* timeout faster first time */
520 				/* retry sooner the first time */
521 					delay /= 4;
522 			}
523 			disc->retry_count++;
524 			schedule_delayed_work(&disc->disc_work, delay);
525 		} else {
526 			/* exceeded retries */
527 			disc->event = DISC_EV_FAILED;
528 			fc_disc_done(disc);
529 		}
530 	}
531 }
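
/*
 * Editorial worked example for the retry delays above, assuming the
 * common libfc default e_d_tov of 2000 ms: a frame allocation failure
 * (fp == NULL) retries after FC_DISC_RETRY_DELAY (500 ms); the first
 * exchange timeout retries after 2000 / 4 = 500 ms; later timeouts
 * retry after the full 2000 ms, up to FC_DISC_RETRY_LIMIT (3) retries
 * before the discovery is failed with DISC_EV_FAILED.
 */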
532 
533 /**
534  * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
535  * @disc: FC discovery context
536  *
537  * Locking Note: This function expects that the disc_mutex is locked
538  *		 before it is called.
539  */
540 static void fc_disc_gpn_ft_req(struct fc_disc *disc)
541 {
542 	struct fc_frame *fp;
543 	struct fc_lport *lport = disc->lport;
544 
545 	WARN_ON(!fc_lport_test_ready(lport));
546 
547 	disc->pending = 1;
548 	disc->requested = 0;
549 
550 	disc->buf_len = 0;
551 	disc->seq_count = 0;
552 	fp = fc_frame_alloc(lport,
553 			    sizeof(struct fc_ct_hdr) +
554 			    sizeof(struct fc_ns_gid_ft));
555 	if (!fp)
556 		goto err;
557 
558 	if (lport->tt.elsct_send(lport, NULL, fp,
559 				 FC_NS_GPN_FT,
560 				 fc_disc_gpn_ft_resp,
561 				 disc, lport->e_d_tov))
562 		return;
563 err:
564 	fc_disc_error(disc, fp);
565 }
566 
567 /**
568  * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
569  * @disc: FC discovery context
570  * @buf: GPN_FT response buffer
571  * @len: size of response buffer
572  */
573 static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
574 {
575 	struct fc_lport *lport;
576 	struct fc_gpn_ft_resp *np;
577 	char *bp;
578 	size_t plen;
579 	size_t tlen;
580 	int error = 0;
581 	struct fc_disc_port dp;
582 	struct fc_rport *rport;
583 	struct fc_rport_libfc_priv *rdata;
584 
585 	lport = disc->lport;
586 
587 	/*
588 	 * Handle partial name record left over from previous call.
589 	 */
590 	bp = buf;
591 	plen = len;
592 	np = (struct fc_gpn_ft_resp *)bp;
593 	tlen = disc->buf_len;
594 	if (tlen) {
595 		WARN_ON(tlen >= sizeof(*np));
596 		plen = sizeof(*np) - tlen;
597 		WARN_ON(plen <= 0);
598 		WARN_ON(plen >= sizeof(*np));
599 		if (plen > len)
600 			plen = len;
601 		np = &disc->partial_buf;
602 		memcpy((char *)np + tlen, bp, plen);
603 
604 		/*
605 		 * Set bp so that the loop below will advance it to the
606 		 * first valid full name element.
607 		 */
608 		bp -= tlen;
609 		len += tlen;
610 		plen += tlen;
611 		disc->buf_len = (unsigned char) plen;
612 		if (plen == sizeof(*np))
613 			disc->buf_len = 0;
614 	}
615 
616 	/*
617 	 * Handle full name records, including the one filled from above.
618 	 * Normally, np == bp and plen == len, but in the partial case above,
619 	 * bp, len describe the overall buffer, and np, plen describe the
620 	 * partial buffer, which would usually be full by now.
621 	 * After the first time through the loop, things return to "normal".
622 	 */
623 	while (plen >= sizeof(*np)) {
624 		dp.lp = lport;
625 		dp.ids.port_id = ntoh24(np->fp_fid);
626 		dp.ids.port_name = ntohll(np->fp_wwpn);
627 		dp.ids.node_name = -1;
628 		dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
629 
630 		if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
631 		    (dp.ids.port_name != lport->wwpn)) {
632 			rport = lport->tt.rport_create(&dp);
633 			if (rport) {
634 				rdata = rport->dd_data;
635 				rdata->ops = &fc_disc_rport_ops;
636 				rdata->local_port = lport;
637 				list_add_tail(&rdata->peers,
638 					      &disc->rogue_rports);
639 				lport->tt.rport_login(rport);
640 			} else
641 				printk(KERN_WARNING "libfc: Failed to allocate "
642 				       "memory for the newly discovered port "
643 				       "(%6x)\n", dp.ids.port_id);
644 		}
645 
646 		if (np->fp_flags & FC_NS_FID_LAST) {
647 			disc->event = DISC_EV_SUCCESS;
648 			fc_disc_done(disc);
649 			len = 0;
650 			break;
651 		}
652 		len -= sizeof(*np);
653 		bp += sizeof(*np);
654 		np = (struct fc_gpn_ft_resp *)bp;
655 		plen = len;
656 	}
657 
658 	/*
659 	 * Save any partial record at the end of the buffer for next time.
660 	 */
661 	if (error == 0 && len > 0 && len < sizeof(*np)) {
662 		if (np != &disc->partial_buf) {
663 			FC_DISC_DBG(disc, "Partial buffer remains "
664 				    "for discovery\n");
665 			memcpy(&disc->partial_buf, np, len);
666 		}
667 		disc->buf_len = (unsigned char) len;
668 	} else {
669 		disc->buf_len = 0;
670 	}
671 	return error;
672 }
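
/*
 * Editorial worked example for the partial-record handling above,
 * assuming the usual 16-byte struct fc_gpn_ft_resp entry: if a previous
 * frame ended with 10 bytes of an entry (disc->buf_len == 10), the next
 * call copies the first 6 bytes of the new buffer into
 * disc->partial_buf to complete it, rewinds bp by 10 and grows len by
 * 10, so the reassembled entry is consumed on the first pass through
 * the loop and bp/len line up with the next full entry in the new
 * buffer afterwards.
 */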
673 
674 /**
675  * fc_disc_timeout() - Retry handler for the disc component
676  * @work: Structure holding disc obj that needs retry discovery
677  *
678  * Handle retry of memory allocation for remote ports.
679  */
680 static void fc_disc_timeout(struct work_struct *work)
681 {
682 	struct fc_disc *disc = container_of(work,
683 					    struct fc_disc,
684 					    disc_work.work);
685 	mutex_lock(&disc->disc_mutex);
686 	if (disc->requested && !disc->pending)
687 		fc_disc_gpn_ft_req(disc);
688 	mutex_unlock(&disc->disc_mutex);
689 }
690 
691 /**
692  * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
693  * @sp: Current sequence of GPN_FT exchange
694  * @fp: response frame
695  * @disc_arg: FC discovery context passed as the exchange callback argument
696  *
697  * Locking Note: This function is called without the disc mutex held, and
698  *		 should do all of its processing with the mutex held.
699  */
700 static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
701 				void *disc_arg)
702 {
703 	struct fc_disc *disc = disc_arg;
704 	struct fc_ct_hdr *cp;
705 	struct fc_frame_header *fh;
706 	unsigned int seq_cnt;
707 	void *buf = NULL;
708 	unsigned int len;
709 	int error;
710 
711 	mutex_lock(&disc->disc_mutex);
712 	FC_DISC_DBG(disc, "Received a GPN_FT response\n");
713 
714 	if (IS_ERR(fp)) {
715 		fc_disc_error(disc, fp);
716 		mutex_unlock(&disc->disc_mutex);
717 		return;
718 	}
719 
720 	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
721 	fh = fc_frame_header_get(fp);
722 	len = fr_len(fp) - sizeof(*fh);
723 	seq_cnt = ntohs(fh->fh_seq_cnt);
724 	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
725 	    disc->seq_count == 0) {
726 		cp = fc_frame_payload_get(fp, sizeof(*cp));
727 		if (!cp) {
728 			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
729 				    fr_len(fp));
730 		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
731 
732 			/* Accepted, parse the response. */
733 			buf = cp + 1;
734 			len -= sizeof(*cp);
735 		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
736 			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
737 				    "(check zoning)\n", cp->ct_reason,
738 				    cp->ct_explan);
739 			disc->event = DISC_EV_FAILED;
740 			fc_disc_done(disc);
741 		} else {
742 			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
743 				    "%x\n", ntohs(cp->ct_cmd));
744 		}
745 	} else if (fr_sof(fp) == FC_SOF_N3 &&
746 		   seq_cnt == disc->seq_count) {
747 		buf = fh + 1;
748 	} else {
749 		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
750 			    "seq_cnt %x expected %x sof %x eof %x\n",
751 			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
752 	}
753 	if (buf) {
754 		error = fc_disc_gpn_ft_parse(disc, buf, len);
755 		if (error)
756 			fc_disc_error(disc, fp);
757 		else
758 			disc->seq_count++;
759 	}
760 	fc_frame_free(fp);
761 
762 	mutex_unlock(&disc->disc_mutex);
763 }
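
/*
 * Editorial note: a large GPN_FT accept may arrive as several frames of
 * the same sequence.  Only the first frame (SOF_I3, sequence count 0)
 * carries the CT header; continuation frames (SOF_N3) are matched
 * against disc->seq_count, and a name record split across a frame
 * boundary is carried over in disc->partial_buf / disc->buf_len by
 * fc_disc_gpn_ft_parse().
 */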
764 
765 /**
766  * fc_disc_single() - Discover the directory information for a single target
767  * @disc: FC discovery context
768  * @dp: The port to rediscover
769  *
770  * Locking Note: This function expects that the disc_mutex is locked
771  *		 before it is called.
772  */
773 static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
774 {
775 	struct fc_lport *lport;
776 	struct fc_rport *new_rport;
777 	struct fc_rport_libfc_priv *rdata;
778 
779 	lport = disc->lport;
780 
781 	if (dp->ids.port_id == fc_host_port_id(lport->host))
782 		goto out;
783 
784 	new_rport = lport->tt.rport_create(dp);
785 	if (new_rport) {
786 		rdata = new_rport->dd_data;
787 		rdata->ops = &fc_disc_rport_ops;
788 		kfree(dp);
789 		list_add_tail(&rdata->peers, &disc->rogue_rports);
790 		lport->tt.rport_login(new_rport);
791 	}
792 	return;
793 out:
794 	kfree(dp);
795 }
796 
797 /**
798  * fc_disc_stop() - Stop discovery for a given lport
799  * @lport: The lport that discovery should stop for
800  */
801 void fc_disc_stop(struct fc_lport *lport)
802 {
803 	struct fc_disc *disc = &lport->disc;
804 
805 	if (disc) {
806 		cancel_delayed_work_sync(&disc->disc_work);
807 		fc_disc_stop_rports(disc);
808 	}
809 }
810 
811 /**
812  * fc_disc_stop_final() - Stop discovery for a given lport
813  * @lport: The lport that discovery should stop for
814  *
815  * This function will block until discovery has been
816  * completely stopped and all rports have been deleted.
817  */
818 void fc_disc_stop_final(struct fc_lport *lport)
819 {
820 	fc_disc_stop(lport);
821 	lport->tt.rport_flush_queue();
822 }
823 
824 /**
825  * fc_disc_init() - Initialize the discovery block
826  * @lport: FC local port
827  */
828 int fc_disc_init(struct fc_lport *lport)
829 {
830 	struct fc_disc *disc;
831 
832 	if (!lport->tt.disc_start)
833 		lport->tt.disc_start = fc_disc_start;
834 
835 	if (!lport->tt.disc_stop)
836 		lport->tt.disc_stop = fc_disc_stop;
837 
838 	if (!lport->tt.disc_stop_final)
839 		lport->tt.disc_stop_final = fc_disc_stop_final;
840 
841 	if (!lport->tt.disc_recv_req)
842 		lport->tt.disc_recv_req = fc_disc_recv_req;
843 
844 	if (!lport->tt.rport_lookup)
845 		lport->tt.rport_lookup = fc_disc_lookup_rport;
846 
847 	disc = &lport->disc;
848 	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
849 	mutex_init(&disc->disc_mutex);
850 	INIT_LIST_HEAD(&disc->rports);
851 	INIT_LIST_HEAD(&disc->rogue_rports);
852 
853 	disc->lport = lport;
854 	disc->delay = FC_DISC_DELAY;
855 	disc->event = DISC_EV_NONE;
856 
857 	return 0;
858 }
859 EXPORT_SYMBOL(fc_disc_init);
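
/*
 * Illustrative usage sketch (editorial): an LLD wires discovery into a
 * newly configured lport by calling the exported fc_disc_init() during
 * its libfc setup, e.g. (my_lport_config is a hypothetical helper):
 *
 *	static int my_lport_config(struct fc_lport *lport)
 *	{
 *		...
 *		fc_disc_init(lport);	// fills in the disc_start/disc_stop/
 *					// rport_lookup ops not overridden
 *		return 0;
 *	}
 */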
860