/*
 * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008 - 2013 Broadcom Corporation
 * Copyright (c) 2014, QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

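/*
 * bnx2fc_rrq_compl - completion handler for an RRQ (Reinstate Recovery
 * Qualifier) ELS request. Drops the reference held on the original,
 * aborted IO request; if the RRQ itself timed out, the request is taken
 * off the active queue and a firmware cleanup is issued for it.
 */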
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * The ELS request timed out. Clean up the IO with the FW,
		 * drop the completion and remove the request from the
		 * active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
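
/*
 * bnx2fc_send_rrq - issue an RRQ ELS for an aborted exchange so that its
 * recovery qualifier (and exchange resources) can be reclaimed. Retries
 * for up to 10 seconds if the request cannot be queued (-ENOMEM); if the
 * send ultimately fails, the reference on the aborted IO is dropped here.
 */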
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	unsigned long start = jiffies;
	int rc;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("RRQ failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}

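/*
 * bnx2fc_l2_els_compl - common completion handler for libfc-originated
 * (L2) ELS requests such as ADISC, LOGO and RLS. On timeout the request
 * is cleaned up with the firmware and the completion is dropped (libfc
 * handles the timeout). Otherwise the response header and payload are
 * copied into a flat buffer and handed back to libfc against the
 * original OX_ID via bnx2fc_process_l2_frame_compl().
 */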
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * The ELS request timed out. Clean up the IO with the FW and
		 * drop the completion; libfc will handle the ELS timeout.
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}

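/*
 * bnx2fc_send_adisc - send a libfc-built ADISC (Discover Address) ELS as
 * an offloaded request; the response is returned to libfc through
 * bnx2fc_l2_els_compl().
 */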
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

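/*
 * bnx2fc_send_logo - send a libfc-built LOGO ELS as an offloaded request;
 * the response is returned to libfc through bnx2fc_l2_els_compl().
 */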
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

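/*
 * bnx2fc_send_rls - send a libfc-built RLS (Read Link Error Status) ELS
 * as an offloaded request; the response is returned to libfc through
 * bnx2fc_l2_els_compl().
 */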
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

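/*
 * bnx2fc_srr_compl - completion handler for an SRR (Sequence
 * Retransmission Request). On timeout, the SRR is aborted and retried up
 * to SRR_RETRY_COUNT times before the original IO itself is aborted. An
 * LS_RJT response also aborts the original IO, while LS_ACC means the
 * target accepted the retransmission request. Called with tgt->tgt_lock
 * held; drops the reference taken on the original IO request.
 */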
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timed out */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}

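/*
 * bnx2fc_rec_compl - completion handler for a REC (Read Exchange
 * Concise). On timeout the REC is aborted and retried up to
 * REC_RETRY_COUNT times, after which the original IO is aborted. An
 * LS_RJT with the "invalid OX_ID/RX_ID" explanation is treated as a lost
 * command and the IO is reissued on a new exchange. For LS_ACC, the
 * exchange status block decides whether to send an SRR, request a
 * sequence cleanup, or abort the original IO. Called with tgt->tgt_lock
 * held; drops the reference taken on the original IO request.
 */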
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* The ELS request timed out; send an ABTS for it */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/*
		 * Retry the REC; once retries are exhausted, send an ABTS
		 * for the original IO request.
		 */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Sequence initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR, "
					"IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}

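/*
 * bnx2fc_send_rec - build and send a REC ELS for the given IO request,
 * taking an extra reference on it that is released when the REC
 * completes (bnx2fc_rec_compl) or when the send fails.
 */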
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
rec_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
	return rc;
}

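/*
 * bnx2fc_send_srr - build and send an SRR for the given IO request at the
 * supplied relative offset and R_CTL. The offset and R_CTL are saved in
 * the IO request for possible retries, and BNX2FC_FLAG_SRR_SENT is set
 * on success.
 */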
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
srr_err:
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

	return rc;
}

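/*
 * bnx2fc_initiate_els - common path for issuing an offloaded ELS request:
 * validates the rport, lport and session state, allocates a middle-path
 * request, copies the ELS payload, fills the FC header (ELS4/FCP for SRR,
 * plain ELS otherwise), initializes the task context, arms an optional
 * timeout and posts the request on the session's SQ.
 */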
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				   FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}

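/*
 * bnx2fc_process_els_compl - called on CQE completion of an ELS request:
 * cancels the timeout work, removes the request from the active queue,
 * copies the response FC header and length out of the task context and
 * invokes the registered completion callback.
 */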
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x, "
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}

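/*
 * bnx2fc_flogi_resp - FLOGI/FDISC response handler wrapped around
 * fc_lport_flogi_resp(). If FIP granted a MAC, it is programmed via
 * fip->update_mac(); otherwise the frame is handed to
 * fcoe_ctlr_recv_flogi(), and an NPIV vport is terminated on LS_RJT.
 */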
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		fcoe_ctlr_recv_flogi(fip, lport, fp);
	}
	if (!is_zero_ether_addr(mac))
		fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

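/*
 * bnx2fc_logo_resp - fabric LOGO response handler: clears the FCoE MAC
 * via fip->update_mac() before passing the response to
 * fc_lport_logo_resp().
 */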
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}

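/*
 * bnx2fc_elsct_send - lport ELS/CT send hook. FLOGI/FDISC and fabric
 * LOGO responses are intercepted so the bnx2fc/FIP-specific handlers
 * above run; all other requests go straight through fc_elsct_send().
 */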
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}