// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Functions to build and send ELS/CT/BLS commands and responses.
 */

#include "efc.h"
#include "efc_els.h"
#include "../libefc_sli/sli4.h"

#define EFC_LOG_ENABLE_ELS_TRACE(efc) \
        (((efc) != NULL) ? (((efc)->logmask & (1U << 1)) != 0) : 0)

#define node_els_trace() \
        do { \
                if (EFC_LOG_ENABLE_ELS_TRACE(efc)) \
                        efc_log_info(efc, "[%s] %-20s\n", \
                                     node->display_name, __func__); \
        } while (0)

#define els_io_printf(els, fmt, ...) \
        efc_log_err((struct efc *)els->node->efc, \
                    "[%s] %-8s " fmt, \
                    els->node->display_name, \
                    els->display_name, ##__VA_ARGS__)

#define EFC_ELS_RSP_LEN         1024
#define EFC_ELS_GID_PT_RSP_LEN  8096

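/* Allocate an ELS IO request using the default response buffer size. */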
struct efc_els_io_req *
efc_els_io_alloc(struct efc_node *node, u32 reqlen)
{
        return efc_els_io_alloc_size(node, reqlen, EFC_ELS_RSP_LEN);
}

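/*
 * Allocate an ELS IO request with caller-specified request and response
 * DMA buffer sizes, and link it onto the node's active ELS IO list.
 */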
struct efc_els_io_req *
efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
{
        struct efc *efc;
        struct efc_els_io_req *els;
        unsigned long flags = 0;

        efc = node->efc;

        if (!node->els_io_enabled) {
                efc_log_err(efc, "els io alloc disabled\n");
                return NULL;
        }

        els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
        if (!els) {
                atomic_add_return(1, &efc->els_io_alloc_failed_count);
                return NULL;
        }

        /* initialize refcount */
        kref_init(&els->ref);
        els->release = _efc_els_io_free;

        /* populate generic io fields */
        els->node = node;

        /* now allocate DMA for request and response */
        els->io.req.size = reqlen;
        els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
                                              &els->io.req.phys, GFP_KERNEL);
        if (!els->io.req.virt) {
                mempool_free(els, efc->els_io_pool);
                return NULL;
        }

        els->io.rsp.size = rsplen;
        els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
                                              &els->io.rsp.phys, GFP_KERNEL);
        if (!els->io.rsp.virt) {
                dma_free_coherent(&efc->pci->dev, els->io.req.size,
                                  els->io.req.virt, els->io.req.phys);
                mempool_free(els, efc->els_io_pool);
                els = NULL;
        }

        if (els) {
                /* initialize fields */
                els->els_retries_remaining = EFC_FC_ELS_DEFAULT_RETRIES;

                /* add els structure to ELS IO list */
                INIT_LIST_HEAD(&els->list_entry);
                spin_lock_irqsave(&node->els_ios_lock, flags);
                list_add_tail(&els->list_entry, &node->els_ios_list);
                spin_unlock_irqrestore(&node->els_ios_lock, flags);
        }

        return els;
}

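/* Drop a reference to an ELS IO; the final put invokes _efc_els_io_free(). */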
void
efc_els_io_free(struct efc_els_io_req *els)
{
        kref_put(&els->ref, els->release);
}

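/*
 * kref release callback: unlink the IO from the node's ELS IO list, free its
 * request/response DMA buffers and return the IO to the mempool.
 */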
void
_efc_els_io_free(struct kref *arg)
{
        struct efc_els_io_req *els =
                container_of(arg, struct efc_els_io_req, ref);
        struct efc *efc;
        struct efc_node *node;
        int send_empty_event = false;
        unsigned long flags = 0;

        node = els->node;
        efc = node->efc;

        spin_lock_irqsave(&node->els_ios_lock, flags);

        list_del(&els->list_entry);
        /* Send list empty event if the IO allocator
         * is disabled, and the list is empty
         * If node->els_io_enabled was not checked,
         * the event would be posted continually
         */
        send_empty_event = (!node->els_io_enabled &&
                            list_empty(&node->els_ios_list));

        spin_unlock_irqrestore(&node->els_ios_lock, flags);

        /* free ELS request and response buffers */
        dma_free_coherent(&efc->pci->dev, els->io.rsp.size,
                          els->io.rsp.virt, els->io.rsp.phys);
        dma_free_coherent(&efc->pci->dev, els->io.req.size,
                          els->io.req.virt, els->io.req.phys);

        mempool_free(els, efc->els_io_pool);

        if (send_empty_event)
                efc_scsi_io_list_empty(node->efc, node);
}

static void
efc_els_retry(struct efc_els_io_req *els);

static void
efc_els_delay_timer_cb(struct timer_list *t)
{
        struct efc_els_io_req *els = from_timer(els, t, delay_timer);

        /* Retry delay timer expired, retry the ELS request */
        efc_els_retry(els);
}

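/*
 * Completion callback for an outbound ELS request: translate the WCQE status
 * into a node state-machine event, retrying on LS_RJT "logical busy" and on
 * local-reject sequence timeouts.
 */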
static int
efc_els_req_cb(void *arg, u32 length, int status, u32 ext_status)
{
        struct efc_els_io_req *els;
        struct efc_node *node;
        struct efc *efc;
        struct efc_node_cb cbdata;
        u32 reason_code;

        els = arg;
        node = els->node;
        efc = node->efc;

        if (status)
                els_io_printf(els, "status x%x ext x%x\n", status, ext_status);

        /* set the response len element of els->rsp */
        els->io.rsp.len = length;

        cbdata.status = status;
        cbdata.ext_status = ext_status;
        cbdata.header = NULL;
        cbdata.els_rsp = els->io.rsp;

        /* set the response len for the node callback data */
        cbdata.rsp_len = length;

        /* FW returns the number of bytes received on the link in
         * the WCQE, not the amount placed in the buffer; use this info to
         * check if there was an overrun.
         */
        if (length > els->io.rsp.size) {
                efc_log_warn(efc,
                             "ELS response returned len=%d > buflen=%zu\n",
                             length, els->io.rsp.size);
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
                return 0;
        }

        /* Post event to ELS IO object */
        switch (status) {
        case SLI4_FC_WCQE_STATUS_SUCCESS:
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_OK, &cbdata);
                break;

        case SLI4_FC_WCQE_STATUS_LS_RJT:
                reason_code = (ext_status >> 16) & 0xff;

                /* delay and retry if reason code is Logical Busy */
                switch (reason_code) {
                case ELS_RJT_BUSY:
                        els->node->els_req_cnt--;
                        els_io_printf(els,
                                      "LS_RJT Logical Busy, delay and retry\n");
                        timer_setup(&els->delay_timer,
                                    efc_els_delay_timer_cb, 0);
                        mod_timer(&els->delay_timer,
                                  jiffies + msecs_to_jiffies(5000));
                        break;
                default:
                        efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_RJT,
                                           &cbdata);
                        break;
                }
                break;

        case SLI4_FC_WCQE_STATUS_LOCAL_REJECT:
                switch (ext_status) {
                case SLI4_FC_LOCAL_REJECT_SEQUENCE_TIMEOUT:
                        efc_els_retry(els);
                        break;
                default:
                        efc_log_err(efc, "LOCAL_REJECT with ext status:%x\n",
                                    ext_status);
                        efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL,
                                           &cbdata);
                        break;
                }
                break;
        default:        /* Other error */
                efc_log_warn(efc, "els req failed status x%x, ext_status x%x\n",
                             status, ext_status);
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
                break;
        }

        return 0;
}

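/*
 * Hardware completion for a discovery (ELS/CT) IO; dispatch to the callback
 * stored on the ELS IO request.
 */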
void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
                          u32 ext_status)
{
        struct efc_els_io_req *els =
                container_of(io, struct efc_els_io_req, io);

        WARN_ON_ONCE(!els->cb);

        ((efc_hw_srrs_cb_t)els->cb) (els, len, status, ext_status);
}

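/*
 * Fill in the discovery IO parameters for an outbound ELS/CT request and hand
 * it to the hardware transport; on submit failure, fail the IO back to the
 * node state machine.
 */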
static int efc_els_send_req(struct efc_node *node, struct efc_els_io_req *els,
                            enum efc_disc_io_type io_type)
{
        int rc = 0;
        struct efc *efc = node->efc;
        struct efc_node_cb cbdata;

        /* update ELS request counter */
        els->node->els_req_cnt++;

        /* Prepare the IO request details */
        els->io.io_type = io_type;
        els->io.xmit_len = els->io.req.size;
        els->io.rsp_len = els->io.rsp.size;
        els->io.rpi = node->rnode.indicator;
        els->io.vpi = node->nport->indicator;
        els->io.s_id = node->nport->fc_id;
        els->io.d_id = node->rnode.fc_id;

        if (node->rnode.attached)
                els->io.rpi_registered = true;

        els->cb = efc_els_req_cb;

        rc = efc->tt.send_els(efc, &els->io);
        if (!rc)
                return rc;

        cbdata.status = EFC_STATUS_INVALID;
        cbdata.ext_status = EFC_STATUS_INVALID;
        cbdata.els_rsp = els->io.rsp;
        efc_log_err(efc, "efc_els_send failed: %d\n", rc);
        efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);

        return rc;
}

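/*
 * Resend an ELS request while retries remain; once they are exhausted, fail
 * the IO back to the node state machine.
 */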
static void
efc_els_retry(struct efc_els_io_req *els)
{
        struct efc *efc;
        struct efc_node_cb cbdata;
        u32 rc;

        efc = els->node->efc;
        cbdata.status = EFC_STATUS_INVALID;
        cbdata.ext_status = EFC_STATUS_INVALID;
        cbdata.els_rsp = els->io.rsp;

        if (els->els_retries_remaining) {
                els->els_retries_remaining--;
                rc = efc->tt.send_els(efc, &els->io);
        } else {
                rc = -EIO;
        }

        if (rc) {
                efc_log_err(efc, "ELS retries exhausted\n");
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_REQ_FAIL, &cbdata);
        }
}

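/*
 * Completion callback for a transmitted ELS response (accept or reject);
 * post the matching completion event to the node.
 */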
static int
efc_els_acc_cb(void *arg, u32 length, int status, u32 ext_status)
{
        struct efc_els_io_req *els;
        struct efc_node *node;
        struct efc *efc;
        struct efc_node_cb cbdata;

        els = arg;
        node = els->node;
        efc = node->efc;

        cbdata.status = status;
        cbdata.ext_status = ext_status;
        cbdata.header = NULL;
        cbdata.els_rsp = els->io.rsp;

        /* Post node event */
        switch (status) {
        case SLI4_FC_WCQE_STATUS_SUCCESS:
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_OK, &cbdata);
                break;

        default:        /* Other error */
                efc_log_warn(efc, "[%s] %-8s failed status x%x, ext x%x\n",
                             node->display_name, els->display_name,
                             status, ext_status);
                efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);
                break;
        }

        return 0;
}

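/*
 * Fill in the discovery IO parameters for an ELS response and hand it to the
 * hardware transport.
 */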
static int
efc_els_send_rsp(struct efc_els_io_req *els, u32 rsplen)
{
        int rc = 0;
        struct efc_node_cb cbdata;
        struct efc_node *node = els->node;
        struct efc *efc = node->efc;

        /* increment ELS completion counter */
        node->els_cmpl_cnt++;

        els->io.io_type = EFC_DISC_IO_ELS_RESP;
        els->cb = efc_els_acc_cb;

        /* Prepare the IO request details */
        els->io.xmit_len = rsplen;
        els->io.rsp_len = els->io.rsp.size;
        els->io.rpi = node->rnode.indicator;
        els->io.vpi = node->nport->indicator;
        if (node->nport->fc_id != U32_MAX)
                els->io.s_id = node->nport->fc_id;
        else
                els->io.s_id = els->io.iparam.els.s_id;
        els->io.d_id = node->rnode.fc_id;

        if (node->attached)
                els->io.rpi_registered = true;

        rc = efc->tt.send_els(efc, &els->io);
        if (!rc)
                return rc;

        cbdata.status = EFC_STATUS_INVALID;
        cbdata.ext_status = EFC_STATUS_INVALID;
        cbdata.els_rsp = els->io.rsp;
        efc_els_io_cleanup(els, EFC_EVT_SRRS_ELS_CMPL_FAIL, &cbdata);

        return rc;
}

int
efc_send_plogi(struct efc_node *node)
{
        struct efc_els_io_req *els;
        struct efc *efc = node->efc;
        struct fc_els_flogi *plogi;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*plogi));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }
        els->display_name = "plogi";

        /* Build PLOGI request */
        plogi = els->io.req.virt;

        memcpy(plogi, node->nport->service_params, sizeof(*plogi));

        plogi->fl_cmd = ELS_PLOGI;
        memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_flogi(struct efc_node *node)
{
        struct efc_els_io_req *els;
        struct efc *efc;
        struct fc_els_flogi *flogi;

        efc = node->efc;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*flogi));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "flogi";

        /* Build FLOGI request */
        flogi = els->io.req.virt;

        memcpy(flogi, node->nport->service_params, sizeof(*flogi));
        flogi->fl_cmd = ELS_FLOGI;
        memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_fdisc(struct efc_node *node)
{
        struct efc_els_io_req *els;
        struct efc *efc;
        struct fc_els_flogi *fdisc;

        efc = node->efc;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*fdisc));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "fdisc";

        /* Build FDISC request */
        fdisc = els->io.req.virt;

        memcpy(fdisc, node->nport->service_params, sizeof(*fdisc));
        fdisc->fl_cmd = ELS_FDISC;
        memset(fdisc->_fl_resvd, 0, sizeof(fdisc->_fl_resvd));

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_prli(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els;
        struct {
                struct fc_els_prli prli;
                struct fc_els_spp spp;
        } *pp;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*pp));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "prli";

        /* Build PRLI request */
        pp = els->io.req.virt;

        memset(pp, 0, sizeof(*pp));

        pp->prli.prli_cmd = ELS_PRLI;
        pp->prli.prli_spp_len = 16;
        pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
        pp->spp.spp_type = FC_TYPE_FCP;
        pp->spp.spp_type_ext = 0;
        pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
        pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
                                         (node->nport->enable_ini ?
                                          FCP_SPPF_INIT_FCN : 0) |
                                         (node->nport->enable_tgt ?
                                          FCP_SPPF_TARG_FCN : 0));

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_logo(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els;
        struct fc_els_logo *logo;
        struct fc_els_flogi *sparams;

        node_els_trace();

        sparams = (struct fc_els_flogi *)node->nport->service_params;

        els = efc_els_io_alloc(node, sizeof(*logo));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "logo";

        /* Build LOGO request */

        logo = els->io.req.virt;

        memset(logo, 0, sizeof(*logo));
        logo->fl_cmd = ELS_LOGO;
        hton24(logo->fl_n_port_id, node->rnode.nport->fc_id);
        logo->fl_n_port_wwn = sparams->fl_wwpn;

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_adisc(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els;
        struct fc_els_adisc *adisc;
        struct fc_els_flogi *sparams;
        struct efc_nport *nport = node->nport;

        node_els_trace();

        sparams = (struct fc_els_flogi *)node->nport->service_params;

        els = efc_els_io_alloc(node, sizeof(*adisc));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "adisc";

        /* Build ADISC request */

        adisc = els->io.req.virt;

        memset(adisc, 0, sizeof(*adisc));
        adisc->adisc_cmd = ELS_ADISC;
        hton24(adisc->adisc_hard_addr, nport->fc_id);
        adisc->adisc_wwpn = sparams->fl_wwpn;
        adisc->adisc_wwnn = sparams->fl_wwnn;
        hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_scr(struct efc_node *node)
{
        struct efc_els_io_req *els;
        struct efc *efc = node->efc;
        struct fc_els_scr *req;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*req));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "scr";

        req = els->io.req.virt;

        memset(req, 0, sizeof(*req));
        req->scr_cmd = ELS_SCR;
        req->scr_reg_func = ELS_SCRF_FULL;

        return efc_els_send_req(node, els, EFC_DISC_IO_ELS_REQ);
}

int
efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
                u32 reason_code_expl, u32 vendor_unique)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct fc_els_ls_rjt *rjt;

        els = efc_els_io_alloc(node, sizeof(*rjt));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        node_els_trace();

        els->display_name = "ls_rjt";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        rjt = els->io.req.virt;
        memset(rjt, 0, sizeof(*rjt));

        rjt->er_cmd = ELS_LS_RJT;
        rjt->er_reason = reason_code;
        rjt->er_explan = reason_code_expl;

        return efc_els_send_rsp(els, sizeof(*rjt));
}

int
efc_send_plogi_acc(struct efc_node *node, u32 ox_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct fc_els_flogi *plogi;
        struct fc_els_flogi *req = (struct fc_els_flogi *)node->service_params;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*plogi));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "plogi_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        plogi = els->io.req.virt;

        /* copy our port's service parameters to payload */
        memcpy(plogi, node->nport->service_params, sizeof(*plogi));
        plogi->fl_cmd = ELS_LS_ACC;
        memset(plogi->_fl_resvd, 0, sizeof(plogi->_fl_resvd));

        /* Set Application header support bit (FC_SP_FT_BCAST) if requested */
        if (req->fl_csp.sp_features & cpu_to_be16(FC_SP_FT_BCAST))
                plogi->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_BCAST);

        return efc_els_send_rsp(els, sizeof(*plogi));
}

int
efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct fc_els_flogi *flogi;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*flogi));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "flogi_p2p_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;
        els->io.iparam.els.s_id = s_id;

        flogi = els->io.req.virt;

        /* copy our port's service parameters to payload */
        memcpy(flogi, node->nport->service_params, sizeof(*flogi));
        flogi->fl_cmd = ELS_LS_ACC;
        memset(flogi->_fl_resvd, 0, sizeof(flogi->_fl_resvd));

        memset(flogi->fl_cssp, 0, sizeof(flogi->fl_cssp));

        return efc_els_send_rsp(els, sizeof(*flogi));
}

int
efc_send_prli_acc(struct efc_node *node, u32 ox_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct {
                struct fc_els_prli prli;
                struct fc_els_spp spp;
        } *pp;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*pp));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "prli_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        pp = els->io.req.virt;
        memset(pp, 0, sizeof(*pp));

        pp->prli.prli_cmd = ELS_LS_ACC;
        pp->prli.prli_spp_len = 0x10;
        pp->prli.prli_len = cpu_to_be16(sizeof(*pp));
        pp->spp.spp_type = FC_TYPE_FCP;
        pp->spp.spp_type_ext = 0;
        pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR | FC_SPP_RESP_ACK;

        pp->spp.spp_params = cpu_to_be32(FCP_SPPF_RD_XRDY_DIS |
                                         (node->nport->enable_ini ?
                                          FCP_SPPF_INIT_FCN : 0) |
                                         (node->nport->enable_tgt ?
                                          FCP_SPPF_TARG_FCN : 0));

        return efc_els_send_rsp(els, sizeof(*pp));
}

int
efc_send_prlo_acc(struct efc_node *node, u32 ox_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct {
                struct fc_els_prlo prlo;
                struct fc_els_spp spp;
        } *pp;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*pp));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "prlo_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        pp = els->io.req.virt;
        memset(pp, 0, sizeof(*pp));
        pp->prlo.prlo_cmd = ELS_LS_ACC;
        pp->prlo.prlo_obs = 0x10;
        pp->prlo.prlo_len = cpu_to_be16(sizeof(*pp));

        pp->spp.spp_type = FC_TYPE_FCP;
        pp->spp.spp_type_ext = 0;
        pp->spp.spp_flags = FC_SPP_RESP_ACK;

        return efc_els_send_rsp(els, sizeof(*pp));
}

int
efc_send_ls_acc(struct efc_node *node, u32 ox_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct fc_els_ls_acc *acc;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*acc));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "ls_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        acc = els->io.req.virt;
        memset(acc, 0, sizeof(*acc));

        acc->la_cmd = ELS_LS_ACC;

        return efc_els_send_rsp(els, sizeof(*acc));
}

int
efc_send_logo_acc(struct efc_node *node, u32 ox_id)
{
        struct efc_els_io_req *els = NULL;
        struct efc *efc = node->efc;
        struct fc_els_ls_acc *logo;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*logo));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "logo_acc";

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        logo = els->io.req.virt;
        memset(logo, 0, sizeof(*logo));

        logo->la_cmd = ELS_LS_ACC;

        return efc_els_send_rsp(els, sizeof(*logo));
}

int
efc_send_adisc_acc(struct efc_node *node, u32 ox_id)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els = NULL;
        struct fc_els_adisc *adisc;
        struct fc_els_flogi *sparams;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*adisc));
        if (!els) {
                efc_log_err(efc, "els IO alloc failed\n");
                return -EIO;
        }

        els->display_name = "adisc_acc";

        /* Go ahead and send the ELS_ACC */
        memset(&els->io.iparam, 0, sizeof(els->io.iparam));
        els->io.iparam.els.ox_id = ox_id;

        sparams = (struct fc_els_flogi *)node->nport->service_params;
        adisc = els->io.req.virt;
        memset(adisc, 0, sizeof(*adisc));
        adisc->adisc_cmd = ELS_LS_ACC;
        adisc->adisc_wwpn = sparams->fl_wwpn;
        adisc->adisc_wwnn = sparams->fl_wwnn;
        hton24(adisc->adisc_port_id, node->rnode.nport->fc_id);

        return efc_els_send_rsp(els, sizeof(*adisc));
}

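/*
 * Build a common FC-CT request header for name server commands; max_size is
 * given in bytes and stored in the header as a count of 32-bit words.
 */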
static inline void
fcct_build_req_header(struct fc_ct_hdr *hdr, u16 cmd, u16 max_size)
{
        hdr->ct_rev = FC_CT_REV;
        hdr->ct_fs_type = FC_FST_DIR;
        hdr->ct_fs_subtype = FC_NS_SUBTYPE;
        hdr->ct_options = 0;
        hdr->ct_cmd = cpu_to_be16(cmd);
        /* words */
        hdr->ct_mr_size = cpu_to_be16(max_size / (sizeof(u32)));
        hdr->ct_reason = 0;
        hdr->ct_explan = 0;
        hdr->ct_vendor = 0;
}

int
efc_ns_send_rftid(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els;
        struct {
                struct fc_ct_hdr hdr;
                struct fc_ns_rft_id rftid;
        } *ct;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*ct));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
        els->io.iparam.ct.type = FC_TYPE_CT;
        els->io.iparam.ct.df_ctl = 0;
        els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

        els->display_name = "rftid";

        ct = els->io.req.virt;
        memset(ct, 0, sizeof(*ct));
        fcct_build_req_header(&ct->hdr, FC_NS_RFT_ID,
                              sizeof(struct fc_ns_rft_id));

        hton24(ct->rftid.fr_fid.fp_fid, node->rnode.nport->fc_id);
        ct->rftid.fr_fts.ff_type_map[FC_TYPE_FCP / FC_NS_BPW] =
                cpu_to_be32(1 << (FC_TYPE_FCP % FC_NS_BPW));

        return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}

int
efc_ns_send_rffid(struct efc_node *node)
{
        struct efc *efc = node->efc;
        struct efc_els_io_req *els;
        struct {
                struct fc_ct_hdr hdr;
                struct fc_ns_rff_id rffid;
        } *ct;

        node_els_trace();

        els = efc_els_io_alloc(node, sizeof(*ct));
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
        els->io.iparam.ct.type = FC_TYPE_CT;
        els->io.iparam.ct.df_ctl = 0;
        els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

        els->display_name = "rffid";
        ct = els->io.req.virt;

        memset(ct, 0, sizeof(*ct));
        fcct_build_req_header(&ct->hdr, FC_NS_RFF_ID,
                              sizeof(struct fc_ns_rff_id));

        hton24(ct->rffid.fr_fid.fp_fid, node->rnode.nport->fc_id);
        if (node->nport->enable_ini)
                ct->rffid.fr_feat |= FCP_FEAT_INIT;
        if (node->nport->enable_tgt)
                ct->rffid.fr_feat |= FCP_FEAT_TARG;
        ct->rffid.fr_type = FC_TYPE_FCP;

        return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}

int
efc_ns_send_gidpt(struct efc_node *node)
{
        struct efc_els_io_req *els = NULL;
        struct efc *efc = node->efc;
        struct {
                struct fc_ct_hdr hdr;
                struct fc_ns_gid_pt gidpt;
        } *ct;

        node_els_trace();

        els = efc_els_io_alloc_size(node, sizeof(*ct), EFC_ELS_GID_PT_RSP_LEN);
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        els->io.iparam.ct.r_ctl = FC_RCTL_ELS_REQ;
        els->io.iparam.ct.type = FC_TYPE_CT;
        els->io.iparam.ct.df_ctl = 0;
        els->io.iparam.ct.timeout = EFC_FC_ELS_SEND_DEFAULT_TIMEOUT;

        els->display_name = "gidpt";

        ct = els->io.req.virt;

        memset(ct, 0, sizeof(*ct));
        fcct_build_req_header(&ct->hdr, FC_NS_GID_PT,
                              sizeof(struct fc_ns_gid_pt));

        ct->gidpt.fn_pt_type = FC_TYPE_FCP;

        return efc_els_send_req(node, els, EFC_DISC_IO_CT_REQ);
}

void
efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg)
{
        /* don't want further events that could come; e.g. abort requests
         * from the node state machine; thus, disable state machine
         */
        els->els_req_free = true;
        efc_node_post_els_resp(els->node, evt, arg);

        efc_els_io_free(els);
}

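/* Completion callback for a transmitted CT response; just free the IO. */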
static int
efc_ct_acc_cb(void *arg, u32 length, int status, u32 ext_status)
{
        struct efc_els_io_req *els = arg;

        efc_els_io_free(els);

        return 0;
}

int
efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
                struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code,
                u32 reason_code, u32 reason_code_explanation)
{
        struct efc_els_io_req *els = NULL;
        struct fc_ct_hdr *rsp = NULL;

        els = efc_els_io_alloc(node, 256);
        if (!els) {
                efc_log_err(efc, "IO alloc failed\n");
                return -EIO;
        }

        rsp = els->io.rsp.virt;

        *rsp = *ct_hdr;

        fcct_build_req_header(rsp, cmd_rsp_code, 0);
        rsp->ct_reason = reason_code;
        rsp->ct_explan = reason_code_explanation;

        els->display_name = "ct_rsp";
        els->cb = efc_ct_acc_cb;

        /* Prepare the IO request details */
        els->io.io_type = EFC_DISC_IO_CT_RESP;
        els->io.xmit_len = sizeof(*rsp);

        els->io.rpi = node->rnode.indicator;
        els->io.d_id = node->rnode.fc_id;

        memset(&els->io.iparam, 0, sizeof(els->io.iparam));

        els->io.iparam.ct.ox_id = ox_id;
        els->io.iparam.ct.r_ctl = 3;
        els->io.iparam.ct.type = FC_TYPE_CT;
        els->io.iparam.ct.df_ctl = 0;
        els->io.iparam.ct.timeout = 5;

        if (efc->tt.send_els(efc, &els->io)) {
                efc_els_io_free(els);
                return -EIO;
        }
        return 0;
}

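/*
 * Send a BLS BA_ACC for the exchange identified by the received frame header,
 * echoing its OX_ID/RX_ID back to the originator.
 */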
int
efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr)
{
        struct sli_bls_params bls;
        struct fc_ba_acc *acc;
        struct efc *efc = node->efc;

        memset(&bls, 0, sizeof(bls));
        bls.ox_id = be16_to_cpu(hdr->fh_ox_id);
        bls.rx_id = be16_to_cpu(hdr->fh_rx_id);
        bls.s_id = ntoh24(hdr->fh_d_id);
        bls.d_id = node->rnode.fc_id;
        bls.rpi = node->rnode.indicator;
        bls.vpi = node->nport->indicator;

        acc = (void *)bls.payload;
        acc->ba_ox_id = cpu_to_be16(bls.ox_id);
        acc->ba_rx_id = cpu_to_be16(bls.rx_id);
        acc->ba_high_seq_cnt = cpu_to_be16(U16_MAX);

        return efc->tt.send_bls(efc, FC_RCTL_BA_ACC, &bls);
}