// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efclib.h"
#include "../libefc_sli/sli4.h"
#include "efc_cmds.h"
#include "efc_sm.h"

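/*
 * Tear down an nport: clear the attached flag, free the service-parameters
 * DMA buffer and the VPI resource, then notify the registered callbacks with
 * the given event.
 */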
static void
efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Clear the nport attached flag */
	nport->attached = false;

	/* Free the service parameters buffer */
	if (nport->dma.virt) {
		dma_free_coherent(&efc->pci->dev, nport->dma.size,
				  nport->dma.virt, nport->dma.phys);
		memset(&nport->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);

	efc_nport_cb(efc, evt, nport);
}

static int
efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
{
	struct efc *efc = nport->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
			      nport->indicator, status, le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static int
efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	int evt = EFC_EVT_NPORT_FREE_OK;
	int rc;

	rc = efc_nport_get_mbox_status(nport, mqe, status);
	if (rc)
		evt = EFC_EVT_NPORT_FREE_FAIL;

	efc_nport_free_resources(nport, evt, mqe);
	return rc;
}

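/*
 * Issue an UNREG_VPI mailbox command to release the VPI for this nport.
 * On any failure, free the nport resources and report
 * EFC_EVT_NPORT_FREE_FAIL.
 */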
static void
efc_nport_free_unreg_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
			       SLI4_UNREG_TYPE_PORT);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_free_unreg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
	}
}

static void
efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Now inform the registered callbacks */
	efc_nport_cb(efc, evt, nport);

	/* Set the nport attached flag */
	if (evt == EFC_EVT_NPORT_ATTACH_OK)
		nport->attached = true;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending)
		efc_nport_free_unreg_vpi(nport);
}

static int
efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
	return 0;
}

static void
efc_nport_alloc_init_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
		return;
	}

	rc = sli_cmd_init_vpi(efc->sli, data,
			      nport->indicator, nport->domain->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_init_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "INIT_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

static int
efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	u8 *payload = NULL;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	payload = nport->dma.virt;

	memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
	       sizeof(nport->sli_wwpn));
	memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
	       sizeof(nport->sli_wwnn));

	dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
			  nport->dma.phys);
	memset(&nport->dma, 0, sizeof(struct efc_dma));
	efc_nport_alloc_init_vpi(nport);
	return 0;
}

static void
efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
{
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* Allocate memory for the service parameters */
	nport->dma.size = EFC_SPARAM_DMA_SZ;
	nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					     nport->dma.size, &nport->dma.phys,
					     GFP_KERNEL);
	if (!nport->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = sli_cmd_read_sparm64(efc->sli, data,
				  &nport->dma, nport->indicator);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_read_sparm64_cb, nport);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

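/*
 * Allocate a VPI resource for an nport. For a virtual port (domain supplied)
 * with no WWPN, READ_SPARM64 is issued first to fetch the default WWPN/WWNN
 * before INIT_VPI; with a WWPN, INIT_VPI is issued directly. A physical port
 * (no domain) must supply a WWPN and is only assigned the VPI here.
 * Results are reported via EFC_EVT_NPORT_ALLOC_OK / EFC_EVT_NPORT_ALLOC_FAIL.
 */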
int
efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
		    struct efc_domain *domain, u8 *wwpn)
{
	u32 index;

	nport->indicator = U32_MAX;
	nport->free_req_pending = false;

	if (wwpn)
		memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));

	/*
	 * Allocate a VPI object for the port and store it in the
	 * indicator field of the port object.
	 */
	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
			       &nport->indicator, &index)) {
		efc_log_err(efc, "VPI allocation failure\n");
		return -EIO;
	}

	if (domain) {
		/*
		 * If the WWPN is NULL, fetch the default
		 * WWPN and WWNN before initializing the VPI
		 */
		if (!wwpn)
			efc_nport_alloc_read_sparm64(efc, nport);
		else
			efc_nport_alloc_init_vpi(nport);
	} else if (!wwpn) {
		/* domain NULL and wwpn NULL: a physical port needs a WWN */
		efc_log_err(efc, "need WWN for physical port\n");
		sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
		return -EIO;
	}

	return 0;
}

static int
efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
			    void *arg)
{
	struct efc_nport *nport = arg;

	nport->attaching = false;
	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
	return 0;
}

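/*
 * Attach an nport to an FC_ID by registering the previously allocated VPI
 * with a REG_VPI mailbox command. efc_nport_attach_reg_vpi_cb() reports the
 * result as EFC_EVT_NPORT_ATTACH_OK / EFC_EVT_NPORT_ATTACH_FAIL.
 */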
int
efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!nport) {
		efc_log_err(efc, "bad param(s) nport=%p\n", nport);
		return -EIO;
	}

	nport->fc_id = fc_id;

	/* register previously-allocated VPI with the device */
	rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
			     nport->sli_wwpn, nport->indicator,
			     nport->domain->indicator, false);
	if (rc) {
		efc_log_err(efc, "REG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
		return rc;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_nport_attach_reg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "REG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
	} else {
		nport->attaching = true;
	}

	return rc;
}

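/*
 * Free an nport. If the VPI is attached, unregister it; if an attach is
 * still in flight, defer the free until the REG_VPI completion; otherwise
 * post EFC_EVT_NPORT_FREE_OK to the nport state machine directly.
 */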
int
efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
{
	if (!nport) {
		efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
		return -EIO;
	}

	/* Issue the UNREG_VPI command to free the assigned VPI context */
	if (nport->attached)
		efc_nport_free_unreg_vpi(nport);
	else if (nport->attaching)
		nport->free_req_pending = true;
	else
		efc_sm_post_event(&nport->sm, EFC_EVT_NPORT_FREE_OK, NULL);

	return 0;
}

static int
efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
{
	struct efc *efc = domain->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
			      domain->indicator, status,
			      le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static void
efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Free the service parameters buffer */
	if (domain->dma.virt) {
		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);

	efc_domain_cb(efc, evt, domain);
}

static void
efc_domain_send_nport_evt(struct efc_domain *domain,
			  int port_evt, int domain_evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Send alloc/attach ok to the physical nport */
	efc_nport_send_evt(domain->nport, port_evt, NULL);

	/* Now inform the registered callbacks */
	efc_domain_cb(efc, domain_evt, domain);
}

static int
efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
				 void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
				  EFC_HW_DOMAIN_ALLOC_OK, mqe);
	return 0;
}

static void
efc_domain_alloc_read_sparm64(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_read_sparm64_cb, domain);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

static int
efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_alloc_read_sparm64(domain);
	return 0;
}

static void
efc_domain_alloc_init_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	struct efc_nport *nport = domain->nport;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/*
	 * For FC, the HW has already registered an FCFI.
	 * Copy the FCF information into the domain and jump to INIT_VFI.
	 */
	domain->fcf_indicator = efc->fcfi;
	rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
			      domain->fcf_indicator, nport->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VFI format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	efc_log_err(efc, "%s issue mbox\n", __func__);
	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_init_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "INIT_VFI command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

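/*
 * Allocate a domain: reserve a VFI and the service-parameters DMA buffer,
 * then start the INIT_VFI -> READ_SPARM64 mailbox sequence. Completion is
 * reported via EFC_HW_DOMAIN_ALLOC_OK / EFC_HW_DOMAIN_ALLOC_FAIL.
 */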
int
efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
{
	u32 index;

	if (!domain || !domain->nport) {
		efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
			    domain, domain ? domain->nport : NULL);
		return -EIO;
	}

	/* allocate memory for the service parameters */
	domain->dma.size = EFC_SPARAM_DMA_SZ;
	domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					      domain->dma.size,
					      &domain->dma.phys, GFP_KERNEL);
	if (!domain->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		return -EIO;
	}

	domain->fcf = fcf;
	domain->fcf_indicator = U32_MAX;
	domain->indicator = U32_MAX;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
			       &index)) {
		efc_log_err(efc, "VFI allocation failure\n");

		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));

		return -EIO;
	}

	efc_domain_alloc_init_vfi(domain);
	return 0;
}

static int
efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
				  EFC_HW_DOMAIN_ATTACH_OK, mqe);
	return 0;
}

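/*
 * Attach a domain to an FC_ID by issuing REG_VFI for the allocated VFI.
 * efc_domain_attach_reg_vfi_cb() reports EFC_HW_DOMAIN_ATTACH_OK or
 * EFC_HW_DOMAIN_ATTACH_FAIL; on any failure here the domain resources
 * are freed.
 */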
int
efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!domain) {
		efc_log_err(efc, "bad param(s) domain=%p\n", domain);
		return -EIO;
	}

	domain->nport->fc_id = fc_id;

	rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
			     domain->fcf_indicator, domain->dma,
			     domain->nport->indicator, domain->nport->sli_wwpn,
			     domain->nport->fc_id);
	if (rc) {
		efc_log_err(efc, "REG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_domain_attach_reg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "REG_VFI command failure\n");
		goto cleanup;
	}

	return rc;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);

	return rc;
}

static int
efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_domain *domain = arg;
	int evt = EFC_HW_DOMAIN_FREE_OK;
	int rc;

	rc = efc_domain_get_mbox_status(domain, mqe, status);
	if (rc) {
		evt = EFC_HW_DOMAIN_FREE_FAIL;
		rc = -EIO;
	}

	efc_domain_free_resources(domain, evt, mqe);
	return rc;
}

static void
efc_domain_free_unreg_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
			       SLI4_UNREG_TYPE_DOMAIN);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_free_unreg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI command failure\n");
		goto cleanup;
	}

	return;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
}

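/*
 * Free a domain by unregistering its VFI. Resources are released and the
 * result is reported through EFC_HW_DOMAIN_FREE_OK / EFC_HW_DOMAIN_FREE_FAIL.
 */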
int
efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
{
	if (!domain) {
		efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
		return -EIO;
	}

	efc_domain_free_unreg_vfi(domain);
	return 0;
}

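/*
 * Allocate an RPI for a remote node. The node must not already hold an
 * indicator; on success the node records its FC address and owning nport.
 */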
int
efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
		   struct efc_nport *nport)
{
	/* Check for invalid indicator */
	if (rnode->indicator != U32_MAX) {
		efc_log_err(efc,
			    "RPI allocation failure addr=%#x rpi=%#x\n",
			    fc_addr, rnode->indicator);
		return -EIO;
	}

	/* NULL SLI port indicates an unallocated remote node */
	rnode->nport = NULL;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
			       &rnode->indicator, &rnode->index)) {
		efc_log_err(efc, "RPI allocation failure addr=%#x\n",
			    fc_addr);
		return -EIO;
	}

	rnode->fc_id = fc_addr;
	rnode->nport = nport;

	return 0;
}

static int
efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;
	int evt = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));
		rnode->attached = false;
		evt = EFC_EVT_NODE_ATTACH_FAIL;
	} else {
		rnode->attached = true;
		evt = EFC_EVT_NODE_ATTACH_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return 0;
}

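/*
 * Attach a remote node by registering its RPI with the remote port's service
 * parameters (REG_RPI). efc_cmd_node_attach_cb() reports the result as
 * EFC_EVT_NODE_ATTACH_OK / EFC_EVT_NODE_ATTACH_FAIL.
 */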
int
efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
		    struct efc_dma *sparms)
{
	int rc = -EIO;
	u8 buf[SLI4_BMBX_SIZE];

	if (!rnode || !sparms) {
		efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
			    rnode, sparms);
		return -EIO;
	}

	/* The RPI must have been allocated before it can be registered */
	if (rnode->index == U32_MAX) {
		efc_log_err(efc, "bad parameter rnode->index invalid\n");
		return -EIO;
	}

	/* Update a remote node object with the remote port's service params */
	if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
			     rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
		rc = efc->tt.issue_mbox_rqst(efc->base, buf,
					     efc_cmd_node_attach_cb, rnode);

	return rc;
}

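/*
 * Release the RPI held by a detached remote node. Fails if the node is
 * still attached; on success the indicator and index are reset to U32_MAX.
 */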
int
efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
{
	int rc = 0;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (rnode->attached) {
			efc_log_err(efc, "rnode is still attached\n");
			return -EIO;
		}
		if (rnode->indicator != U32_MAX) {
			if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
					      rnode->indicator)) {
				efc_log_err(efc,
					    "RPI free fail RPI %d addr=%#x\n",
					    rnode->indicator, rnode->fc_id);
				rc = -EIO;
			} else {
				rnode->indicator = U32_MAX;
				rnode->index = U32_MAX;
			}
		}
	}

	return rc;
}

static int
efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;
	int evt = EFC_EVT_NODE_FREE_FAIL;
	int rc = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));

		/*
		 * In certain cases, a non-zero MQE status is OK (all must be
		 * true):
		 * - node is attached
		 * - status is 0x1400
		 */
		if (!rnode->attached ||
		    (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
			rc = -EIO;
	}

	if (!rc) {
		rnode->attached = false;
		evt = EFC_EVT_NODE_FREE_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return rc;
}

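/*
 * Detach a remote node by unregistering its RPI (UNREG_RPI). The completion
 * handler efc_cmd_node_free_cb() clears the attached flag and reports
 * EFC_EVT_NODE_FREE_OK / EFC_EVT_NODE_FREE_FAIL.
 */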
int
efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = -EIO;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (!rnode->attached)
			return -EIO;

		rc = -EIO;

		if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
				       SLI4_RSRC_RPI, U32_MAX))
			rc = efc->tt.issue_mbox_rqst(efc->base, buf,
						     efc_cmd_node_free_cb, rnode);

		if (rc != 0) {
			efc_log_err(efc, "UNREG_RPI failed\n");
			rc = -EIO;
		}
	}

	return rc;
}