// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * domain_sm Domain State Machine: States
 */

#include "efc.h"

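/*
 * efc_domain_cb() - handle a domain event from the user driver.
 *
 * Maps each EFC_HW_DOMAIN_* event to the corresponding EFC_EVT_DOMAIN_*
 * event and posts it to the domain state machine. A new domain object is
 * allocated on EFC_HW_DOMAIN_FOUND if none exists yet; for that event
 * @data is a struct efc_domain_record, otherwise it is the domain itself.
 */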
int
efc_domain_cb(void *arg, int event, void *data)
{
	struct efc *efc = arg;
	struct efc_domain *domain = NULL;
	int rc = 0;
	unsigned long flags = 0;

	if (event != EFC_HW_DOMAIN_FOUND)
		domain = data;

	/* Accept domain callback events from the user driver */
	spin_lock_irqsave(&efc->lock, flags);
	switch (event) {
	case EFC_HW_DOMAIN_FOUND: {
		u64 fcf_wwn = 0;
		struct efc_domain_record *drec = data;

		/* extract the fcf_wwn */
		fcf_wwn = be64_to_cpu(*((__be64 *)drec->wwn));

		efc_log_debug(efc, "Domain found: wwn %016llX\n", fcf_wwn);

		/* lookup domain, or allocate a new one */
		domain = efc->domain;
		if (!domain) {
			domain = efc_domain_alloc(efc, fcf_wwn);
			if (!domain) {
				efc_log_err(efc, "efc_domain_alloc() failed\n");
				rc = -1;
				break;
			}
			efc_sm_transition(&domain->drvsm, __efc_domain_init,
					  NULL);
		}
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_FOUND, drec);
		break;
	}

	case EFC_HW_DOMAIN_LOST:
		domain_trace(domain, "EFC_HW_DOMAIN_LOST:\n");
		efc->hold_frames = true;
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_LOST, NULL);
		break;

	case EFC_HW_DOMAIN_ALLOC_OK:
		domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_OK:\n");
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_OK, NULL);
		break;

	case EFC_HW_DOMAIN_ALLOC_FAIL:
		domain_trace(domain, "EFC_HW_DOMAIN_ALLOC_FAIL:\n");
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_ALLOC_FAIL,
				      NULL);
		break;

	case EFC_HW_DOMAIN_ATTACH_OK:
		domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_OK:\n");
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_ATTACH_OK, NULL);
		break;

	case EFC_HW_DOMAIN_ATTACH_FAIL:
		domain_trace(domain, "EFC_HW_DOMAIN_ATTACH_FAIL:\n");
		efc_domain_post_event(domain,
				      EFC_EVT_DOMAIN_ATTACH_FAIL, NULL);
		break;

	case EFC_HW_DOMAIN_FREE_OK:
		domain_trace(domain, "EFC_HW_DOMAIN_FREE_OK:\n");
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_OK, NULL);
		break;

	case EFC_HW_DOMAIN_FREE_FAIL:
		domain_trace(domain, "EFC_HW_DOMAIN_FREE_FAIL:\n");
		efc_domain_post_event(domain, EFC_EVT_DOMAIN_FREE_FAIL, NULL);
		break;

	default:
		efc_log_warn(efc, "unsupported event %#x\n", event);
	}
	spin_unlock_irqrestore(&efc->lock, flags);

	if (efc->domain && domain->req_accept_frames) {
		domain->req_accept_frames = false;
		efc->hold_frames = false;
	}

	return rc;
}

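/* kref release handler: run the registered domain-free callback, then free */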
static void
_efc_domain_free(struct kref *arg)
{
	struct efc_domain *domain = container_of(arg, struct efc_domain, ref);
	struct efc *efc = domain->efc;

	if (efc->domain_free_cb)
		(*efc->domain_free_cb)(efc, efc->domain_free_cb_arg);

	kfree(domain);
}

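/*
 * efc_domain_free() - tear down the domain lookup state, clear the active
 * domain pointer and drop the domain reference; the release handler runs
 * once the last reference is gone.
 */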
void
efc_domain_free(struct efc_domain *domain)
{
	struct efc *efc;

	efc = domain->efc;

	/* Stop holding frames; the domain pointer is being removed
	 * from the xport lookup
	 */
	efc->hold_frames = false;

	efc_log_debug(efc, "Domain free: wwn %016llX\n", domain->fcf_wwn);

	xa_destroy(&domain->lookup);
	efc->domain = NULL;
	kref_put(&domain->ref, domain->release);
}

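/*
 * efc_domain_alloc() - allocate and initialize a domain object for the
 * given FCF WWN and make it the active domain (efc->domain).
 */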
struct efc_domain *
efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn)
{
	struct efc_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_ATOMIC);
	if (!domain)
		return NULL;

	domain->efc = efc;
	domain->drvsm.app = domain;

	/* initialize refcount */
	kref_init(&domain->ref);
	domain->release = _efc_domain_free;

	xa_init(&domain->lookup);

	INIT_LIST_HEAD(&domain->nport_list);
	efc->domain = domain;
	domain->fcf_wwn = fcf_wwn;
	efc_log_debug(efc, "Domain allocated: wwn %016llX\n", domain->fcf_wwn);

	return domain;
}

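/*
 * efc_register_domain_free_cb() - register a callback to run when the
 * domain is freed; if no domain currently exists, invoke it immediately.
 */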
void
efc_register_domain_free_cb(struct efc *efc,
			    void (*callback)(struct efc *efc, void *arg),
			    void *arg)
{
	/* Register a callback to be called when the domain is freed */
	efc->domain_free_cb = callback;
	efc->domain_free_cb_arg = arg;
	if (!efc->domain && callback)
		(*callback)(efc, arg);
}

static void
__efc_domain_common(const char *funcname, struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg)
{
	struct efc_domain *domain = ctx->app;

	switch (evt) {
	case EFC_EVT_ENTER:
	case EFC_EVT_REENTER:
	case EFC_EVT_EXIT:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/*
		 * this can arise if an FLOGI fails on the NPORT,
		 * and the NPORT is shut down
		 */
		break;
	default:
		efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
			     funcname, efc_sm_event_name(evt));
	}
}

static void
__efc_domain_common_shutdown(const char *funcname, struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_domain *domain = ctx->app;

	switch (evt) {
	case EFC_EVT_ENTER:
	case EFC_EVT_REENTER:
	case EFC_EVT_EXIT:
		break;
	case EFC_EVT_DOMAIN_FOUND:
		/* save drec, mark domain_found_pending */
		memcpy(&domain->pending_drec, arg,
		       sizeof(domain->pending_drec));
		domain->domain_found_pending = true;
		break;
	case EFC_EVT_DOMAIN_LOST:
		/* unmark domain_found_pending */
		domain->domain_found_pending = false;
		break;

	default:
		efc_log_warn(domain->efc, "%-20s %-20s not handled\n",
			     funcname, efc_sm_event_name(evt));
	}
}

#define std_domain_state_decl(...)\
	struct efc_domain *domain = NULL;\
	struct efc *efc = NULL;\
	\
	WARN_ON(!ctx || !ctx->app);\
	domain = ctx->app;\
	WARN_ON(!domain->efc);\
	efc = domain->efc

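/*
 * __efc_domain_init() - initial domain state. On EFC_EVT_DOMAIN_FOUND,
 * allocate the physical nport and request the HW nport allocation; for
 * loop topologies, also pre-allocate nodes from the loop position map.
 * Finally, initiate the HW domain allocation and wait for it to complete.
 */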
void
__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		  void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_ENTER:
		domain->attached = false;
		break;

	case EFC_EVT_DOMAIN_FOUND: {
		u32 i;
		struct efc_domain_record *drec = arg;
		struct efc_nport *nport;

		u64 my_wwnn = efc->req_wwnn;
		u64 my_wwpn = efc->req_wwpn;
		__be64 bewwpn;

		if (my_wwpn == 0 || my_wwnn == 0) {
			efc_log_debug(efc, "using default hardware WWN config\n");
			my_wwpn = efc->def_wwpn;
			my_wwnn = efc->def_wwnn;
		}

		efc_log_debug(efc, "Create nport WWPN %016llX WWNN %016llX\n",
			      my_wwpn, my_wwnn);

		/* Allocate a nport and transition to __efc_nport_allocated */
		nport = efc_nport_alloc(domain, my_wwpn, my_wwnn, U32_MAX,
					efc->enable_ini, efc->enable_tgt);

		if (!nport) {
			efc_log_err(efc, "efc_nport_alloc() failed\n");
			break;
		}
		efc_sm_transition(&nport->sm, __efc_nport_allocated, NULL);

		bewwpn = cpu_to_be64(nport->wwpn);

		/* allocate struct efc_nport object for local port
		 * Note: drec->fc_id is ALPA from read_topology only if loop
		 */
		if (efc_cmd_nport_alloc(efc, nport, NULL, (uint8_t *)&bewwpn)) {
			efc_log_err(efc, "Can't allocate port\n");
			efc_nport_free(nport);
			break;
		}

		domain->is_loop = drec->is_loop;

		/*
		 * If the loop position map includes ALPA == 0,
		 * then we are in a public loop (NL_PORT)
		 * Note that the first element of the loopmap[]
		 * contains the count of elements, and if
		 * ALPA == 0 is present, it will occupy the first
		 * location after the count.
		 */
		domain->is_nlport = drec->map.loop[1] == 0x00;

		if (!domain->is_loop) {
			/* Initiate HW domain alloc */
			if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
				efc_log_err(efc,
					    "Failed to initiate HW domain allocation\n");
				break;
			}
			efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
			break;
		}

		efc_log_debug(efc, "%s fc_id=%#x speed=%d\n",
			      drec->is_loop ?
			      (domain->is_nlport ?
			       "public-loop" : "loop") : "other",
			      drec->fc_id, drec->speed);

		nport->fc_id = drec->fc_id;
		nport->topology = EFC_NPORT_TOPO_FC_AL;
		snprintf(nport->display_name, sizeof(nport->display_name),
			 "s%06x", drec->fc_id);

		if (efc->enable_ini) {
			u32 count = drec->map.loop[0];

			efc_log_debug(efc, "%d position map entries\n",
				      count);
			for (i = 1; i <= count; i++) {
				if (drec->map.loop[i] != drec->fc_id) {
					struct efc_node *node;

					efc_log_debug(efc, "%#x -> %#x\n",
						      drec->fc_id,
						      drec->map.loop[i]);
					node = efc_node_alloc(nport,
							      drec->map.loop[i],
							      false, true);
					if (!node) {
						efc_log_err(efc,
							    "efc_node_alloc() failed\n");
						break;
					}
					efc_node_transition(node,
							    __efc_d_wait_loop,
							    NULL);
				}
			}
		}

		/* Initiate HW domain alloc */
		if (efc_cmd_domain_alloc(efc, domain, drec->index)) {
			efc_log_err(efc,
				    "Failed to initiate HW domain allocation\n");
			break;
		}
		efc_sm_transition(ctx, __efc_domain_wait_alloc, arg);
		break;
	}
	default:
		__efc_domain_common(__func__, ctx, evt, arg);
	}
}

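/*
 * __efc_domain_wait_alloc() - wait for the HW domain allocation to
 * complete. On success, save the fabric service parameters, then either
 * attach directly (private loop) or allocate the fabric node to start
 * the fabric login.
 */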
void
__efc_domain_wait_alloc(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_DOMAIN_ALLOC_OK: {
		struct fc_els_flogi *sp;
		struct efc_nport *nport;

		nport = domain->nport;
		if (WARN_ON(!nport))
			return;

		sp = (struct fc_els_flogi *)nport->service_params;

		/* Save the domain service parameters */
		memcpy(domain->service_params + 4, domain->dma.virt,
		       sizeof(struct fc_els_flogi) - 4);
		memcpy(nport->service_params + 4, domain->dma.virt,
		       sizeof(struct fc_els_flogi) - 4);

		/*
		 * Update the nport's service parameters,
		 * user might have specified non-default names
		 */
		sp->fl_wwpn = cpu_to_be64(nport->wwpn);
		sp->fl_wwnn = cpu_to_be64(nport->wwnn);

		/*
		 * Take the loop topology path,
		 * unless we are an NL_PORT (public loop)
		 */
		if (domain->is_loop && !domain->is_nlport) {
			/*
			 * For loop, we already have our FC ID
			 * and don't need fabric login.
			 * Transition to the allocated state and
			 * post an event to attach to
			 * the domain. Note that this breaks the
			 * normal action/transition
			 * pattern here to avoid a race with the
			 * domain attach callback.
			 */
			/* sm: is_loop / domain_attach */
			efc_sm_transition(ctx, __efc_domain_allocated, NULL);
			__efc_domain_attach_internal(domain, nport->fc_id);
			break;
		}
		{
			struct efc_node *node;

			/* alloc fabric node, send FLOGI */
			node = efc_node_find(nport, FC_FID_FLOGI);
			if (node) {
				efc_log_err(efc,
					    "Fabric Controller node already exists\n");
				break;
			}
			node = efc_node_alloc(nport, FC_FID_FLOGI,
					      false, false);
			if (!node) {
				efc_log_err(efc,
					    "Error: efc_node_alloc() failed\n");
			} else {
				efc_node_transition(node,
						    __efc_fabric_init, NULL);
			}
			/* Accept frames */
			domain->req_accept_frames = true;
		}
		/* sm: / start fabric logins */
		efc_sm_transition(ctx, __efc_domain_allocated, NULL);
		break;
	}

	case EFC_EVT_DOMAIN_ALLOC_FAIL:
		efc_log_err(efc, "%s recv'd waiting for DOMAIN_ALLOC_OK;",
			    efc_sm_event_name(evt));
		efc_log_err(efc, "shutting down domain\n");
		domain->req_domain_free = true;
		break;

	case EFC_EVT_DOMAIN_FOUND:
		/* Should not happen */
		break;

	case EFC_EVT_DOMAIN_LOST:
		efc_log_debug(efc,
			      "%s received while waiting for hw_domain_alloc()\n",
			      efc_sm_event_name(evt));
		efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
		break;

	default:
		__efc_domain_common(__func__, ctx, evt, arg);
	}
}

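/*
 * __efc_domain_allocated() - domain allocated, waiting for an attach
 * request. On EFC_EVT_DOMAIN_REQ_ATTACH, record the nport in the lookup
 * table and issue the HW domain attach.
 */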
void
__efc_domain_allocated(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_DOMAIN_REQ_ATTACH: {
		int rc = 0;
		u32 fc_id;

		if (WARN_ON(!arg))
			return;

		fc_id = *((u32 *)arg);
		efc_log_debug(efc, "Requesting hw domain attach fc_id x%x\n",
			      fc_id);
		/* Update nport lookup */
		rc = xa_err(xa_store(&domain->lookup, fc_id, domain->nport,
				     GFP_ATOMIC));
		if (rc) {
			efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
			return;
		}

		/* Update display name for the nport */
		efc_node_fcid_display(fc_id, domain->nport->display_name,
				      sizeof(domain->nport->display_name));

		/* Issue domain attach call */
		rc = efc_cmd_domain_attach(efc, domain, fc_id);
		if (rc) {
			efc_log_err(efc, "efc_hw_domain_attach failed: %d\n",
				    rc);
			return;
		}
		/* sm: / domain_attach */
		efc_sm_transition(ctx, __efc_domain_wait_attach, NULL);
		break;
	}

	case EFC_EVT_DOMAIN_FOUND:
		/* Should not happen */
		efc_log_err(efc, "%s: evt: %d should not happen\n",
			    __func__, evt);
		break;

	case EFC_EVT_DOMAIN_LOST: {
		efc_log_debug(efc,
			      "%s received while in EFC_EVT_DOMAIN_REQ_ATTACH\n",
			      efc_sm_event_name(evt));
		if (!list_empty(&domain->nport_list)) {
			/*
			 * if there are nports, transition to
			 * wait state and send shutdown to each
			 * nport
			 */
			struct efc_nport *nport = NULL, *nport_next = NULL;

			efc_sm_transition(ctx, __efc_domain_wait_nports_free,
					  NULL);
			list_for_each_entry_safe(nport, nport_next,
						 &domain->nport_list,
						 list_entry) {
				efc_sm_post_event(&nport->sm,
						  EFC_EVT_SHUTDOWN, NULL);
			}
		} else {
			/* no nports exist, free domain */
			efc_sm_transition(ctx, __efc_domain_wait_shutdown,
					  NULL);
			if (efc_cmd_domain_free(efc, domain))
				efc_log_err(efc, "hw_domain_free failed\n");
		}

		break;
	}

	default:
		__efc_domain_common(__func__, ctx, evt, arg);
	}
}

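/*
 * __efc_domain_wait_attach() - wait for the HW domain attach to complete.
 * On success, mark the domain attached, move to the ready state and
 * notify every node on every nport.
 */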
void
__efc_domain_wait_attach(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_DOMAIN_ATTACH_OK: {
		struct efc_node *node = NULL;
		struct efc_nport *nport, *next_nport;
		unsigned long index;

		/*
		 * Set domain notify pending state to avoid
		 * duplicate domain event post
		 */
		domain->domain_notify_pend = true;

		/* Mark as attached */
		domain->attached = true;

		/* Transition to ready */
		/* sm: / forward event to all nports and nodes */
		efc_sm_transition(ctx, __efc_domain_ready, NULL);

		/* We have an FCFI, so we can accept frames */
		domain->req_accept_frames = true;

		/*
		 * Notify all nodes that the domain attach request
		 * has completed
		 * Note: nport will have already received notification
		 * of nport attached as a result of the HW's port attach.
		 */
		list_for_each_entry_safe(nport, next_nport,
					 &domain->nport_list, list_entry) {
			xa_for_each(&nport->lookup, index, node) {
				efc_node_post_event(node,
						    EFC_EVT_DOMAIN_ATTACH_OK,
						    NULL);
			}
		}
		domain->domain_notify_pend = false;
		break;
	}

	case EFC_EVT_DOMAIN_ATTACH_FAIL:
		efc_log_debug(efc,
			      "%s received while waiting for hw attach\n",
			      efc_sm_event_name(evt));
		break;

	case EFC_EVT_DOMAIN_FOUND:
		/* Should not happen */
		efc_log_err(efc, "%s: evt: %d should not happen\n",
			    __func__, evt);
		break;

	case EFC_EVT_DOMAIN_LOST:
		/*
		 * Domain lost while waiting for an attach to complete,
		 * go to a state that waits for the domain attach to
		 * complete, then handle domain lost
		 */
		efc_sm_transition(ctx, __efc_domain_wait_domain_lost, NULL);
		break;

	case EFC_EVT_DOMAIN_REQ_ATTACH:
		/*
		 * In P2P we can get an attach request from
		 * the other FLOGI path, so drop this one
		 */
		break;

	default:
		__efc_domain_common(__func__, ctx, evt, arg);
	}
}

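/*
 * __efc_domain_ready() - domain attached and ready: start any pending
 * vports on entry and begin teardown when the domain is lost.
 */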
void
__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_ENTER: {
		/* start any pending vports */
		if (efc_vport_start(domain)) {
			efc_log_debug(domain->efc,
				      "efc_vport_start didn't start vports\n");
		}
		break;
	}
	case EFC_EVT_DOMAIN_LOST: {
		if (!list_empty(&domain->nport_list)) {
			/*
			 * if there are nports, transition to wait state
			 * and send shutdown to each nport
			 */
			struct efc_nport *nport = NULL, *nport_next = NULL;

			efc_sm_transition(ctx, __efc_domain_wait_nports_free,
					  NULL);
			list_for_each_entry_safe(nport, nport_next,
						 &domain->nport_list,
						 list_entry) {
				efc_sm_post_event(&nport->sm,
						  EFC_EVT_SHUTDOWN, NULL);
			}
		} else {
			/* no nports exist, free domain */
			efc_sm_transition(ctx, __efc_domain_wait_shutdown,
					  NULL);
			if (efc_cmd_domain_free(efc, domain))
				efc_log_err(efc, "hw_domain_free failed\n");
		}
		break;
	}

	case EFC_EVT_DOMAIN_FOUND:
		/* Should not happen */
		efc_log_err(efc, "%s: evt: %d should not happen\n",
			    __func__, evt);
		break;

	case EFC_EVT_DOMAIN_REQ_ATTACH: {
		/* can happen during p2p */
		u32 fc_id;

		fc_id = *((u32 *)arg);

		/* Assume that the domain is attached */
		WARN_ON(!domain->attached);

		/*
		 * Verify that the requested FC_ID
		 * is the same as the one we're working with
		 */
		WARN_ON(domain->nport->fc_id != fc_id);
		break;
	}

	default:
		__efc_domain_common(__func__, ctx, evt, arg);
	}
}

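/*
 * __efc_domain_wait_nports_free() - wait for all child nports to be
 * freed, then request the HW domain free.
 */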
void
__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			      void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	/* Wait for nodes to free prior to the domain shutdown */
	switch (evt) {
	case EFC_EVT_ALL_CHILD_NODES_FREE: {
		int rc;

		/* sm: / efc_hw_domain_free */
		efc_sm_transition(ctx, __efc_domain_wait_shutdown, NULL);

		/* Request efc_hw_domain_free and wait for completion */
		rc = efc_cmd_domain_free(efc, domain);
		if (rc) {
			efc_log_err(efc, "efc_hw_domain_free() failed: %d\n",
				    rc);
		}
		break;
	}
	default:
		__efc_domain_common_shutdown(__func__, ctx, evt, arg);
	}
}

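/*
 * __efc_domain_wait_shutdown() - wait for the HW domain free to complete.
 * If a new domain was discovered while shutting down, reallocate a domain
 * for the saved FCF WWN and replay the pending domain record.
 */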
void
__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	switch (evt) {
	case EFC_EVT_DOMAIN_FREE_OK:
		/* sm: / domain_free */
		if (domain->domain_found_pending) {
			/*
			 * save fcf_wwn and drec from this domain,
			 * free current domain and allocate
			 * a new one with the same fcf_wwn
			 * could use a SLI-4 "re-register VPI"
			 * operation here?
			 */
			u64 fcf_wwn = domain->fcf_wwn;
			struct efc_domain_record drec = domain->pending_drec;

			efc_log_debug(efc, "Reallocating domain\n");
			domain->req_domain_free = true;
			domain = efc_domain_alloc(efc, fcf_wwn);

			if (!domain) {
				efc_log_err(efc,
					    "efc_domain_alloc() failed\n");
				return;
			}
			/*
			 * got a new domain; at this point,
			 * there are at least two domains
			 * once the req_domain_free flag is processed,
			 * the associated domain will be removed.
			 */
			efc_sm_transition(&domain->drvsm, __efc_domain_init,
					  NULL);
			efc_sm_post_event(&domain->drvsm,
					  EFC_EVT_DOMAIN_FOUND, &drec);
		} else {
			domain->req_domain_free = true;
		}
		break;
	default:
		__efc_domain_common_shutdown(__func__, ctx, evt, arg);
	}
}

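/*
 * __efc_domain_wait_domain_lost() - a domain-lost arrived while an alloc
 * or attach was outstanding; wait for that completion and then shut the
 * domain down.
 */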
void
__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg)
{
	std_domain_state_decl();

	domain_sm_trace(domain);

	/*
	 * Wait for the domain alloc/attach completion
	 * after receiving a domain lost.
	 */
	switch (evt) {
	case EFC_EVT_DOMAIN_ALLOC_OK:
	case EFC_EVT_DOMAIN_ATTACH_OK: {
		if (!list_empty(&domain->nport_list)) {
			/*
			 * if there are nports, transition to
			 * wait state and send shutdown to each nport
			 */
			struct efc_nport *nport = NULL, *nport_next = NULL;

			efc_sm_transition(ctx, __efc_domain_wait_nports_free,
					  NULL);
			list_for_each_entry_safe(nport, nport_next,
						 &domain->nport_list,
						 list_entry) {
				efc_sm_post_event(&nport->sm,
						  EFC_EVT_SHUTDOWN, NULL);
			}
		} else {
			/* no nports exist, free domain */
			efc_sm_transition(ctx, __efc_domain_wait_shutdown,
					  NULL);
			if (efc_cmd_domain_free(efc, domain))
				efc_log_err(efc, "hw_domain_free() failed\n");
		}
		break;
	}
	case EFC_EVT_DOMAIN_ALLOC_FAIL:
	case EFC_EVT_DOMAIN_ATTACH_FAIL:
		efc_log_err(efc, "[domain] %-20s: failed\n",
			    efc_sm_event_name(evt));
		break;

	default:
		__efc_domain_common_shutdown(__func__, ctx, evt, arg);
	}
}

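/* Copy the saved FLOGI service parameters into the domain DMA buffer and
 * request the domain attach for the given S_ID.
 */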
void
__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id)
{
	memcpy(domain->dma.virt,
	       ((uint8_t *)domain->flogi_service_params) + 4,
	       sizeof(struct fc_els_flogi) - 4);
	(void)efc_sm_post_event(&domain->drvsm, EFC_EVT_DOMAIN_REQ_ATTACH,
				&s_id);
}

void
efc_domain_attach(struct efc_domain *domain, u32 s_id)
{
	__efc_domain_attach_internal(domain, s_id);
}

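/*
 * efc_domain_post_event() - post an event to the domain state machine
 * and free the domain afterwards if a handler requested it.
 */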
int
efc_domain_post_event(struct efc_domain *domain,
		      enum efc_sm_event event, void *arg)
{
	int rc;
	bool req_domain_free;

	rc = efc_sm_post_event(&domain->drvsm, event, arg);

	req_domain_free = domain->req_domain_free;
	domain->req_domain_free = false;

	if (req_domain_free)
		efc_domain_free(domain);

	return rc;
}

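/*
 * Drain the pending-frame list, dispatching each held sequence to the
 * domain, until the list is empty or frames are held again.
 */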
static void
efct_domain_process_pending(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	struct efc_hw_sequence *seq = NULL;
	u32 processed = 0;
	unsigned long flags = 0;

	for (;;) {
		/* need to check for hold frames condition after each frame
		 * processed because any given frame could cause a transition
		 * to a state that holds frames
		 */
		if (efc->hold_frames)
			break;

		/* Get next frame/sequence */
		spin_lock_irqsave(&efc->pend_frames_lock, flags);

		if (!list_empty(&efc->pend_frames)) {
			seq = list_first_entry(&efc->pend_frames,
					       struct efc_hw_sequence, list_entry);
			list_del(&seq->list_entry);
		}

		if (!seq) {
			processed = efc->pend_frames_processed;
			efc->pend_frames_processed = 0;
			spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
			break;
		}
		efc->pend_frames_processed++;

		spin_unlock_irqrestore(&efc->pend_frames_lock, flags);

		/* now dispatch frame(s) to dispatch function */
		if (efc_domain_dispatch_frame(domain, seq))
			efc->tt.hw_seq_free(efc, seq);

		seq = NULL;
	}

	if (processed != 0)
		efc_log_debug(efc, "%u domain frames held and processed\n",
			      processed);
}

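/*
 * efc_dispatch_frame() - entry point for received sequences. Frames are
 * queued on the pending list while frames are held, no domain exists, or
 * earlier frames are still pending; otherwise they are dispatched to the
 * domain directly.
 */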
void
efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq)
{
	struct efc_domain *domain = efc->domain;

	/*
	 * If we are holding frames or the domain is not yet registered or
	 * there's already frames on the pending list,
	 * then add the new frame to pending list
	 */
	if (!domain || efc->hold_frames || !list_empty(&efc->pend_frames)) {
		unsigned long flags = 0;

		spin_lock_irqsave(&efc->pend_frames_lock, flags);
		INIT_LIST_HEAD(&seq->list_entry);
		list_add_tail(&seq->list_entry, &efc->pend_frames);
		spin_unlock_irqrestore(&efc->pend_frames_lock, flags);

		if (domain) {
			/* immediately process pending frames */
			efct_domain_process_pending(domain);
		}
	} else {
		/*
		 * We are not holding frames and pending list is empty,
		 * just process frame. A non-zero return means the frame
		 * was not handled - so cleanup
		 */
		if (efc_domain_dispatch_frame(domain, seq))
			efc->tt.hw_seq_free(efc, seq);
	}
}

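/*
 * efc_domain_dispatch_frame() - route a received sequence to the node
 * matching its D_ID/S_ID, allocating a new node when appropriate.
 * Returns EFC_HW_SEQ_HOLD if the frame was queued on the node's pending
 * list, otherwise EFC_HW_SEQ_FREE.
 */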
int
efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
{
	struct efc_domain *domain = (struct efc_domain *)arg;
	struct efc *efc = domain->efc;
	struct fc_frame_header *hdr;
	struct efc_node *node = NULL;
	struct efc_nport *nport = NULL;
	unsigned long flags = 0;
	u32 s_id, d_id, rc = EFC_HW_SEQ_FREE;

	if (!seq->header || !seq->header->dma.virt || !seq->payload->dma.virt) {
		efc_log_err(efc, "Sequence header or payload is null\n");
		return rc;
	}

	hdr = seq->header->dma.virt;

	/* extract the s_id and d_id */
	s_id = ntoh24(hdr->fh_s_id);
	d_id = ntoh24(hdr->fh_d_id);

	spin_lock_irqsave(&efc->lock, flags);

	nport = efc_nport_find(domain, d_id);
	if (!nport) {
		if (hdr->fh_type == FC_TYPE_FCP) {
			/* Drop frame */
			efc_log_warn(efc, "FCP frame with invalid d_id x%x\n",
				     d_id);
			goto out;
		}

		/* p2p will use this case */
		nport = domain->nport;
		if (!nport || !kref_get_unless_zero(&nport->ref)) {
			efc_log_err(efc, "Physical nport is NULL\n");
			goto out;
		}
	}

	/* Lookup the node given the remote s_id */
	node = efc_node_find(nport, s_id);

	/* If not found, then create a new node */
	if (!node) {
		/*
		 * If this is solicited data or control based on R_CTL and
		 * there is no node context, then we can drop the frame
		 */
		if ((hdr->fh_r_ctl == FC_RCTL_DD_SOL_DATA) ||
		    (hdr->fh_r_ctl == FC_RCTL_DD_SOL_CTL)) {
			efc_log_debug(efc, "sol data/ctrl frame without node\n");
			goto out_release;
		}

		node = efc_node_alloc(nport, s_id, false, false);
		if (!node) {
			efc_log_err(efc, "efc_node_alloc() failed\n");
			goto out_release;
		}
		/* don't send PLOGI on efc_d_init entry */
		efc_node_init_device(node, false);
	}

	if (node->hold_frames || !list_empty(&node->pend_frames)) {
		/* add frame to node's pending list */
		spin_lock(&node->pend_frames_lock);
		INIT_LIST_HEAD(&seq->list_entry);
		list_add_tail(&seq->list_entry, &node->pend_frames);
		spin_unlock(&node->pend_frames_lock);
		rc = EFC_HW_SEQ_HOLD;
		goto out_release;
	}

	/* now dispatch frame to the node frame handler */
	efc_node_dispatch_frame(node, seq);

out_release:
	kref_put(&nport->ref, nport->release);
out:
	spin_unlock_irqrestore(&efc->lock, flags);
	return rc;
}

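/*
 * efc_node_dispatch_frame() - dispatch a sequence to the node-level
 * handlers (ELS, BLS, FCP, CT) based on its R_CTL and type fields.
 */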
void
efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq)
{
	struct fc_frame_header *hdr = seq->header->dma.virt;
	u32 port_id;
	struct efc_node *node = (struct efc_node *)arg;
	struct efc *efc = node->efc;

	port_id = ntoh24(hdr->fh_s_id);

	if (WARN_ON(port_id != node->rnode.fc_id))
		return;

	if ((!(ntoh24(hdr->fh_f_ctl) & FC_FC_END_SEQ)) ||
	    !(ntoh24(hdr->fh_f_ctl) & FC_FC_SEQ_INIT)) {
		node_printf(node,
			    "Drop frame hdr = %08x %08x %08x %08x %08x %08x\n",
			    cpu_to_be32(((u32 *)hdr)[0]),
			    cpu_to_be32(((u32 *)hdr)[1]),
			    cpu_to_be32(((u32 *)hdr)[2]),
			    cpu_to_be32(((u32 *)hdr)[3]),
			    cpu_to_be32(((u32 *)hdr)[4]),
			    cpu_to_be32(((u32 *)hdr)[5]));
		return;
	}

	switch (hdr->fh_r_ctl) {
	case FC_RCTL_ELS_REQ:
	case FC_RCTL_ELS_REP:
		efc_node_recv_els_frame(node, seq);
		break;

	case FC_RCTL_BA_ABTS:
	case FC_RCTL_BA_ACC:
	case FC_RCTL_BA_RJT:
	case FC_RCTL_BA_NOP:
		efc_log_err(efc, "Received ABTS:\n");
		break;

	case FC_RCTL_DD_UNSOL_CMD:
	case FC_RCTL_DD_UNSOL_CTL:
		switch (hdr->fh_type) {
		case FC_TYPE_FCP:
			if ((hdr->fh_r_ctl & 0xf) == FC_RCTL_DD_UNSOL_CMD) {
				if (!node->fcp_enabled) {
					efc_node_recv_fcp_cmd(node, seq);
					break;
				}
				efc_log_err(efc, "Recvd FCP CMD. Drop IO\n");
			} else if ((hdr->fh_r_ctl & 0xf) ==
				   FC_RCTL_DD_SOL_DATA) {
				node_printf(node,
					    "solicited data recvd. Drop IO\n");
			}
			break;

		case FC_TYPE_CT:
			efc_node_recv_ct_frame(node, seq);
			break;
		default:
			break;
		}
		break;
	default:
		efc_log_err(efc, "Unhandled frame rctl: %02x\n", hdr->fh_r_ctl);
	}
}