xref: /titanic_41/usr/src/uts/common/io/bnxe/bnxe_fcoe.c (revision f391a51a4e9639750045473dba1cc2831267c93e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2014 QLogic Corporation
24  * The contents of this file are subject to the terms of the
25  * QLogic End User License (the "License").
26  * You may not use this file except in compliance with the License.
27  *
28  * You can obtain a copy of the License at
29  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
30  * QLogic_End_User_Software_License.txt
31  * See the License for the specific language governing permissions
32  * and limitations under the License.
33  */
34 
35 #include "bnxe.h"
36 
37 
38 #define VERIFY_FCOE_BINDING(pUM)                                  \
39     if (!BNXE_FCOE(pUM))                                          \
40     {                                                             \
41         BnxeLogWarn((pUM), "FCoE not supported on this device!"); \
42         return B_FALSE;                                           \
43     }                                                             \
44     if (!(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE)))                    \
45     {                                                             \
46         BnxeLogWarn((pUM), "FCoE client not bound!");             \
47         return B_FALSE;                                           \
48     }
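
/*
 * VERIFY_FCOE_BINDING() expands to two early-return checks: the calling
 * function bails out with B_FALSE when the device has no FCoE support or
 * when the bnxef client has not yet bound to this instance.  Because the
 * macro returns on the caller's behalf, it is only used at the top of the
 * bnxef entry points below.
 */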
49 
50 
51 void BnxeFcoeFreeResc(um_device_t *   pUM,
52                       BnxeFcoeState * pFcoeState)
53 {
54     BNXE_LOCK_ENTER_OFFLOAD(pUM);
55     lm_fc_del_fcoe_state(&pUM->lm_dev, &pFcoeState->lm_fcoe);
56     BNXE_LOCK_EXIT_OFFLOAD(pUM);
57 
58     lm_fc_free_con_resc(&pUM->lm_dev, &pFcoeState->lm_fcoe);
59 
60     kmem_free(pFcoeState, sizeof(BnxeFcoeState));
61 }
62 
63 
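/*
 * Naming convention used throughout this file: the *Cqe routines normalize a
 * completion (KCQE) coming up from the LM/firmware layer and indicate it to
 * the bound bnxef client via cliIndicateCqes(), while the *Wqe routines take
 * a work request (KWQE) from bnxef and post it down to the LM layer.  Most
 * handlers defer the actual work to the driver work queue via
 * BnxeWorkQueueAdd(); the *Work functions are the taskq bodies.
 */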
64 static boolean_t BnxeFcoeCqeIndicate(um_device_t * pUM,
65                                      void *        pData,
66                                      u32_t         dataLen)
67 {
68     struct fcoe_kcqe * kcqe = (struct fcoe_kcqe *)pData;
69 
70     if (dataLen != (sizeof(*kcqe)))
71     {
72         BnxeLogWarn(pUM, "Invalid FCoE CQE");
73         return B_FALSE;
74     }
75 
76     /* XXX
77      * Need to add a mutex or reference count to ensure that bnxef isn't
78      * unloaded underneath this taskq dispatch routine.
79      */
80 
81     ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
82     pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
83                                    (void **)&kcqe, 1);
84 
85     /* XXX release mutex or decrement reference count */
86 
87     return B_TRUE;
88 }
89 
90 
91 static void BnxeFcoeInitCqeWork(um_device_t * pUM,
92                                 void *        pData,
93                                 u32_t         dataLen)
94 {
95     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
96     {
97         pUM->fcoe.stats.initCqeRxErr++;
98     }
99     else
100     {
101         pUM->fcoe.stats.initCqeRx++;
102     }
103 }
104 
105 
106 boolean_t BnxeFcoeInitCqe(um_device_t *      pUM,
107                           struct fcoe_kcqe * kcqe)
108 {
109     struct fcoe_kcqe tmp_kcqe = {0};
110 
111     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_INIT_FUNC;
112 
113     tmp_kcqe.flags |=
114         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
115 
116     tmp_kcqe.completion_status =
117         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
118                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
119                            FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
120 
121     return BnxeWorkQueueAdd(pUM, BnxeFcoeInitCqeWork,
122                             &tmp_kcqe, sizeof(tmp_kcqe));
123 }
124 
125 
126 static void BnxeFcoeInitWqeWork(um_device_t * pUM,
127                                 void *        pData,
128                                 u32_t         dataLen)
129 {
130     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
131     struct fcoe_kcqe  kcqe  = {0};
132 
133     if (dataLen != (3 * sizeof(*kwqe)))
134     {
135         BnxeLogWarn(pUM, "Invalid FCoE Init WQE");
136         pUM->fcoe.stats.initWqeTxErr++;
137         return;
138     }
139 
140     if (kwqe[1].init2.hsi_major_version != FCOE_HSI_MAJOR_VERSION)
141     {
142         BnxeLogWarn(pUM, "ERROR: Invalid FCoE HSI major version (L5=%d vs FW=%d)",
143                     kwqe[1].init2.hsi_major_version,
144                     FCOE_HSI_MAJOR_VERSION);
145         kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION;
146         goto BnxeFcoeInitWqeWork_error;
147     }
148 
149     if (lm_fc_init(&pUM->lm_dev,
150                    &kwqe[0].init1,
151                    &kwqe[1].init2,
152                    &kwqe[2].init3) != LM_STATUS_SUCCESS)
153     {
154         BnxeLogWarn(pUM, "Failed to post FCoE Init WQE");
155         kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
156         goto BnxeFcoeInitWqeWork_error;
157     }
158 
159     pUM->fcoe.stats.initWqeTx++;
160 
161     return;
162 
163 BnxeFcoeInitWqeWork_error:
164 
165     pUM->fcoe.stats.initWqeTxErr++;
166 
167     kcqe.op_code = FCOE_KCQE_OPCODE_INIT_FUNC;
168     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
169     kcqe.completion_status = mm_cpu_to_le32(kcqe.completion_status);
170     kcqe.fcoe_conn_id = kwqe[1].conn_offload1.fcoe_conn_id;
171 
172     /* call here directly (for error case) */
173 
174     /* XXX
175      * Need to add a mutex or reference count to ensure that bnxef isn't
176      * unloaded underneath this taskq dispatch routine.
177      */
178 
179     {
180         struct fcoe_kcqe * pKcqe = &kcqe;
181         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
182         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
183                                        (void **)&pKcqe, 1);
184     }
185 
186     /* XXX release mutex or decrement reference count */
187 }
188 
189 
190 static boolean_t BnxeFcoeInitWqe(um_device_t *      pUM,
191                                  union fcoe_kwqe ** kwqes)
192 {
193     union fcoe_kwqe wqe[3];
194 
195     wqe[0] = *(kwqes[0]);
196     wqe[1] = *(kwqes[1]);
197     wqe[2] = *(kwqes[2]);
198 
199     return BnxeWorkQueueAdd(pUM, BnxeFcoeInitWqeWork, wqe, sizeof(wqe));
200 }
201 
202 
203 static void BnxeFcoeOffloadConnCqeWork(um_device_t * pUM,
204                                        void *        pData,
205                                        u32_t         dataLen)
206 {
207     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
208     {
209         pUM->fcoe.stats.offloadConnCqeRxErr++;
210     }
211     else
212     {
213         pUM->fcoe.stats.offloadConnCqeRx++;
214     }
215 }
216 
217 
218 boolean_t BnxeFcoeOffloadConnCqe(um_device_t *      pUM,
219                                  BnxeFcoeState *    pFcoeState,
220                                  struct fcoe_kcqe * kcqe)
221 {
222     struct fcoe_kcqe tmp_kcqe = {0};
223 
224     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
225 
226     tmp_kcqe.flags |=
227         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
228 
229     tmp_kcqe.fcoe_conn_context_id = kcqe->fcoe_conn_context_id;
230     tmp_kcqe.fcoe_conn_id         = kcqe->fcoe_conn_id;
231 
232     tmp_kcqe.completion_status =
233         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
234                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
235                            FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE);
236 
237     if (pFcoeState != NULL)
238     {
239         pFcoeState->lm_fcoe.hdr.status =
240             (mm_le32_to_cpu(kcqe->completion_status) == 0) ?
241                 STATE_STATUS_NORMAL :
242                 STATE_STATUS_INIT_OFFLOAD_ERR;
243     }
244 
245     return BnxeWorkQueueAdd(pUM, BnxeFcoeOffloadConnCqeWork,
246                             &tmp_kcqe, sizeof(tmp_kcqe));
247 }
248 
249 
250 static void BnxeFcoeOffloadConnWqeWork(um_device_t * pUM,
251                                        void *        pData,
252                                        u32_t         dataLen)
253 {
254     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
255     struct fcoe_kcqe  kcqe = {0};
256     BnxeFcoeState *   pFcoeState;
257     lm_status_t       rc;
258 
259     if (dataLen != (4 * sizeof(*kwqe)))
260     {
261         BnxeLogWarn(pUM, "Invalid FCoE Offload Conn WQE");
262         pUM->fcoe.stats.offloadConnWqeTxErr++;
263         return;
264     }
265 
266     if ((pFcoeState = kmem_zalloc(sizeof(BnxeFcoeState),
267                                   KM_NOSLEEP)) == NULL)
268     {
269         BnxeLogWarn(pUM, "Failed to allocate memory for FCoE state");
270         goto BnxeFcoeOffloadConnWqeWork_error;
271     }
272 
273     BNXE_LOCK_ENTER_OFFLOAD(pUM);
274     rc = lm_fc_init_fcoe_state(&pUM->lm_dev,
275                                &pUM->lm_dev.fcoe_info.run_time.state_blk,
276                                &pFcoeState->lm_fcoe);
277     BNXE_LOCK_EXIT_OFFLOAD(pUM);
278 
279     if (rc != LM_STATUS_SUCCESS)
280     {
281         kmem_free(pFcoeState, sizeof(BnxeFcoeState));
282 
283         BnxeLogWarn(pUM, "Failed to initialize FCoE state");
284         goto BnxeFcoeOffloadConnWqeWork_error;
285     }
286 
287     pFcoeState->lm_fcoe.ofld1 = kwqe[0].conn_offload1;
288     pFcoeState->lm_fcoe.ofld2 = kwqe[1].conn_offload2;
289     pFcoeState->lm_fcoe.ofld3 = kwqe[2].conn_offload3;
290     pFcoeState->lm_fcoe.ofld4 = kwqe[3].conn_offload4;
291 
292     rc = lm_fc_alloc_con_resc(&pUM->lm_dev, &pFcoeState->lm_fcoe);
293 
294     if (rc == LM_STATUS_SUCCESS)
295     {
296         lm_fc_init_fcoe_context(&pUM->lm_dev, &pFcoeState->lm_fcoe);
297         lm_fc_post_offload_ramrod(&pUM->lm_dev, &pFcoeState->lm_fcoe);
298     }
299     else if (rc == LM_STATUS_PENDING)
300     {
301         /*
302          * The CID is pending - it's too soon to initialize the context; it will
303          * be initialized and completed from the recycle CID callback.
304          */
305         BnxeLogInfo(pUM, "lm_fc_alloc_con_resc returned pending?");
306     }
307     else
308     {
309         BnxeFcoeFreeResc(pUM, pFcoeState);
310         BnxeLogInfo(pUM, "lm_fc_alloc_con_resc failed (%d)", rc);
311         goto BnxeFcoeOffloadConnWqeWork_error;
312     }
313 
314     pUM->fcoe.stats.offloadConnWqeTx++;
315 
316     return;
317 
318 BnxeFcoeOffloadConnWqeWork_error:
319 
320     pUM->fcoe.stats.offloadConnWqeTxErr++;
321 
322     kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
323     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
324     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE);
325     kcqe.fcoe_conn_id = kwqe[0].conn_offload1.fcoe_conn_id;
326 
327     /* call here directly (for error case) */
328 
329     /* XXX
330      * Need to add a mutex or reference count to ensure that bnxef isn't
331      * unloaded underneath this taskq dispatch routine.
332      */
333 
334     {
335         struct fcoe_kcqe * pKcqe = &kcqe;
336         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
337         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
338                                        (void **)&pKcqe, 1);
339     }
340 
341     /* XXX release mutex or decrement reference count */
342 }
343 
344 
345 static boolean_t BnxeFcoeOffloadConnWqe(um_device_t *      pUM,
346                                         union fcoe_kwqe ** kwqes)
347 {
348     union fcoe_kwqe wqe[4];
349 
350     wqe[0] = *(kwqes[0]);
351     wqe[1] = *(kwqes[1]);
352     wqe[2] = *(kwqes[2]);
353     wqe[3] = *(kwqes[3]);
354 
355     return BnxeWorkQueueAdd(pUM, BnxeFcoeOffloadConnWqeWork,
356                             wqe, sizeof(wqe));
357 }
358 
359 
360 static void BnxeFcoeEnableConnCqeWork(um_device_t * pUM,
361                                       void *        pData,
362                                       u32_t         dataLen)
363 {
364     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
365     {
366         pUM->fcoe.stats.enableConnCqeRxErr++;
367     }
368     else
369     {
370         pUM->fcoe.stats.enableConnCqeRx++;
371     }
372 }
373 
374 
375 boolean_t BnxeFcoeEnableConnCqe(um_device_t *      pUM,
376                                 BnxeFcoeState *    pFcoeState,
377                                 struct fcoe_kcqe * kcqe)
378 {
379     struct fcoe_kcqe tmp_kcqe = {0};
380 
381     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_ENABLE_CONN;
382 
383     tmp_kcqe.flags |=
384         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
385 
386     tmp_kcqe.fcoe_conn_context_id = kcqe->fcoe_conn_context_id;
387     tmp_kcqe.fcoe_conn_id         = kcqe->fcoe_conn_id;
388 
389     tmp_kcqe.completion_status =
390         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
391                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
392                            FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE);
393 
394     if (pFcoeState != NULL)
395     {
396         pFcoeState->lm_fcoe.hdr.status =
397             (mm_le32_to_cpu(kcqe->completion_status) == 0) ?
398                 STATE_STATUS_NORMAL :
399                 STATE_STATUS_INIT_OFFLOAD_ERR;
400     }
401 
402     return BnxeWorkQueueAdd(pUM, BnxeFcoeEnableConnCqeWork,
403                             &tmp_kcqe, sizeof(tmp_kcqe));
404 }
405 
406 
407 static void BnxeFcoeEnableConnWqeWork(um_device_t * pUM,
408                                       void *        pData,
409                                       u32_t         dataLen)
410 {
411     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
412     struct fcoe_kcqe  kcqe = {0};
413     BnxeFcoeState *   pFcoeState;
414 
415     if (dataLen != sizeof(*kwqe))
416     {
417         BnxeLogWarn(pUM, "Invalid FCoE Enable Conn WQE");
418         pUM->fcoe.stats.enableConnWqeTxErr++;
419         return;
420     }
421 
422     pFcoeState =
423         lm_cid_cookie(&pUM->lm_dev,
424                       FCOE_CONNECTION_TYPE,
425                       SW_CID(mm_le32_to_cpu(kwqe->conn_enable_disable.context_id)));
426 
427     if (pFcoeState == NULL)
428     {
429         goto BnxeFcoeEnableConnWqeWork_error;
430     }
431 
432     if (lm_fc_post_enable_ramrod(&pUM->lm_dev,
433                                  &pFcoeState->lm_fcoe,
434                                  &kwqe->conn_enable_disable) !=
435         LM_STATUS_SUCCESS)
436     {
437         goto BnxeFcoeEnableConnWqeWork_error;
438     }
439 
440     pUM->fcoe.stats.enableConnWqeTx++;
441 
442     return;
443 
444 BnxeFcoeEnableConnWqeWork_error:
445 
446     pUM->fcoe.stats.enableConnWqeTxErr++;
447 
448     BnxeLogWarn(pUM, "Failed to post FCoE Enable Conn WQE");
449 
450     kcqe.op_code = FCOE_KCQE_OPCODE_ENABLE_CONN;
451     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
452     kcqe.fcoe_conn_context_id = kwqe->conn_enable_disable.context_id;
453     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
454 
455     /* call here directly (for error case) */
456 
457     /* XXX
458      * Need to add a mutex or reference count to ensure that bnxef isn't
459      * unloaded underneath this taskq dispatch routine.
460      */
461 
462     {
463         struct fcoe_kcqe * pKcqe = &kcqe;
464         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
465         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
466                                        (void **)&pKcqe, 1);
467     }
468 
469     /* XXX release mutex or decrement reference count */
470 }
471 
472 
473 static boolean_t BnxeFcoeEnableConnWqe(um_device_t *      pUM,
474                                        union fcoe_kwqe ** kwqes)
475 {
476     return BnxeWorkQueueAdd(pUM, BnxeFcoeEnableConnWqeWork,
477                             kwqes[0], sizeof(*(kwqes[0])));
478 }
479 
480 
481 static void BnxeFcoeDisableConnCqeWork(um_device_t * pUM,
482                                        void *        pData,
483                                        u32_t         dataLen)
484 {
485     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
486     {
487         pUM->fcoe.stats.disableConnCqeRxErr++;
488     }
489     else
490     {
491         pUM->fcoe.stats.disableConnCqeRx++;
492     }
493 }
494 
495 
496 boolean_t BnxeFcoeDisableConnCqe(um_device_t *      pUM,
497                                  BnxeFcoeState *    pFcoeState,
498                                  struct fcoe_kcqe * kcqe)
499 {
500     struct fcoe_kcqe tmp_kcqe = {0};
501 
502     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_DISABLE_CONN;
503 
504     tmp_kcqe.flags |=
505         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
506 
507     tmp_kcqe.fcoe_conn_context_id = kcqe->fcoe_conn_context_id;
508     tmp_kcqe.fcoe_conn_id         = kcqe->fcoe_conn_id;
509 
510     tmp_kcqe.completion_status =
511         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
512                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
513                            FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
514 
515     if (pFcoeState != NULL)
516     {
517         pFcoeState->lm_fcoe.hdr.status =
518             (mm_le32_to_cpu(kcqe->completion_status) == 0) ?
519                 STATE_STATUS_NORMAL :
520                 STATE_STATUS_INIT_OFFLOAD_ERR;
521     }
522 
523     return BnxeWorkQueueAdd(pUM, BnxeFcoeDisableConnCqeWork,
524                             &tmp_kcqe, sizeof(tmp_kcqe));
525 }
526 
527 
528 static void BnxeFcoeDisableConnWqeWork(um_device_t * pUM,
529                                        void *        pData,
530                                        u32_t         dataLen)
531 {
532     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
533     struct fcoe_kcqe  kcqe = {0};
534     BnxeFcoeState *   pFcoeState;
535 
536     if (dataLen != sizeof(*kwqe))
537     {
538         BnxeLogWarn(pUM, "Invalid FCoE Disable Conn WQE");
539         pUM->fcoe.stats.disableConnWqeTxErr++;
540         return;
541     }
542 
543     pFcoeState =
544         lm_cid_cookie(&pUM->lm_dev,
545                       FCOE_CONNECTION_TYPE,
546                       SW_CID(mm_le32_to_cpu(kwqe->conn_enable_disable.context_id)));
547 
548     if (pFcoeState == NULL)
549     {
550         goto BnxeFcoeDisableConnWqeWork_error;
551     }
552 
553     if (lm_fc_post_disable_ramrod(&pUM->lm_dev,
554                                   &pFcoeState->lm_fcoe,
555                                   &kwqe->conn_enable_disable) !=
556         LM_STATUS_SUCCESS)
557     {
558         goto BnxeFcoeDisableConnWqeWork_error;
559     }
560 
561     pUM->fcoe.stats.disableConnWqeTx++;
562 
563     return;
564 
565 BnxeFcoeDisableConnWqeWork_error:
566 
567     pUM->fcoe.stats.disableConnWqeTxErr++;
568 
569     BnxeLogWarn(pUM, "Failed to post FCoE Disable Conn WQE");
570 
571     kcqe.op_code = FCOE_KCQE_OPCODE_DISABLE_CONN;
572     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
573     kcqe.fcoe_conn_context_id = kwqe->conn_enable_disable.context_id;
574     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
575 
576     /* call here directly (for error case) */
577 
578     /* XXX
579      * Need to add a mutex or reference count to ensure that bnxef isn't
580      * unloaded underneath this taskq dispatch routine.
581      */
582 
583     {
584         struct fcoe_kcqe * pKcqe = &kcqe;
585         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
586         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
587                                        (void **)&pKcqe, 1);
588     }
589 
590     /* XXX release mutex or decrement reference count */
591 }
592 
593 
594 static boolean_t BnxeFcoeDisableConnWqe(um_device_t *      pUM,
595                                        union fcoe_kwqe ** kwqes)
596 {
597     return BnxeWorkQueueAdd(pUM, BnxeFcoeDisableConnWqeWork,
598                             kwqes[0], sizeof(*(kwqes[0])));
599 }
600 
601 
602 static void BnxeFcoeDestroyConnCqeWork(um_device_t * pUM,
603                                        void *        pData,
604                                        u32_t         dataLen)
605 {
606     struct fcoe_kcqe * kcqe = (struct fcoe_kcqe *)pData;
607     BnxeFcoeState *    pFcoeState;
608 
609     if (dataLen != (sizeof(*kcqe)))
610     {
611         BnxeLogWarn(pUM, "Invalid FCoE Destroy Conn CQE");
612         pUM->fcoe.stats.destroyConnCqeRxErr++;
613         return;
614     }
615 
616     pFcoeState =
617         lm_cid_cookie(&pUM->lm_dev,
618                       FCOE_CONNECTION_TYPE,
619                       SW_CID(mm_le32_to_cpu(kcqe->fcoe_conn_context_id)));
620 
621     BnxeFcoeFreeResc(pUM, pFcoeState);
622 
623     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
624     {
625         pUM->fcoe.stats.destroyConnCqeRxErr++;
626     }
627     else
628     {
629         pUM->fcoe.stats.destroyConnCqeRx++;
630     }
631 }
632 
633 
634 boolean_t BnxeFcoeDestroyConnCqe(um_device_t *      pUM,
635                                  BnxeFcoeState *    pFcoeState,
636                                  struct fcoe_kcqe * kcqe)
637 {
638     struct fcoe_kcqe tmp_kcqe = {0};
639 
640     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
641 
642     tmp_kcqe.flags |=
643         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
644 
645     tmp_kcqe.fcoe_conn_context_id = kcqe->fcoe_conn_context_id;
646     tmp_kcqe.fcoe_conn_id         = kcqe->fcoe_conn_id;
647 
648     tmp_kcqe.completion_status =
649         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
650                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
651                            FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
652 
653     if (pFcoeState != NULL)
654     {
655         pFcoeState->lm_fcoe.hdr.status =
656             (mm_le32_to_cpu(kcqe->completion_status) == 0) ?
657                 STATE_STATUS_NORMAL :
658                 STATE_STATUS_INIT_OFFLOAD_ERR;
659     }
660 
661     return BnxeWorkQueueAdd(pUM, BnxeFcoeDestroyConnCqeWork,
662                             &tmp_kcqe, sizeof(tmp_kcqe));
663 }
664 
665 
666 static void BnxeFcoeDestroyConnWqeWork(um_device_t * pUM,
667                                        void *        pData,
668                                        u32_t         dataLen)
669 {
670     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
671     struct fcoe_kcqe  kcqe = {0};
672     BnxeFcoeState *   pFcoeState;
673 
674     if (dataLen != sizeof(*kwqe))
675     {
676         BnxeLogWarn(pUM, "Invalid FCoE Destroy Conn WQE");
677         pUM->fcoe.stats.destroyConnWqeTxErr++;
678         return;
679     }
680 
681     pFcoeState =
682         lm_cid_cookie(&pUM->lm_dev,
683                       FCOE_CONNECTION_TYPE,
684                       SW_CID(mm_le32_to_cpu(kwqe->conn_destroy.context_id)));
685 
686     if (pFcoeState == NULL)
687     {
688         goto BnxeFcoeDestroyConnWqeWork_error;
689     }
690 
691     if (lm_fc_post_terminate_ramrod(&pUM->lm_dev,
692                                     &pFcoeState->lm_fcoe) !=
693         LM_STATUS_SUCCESS)
694     {
695         goto BnxeFcoeDestroyConnWqeWork_error;
696     }
697 
698     pUM->fcoe.stats.destroyConnWqeTx++;
699 
700     return;
701 
702 BnxeFcoeDestroyConnWqeWork_error:
703 
704     pUM->fcoe.stats.destroyConnWqeTxErr++;
705 
706     BnxeLogWarn(pUM, "Failed to post FCoE Destroy Conn WQE");
707 
708     kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
709     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
710     kcqe.fcoe_conn_context_id = kwqe->conn_destroy.context_id;
711     kcqe.fcoe_conn_id         = kwqe->conn_destroy.conn_id;
712     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
713 
714     /* call here directly (for error case) */
715 
716     /* XXX
717      * Need to add a mutex or reference count to ensure that bnxef isn't
718      * unloaded underneath this taskq dispatch routine.
719      */
720 
721     {
722         struct fcoe_kcqe * pKcqe = &kcqe;
723         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
724         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
725                                        (void **)&pKcqe, 1);
726     }
727 
728     /* XXX release mutex or decrement reference count */
729 }
730 
731 
732 static boolean_t BnxeFcoeDestroyConnWqe(um_device_t *      pUM,
733                                        union fcoe_kwqe ** kwqes)
734 {
735     return BnxeWorkQueueAdd(pUM, BnxeFcoeDestroyConnWqeWork,
736                             kwqes[0], sizeof(*(kwqes[0])));
737 }
738 
739 
740 static void BnxeFcoeDestroyCqeWork(um_device_t * pUM,
741                                    void *        pData,
742                                    u32_t         dataLen)
743 {
744     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
745     {
746         pUM->fcoe.stats.destroyCqeRxErr++;
747     }
748     else
749     {
750         pUM->fcoe.stats.destroyCqeRx++;
751     }
752 }
753 
754 
755 boolean_t BnxeFcoeDestroyCqe(um_device_t *      pUM,
756                              struct fcoe_kcqe * kcqe)
757 {
758     struct fcoe_kcqe tmp_kcqe = {0};
759 
760     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_FUNC;
761 
762     tmp_kcqe.flags |=
763         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
764 
765     tmp_kcqe.completion_status =
766         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
767                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
768                            FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
769 
770     return BnxeWorkQueueAdd(pUM, BnxeFcoeDestroyCqeWork,
771                             &tmp_kcqe, sizeof(tmp_kcqe));
772 }
773 
774 
775 static void BnxeFcoeDestroyWqeWork(um_device_t * pUM,
776                                    void *        pData,
777                                    u32_t         dataLen)
778 {
779     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
780     struct fcoe_kcqe  kcqe = {0};
781     BnxeFcoeState *   pFcoeState;
782 
783     if (dataLen != sizeof(*kwqe))
784     {
785         BnxeLogWarn(pUM, "Invalid FCoE Destroy WQE");
786         pUM->fcoe.stats.destroyWqeTxErr++;
787         return;
788     }
789 
790     if (lm_fc_post_destroy_ramrod(&pUM->lm_dev) == LM_STATUS_SUCCESS)
791     {
792         pUM->fcoe.stats.destroyWqeTx++;
793         return;
794     }
795 
796     pUM->fcoe.stats.destroyWqeTxErr++;
797 
798     BnxeLogWarn(pUM, "Failed to post FCoE Destroy WQE");
799 
800     kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_FUNC;
801     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
802     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
803 
804     /* call here directly (for error case) */
805 
806     /* XXX
807      * Need to add a mutex or reference count to ensure that bnxef isn't
808      * unloaded underneath this taskq dispatch routine.
809      */
810 
811     {
812         struct fcoe_kcqe * pKcqe = &kcqe;
813         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
814         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
815                                        (void **)&pKcqe, 1);
816     }
817 
818     /* XXX release mutex or decrement reference count */
819 }
820 
821 
822 static boolean_t BnxeFcoeDestroyWqe(um_device_t *      pUM,
823                                     union fcoe_kwqe ** kwqes)
824 {
825     return BnxeWorkQueueAdd(pUM, BnxeFcoeDestroyWqeWork,
826                             kwqes[0], sizeof(*(kwqes[0])));
827 }
828 
829 
830 static void BnxeFcoeStatCqeWork(um_device_t * pUM,
831                                 void *        pData,
832                                 u32_t         dataLen)
833 {
834     if (!BnxeFcoeCqeIndicate(pUM, pData, dataLen))
835     {
836         pUM->fcoe.stats.statCqeRxErr++;
837     }
838     else
839     {
840         pUM->fcoe.stats.statCqeRx++;
841     }
842 }
843 
844 
845 boolean_t BnxeFcoeStatCqe(um_device_t *      pUM,
846                           struct fcoe_kcqe * kcqe)
847 {
848     struct fcoe_kcqe tmp_kcqe = {0};
849 
850     tmp_kcqe.op_code = FCOE_KCQE_OPCODE_STAT_FUNC;
851 
852     tmp_kcqe.flags |=
853         (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
854 
855     tmp_kcqe.completion_status =
856         mm_cpu_to_le32((mm_le32_to_cpu(kcqe->completion_status) == 0) ?
857                            FCOE_KCQE_COMPLETION_STATUS_SUCCESS :
858                            FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
859 
860     return BnxeWorkQueueAdd(pUM, BnxeFcoeStatCqeWork,
861                             &tmp_kcqe, sizeof(tmp_kcqe));
862 }
863 
864 
865 static void BnxeFcoeStatWqeWork(um_device_t * pUM,
866                                 void *        pData,
867                                 u32_t         dataLen)
868 {
869     union fcoe_kwqe * kwqe = (union fcoe_kwqe *)pData;
870     struct fcoe_kcqe  kcqe = {0};
871 
872     if (dataLen != sizeof(*kwqe))
873     {
874         BnxeLogWarn(pUM, "Invalid FCoE Stat WQE");
875         pUM->fcoe.stats.statWqeTxErr++;
876         return;
877     }
878 
879     if (lm_fc_post_stat_ramrod(&pUM->lm_dev,
880                                &kwqe->statistics) == LM_STATUS_SUCCESS)
881     {
882         pUM->fcoe.stats.statWqeTx++;
883         return;
884     }
885 
886     pUM->fcoe.stats.statWqeTxErr++;
887 
888     BnxeLogWarn(pUM, "Failed to post FCoE Stat WQE");
889 
890     kcqe.op_code = FCOE_KCQE_OPCODE_STAT_FUNC;
891     kcqe.flags |= (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
892     kcqe.completion_status = mm_cpu_to_le32(FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR);
893 
894     /* call here directly (for error case) */
895 
896     /* XXX
897      * Need to add a mutex or reference count to ensure that bnxef isn't
898      * unloaded underneath this taskq dispatch routine.
899      */
900 
901     {
902         struct fcoe_kcqe * pKcqe = &kcqe;
903         ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
904         pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
905                                        (void **)&pKcqe, 1);
906     }
907 
908     /* XXX release mutex or decrement reference count */
909 }
910 
911 
912 static boolean_t BnxeFcoeStatWqe(um_device_t *      pUM,
913                                  union fcoe_kwqe ** kwqes)
914 {
915     return BnxeWorkQueueAdd(pUM, BnxeFcoeStatWqeWork,
916                             kwqes[0], sizeof(*(kwqes[0])));
917 }
918 
919 
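/* Upper bound on the number of KCQEs indicated to bnxef in one upcall. */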
920 #define KCQE_LIMIT 64
921 
922 static void BnxeFcoeCompRequestCqeWork(um_device_t * pUM,
923                                        void *        pData,
924                                        u32_t         dataLen)
925 {
926     struct fcoe_kcqe * kcqe_arr = (struct fcoe_kcqe *)pData;
927     struct fcoe_kcqe * kcqes[KCQE_LIMIT];
928     u32_t              num_kcqes;
929     int i;
930 
931     if ((dataLen % (sizeof(*kcqe_arr))) != 0)
932     {
933         BnxeLogWarn(pUM, "Invalid FCoE Comp Request CQE array");
934         pUM->fcoe.stats.compRequestCqeRxErr++;
935         return;
936     }
937 
938     num_kcqes = (dataLen / (sizeof(*kcqe_arr)));
939 
940     /* init the kcqe pointer array */
941 
942     for (i = 0; i < num_kcqes; i++)
943     {
944         kcqes[i] = &kcqe_arr[i];
945     }
946 
947     ASSERT(CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE));
948 
949     if (!pUM->fcoe.bind.cliIndicateCqes(pUM->fcoe.pDev,
950                                         (void **)kcqes,
951                                         num_kcqes))
952     {
953         pUM->fcoe.stats.compRequestCqeRxErr++;
954     }
955     else
956     {
957         pUM->fcoe.stats.compRequestCqeRx += num_kcqes;
958     }
959 }
960 
961 
962 boolean_t BnxeFcoeCompRequestCqe(um_device_t *      pUM,
963                                  struct fcoe_kcqe * kcqes,
964                                  u32_t              num_kcqes)
965 {
966     u32_t kcqesIdx = 0;
967     u32_t kcqesLimit = 0;
968     u32_t numUp;
969 
970     /* Send up KCQE_LIMIT kcqes at a time... */
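    /*
     * Example: with num_kcqes = 150 the loop below indicates three batches
     * of 64, 64, and 22 KCQEs.  Note the work-queue path is compiled out
     * (#if 0), so the indication is made directly from this context.
     */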
971 
972     while (kcqesIdx < num_kcqes)
973     {
974         if (num_kcqes - kcqesIdx > KCQE_LIMIT)
975         {
976             kcqesLimit += KCQE_LIMIT;
977         }
978         else
979         {
980             kcqesLimit = num_kcqes;
981         }
982 
983         numUp = (kcqesLimit % KCQE_LIMIT == 0) ? KCQE_LIMIT :
984                                                  (kcqesLimit % KCQE_LIMIT);
985 
986 #if 0
987         if (!BnxeWorkQueueAdd(pUM, BnxeFcoeCompRequestCqeWork,
988                               kcqes + kcqesIdx,
989                               (sizeof(struct fcoe_kcqe) * numUp)))
990         {
991             return B_FALSE;
992         }
993 #else
994         BnxeFcoeCompRequestCqeWork(pUM,
995                                    kcqes + kcqesIdx,
996                                    (sizeof(struct fcoe_kcqe) * numUp));
997 #endif
998 
999         kcqesIdx += (kcqesLimit - kcqesIdx);
1000     }
1001 
1002     return B_TRUE;
1003 }
1004 
1005 
1006 boolean_t BnxeFcoePrvCtl(dev_info_t * pDev,
1007                          int          cmd,
1008                          void *       pData,
1009                          int          dataLen)
1010 {
1011     um_device_t *  pUM = (um_device_t *)ddi_get_driver_private(pDev);
1012     BnxeFcoeInfo * pFcoeInfo;
1013     int rc, i;
1014 
1015     /* sanity check */
1016     if (pUM == NULL || pUM->pDev != pDev)
1017     {
1018         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1019         return B_FALSE;
1020     }
1021 
1022     BnxeLogDbg(pUM, "*** %s ***", __func__);
1023 
1024     VERIFY_FCOE_BINDING(pUM);
1025 
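    /*
     * Every command below validates its buffer pointer and length before
     * touching pData; on any mismatch it warns and returns B_FALSE without
     * partially servicing the request.
     */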
1026     switch (cmd)
1027     {
1028     case PRV_CTL_GET_MAC_ADDR:
1029 
1030         if (dataLen < ETHERNET_ADDRESS_SIZE)
1031         {
1032             BnxeLogWarn(pUM, "Invalid MAC Address buffer length for get (%d)",
1033                         dataLen);
1034             return B_FALSE;
1035         }
1036 
1037         if (!pData)
1038         {
1039             BnxeLogWarn(pUM, "NULL MAC Address buffer for get");
1040             return B_FALSE;
1041         }
1042 
1043         COPY_ETH_ADDRESS(pUM->lm_dev.hw_info.fcoe_mac_addr, pData);
1044 
1045         return B_TRUE;
1046 
1047     case PRV_CTL_SET_MAC_ADDR:
1048 
1049         if (dataLen < ETHERNET_ADDRESS_SIZE)
1050         {
1051             BnxeLogWarn(pUM, "Invalid MAC Address length for set (%d)",
1052                         dataLen);
1053             return B_FALSE;
1054         }
1055 
1056         if (!pData)
1057         {
1058             BnxeLogWarn(pUM, "NULL MAC Address buffer for set");
1059             return B_FALSE;
1060         }
1061 
1062         /* Validate MAC address */
1063         if (IS_ETH_MULTICAST(pData))
1064         {
1065             BnxeLogWarn(pUM, "Cannot program a mcast/bcast address as a MAC address.");
1066             return B_FALSE;
1067         }
1068 
1069         BNXE_LOCK_ENTER_HWINIT(pUM);
1070 
1071         /* XXX wrong? (overwriting fcoe hw programmed address!) */
1072         COPY_ETH_ADDRESS(pData, pUM->lm_dev.hw_info.fcoe_mac_addr);
1073 
1074         rc = BnxeMacAddress(pUM, LM_CLI_IDX_FCOE, B_TRUE,
1075                             pUM->lm_dev.hw_info.fcoe_mac_addr);
1076 
1077         BNXE_LOCK_EXIT_HWINIT(pUM);
1078 
1079         return (rc < 0) ? B_FALSE : B_TRUE;
1080 
1081     case PRV_CTL_QUERY_PARAMS:
1082 
1083         if (dataLen != sizeof(BnxeFcoeInfo))
1084         {
1085             BnxeLogWarn(pUM, "Invalid query buffer for FCoE (%d)",
1086                         dataLen);
1087             return B_FALSE;
1088         }
1089 
1090         if (!pData)
1091         {
1092             BnxeLogWarn(pUM, "Invalid query buffer for FCoE");
1093             return B_FALSE;
1094         }
1095 
1096         pFcoeInfo = (BnxeFcoeInfo *)pData;
1097 
1098         pFcoeInfo->flags = 0;
1099 
1100         /*
1101          * Always set the FORCE_LOAD flag, which tells bnxef to perform any
1102          * necessary delays needed when bringing up targets. This allows ample
1103          * time for bnxef to come up and finish for FCoE boot. If we don't
1104          * force a delay then there is a race condition and the kernel won't
1105          * find the root disk.
1106          */
1107         pFcoeInfo->flags |= FCOE_INFO_FLAG_FORCE_LOAD;
1108 
1109         switch (pUM->lm_dev.params.mf_mode)
1110         {
1111         case SINGLE_FUNCTION:
1112             pFcoeInfo->flags |= FCOE_INFO_FLAG_MF_MODE_SF;
1113             break;
1114         case MULTI_FUNCTION_SD:
1115             pFcoeInfo->flags |= FCOE_INFO_FLAG_MF_MODE_SD;
1116             break;
1117         case MULTI_FUNCTION_SI:
1118             pFcoeInfo->flags |= FCOE_INFO_FLAG_MF_MODE_SI;
1119             break;
1120         case MULTI_FUNCTION_AFEX:
1121             pFcoeInfo->flags |= FCOE_INFO_FLAG_MF_MODE_AFEX;
1122             break;
1123         default:
1124             break;
1125         }
1126 
1127         pFcoeInfo->max_fcoe_conn      = pUM->lm_dev.params.max_func_fcoe_cons;
1128         pFcoeInfo->max_fcoe_exchanges = pUM->lm_dev.params.max_fcoe_task;
1129 
1130         memcpy(&pFcoeInfo->wwn, &pUM->fcoe.wwn, sizeof(BnxeWwnInfo));
1131 
1132         return B_TRUE;
1133 
1134     case PRV_CTL_DISABLE_INTR:
1135 
1136         BnxeIntrIguSbDisable(pUM, FCOE_CID(&pUM->lm_dev), B_FALSE);
1137         return B_TRUE;
1138 
1139     case PRV_CTL_ENABLE_INTR:
1140 
1141         BnxeIntrIguSbEnable(pUM, FCOE_CID(&pUM->lm_dev), B_FALSE);
1142         return B_TRUE;
1143 
1144     case PRV_CTL_MBA_BOOT:
1145 
1146         if (dataLen != sizeof(boolean_t))
1147         {
1148             BnxeLogWarn(pUM, "Invalid MBA boot check buffer for FCoE (%d)",
1149                         dataLen);
1150             return B_FALSE;
1151         }
1152 
1153         if (!pData)
1154         {
1155             BnxeLogWarn(pUM, "Invalid MBA boot check buffer for FCoE");
1156             return B_FALSE;
1157         }
1158 
1159         *((boolean_t *)pData) =
1160             (pUM->iscsiInfo.signature != 0) ? B_TRUE : B_FALSE;
1161 
1162         return B_TRUE;
1163 
1164     case PRV_CTL_LINK_STATE:
1165 
1166         if (dataLen != sizeof(boolean_t))
1167         {
1168             BnxeLogWarn(pUM, "Invalid link state buffer for FCoE (%d)",
1169                         dataLen);
1170             return B_FALSE;
1171         }
1172 
1173         if (!pData)
1174         {
1175             BnxeLogWarn(pUM, "Invalid link state buffer for FCoE");
1176             return B_FALSE;
1177         }
1178 
1179         *((boolean_t *)pData) =
1180             (pUM->devParams.lastIndLink == LM_STATUS_LINK_ACTIVE) ?
1181                 B_TRUE : B_FALSE;
1182 
1183         return B_TRUE;
1184 
1185     case PRV_CTL_BOARD_TYPE:
1186 
1187         if (!pData || (dataLen <= 0))
1188         {
1189             BnxeLogWarn(pUM, "Invalid board type buffer for FCoE");
1190             return B_FALSE;
1191         }
1192 
1193         snprintf((char *)pData, dataLen, "%s", pUM->chipName);
1194 
1195         return B_TRUE;
1196 
1197     case PRV_CTL_BOARD_SERNUM:
1198 
1199         if (!pData || (dataLen <= 0))
1200         {
1201             BnxeLogWarn(pUM, "Invalid board serial number buffer for FCoE");
1202             return B_FALSE;
1203         }
1204 
1205         snprintf((char *)pData, dataLen,
1206                  "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c",
1207                  pUM->lm_dev.hw_info.board_num[0],
1208                  pUM->lm_dev.hw_info.board_num[1],
1209                  pUM->lm_dev.hw_info.board_num[2],
1210                  pUM->lm_dev.hw_info.board_num[3],
1211                  pUM->lm_dev.hw_info.board_num[4],
1212                  pUM->lm_dev.hw_info.board_num[5],
1213                  pUM->lm_dev.hw_info.board_num[6],
1214                  pUM->lm_dev.hw_info.board_num[7],
1215                  pUM->lm_dev.hw_info.board_num[8],
1216                  pUM->lm_dev.hw_info.board_num[9],
1217                  pUM->lm_dev.hw_info.board_num[10],
1218                  pUM->lm_dev.hw_info.board_num[11],
1219                  pUM->lm_dev.hw_info.board_num[12],
1220                  pUM->lm_dev.hw_info.board_num[13],
1221                  pUM->lm_dev.hw_info.board_num[14],
1222                  pUM->lm_dev.hw_info.board_num[15]);
1223 
1224         return B_TRUE;
1225 
1226     case PRV_CTL_BOOTCODE_VERSION:
1227 
1228         if (!pData || (dataLen <= 0))
1229         {
1230             BnxeLogWarn(pUM, "Invalid boot code version buffer for FCoE");
1231             return B_FALSE;
1232         }
1233 
1234         snprintf((char *)pData, dataLen, "%s", pUM->versionBC);
1235 
1236         return B_TRUE;
1237 
1238     case PRV_CTL_REPORT_FCOE_STATS:
1239 
1240         if (!pData ||
1241             (dataLen !=
1242              sizeof(pUM->lm_dev.vars.stats.stats_mirror.
1243                                      stats_drv.drv_info_to_mfw.fcoe_stats)))
1244         {
1245             BnxeLogWarn(pUM, "Invalid stats reporting buffer for FCoE");
1246             return B_FALSE;
1247         }
1248 
1249         memcpy(&pUM->lm_dev.vars.stats.stats_mirror.
1250                                 stats_drv.drv_info_to_mfw.fcoe_stats,
1251                (fcoe_stats_info_t *)pData,
1252                sizeof(fcoe_stats_info_t));
1253 
1254         return B_TRUE;
1255 
1256     case PRV_CTL_SET_CAPS:
1257 
1258         if (!pData || (dataLen != sizeof(struct fcoe_capabilities)))
1259         {
1260             BnxeLogWarn(pUM, "Invalid capabilities buffer for FCoE");
1261             return B_FALSE;
1262         }
1263 
1264         memcpy(&pUM->lm_dev.vars.stats.stats_mirror.stats_drv.
1265                                  drv_info_to_shmem.fcoe_capabilities,
1266                pData,
1267                sizeof(pUM->lm_dev.vars.stats.stats_mirror.stats_drv.
1268                                        drv_info_to_shmem.fcoe_capabilities));
1269 
1270         lm_ncsi_fcoe_cap_to_scratchpad(&pUM->lm_dev);
1271 
1272         return B_TRUE;
1273 
1274     default:
1275 
1276         BnxeLogWarn(pUM, "Unknown provider command %d", cmd);
1277         return B_FALSE;
1278     }
1279 }
1280 
1281 
1282 mblk_t * BnxeFcoePrvTx(dev_info_t * pDev,
1283                        mblk_t *     pMblk,
1284                        u32_t        flags,
1285                        u16_t        vlan_tag)
1286 {
1287     um_device_t * pUM = (um_device_t *)ddi_get_driver_private(pDev);
1288     lm_device_t * pLM = &pUM->lm_dev;
1289     mblk_t *      pNextMblk = NULL;
1290     int           txCount = 0;
1291     int rc;
1292 
1293     /* sanity check */
1294     if (pUM == NULL || pUM->pDev != pDev)
1295     {
1296         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1297         return pMblk;
1298     }
1299 
1300     VERIFY_FCOE_BINDING(pUM);
1301 
1302     BnxeLogDbg(pUM, "*** %s ***", __func__);
1303 
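    /*
     * Walk the mblk chain: a packet accepted by BnxeTxSendMblk (GOODXMIT) is
     * consumed and the walk continues; on DEFERPKT the packet was queued by
     * the driver, so the walk stops and the untransmitted remainder is
     * returned; on any other status the current mblk is relinked and the
     * whole unsent remainder is handed back for bnxef to retry later.
     */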
1304     while (pMblk)
1305     {
1306         txCount++;
1307 
1308         pNextMblk = pMblk->b_next;
1309         pMblk->b_next = NULL;
1310 
1311         rc = BnxeTxSendMblk(pUM, FCOE_CID(pLM), pMblk, flags, vlan_tag);
1312 
1313         if (rc == BNXE_TX_GOODXMIT)
1314         {
1315             pMblk = pNextMblk;
1316             continue;
1317         }
1318         else if (rc == BNXE_TX_DEFERPKT)
1319         {
1320             pMblk = pNextMblk;
1321         }
1322         else
1323         {
1324             pMblk->b_next = pNextMblk;
1325         }
1326 
1327         break;
1328     }
1329 
1330     return pMblk;
1331 }
1332 
1333 
1334 boolean_t BnxeFcoePrvPoll(dev_info_t * pDev)
1335 {
1336     um_device_t * pUM  = (um_device_t *)ddi_get_driver_private(pDev);
1337     RxQueue *     pRxQ;
1338     u32_t         idx;
1339 
1340     /* sanity check */
1341     if (pUM == NULL || pUM->pDev != pDev)
1342     {
1343         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1344         return B_FALSE;
1345     }
1346 
1347     VERIFY_FCOE_BINDING(pUM);
1348 
    pRxQ = &pUM->rxq[FCOE_CID(&pUM->lm_dev)];
    idx  = pRxQ->idx;

1349     BnxeLogDbg(pUM, "*** %s ***", __func__);
1350 
1351     if (pRxQ->inPollMode == B_FALSE)
1352     {
1353         BnxeLogWarn(pUM, "Polling on FCoE ring %d when NOT in poll mode!", idx);
1354         return B_FALSE;
1355     }
1356 
1357     pRxQ->pollCnt++;
1358 
1359     BnxePollRxRingFCOE(pUM);
1360 
1361     return B_TRUE;
1362 }
1363 
1364 
1365 boolean_t BnxeFcoePrvSendWqes(dev_info_t * pDev,
1366                               void *       wqes[],
1367                               int          wqeCnt)
1368 {
1369     union fcoe_kwqe ** kwqes = (union fcoe_kwqe **)wqes;
1370     int                kwqeCnt = 0;
1371 
1372     um_device_t * pUM = (um_device_t *)ddi_get_driver_private(pDev);
1373 
1374     /* sanity check */
1375     if (pUM == NULL || pUM->pDev != pDev)
1376     {
1377         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1378         return B_FALSE;
1379     }
1380 
1381     VERIFY_FCOE_BINDING(pUM);
1382 
1383     if ((kwqes == NULL) || (kwqes[0] == NULL))
1384     {
1385         BnxeLogWarn(pUM, "Invalid WQE array");
1386         return B_FALSE;
1387     }
1388 
1389     BnxeLogDbg(pUM, "*** %s ***", __func__);
1390 
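    /*
     * KWQEs arrive in opcode-defined groups: INIT1 must be followed by INIT2
     * and INIT3 (3 entries) and OFFLOAD_CONN1 by OFFLOAD_CONN2..4 (4 entries),
     * while the remaining opcodes are single-entry requests.  Each group is
     * validated and then queued as a single unit of work.
     */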
1391     while (kwqeCnt < wqeCnt)
1392     {
1393         switch (kwqes[kwqeCnt]->init1.hdr.op_code)
1394         {
1395         case FCOE_KWQE_OPCODE_INIT1:
1396 
1397             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_INIT", __func__);
1398 
1399             if ((wqeCnt <= kwqeCnt + 2) ||
1400                 (kwqes[kwqeCnt + 1] == NULL) ||
1401                 (kwqes[kwqeCnt + 2] == NULL) ||
1402                 (kwqes[kwqeCnt + 1]->init2.hdr.op_code != FCOE_KWQE_OPCODE_INIT2) ||
1403                 (kwqes[kwqeCnt + 2]->init3.hdr.op_code != FCOE_KWQE_OPCODE_INIT3))
1404             {
1405                 BnxeLogWarn(pUM, "FCoE Init kwqes error");
1406                 pUM->fcoe.stats.initWqeTxErr++;
1407                 return B_FALSE;
1408             }
1409 
1410             if (!BnxeFcoeInitWqe(pUM, &kwqes[kwqeCnt]))
1411             {
1412                 BnxeLogWarn(pUM, "Failed to init FCoE Init WQE work");
1413                 return B_FALSE;
1414             }
1415 
1416             kwqeCnt += 3;
1417 
1418             break;
1419 
1420         case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
1421 
1422             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_OFFLOAD_CONN1", __func__);
1423 
1424             if ((wqeCnt <= kwqeCnt + 3) ||
1425                 (kwqes[kwqeCnt + 1] == NULL) ||
1426                 (kwqes[kwqeCnt + 2] == NULL) ||
1427                 (kwqes[kwqeCnt + 3] == NULL) ||
1428                 (kwqes[kwqeCnt + 1]->conn_offload2.hdr.op_code != FCOE_KWQE_OPCODE_OFFLOAD_CONN2) ||
1429                 (kwqes[kwqeCnt + 2]->conn_offload3.hdr.op_code != FCOE_KWQE_OPCODE_OFFLOAD_CONN3) ||
1430                 (kwqes[kwqeCnt + 3]->conn_offload4.hdr.op_code != FCOE_KWQE_OPCODE_OFFLOAD_CONN4))
1431             {
1432                 BnxeLogWarn(pUM, "FCoE Offload Conn kwqes error");
1433                 pUM->fcoe.stats.offloadConnWqeTxErr++;
1434                 return B_FALSE;
1435             }
1436 
1437             if (!BnxeFcoeOffloadConnWqe(pUM, &kwqes[kwqeCnt]))
1438             {
1439                 BnxeLogWarn(pUM, "Failed to init FCoE Offload Conn WQE work");
1440                 return B_FALSE;
1441             }
1442 
1443             kwqeCnt += 4;
1444 
1445             break;
1446 
1447         case FCOE_KWQE_OPCODE_ENABLE_CONN:
1448 
1449             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_ENABLE_CONN", __func__);
1450 
1451             if (!BnxeFcoeEnableConnWqe(pUM, &kwqes[kwqeCnt]))
1452             {
1453                 BnxeLogWarn(pUM, "Failed to init FCoE Enable Conn WQE work");
1454                 return B_FALSE;
1455             }
1456 
1457             kwqeCnt += 1;
1458 
1459             break;
1460 
1461         case FCOE_KWQE_OPCODE_DISABLE_CONN:
1462 
1463             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_DISABLE_CONN", __func__);
1464 
1465             if (!BnxeFcoeDisableConnWqe(pUM, &kwqes[kwqeCnt]))
1466             {
1467                 BnxeLogWarn(pUM, "Failed to init FCoE Disable Conn WQE work");
1468                 return B_FALSE;
1469             }
1470 
1471             kwqeCnt += 1;
1472 
1473             break;
1474 
1475         case FCOE_KWQE_OPCODE_DESTROY_CONN:
1476 
1477             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_DESTROY_CONN", __func__);
1478 
1479             if (!BnxeFcoeDestroyConnWqe(pUM, &kwqes[kwqeCnt]))
1480             {
1481                 BnxeLogWarn(pUM, "Failed to init FCoE Destroy Conn WQE work");
1482                 return B_FALSE;
1483             }
1484 
1485             kwqeCnt += 1;
1486 
1487             break;
1488 
1489         case FCOE_KWQE_OPCODE_DESTROY:
1490 
1491             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_DESTROY", __func__);
1492 
1493             if (!BnxeFcoeDestroyWqe(pUM, &kwqes[kwqeCnt]))
1494             {
1495                 BnxeLogWarn(pUM, "Failed to init FCoE Destroy WQE work");
1496                 return B_FALSE;
1497             }
1498 
1499             kwqeCnt += 1;
1500 
1501             break;
1502 
1503         case FCOE_KWQE_OPCODE_STAT:
1504 
1505             BnxeLogDbg(pUM, "*** %s - FCOE_KWQE_OPCODE_STAT", __func__);
1506 
1507             if (!BnxeFcoeStatWqe(pUM, &kwqes[kwqeCnt]))
1508             {
1509                 BnxeLogWarn(pUM, "Failed to init FCoE Stat WQE work");
1510                 return B_FALSE;
1511             }
1512 
1513             kwqeCnt += 1;
1514 
1515             break;
1516 
1517         default:
1518 
1519             BnxeDbgBreakMsg(pUM, "Invalid KWQE opcode");
1520             return B_FALSE;
1521         }
1522     }
1523 
1524     return B_TRUE;
1525 }
1526 
1527 
1528 boolean_t BnxeFcoePrvMapMailboxq(dev_info_t *       pDev,
1529                                  u32_t              cid,
1530                                  void **            ppMap,
1531                                  ddi_acc_handle_t * pAccHandle)
1532 {
1533     um_device_t * pUM = (um_device_t *)ddi_get_driver_private(pDev);
1534 
1535     /* sanity check */
1536     if (pUM == NULL || pUM->pDev != pDev)
1537     {
1538         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1539         return B_FALSE;
1540     }
1541 
1542     VERIFY_FCOE_BINDING(pUM);
1543 
1544     BnxeLogDbg(pUM, "*** %s ***", __func__);
1545 
1546     /* get the right offset from the mapped bar */
1547 
1548     *ppMap = (void *)((u8_t *)pUM->lm_dev.context_info->array[SW_CID(cid)].cid_resc.mapped_cid_bar_addr + DPM_TRIGER_TYPE);
1549     *pAccHandle = pUM->lm_dev.context_info->array[SW_CID(cid)].cid_resc.reg_handle;
1550 
1551     if (!(*ppMap) || !(*pAccHandle))
1552     {
1553         BnxeLogWarn(pUM, "Cannot map mailboxq base address for FCoE");
1554         return B_FALSE;
1555     }
1556 
1557     return B_TRUE;
1558 }
1559 
1560 
1561 boolean_t BnxeFcoePrvUnmapMailboxq(dev_info_t *     pDev,
1562                                    u32_t            cid,
1563                                    void *           pMap,
1564                                    ddi_acc_handle_t accHandle)
1565 {
1566     um_device_t *    pUM = (um_device_t *)ddi_get_driver_private(pDev);
1567     void *           pTmp;
1568     ddi_acc_handle_t tmpAcc;
1569 
1570     /* sanity check */
1571     if (pUM == NULL || pUM->pDev != pDev)
1572     {
1573         BnxeLogWarn(NULL, "%s: dev_info_t match failed", __func__);
1574         return B_FALSE;
1575     }
1576 
1577     VERIFY_FCOE_BINDING(pUM);
1578 
1579     BnxeLogDbg(pUM, "*** %s ***", __func__);
1580 
1581     /* verify the mapped bar address */
1582     pTmp = (void *)((u8_t *)pUM->lm_dev.context_info->array[SW_CID(cid)].cid_resc.mapped_cid_bar_addr + DPM_TRIGER_TYPE);
1583     tmpAcc = pUM->lm_dev.context_info->array[SW_CID(cid)].cid_resc.reg_handle;
1584 
1585     if ((pMap != pTmp) || (accHandle != tmpAcc))
1586     {
1587         BnxeLogWarn(pUM, "Invalid map info for FCoE (%p)", pMap);
1588         return B_FALSE;
1589     }
1590 
1591     return B_TRUE;
1592 }
1593 
1594 
1595 int BnxeFcoeInit(um_device_t * pUM)
1596 {
1597     char * pCompat[2] = { BNXEF_NAME, NULL };
1598     char   name[256];
1599     int    rc;
1600 
1601     BnxeLogInfo(pUM, "Starting FCoE");
1602 
1603     if (!BNXE_FCOE(pUM))
1604     {
1605         BnxeLogWarn(pUM, "FCoE not supported on this device");
1606         return ENOTSUP;
1607     }
1608 
1609     //if (CLIENT_DEVI(pUM, LM_CLI_IDX_FCOE))
1610     if (pUM->fcoe.pDev)
1611     {
1612         BnxeLogWarn(pUM, "FCoE child node already initialized");
1613         return EEXIST;
1614     }
1615 
1616     if (ndi_devi_alloc(pUM->pDev,
1617                        BNXEF_NAME,
1618                        DEVI_PSEUDO_NODEID,
1619                        &pUM->fcoe.pDev) != NDI_SUCCESS)
1620     {
1621         BnxeLogWarn(pUM, "Failed to allocate a child node for FCoE");
1622         pUM->fcoe.pDev = NULL;
1623         return ENOMEM;
1624     }
1625 
1626     if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
1627                                      pUM->fcoe.pDev,
1628                                      "name",
1629                                      pCompat,
1630                                      1) != DDI_PROP_SUCCESS)
1631     {
1632         BnxeLogWarn(pUM, "Failed to set the name string for FCoE");
1633         /* XXX see other call to ndi_devi_free below */
1634         //ndi_devi_free(pUM->fcoe.pDev);
1635         pUM->fcoe.pDev = NULL;
1636         return ENOENT;
1637     }
1638 
1639     CLIENT_DEVI_SET(pUM, LM_CLI_IDX_FCOE);
1640 
1641     /*
1642      * XXX If/when supporting custom wwn's then prime them
1643      * here in so they will be passed to bnxef during BINDING.
1644      * Ideally custom wwn's will be set via the driver .conf
1645      * file and via a private driver property.
1646      */
1647     memset(&pUM->fcoe.wwn, 0, sizeof(BnxeWwnInfo));
1648     pUM->fcoe.wwn.fcp_pwwn_provided = B_TRUE;
1649     memcpy(pUM->fcoe.wwn.fcp_pwwn, pUM->lm_dev.hw_info.fcoe_wwn_port_name,
1650            BNXE_FCOE_WWN_SIZE);
1651     pUM->fcoe.wwn.fcp_nwwn_provided = B_TRUE;
1652     memcpy(pUM->fcoe.wwn.fcp_nwwn, pUM->lm_dev.hw_info.fcoe_wwn_node_name,
1653            BNXE_FCOE_WWN_SIZE);
1654 
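
    /*
     * A minimal sketch (not active code) of how a custom port WWN supplied
     * via bnxe.conf could override the default above, assuming a
     * hypothetical "fcoe-port-wwn" byte-array property:
     *
     *     uchar_t * wwn;
     *     uint_t    nelem;
     *
     *     if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, pUM->pDev,
     *                                    DDI_PROP_DONTPASS, "fcoe-port-wwn",
     *                                    &wwn, &nelem) == DDI_PROP_SUCCESS)
     *     {
     *         if (nelem == BNXE_FCOE_WWN_SIZE)
     *         {
     *             memcpy(pUM->fcoe.wwn.fcp_pwwn, wwn, BNXE_FCOE_WWN_SIZE);
     *         }
     *         ddi_prop_free(wwn);
     *     }
     */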
1655     BnxeLogInfo(pUM, "Created the FCoE child node %s@%s",
1656                 BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1657 
1658     if ((rc = ndi_devi_online(pUM->fcoe.pDev, NDI_ONLINE_ATTACH)) !=
1659         NDI_SUCCESS)
1660     {
1661         /* XXX
1662          * ndi_devi_free will cause a panic. Don't know why and we've
1663          * verified that Sun's FCoE driver does not free it either.
1664          */
1665         //ndi_devi_free(pUM->fcoe.pDev);
1666         CLIENT_DEVI_RESET(pUM, LM_CLI_IDX_FCOE);
1667         pUM->fcoe.pDev = NULL;
1668         BnxeLogInfo(pUM, "Unable to bind the QLogic FCoE driver (%d)", rc);
1669         return ECHILD;
1670     }
1671 
1672 #if 0
1673     /* bring bnxef online and attach it */
1674     if (ndi_devi_bind_driver(pUM->fcoe.pDev, 0) != NDI_SUCCESS)
1675     {
1676         BnxeLogInfo(pUM, "Unable to bind the QLogic FCoE driver");
1677     }
1678 #endif
1679 
1680     return 0;
1681 }
1682 
1683 
1684 int BnxeFcoeFini(um_device_t * pUM)
1685 {
1686     int rc = 0;
1687     int nullDev = B_FALSE; /* false = wait for bnxef UNBIND */
1688 
1689     BnxeLogInfo(pUM, "Stopping FCoE");
1690 
1691     if (!BNXE_FCOE(pUM))
1692     {
1693         BnxeLogWarn(pUM, "FCoE not supported on this device");
1694         return ENOTSUP;
1695     }
1696 
1697     if (CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE))
1698     {
1699         if (pUM->fcoe.pDev == NULL)
1700         {
1701             BnxeLogWarn(pUM, "FCoE Client bound and pDev is NULL, FINI failed! %s@%s",
1702                         BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1703             return ENOENT;
1704         }
1705         else if (pUM->fcoe.bind.cliCtl == NULL)
1706         {
1707             BnxeLogWarn(pUM, "FCoE Client bound and cliCtl is NULL, FINI failed! %s@%s",
1708                         BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1709             return ENOENT;
1710         }
1711         else if (pUM->fcoe.bind.cliCtl(pUM->fcoe.pDev,
1712                                        CLI_CTL_UNLOAD,
1713                                        NULL,
1714                                        0) == B_FALSE)
1715         {
1716             BnxeLogWarn(pUM, "FCoE Client bound and UNLOAD failed! %s@%s",
1717                         BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1718             return ENOMSG; /* no graceful unload with bnxef */
1719         }
1720     }
1721     else
1722     {
1723         rc = ENODEV;
1724         nullDev = B_TRUE;
1725     }
1726 
1727     /*
1728      * There are times when delete-port doesn't fully work and bnxef is unable
1729      * to detach and never calls UNBIND.  So here we'll just make sure that
1730      * the child dev node is not NULL which semi-guarantees the UNBIND hasn't
1731      * been called yet.  Multiple offline calls will hopefully kick bnxef...
1732      */
1733     //if (CLIENT_DEVI(pUM, LM_CLI_IDX_FCOE))
1734     if (pUM->fcoe.pDev)
1735     {
1736         CLIENT_DEVI_RESET(pUM, LM_CLI_IDX_FCOE);
1737 
1738         BnxeLogWarn(pUM, "Bringing down QLogic FCoE driver %s@%s",
1739                     BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1740 
1741 #if 1
1742         if (ndi_devi_offline(pUM->fcoe.pDev, NDI_DEVI_REMOVE) != NDI_SUCCESS)
1743         {
1744             BnxeLogWarn(pUM, "Failed to bring the QLogic FCoE driver offline %s@%s",
1745                         BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1746             return EBUSY;
1747         }
1748 #else
1749         ndi_devi_offline(pUM->fcoe.pDev, NDI_DEVI_REMOVE);
1750         if (nullDev) pUM->fcoe.pDev = NULL;
1751 #endif
1752 
1753         memset(&pUM->fcoe.wwn, 0, sizeof(BnxeWwnInfo));
1754 
1755         BnxeLogInfo(pUM, "Destroyed the FCoE child node %s@%s",
1756                     BNXEF_NAME, ddi_get_name_addr(pUM->pDev));
1757     }
1758 
1759     return rc;
1760 }
1761 
1762 
1763 void BnxeFcoeStartStop(um_device_t * pUM)
1764 {
1765     int rc;
1766 
1767     if (!BNXE_FCOE(pUM))
1768     {
1769         BnxeLogWarn(pUM, "FCoE is not supported on this device");
1770         return;
1771     }
1772 
1773     if (pUM->devParams.fcoeEnable)
1774     {
1775         BnxeFcoeInit(pUM);
1776     }
1777     else
1778     {
1779         BnxeFcoeFini(pUM);
1780     }
1781 }
1782 
1783