/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing queue handling functions
 */

#include <oce_impl.h>
extern struct oce_dev *oce_dev_list[];

int oce_destroy_q(struct oce_dev *oce, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype);
/* Mailbox queue functions */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);

/* event queue handling */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay);

/* completion queue handling */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce);

/* Tx WQ functions */
static struct oce_wq *oce_wq_init(struct oce_dev *dev, uint32_t q_len,
    int wq_type);
static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
/* Rx Queue functions */
static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu, boolean_t rss);
static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);

/*
 * function to create an event queue
 *
 * dev - software handle to the device
 * q_len - number of entries in the EQ
 * item_size - size of each EQ entry
 * eq_delay - interrupt coalescing delay multiplier
 *
 * return pointer to EQ; NULL on failure
 */
struct oce_eq *
oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
    uint32_t eq_delay)
{
	struct oce_eq *eq;
	struct oce_mbx mbx;
	struct mbx_create_common_eq *fwcmd;
	int ret = 0;

	/* allocate an eq */
	eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);

	if (eq == NULL) {
		return (NULL);
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* map the fw command onto the mbx payload */
	fwcmd = (struct mbx_create_common_eq *)&mbx.payload;

	eq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);

	if (eq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "EQ ring alloc failed:0x%p", (void *)eq->ring);
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_eq));

	fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
	oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
	    eq->ring->dbuf->num_pages);

	/* dw 0 */
	fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
	fwcmd->params.req.eq_ctx.valid = 1;
	/* dw 1 */
	fwcmd->params.req.eq_ctx.armed = 0;
	fwcmd->params.req.eq_ctx.pd = 0;
	fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
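	/*
	 * Editorial note: the count field above encodes the EQ size as
	 * log2(q_len / 256), i.e. 256 entries -> 0, 1024 entries -> 2.
	 */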

	/* dw 2 */
	fwcmd->params.req.eq_ctx.function = dev->fn;
	fwcmd->params.req.eq_ctx.nodelay = 0;
	fwcmd->params.req.eq_ctx.phase = 0;
	/* todo: calculate multiplier from max min and cur */
	fwcmd->params.req.eq_ctx.delay_mult = eq_delay;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_eq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "EQ create failed: %d", ret);
		destroy_ring_buffer(dev, eq->ring);
		kmem_free(eq, sizeof (struct oce_eq));
		return (NULL);
	}

	/* interpret the response */
	eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
	eq->parent = (void *)dev;
	atomic_inc_32(&dev->neqs);
	oce_log(dev, CE_NOTE, MOD_CONFIG,
	    "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
	return (eq);
} /* oce_eq_create */
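
/*
 * Illustrative usage sketch (editorial note, not driver code): the
 * attach path creates one EQ per interrupt vector, as done in
 * oce_create_queues() at the end of this file:
 *
 *	eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
 *	if (eq == NULL)
 *		goto rings_fail;
 */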

/*
 * function to delete an event queue
 *
 * dev - software handle to the device
 * eq - handle to the eq to be deleted
 *
 * return none
 */
void
oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;

	/* drain the residual events */
	oce_drain_eq(eq);

	/* destroy the ring */
	destroy_ring_buffer(dev, eq->ring);
	eq->ring = NULL;

	/* send a command to delete the EQ */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
	fwcmd->params.req.id = eq->eq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_eq),
	    QTYPE_EQ);
	kmem_free(eq, sizeof (struct oce_eq));
	atomic_dec_32(&dev->neqs);
}

/*
 * function to create a completion queue
 *
 * dev - software handle to the device
 * eq - the EQ to be associated with the cq
 * q_len - number of entries in the CQ
 * item_size - size of each CQ entry
 * sol_event - solicited event bit for the CQ context
 * is_eventable - B_TRUE if the CQ should post events to the EQ
 * nodelay - B_TRUE to disable the coalescing delay
 * ncoalesce - coalescing watermark
 *
 * return pointer to the cq created. NULL on failure
 */
struct oce_cq *
oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
    boolean_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	struct oce_mbx mbx;
	struct mbx_create_common_cq *fwcmd;
	int ret = 0;

	/* create cq */
	cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
	if (cq == NULL) {
		oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
		    "CQ allocation failed");
		return (NULL);
	}

	/* create the ring buffer for this queue */
	cq->ring = create_ring_buffer(dev, q_len,
	    item_size, DDI_DMA_CONSISTENT);
	if (cq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "CQ ring alloc failed:0x%p",
		    (void *)cq->ring);
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}
	/* initialize mailbox */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_common_cq *)&mbx.payload;

	/* fill the command header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_cq));

	/* fill command context */
	/* dw0 */
	fwcmd->params.req.cq_ctx.eventable = is_eventable;
	fwcmd->params.req.cq_ctx.sol_event = sol_event;
	fwcmd->params.req.cq_ctx.valid = 1;
	fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
	fwcmd->params.req.cq_ctx.nodelay = nodelay;
	fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;

	/* dw1 */
	fwcmd->params.req.cq_ctx.armed = B_FALSE;
	fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
	fwcmd->params.req.cq_ctx.pd = 0;
	/* dw2 */
	fwcmd->params.req.cq_ctx.function = dev->fn;

	/* fill the rest of the command */
	fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
	oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
	    cq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_cq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mail box */
	ret = oce_mbox_post(dev, &mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "CQ create failed: 0x%x", ret);
		destroy_ring_buffer(dev, cq->ring);
		kmem_free(cq, sizeof (struct oce_cq));
		return (NULL);
	}
	cq->parent = dev;
	cq->eq = eq; /* associated event queue */
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
	cq->cq_cfg.nodelay = (uint8_t)nodelay;
	/* interpret the response */
	cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
	dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
	atomic_inc_32(&eq->ref_count);
	return (cq);
} /* oce_cq_create */
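
/*
 * Illustrative usage sketch (editorial note, not driver code): each
 * queue-creation path in this file creates its CQ first, e.g. the Tx
 * path in oce_wq_create() below uses
 *
 *	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
 *	    sizeof (struct oce_nic_tx_cqe),
 *	    B_FALSE, B_TRUE, B_FALSE, 3);
 */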

/*
 * function to delete a completion queue
 *
 * dev - software handle to the device
 * cq - handle to the CQ to delete
 *
 * return none
 */
static void
oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, cq->ring);
	cq->ring = NULL;

	bzero(&mbx, sizeof (struct oce_mbx));
	/* send a command to delete the CQ */
	fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
	fwcmd->params.req.id = cq->cq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_cq),
	    QTYPE_CQ);

	/* Reset the handler */
	cq->cq_handler = NULL;
	dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
	atomic_dec_32(&cq->eq->ref_count);
	mutex_destroy(&cq->lock);

	/* free the cq */
	kmem_free(cq, sizeof (struct oce_cq));
} /* oce_cq_del */

/*
 * function to create an MQ
 *
 * dev - software handle to the device
 * eq - the EQ to associate with the MQ for event notification
 * q_len - the number of entries to create in the MQ
 *
 * return pointer to the created MQ; NULL on failure
 */
struct oce_mq *
oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
{
	struct oce_mbx mbx;
	struct mbx_create_common_mq *fwcmd;
	struct oce_mq *mq = NULL;
	int ret = 0;
	struct oce_cq *cq;

	/* Create the Completion Q */
	cq = oce_cq_create(dev, eq, CQ_LEN_256,
	    sizeof (struct oce_mq_cqe),
	    B_FALSE, B_TRUE, B_TRUE, 0);
	if (cq == NULL) {
		return (NULL);
	}

	/* allocate the mq */
	mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);

	if (mq == NULL) {
		goto mq_alloc_fail;
	}

	bzero(&mbx, sizeof (struct oce_mbx));
	/* map the fw command onto the mbx payload */
	fwcmd = (struct mbx_create_common_mq *)&mbx.payload;

	/* create the ring buffer for this queue */
	mq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (mq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MQ ring alloc failed:0x%p",
		    (void *)mq->ring);
		goto mq_ring_alloc;
	}

	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_common_mq));

	fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
	oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
	    mq->ring->dbuf->num_pages);
	fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
	fwcmd->params.req.context.u0.s.ring_size =
	    OCE_LOG2(q_len) + 1;
	fwcmd->params.req.context.u0.s.valid = 1;
	fwcmd->params.req.context.u0.s.fid = dev->fn;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_common_mq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now send the mail box */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "MQ create failed: 0x%x", ret);
		goto mq_fail;
	}

	/* interpret the response */
	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
	mq->cq = cq;
	mq->cfg.q_len = (uint8_t)q_len;
	mq->cfg.eqd = 0;

	/* fill rest of the mq */
	mq->parent = dev;

	/* set the MQCQ handlers */
	cq->cq_handler = oce_drain_mq_cq;
	cq->cb_arg = (void *)mq;
	mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	return (mq);

mq_fail:
	destroy_ring_buffer(dev, mq->ring);
mq_ring_alloc:
	kmem_free(mq, sizeof (struct oce_mq));
mq_alloc_fail:
	oce_cq_del(dev, cq);
	return (NULL);
} /* oce_mq_create */
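
/*
 * Illustrative usage sketch (editorial note, not driver code): the
 * driver creates a single mailbox queue on the first EQ, as done in
 * oce_create_queues() below:
 *
 *	mq = oce_mq_create(dev, dev->eq[0], 64);
 */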

/*
 * function to delete an MQ
 *
 * dev - software handle to the device
 * mq - pointer to the MQ to delete
 *
 * return none
 */
static void
oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_mq *fwcmd;

	/* destroy the ring */
	destroy_ring_buffer(dev, mq->ring);
	mq->ring = NULL;
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
	fwcmd->params.req.id = mq->mq_id;
	(void) oce_destroy_q(dev, &mbx,
	    sizeof (struct mbx_destroy_common_mq),
	    QTYPE_MQ);
	oce_cq_del(dev, mq->cq);
	mq->cq = NULL;
	mutex_destroy(&mq->lock);
	kmem_free(mq, sizeof (struct oce_mq));
} /* oce_mq_del */

/*
 * function to allocate WQ resources for NIC Tx
 *
 * dev - software handle to the device
 * q_len - number of entries in the WQ
 * wq_type - type of the WQ
 *
 * return pointer to the WQ created. NULL on failure
 */
static struct oce_wq *
oce_wq_init(struct oce_dev *dev, uint32_t q_len, int wq_type)
{
	struct oce_wq *wq;
	char str[MAX_POOL_NAME];
	int ret;
	static int wq_id = 0;

	ASSERT(dev != NULL);
	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Invalid q length. Must be "
		    "[256, 2048]: 0x%x", q_len);
		return (NULL);
	}

	/* allocate wq */
	wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
	if (wq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ allocation failed");
		return (NULL);
	}

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t)wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;
	wq->cfg.buf_size = dev->tx_bcopy_limit;

	/* assign parent */
	wq->parent = (void *)dev;

	/* Create the WQ Buffer pool */
	ret = oce_wqb_cache_create(wq, wq->cfg.buf_size);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Buffer Pool create failed ");
		goto wqb_fail;
	}

	/* Create a pool of memory handles */
	ret = oce_wqm_cache_create(wq);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ MAP Handles Pool create failed ");
		goto wqm_fail;
	}

	(void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
	    dev->dev_id, "_", wq_id++);
	wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	if (wq->wqed_cache == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ Packet Desc Pool create failed ");
		goto wqed_fail;
	}

	/* create the ring buffer */
	wq->ring = create_ring_buffer(dev, q_len,
	    NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (wq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to create WQ ring ");
		goto wq_ringfail;
	}

	/* Initialize the WQ tx lock */
	mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize the WQ tx-completion lock */
	mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	atomic_inc_32(&dev->nwqs);

	OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
	return (wq);

wq_ringfail:
	kmem_cache_destroy(wq->wqed_cache);
wqed_fail:
	oce_wqm_cache_destroy(wq);
wqm_fail:
	oce_wqb_cache_destroy(wq);
wqb_fail:
	kmem_free(wq, sizeof (struct oce_wq));
	return (NULL);
} /* oce_wq_init */
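
/*
 * Editorial note: Tx queue bring-up is split in two phases in this
 * file. oce_wq_init() above allocates the host-side resources (buffer
 * and handle caches, ring memory, locks), while oce_wq_create() below
 * issues the firmware command that actually creates the queue.
 * oce_wq_fini() and oce_wq_del() undo the respective phases.
 */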

/*
 * function to free the WQ resources allocated by oce_wq_init
 *
 * dev - software handle to the device
 * wq - WQ to free
 *
 * return none
 */
static void
oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
{
	/* destroy the buffer and handle caches */
	oce_wqb_cache_destroy(wq);
	oce_wqm_cache_destroy(wq);
	kmem_cache_destroy(wq->wqed_cache);

	/* Free the packet descriptor list */
	OCE_LIST_DESTROY(&wq->wqe_desc_list);
	destroy_ring_buffer(dev, wq->ring);
	wq->ring = NULL;
	/* Destroy the Mutex */
	mutex_destroy(&wq->tx_lock);
	mutex_destroy(&wq->txc_lock);
	kmem_free(wq, sizeof (struct oce_wq));
	atomic_dec_32(&dev->nwqs);
} /* oce_wq_fini */

static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_wq *fwcmd;
	struct oce_dev *dev = wq->parent;
	struct oce_cq *cq;
	int ret;

	/* create the CQ */
	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
	    sizeof (struct oce_nic_tx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);
	if (cq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "WQ CQ create failed ");
		return (DDI_FAILURE);
	}
	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_wq));

	fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
	fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %d size = %lu",
	    (uint32_t)wq->ring->dbuf->num_pages,
	    wq->ring->dbuf->size);

	/* workaround: fill 0x01 for ulp_mask in rsvd0 */
	fwcmd->params.req.rsvd0 = 0x01;
	fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
	fwcmd->params.req.valid = 1;
	fwcmd->params.req.pd_id = 0;
	fwcmd->params.req.pci_function_id = dev->fn;
	fwcmd->params.req.cq_id = cq->cq_id;

	oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
	    wq->ring->dbuf->num_pages);

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_wq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "WQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
	wq->qstate = QCREATED;
	wq->cq = cq;
	/* set the WQCQ handlers */
	wq->cq->cq_handler = oce_drain_wq_cq;
	wq->cq->cb_arg = (void *)wq;
	/* All entries are free to start with */
	wq->wq_free = wq->cfg.q_len;
	/* reset indices */
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
	    wq->wq_id);

	return (0);
}
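
/*
 * Illustrative usage sketch (editorial note, not driver code): WQs are
 * created against the first EQ in oce_create_queues() below:
 *
 *	if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
 *		goto rings_fail;
 */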

/*
 * function to delete a WQ
 *
 * dev - software handle to the device
 * wq - WQ to delete
 *
 * return none
 */
static void
oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(wq != NULL);
	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof (struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_wq),
		    QTYPE_WQ);
		wq->qstate = QDELETED;
		oce_cq_del(dev, wq->cq);
		wq->cq = NULL;
	}
} /* oce_wq_del */

/*
 * function to allocate RQ resources
 *
 * dev - software handle to the device
 * q_len - number of entries in the RQ
 * frag_size - size of each receive buffer fragment
 * mtu - maximum frame size the RQ must handle
 * rss - B_TRUE if this RQ is an RSS queue
 *
 * return pointer to the RQ created. NULL on failure
 */
static struct oce_rq *
oce_rq_init(struct oce_dev *dev, uint32_t q_len,
    uint32_t frag_size, uint32_t mtu, boolean_t rss)
{
	struct oce_rq *rq;
	int ret;

	/* validate q creation parameters */
	if (!OCE_LOG2(frag_size))
		return (NULL);
	if ((q_len == 0) || (q_len > 1024))
		return (NULL);

	/* allocate the rq */
	rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
	if (rq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ allocation failed");
		return (NULL);
	}

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->cfg.nbufs = dev->rq_max_bufs;
	rq->cfg.is_rss_queue = rss;

	/* assign parent */
	rq->parent = (void *)dev;

	rq->rq_bdesc_array =
	    kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
	if (rq->rq_bdesc_array == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ bdesc alloc failed");
		goto rqbd_alloc_fail;
	}
	/* allocate the shadow ring of buffer descriptor pointers */
	rq->shadow_ring =
	    kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
	    KM_NOSLEEP);
	if (rq->shadow_ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ shadow ring alloc failed ");
		goto rq_shdw_fail;
	}

	/* allocate the free list array */
	rq->rqb_freelist =
	    kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
	if (rq->rqb_freelist == NULL) {
		goto rqb_free_list_fail;
	}
	/* create the buffer pool */
	ret = oce_rqb_cache_create(rq, dev->rq_frag_size +
	    OCE_RQE_BUF_HEADROOM);
	if (ret != DDI_SUCCESS) {
		goto rqb_fail;
	}

	/* create the ring buffer */
	rq->ring = create_ring_buffer(dev, q_len,
	    sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
	if (rq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "RQ ring create failed ");
		goto rq_ringfail;
	}

	/* Initialize the RQ lock */
	mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	/* Initialize the recharge lock */
	mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->intr_pri));
	atomic_inc_32(&dev->nrqs);
	return (rq);

rq_ringfail:
	oce_rqb_cache_destroy(rq);
rqb_fail:
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
rqb_free_list_fail:
	kmem_free(rq->shadow_ring,
	    (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
rq_shdw_fail:
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
rqbd_alloc_fail:
	kmem_free(rq, sizeof (struct oce_rq));
	return (NULL);
} /* oce_rq_init */

/*
 * function to free the RQ resources allocated by oce_rq_init
 *
 * dev - software handle to the device
 * rq - RQ to free
 *
 * return none
 */
static void
oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
{
	/* Destroy buffer cache */
	oce_rqb_cache_destroy(rq);
	destroy_ring_buffer(dev, rq->ring);
	rq->ring = NULL;
	kmem_free(rq->shadow_ring,
	    sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
	rq->shadow_ring = NULL;
	kmem_free(rq->rq_bdesc_array,
	    (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
	rq->rq_bdesc_array = NULL;
	kmem_free(rq->rqb_freelist,
	    (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
	rq->rqb_freelist = NULL;
	mutex_destroy(&rq->rx_lock);
	mutex_destroy(&rq->rc_lock);
	kmem_free(rq, sizeof (struct oce_rq));
	atomic_dec_32(&dev->nrqs);
} /* oce_rq_fini */

static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_create_nic_rq *fwcmd;
	struct oce_dev *dev = rq->parent;
	struct oce_cq *cq;
	int ret;

	cq = oce_cq_create(dev, eq, CQ_LEN_1024,
	    sizeof (struct oce_nic_rx_cqe),
	    B_FALSE, B_TRUE, B_FALSE, 3);

	if (cq == NULL) {
		return (DDI_FAILURE);
	}

	/* now fill the command */
	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_NIC,
	    OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_create_nic_rq));

	fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
	fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
	fwcmd->params.req.cq_id = cq->cq_id;
	oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
	    rq->ring->dbuf->num_pages);

	fwcmd->params.req.if_id = if_id;
	fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
	fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_create_nic_rq);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* now post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "RQ create failed: %d", ret);
		oce_cq_del(dev, cq);
		return (ret);
	}

	/* interpret the response */
	rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
	rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
	rq->cfg.if_id = if_id;
	rq->qstate = QCREATED;
	rq->cq = cq;

	/* set the Completion Handler */
	rq->cq->cq_handler = oce_drain_rq_cq;
	rq->cq->cb_arg = (void *)rq;
	/* reset the indices */
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	rq->buf_avail = 0;
	oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d", rq->rq_id);
	return (0);
}
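
/*
 * Illustrative usage sketch (editorial note, not driver code): RQs are
 * spread across the remaining EQs in oce_create_queues() below:
 *
 *	if (oce_rq_create(dev->rq[i], dev->if_id,
 *	    dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
 *		goto rings_fail;
 */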

/*
 * function to delete an RQ
 *
 * dev - software handle to the device
 * rq - RQ to delete
 *
 * return none
 */
static void
oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;

	ASSERT(dev != NULL);
	ASSERT(rq != NULL);

	bzero(&mbx, sizeof (struct oce_mbx));

	/* delete the Queue */
	if (rq->qstate == QCREATED) {
		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void) oce_destroy_q(dev, &mbx,
		    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
		rq->qstate = QDELETED;
		oce_clean_rq(rq);
		/* Delete the associated CQ */
		oce_cq_del(dev, rq->cq);
		rq->cq = NULL;
		/* free up the posted buffers */
		oce_rq_discharge(rq);
	}
} /* oce_rq_del */

/*
 * function to arm an EQ so that it can generate events
 *
 * dev - software handle to the device
 * qid - id of the EQ returned by the fw at the time of creation
 * npopped - number of EQEs to arm with
 * rearm - rearm bit
 * clearint - bit to clear the interrupt condition because of which
 *	EQEs are generated
 *
 * return none
 */
void
oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm, boolean_t clearint)
{
	eq_db_t eq_db = {0};

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = B_TRUE;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
}
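
/*
 * Illustrative usage sketch (editorial note, not driver code): after
 * draining events, the driver rearms the EQ doorbell, as done in
 * oce_drain_eq() below:
 *
 *	oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
 */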

/*
 * function to arm a CQ with CQEs
 *
 * dev - software handle to the device
 * qid - the id of the CQ returned by the fw at the time of creation
 * npopped - number of CQEs to arm with
 * rearm - rearm bit enable/disable
 *
 * return none
 */
void
oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm)
{
	cq_db_t cq_db = {0};

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
}

/*
 * function to delete an EQ, CQ, MQ, WQ or RQ
 *
 * dev - software handle to the device
 * mbx - mbox command to send to the fw to delete the queue;
 *	mbx contains the queue information to delete
 * req_size - the size of the mbx payload dependent on the qtype
 * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
    enum qtype qtype)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int ret;

	switch (qtype) {
	case QTYPE_EQ: {
		opcode = OPCODE_DESTROY_COMMON_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_CQ: {
		opcode = OPCODE_DESTROY_COMMON_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_MQ: {
		opcode = OPCODE_DESTROY_COMMON_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	}
	case QTYPE_WQ: {
		opcode = OPCODE_DELETE_NIC_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	case QTYPE_RQ: {
		opcode = OPCODE_DELETE_NIC_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	}
	default: {
		ASSERT(0);
		break;
	}
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
	    opcode, MBX_TIMEOUT_SEC, req_size);

	/* fill rest of mbx */
	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t)req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	/* send command */
	ret = oce_mbox_post(dev, mbx, NULL);

	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to del q ");
	}
	return (ret);
}
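
/*
 * Illustrative usage sketch (editorial note, not driver code): callers
 * fill in the type-specific destroy request and hand the mailbox to
 * oce_destroy_q(), e.g. for an EQ (see oce_eq_del() above):
 *
 *	fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
 *	fwcmd->params.req.id = eq->eq_id;
 *	(void) oce_destroy_q(dev, &mbx,
 *	    sizeof (struct mbx_destroy_common_eq), QTYPE_EQ);
 */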

/*
 * function to set the delay parameter in the EQ for interrupt coalescing
 *
 * dev - software handle to the device
 * eq_arr - array of EQ ids whose delay is to be modified
 * eq_cnt - number of elements in eq_arr
 * eq_delay - delay parameter
 *
 * return DDI_SUCCESS => success, failure otherwise
 */
int
oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
    uint32_t eq_cnt, uint32_t eq_delay)
{
	struct oce_mbx mbx;
	struct mbx_modify_common_eq_delay *fwcmd;
	int ret;
	int neq;

	bzero(&mbx, sizeof (struct oce_mbx));
	fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;

	/* fill the command */
	fwcmd->params.req.num_eq = eq_cnt;
	for (neq = 0; neq < eq_cnt; neq++) {
		fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
		fwcmd->params.req.delay[neq].phase = 0;
		fwcmd->params.req.delay[neq].dm = eq_delay;
	}

	/* initialize the ioctl header */
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
	    MBX_SUBSYSTEM_COMMON,
	    OPCODE_MODIFY_COMMON_EQ_DELAY,
	    MBX_TIMEOUT_SEC,
	    sizeof (struct mbx_modify_common_eq_delay));

	/* fill rest of mbx */
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	/* post the command */
	ret = oce_mbox_post(dev, &mbx, NULL);
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "Failed to set EQ delay %d", ret);
	}

	return (ret);
} /* oce_set_eq_delay */

/*
 * function to drain residual events from an EQ, used during stop
 *
 * eq - pointer to event queue structure
 *
 * return none
 */
void
oce_drain_eq(struct oce_eq *eq)
{
	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	struct oce_dev *dev;

	dev = eq->parent;
	/* get the first item in eq to process */
	eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);

	while (eqe->u0.dw0) {
		eqe->u0.dw0 = LE_32(eqe->u0.dw0);

		/* clear valid bit */
		eqe->u0.dw0 = 0;

		/* process next eqe */
		RING_GET(eq->ring, 1);

		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		num_eqe++;
	} /* for all EQEs */
	if (num_eqe) {
		oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
	}
} /* oce_drain_eq */

int
oce_init_txrx(struct oce_dev *dev)
{
	int qid = 0;

	/* enable RSS if rx queues > 1 */
	dev->rss_enable = (dev->rx_rings > 1) ? B_TRUE : B_FALSE;

	for (qid = 0; qid < dev->tx_rings; qid++) {
		dev->wq[qid] = oce_wq_init(dev, dev->tx_ring_size,
		    NIC_WQ_TYPE_STANDARD);
		if (dev->wq[qid] == NULL) {
			goto queue_fail;
		}
	}

	/* Now create the Rx Queues */
	/* queue 0 is always the default (non-RSS) queue */
	dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, dev->rq_frag_size,
	    OCE_MAX_JUMBO_FRAME_SIZE, B_FALSE);
	if (dev->rq[0] == NULL) {
		goto queue_fail;
	}

	for (qid = 1; qid < dev->rx_rings; qid++) {
		dev->rq[qid] = oce_rq_init(dev, dev->rx_ring_size,
		    dev->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE,
		    dev->rss_enable);
		if (dev->rq[qid] == NULL) {
			goto queue_fail;
		}
	}

	return (DDI_SUCCESS);
queue_fail:
	oce_fini_txrx(dev);
	return (DDI_FAILURE);
}

void
oce_fini_txrx(struct oce_dev *dev)
{
	int qid;
	int nqs;

	/* free all the tx rings */
	/* nwqs is decremented in fini so copy count first */
	nqs = dev->nwqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->wq[qid] != NULL) {
			oce_wq_fini(dev, dev->wq[qid]);
			dev->wq[qid] = NULL;
		}
	}
	/* free all the rx rings */
	nqs = dev->nrqs;
	for (qid = 0; qid < nqs; qid++) {
		if (dev->rq[qid] != NULL) {
			oce_rq_fini(dev, dev->rq[qid]);
			dev->rq[qid] = NULL;
		}
	}
}

int
oce_create_queues(struct oce_dev *dev)
{
	int i;
	struct oce_eq *eq;
	struct oce_mq *mq;

	for (i = 0; i < dev->num_vectors; i++) {
		eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
		if (eq == NULL) {
			goto rings_fail;
		}
		dev->eq[i] = eq;
	}
	for (i = 0; i < dev->nwqs; i++) {
		if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
			goto rings_fail;
	}

	for (i = 0; i < dev->nrqs; i++) {
		if (oce_rq_create(dev->rq[i], dev->if_id,
		    dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
			goto rings_fail;
	}
	mq = oce_mq_create(dev, dev->eq[0], 64);
	if (mq == NULL)
		goto rings_fail;
	dev->mq = mq;
	return (DDI_SUCCESS);
rings_fail:
	oce_delete_queues(dev);
	return (DDI_FAILURE);
}
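
/*
 * Editorial note on the EQ-to-queue mapping above: all WQs and the
 * single MQ share eq[0]; when more than one EQ exists, each RQ i is
 * placed on eq[1 + i], otherwise everything shares eq[0].
 */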

void
oce_delete_queues(struct oce_dev *dev)
{
	int i;
	int neqs = dev->neqs;

	if (dev->mq != NULL) {
		oce_mq_del(dev, dev->mq);
		dev->mq = NULL;
	}

	for (i = 0; i < dev->nrqs; i++) {
		oce_rq_del(dev, dev->rq[i]);
	}
	for (i = 0; i < dev->nwqs; i++) {
		oce_wq_del(dev, dev->wq[i]);
	}
	/* delete as many eqs as the number of vectors */
	for (i = 0; i < neqs; i++) {
		oce_eq_del(dev, dev->eq[i]);
		dev->eq[i] = NULL;
	}
}

void
oce_dev_rss_ready(struct oce_dev *dev)
{
	uint8_t dev_index = 0;
	uint8_t adapter_rss = 0;

	/* Return if rx_rings <= 1 (No RSS) */
	if (dev->rx_rings <= 1) {
		oce_log(dev, CE_NOTE, MOD_CONFIG,
		    "Rx rings = %d, Not enabling RSS", dev->rx_rings);
		return;
	}

	/*
	 * Count the number of PCI functions enabling RSS on this
	 * adapter
	 */
	while (dev_index < MAX_DEVS) {
		if ((oce_dev_list[dev_index] != NULL) &&
		    (dev->pci_bus == oce_dev_list[dev_index]->pci_bus) &&
		    (dev->pci_device == oce_dev_list[dev_index]->pci_device) &&
		    (oce_dev_list[dev_index]->rss_enable)) {
			adapter_rss++;
		}
		dev_index++;
	}

	/*
	 * If there are already MAX_RSS_PER_ADAPTER PCI functions using
	 * RSS on this adapter, reduce the number of rx rings to 1
	 * (No RSS)
	 */
	if (adapter_rss >= MAX_RSS_PER_ADAPTER) {
		dev->rx_rings = 1;
	}
}