xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/oce/oce_tx.c (revision 3abb112f8485b33b6b9b52b340bede0a333c10bf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright © 2003-2011 Emulex. All rights reserved.  */
23 
24 /*
25  * Source file containing the implementation of the Transmit
26  * Path
27  */
28 
29 #include <oce_impl.h>
30 
31 static void oce_free_wqed(struct oce_wq *wq,  oce_wqe_desc_t *wqed);
32 static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
33     mblk_t *mp, uint32_t pkt_len);
34 static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
35     uint32_t pkt_len);
36 static void oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
37 static int oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq,
38     size_t size, int flags);
39 static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
40 static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
41 
42 static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
43 static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
44 static oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
45 static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
46 static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
47 static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
48 static void oce_remove_vtag(mblk_t *mp);
49 static void oce_insert_vtag(mblk_t  *mp, uint16_t vlan_tag);
50 static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
51 
52 
53 static ddi_dma_attr_t tx_map_dma_attr = {
54 	DMA_ATTR_V0,		/* version number */
55 	0x0000000000000000ull,	/* low address */
56 	0xFFFFFFFFFFFFFFFFull,	/* high address */
57 	0x0000000000010000ull,	/* dma counter max */
58 	OCE_TXMAP_ALIGN,	/* alignment */
59 	0x7FF,			/* burst sizes */
60 	0x00000001,		/* minimum transfer size */
61 	0x00000000FFFFFFFFull,	/* maximum transfer size */
62 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
63 	OCE_MAX_TXDMA_COOKIES,	/* scatter/gather list length */
64 	0x00000001,		/* granularity */
65 	DDI_DMA_FLAGERR		/* dma_attr_flags */
66 };
67 
68 
69 ddi_dma_attr_t oce_tx_dma_buf_attr = {
70 	DMA_ATTR_V0,		/* version number */
71 	0x0000000000000000ull,	/* low address */
72 	0xFFFFFFFFFFFFFFFFull,	/* high address */
73 	0x00000000FFFFFFFFull,	/* dma counter max */
74 	OCE_DMA_ALIGNMENT,	/* alignment */
75 	0x000007FF,		/* burst sizes */
76 	0x00000001,		/* minimum transfer size */
77 	0x00000000FFFFFFFFull,	/* maximum transfer size */
78 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
79 	1,			/* scatter/gather list length */
80 	0x00000001,		/* granularity */
81 	DDI_DMA_FLAGERR		/* dma_attr_flags */
82 };
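/*
 * tx_map_dma_attr governs the on-the-fly binds done in oce_map_wqe() and
 * allows up to OCE_MAX_TXDMA_COOKIES cookies per bind, while
 * oce_tx_dma_buf_attr is used here for the preallocated bcopy buffers and
 * requires a single contiguous segment.
 */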
83 
84 /*
85  * WQ map handle destructor
86  *
87  * wq - Pointer to WQ structure
88  * wqmd - pointer to WQE mapping handle descriptor
89  *
90  * return none
91  */
92 
93 static void
94 oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
95 {
96 	_NOTE(ARGUNUSED(wq));
97 	/* Free the DMA handle */
98 	if (wqmd->dma_handle != NULL)
99 		(void) ddi_dma_free_handle(&(wqmd->dma_handle));
100 	wqmd->dma_handle = NULL;
101 } /* oce_wqm_dtor */
102 
103 /*
104  * WQ map handle constructor
105  *
106  * wqmd - pointer to WQE mapping handle descriptor
107  * wq - Pointer to WQ structure
108  *
109  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
110  */
111 static int
112 oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
113 {
114 	struct oce_dev *dev;
115 	int ret;
116 
117 	dev = wq->parent;
118 	/* Allocate DMA handle */
119 	ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
120 	    DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);
121 
122 	return (ret);
123 } /* oce_wqm_ctor */
124 
125 /*
126  * function to create WQ mapping handles cache
127  *
128  * wq - pointer to WQ structure
129  *
130  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
131  */
132 int
133 oce_wqm_cache_create(struct oce_wq *wq)
134 {
135 	struct oce_dev *dev = wq->parent;
136 	int size;
137 	int cnt;
138 	int ret;
139 
140 	size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
141 	wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
142 	if (wq->wq_mdesc_array == NULL) {
143 		return (DDI_FAILURE);
144 	}
145 
146 	/* Create the free buffer list */
147 	OCE_LIST_CREATE(&wq->wq_mdesc_list, DDI_INTR_PRI(dev->intr_pri));
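	/*
	 * The list lock is created at the device interrupt priority since
	 * mapping descriptors are returned to this list from the Tx
	 * completion path, which may run in interrupt context.
	 */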
148 
149 	for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
150 		ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
151 		if (ret != DDI_SUCCESS) {
152 			goto wqm_fail;
153 		}
154 		OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list,
155 		    &wq->wq_mdesc_array[cnt]);
156 	}
157 	return (DDI_SUCCESS);
158 
159 wqm_fail:
160 	oce_wqm_cache_destroy(wq);
161 	return (DDI_FAILURE);
162 }
163 
164 /*
165  * function to destroy WQ mapping handles cache
166  *
167  * wq - pointer to WQ structure
168  *
169  * return none
170  */
171 void
172 oce_wqm_cache_destroy(struct oce_wq *wq)
173 {
174 	oce_wq_mdesc_t *wqmd;
175 
176 	while ((wqmd = OCE_LIST_REM_HEAD(&wq->wq_mdesc_list)) != NULL) {
177 		oce_wqm_dtor(wq, wqmd);
178 	}
179 
180 	kmem_free(wq->wq_mdesc_array,
181 	    wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));
182 
183 	OCE_LIST_DESTROY(&wq->wq_mdesc_list);
184 }
185 
186 /*
187  * function to create  WQ buffer cache
188  *
189  * wq - pointer to WQ structure
190  * buf_size - size of the buffer
191  *
192  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
193  */
194 int
195 oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
196 {
197 	struct oce_dev *dev = wq->parent;
198 	int size;
199 	int cnt;
200 	int ret;
201 
202 	size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
203 	wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
204 	if (wq->wq_bdesc_array == NULL) {
205 		return (DDI_FAILURE);
206 	}
207 
208 	/* Create the free buffer list */
209 	OCE_LIST_CREATE(&wq->wq_buf_list, DDI_INTR_PRI(dev->intr_pri));
210 
211 	for (cnt = 0; cnt <  wq->cfg.nbufs; cnt++) {
212 		ret = oce_wqb_ctor(&wq->wq_bdesc_array[cnt],
213 		    wq, buf_size, DDI_DMA_STREAMING);
214 		if (ret != DDI_SUCCESS) {
215 			goto wqb_fail;
216 		}
217 		OCE_LIST_INSERT_TAIL(&wq->wq_buf_list,
218 		    &wq->wq_bdesc_array[cnt]);
219 	}
220 	return (DDI_SUCCESS);
221 
222 wqb_fail:
223 	oce_wqb_cache_destroy(wq);
224 	return (DDI_FAILURE);
225 }
226 
227 /*
228  * function to destroy WQ buffer cache
229  *
230  * wq - pointer to WQ structure
231  *
232  * return none
233  */
234 void
235 oce_wqb_cache_destroy(struct oce_wq *wq)
236 {
237 	oce_wq_bdesc_t *wqbd;
238 	while ((wqbd = OCE_LIST_REM_HEAD(&wq->wq_buf_list)) != NULL) {
239 		oce_wqb_dtor(wq, wqbd);
240 	}
241 	kmem_free(wq->wq_bdesc_array,
242 	    wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
243 	OCE_LIST_DESTROY(&wq->wq_buf_list);
244 }
245 
246 /*
247  * WQ buffer constructor
248  *
249  * wqbd - pointer to WQ buffer descriptor
250  * wq - pointer to WQ structure
251  * size - size of the buffer
252  * flags - DDI DMA flags (the caller passes DDI_DMA_STREAMING)
253  *
254  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
255  */
256 static int
257 oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq, size_t size, int flags)
258 {
259 	struct oce_dev *dev;
260 	dev = wq->parent;
261 
262 	wqbd->wqb = oce_alloc_dma_buffer(dev, size, &oce_tx_dma_buf_attr,
263 	    flags);
264 	if (wqbd->wqb == NULL) {
265 		return (DDI_FAILURE);
266 	}
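	/*
	 * Record the buffer's DMA address split into high and low words so
	 * the Tx path can fill fragment descriptors without recomputing it.
	 */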
267 	wqbd->frag_addr.dw.addr_lo = ADDR_LO(wqbd->wqb->addr);
268 	wqbd->frag_addr.dw.addr_hi = ADDR_HI(wqbd->wqb->addr);
269 	return (DDI_SUCCESS);
270 }
271 
272 /*
273  * WQ buffer destructor
274  *
275  * wq - pointer to WQ structure
276  * wqbd - pointer to WQ buffer descriptor
277  *
278  * return none
279  */
280 static void
281 oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
282 {
283 	oce_free_dma_buffer(wq->parent, wqbd->wqb);
284 }
285 
286 /*
287  * function to allocate a WQE buffer descriptor
288  *
289  * wq - pointer to WQ structure
290  *
291  * return pointer to WQE buffer descriptor
292  */
293 static inline oce_wq_bdesc_t *
294 oce_wqb_alloc(struct oce_wq *wq)
295 {
296 	return (OCE_LIST_REM_HEAD(&wq->wq_buf_list));
297 }
298 
299 /*
300  * function to free a WQE buffer descriptor
301  *
302  * wq - pointer to WQ structure
303  * wqbd - pointer to WQ buffer descriptor
304  *
305  * return none
306  */
307 static inline void
308 oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
309 {
310 	OCE_LIST_INSERT_TAIL(&wq->wq_buf_list, wqbd);
311 } /* oce_wqb_free */
312 
313 /*
314  * function to allocate a WQE mapping descriptor
315  *
316  * wq - pointer to WQ structure
317  *
318  * return pointer to WQE mapping descriptor
319  */
320 static inline oce_wq_mdesc_t *
321 oce_wqm_alloc(struct oce_wq *wq)
322 {
323 	return (OCE_LIST_REM_HEAD(&wq->wq_mdesc_list));
324 } /* oce_wqm_alloc */
325 
326 /*
327  * function to insert a WQE mapping descriptor into the free list
328  *
329  * wq - pointer to WQ structure
330  * wqmd - Pointer to WQ mapping descriptor
331  *
332  * return none
333  */
334 static inline void
335 oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
336 {
337 	OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list, wqmd);
338 }
339 
340 /*
341  * function to unbind and free a WQE mapping descriptor
342  *
343  * wq - pointer to WQ structure
344  * wqmd - Pointer to WQ mapping descriptor
345  *
346  * return none
347  */
348 static void
349 oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
350 {
351 	if (wqmd == NULL) {
352 		return;
353 	}
354 	(void) ddi_dma_unbind_handle(wqmd->dma_handle);
355 	oce_wqm_free(wq, wqmd);
356 }
357 
358 /*
359  * WQED kmem_cache constructor
360  *
361  * buf - pointer to WQE descriptor
362  *
363  * return DDI_SUCCESS
364  */
365 int
366 oce_wqe_desc_ctor(void *buf, void *arg, int kmflags)
367 {
368 	_NOTE(ARGUNUSED(buf));
369 	_NOTE(ARGUNUSED(arg));
370 	_NOTE(ARGUNUSED(kmflags));
371 
372 	return (DDI_SUCCESS);
373 }
374 
375 /*
376  * WQED kmem_cache destructor
377  *
378  * buf - pointer to WQE descriptor
379  *
380  * return none
381  */
382 void
383 oce_wqe_desc_dtor(void *buf, void *arg)
384 {
385 	_NOTE(ARGUNUSED(buf));
386 	_NOTE(ARGUNUSED(arg));
387 }
388 
389 /*
390  * function to choose a WQ for an mblk based on its packet hash
391  *
392  * dev - software handle to device
393  * mp - the mblk to send
394  *
395  * return pointer to the WQ selected
396  */
397 static uint8_t oce_tx_hash_policy = 0x4;
398 struct oce_wq *
399 oce_get_wq(struct oce_dev *dev, mblk_t *mp)
400 {
401 	struct oce_wq *wq;
402 	int qidx = 0;
403 	if (dev->nwqs > 1) {
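		/*
		 * Hash the packet headers and spread flows across the
		 * available WQs; oce_tx_hash_policy selects which headers
		 * mac_pkt_hash() considers.
		 */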
404 		qidx = mac_pkt_hash(DL_ETHER, mp, oce_tx_hash_policy, B_TRUE);
405 		qidx = qidx % dev->nwqs;
406 
407 	} else {
408 		qidx = 0;
409 	}
410 	wq = dev->wq[qidx];
411 	/* return the WQ selected for this flow */
412 	return (wq);
413 } /* oce_get_wq */
414 
415 /*
416  * function to copy the WQE descriptors into the WQ ring
417  *
418  * wq - pointer to wq
419  * wqed - pointer to WQ entry  descriptor
420  *
421  * return none
422  */
423 #pragma inline(oce_fill_ring_descs)
424 static void
425 oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
426 {
427 
428 	struct oce_nic_frag_wqe *wqe;
429 	int i;
430 	/* Copy the precreated WQE descs to the ring descriptors */
431 	for (i = 0; i < wqed->wqe_cnt; i++) {
432 		wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
433 		    struct oce_nic_frag_wqe);
434 
435 		bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);
436 		RING_PUT(wq->ring, 1);
437 	}
438 } /* oce_fill_ring_descs */
439 
440 /*
441  * function to copy the packet to preallocated Tx buffer
442  *
443  * wq - pointer to WQ
444  * wqed - Pointer to WQE descriptor
445  * mp - Pointer to packet chain
446  * pktlen - Size of the packet
447  *
448  * return 0=>success, error code otherwise
449  */
450 static int
451 oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
452     uint32_t pkt_len)
453 {
454 	oce_wq_bdesc_t *wqbd;
455 	caddr_t buf_va;
456 	struct oce_dev *dev = wq->parent;
457 	int len = 0;
458 
459 	wqbd = oce_wqb_alloc(wq);
460 	if (wqbd == NULL) {
461 		atomic_inc_32(&dev->tx_noxmtbuf);
462 		oce_log(dev, CE_WARN, MOD_TX, "%s",
463 		    "wqb pool empty");
464 		return (ENOMEM);
465 	}
466 
467 	/* create a fragment wqe for the packet */
468 	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
469 	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
470 	buf_va = DBUF_VA(wqbd->wqb);
471 
472 	/* copy pkt into buffer */
473 	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
474 		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
475 		buf_va += MBLKL(mp);
476 		len += MBLKL(mp);
477 	}
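	/*
	 * The data now sits in a single preallocated DMA buffer, so one
	 * fragment descriptor covers everything copied above.
	 */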
478 
479 	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
480 	    DDI_DMA_SYNC_FORDEV);
481 
482 	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
483 		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
484 		/* Free the buffer */
485 		oce_wqb_free(wq, wqbd);
486 		return (EIO);
487 	}
488 	wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;
489 	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
490 	wqed->hdesc[wqed->nhdl].type = COPY_WQE;
491 	wqed->frag_cnt++;
492 	wqed->frag_idx++;
493 	wqed->nhdl++;
494 	return (0);
495 } /* oce_bcopy_wqe */
496 
497 /*
498  * function to DMA map a packet fragment on the fly
499  *
500  * wq - pointer to WQ
501  * wqed - Pointer to WQE descriptor
502  * mp - Pointer to packet chain
503  *
504  * return 0=>success, error code otherwise
505  */
506 static  int
507 oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
508     uint32_t pkt_len)
509 {
510 	ddi_dma_cookie_t cookie;
511 	oce_wq_mdesc_t *wqmd;
512 	uint32_t ncookies;
513 	int ret;
514 	struct oce_dev *dev = wq->parent;
515 
516 	wqmd = oce_wqm_alloc(wq);
517 	if (wqmd == NULL) {
518 		oce_log(dev, CE_WARN, MOD_TX, "%s",
519 		    "wqm pool empty");
520 		return (ENOMEM);
521 	}
522 
523 	ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
524 	    (struct as *)0, (caddr_t)mp->b_rptr,
525 	    pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
526 	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
527 	if (ret != DDI_DMA_MAPPED) {
528 		oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
529 		    ret);
530 		/* free the last one */
531 		oce_wqm_free(wq, wqmd);
532 		return (ENOMEM);
533 	}
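	/*
	 * Each DMA cookie returned by the bind becomes one fragment WQE,
	 * so a single mblk may consume several WQ entries (bounded by
	 * OCE_MAX_TXDMA_COOKIES in tx_map_dma_attr).
	 */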
534 	do {
535 		wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
536 		    ADDR_HI(cookie.dmac_laddress);
537 		wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
538 		    ADDR_LO(cookie.dmac_laddress);
539 		wqed->frag[wqed->frag_idx].u0.s.frag_len =
540 		    (uint32_t)cookie.dmac_size;
541 		wqed->frag_cnt++;
542 		wqed->frag_idx++;
543 		if (--ncookies > 0)
544 			ddi_dma_nextcookie(wqmd->dma_handle, &cookie);
545 		else
546 			break;
547 	} while (ncookies > 0);
548 
549 	wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
550 	wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
551 	wqed->nhdl++;
552 	return (0);
553 } /* oce_map_wqe */
554 
555 static inline int
556 oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
557 {
558 	struct oce_nic_tx_cqe *cqe;
559 	uint16_t num_cqe = 0;
560 	struct oce_cq *cq;
561 	oce_wqe_desc_t *wqed;
562 	int wqe_freed = 0;
563 	struct oce_dev *dev;
564 
565 	cq  = wq->cq;
566 	dev = wq->parent;
567 	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
568 	    DDI_DMA_SYNC_FORKERNEL);
569 
570 	mutex_enter(&wq->txc_lock);
571 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
572 	while (WQ_CQE_VALID(cqe)) {
573 
574 		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));
575 
576 		/* update stats */
577 		if (cqe->u0.s.status != 0) {
578 			atomic_inc_32(&dev->tx_errors);
579 		}
580 
581 		/* complete the WQEs */
582 		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);
583 
584 		wqe_freed = wqed->wqe_cnt;
585 		oce_free_wqed(wq, wqed);
586 		RING_GET(wq->ring, wqe_freed);
587 		atomic_add_32(&wq->wq_free, wqe_freed);
588 		/* clear the valid bit and progress cqe */
589 		WQ_CQE_INVALIDATE(cqe);
590 		RING_GET(cq->ring, 1);
591 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
592 		    struct oce_nic_tx_cqe);
593 		num_cqe++;
594 	} /* for all valid CQE */
595 	mutex_exit(&wq->txc_lock);
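	/*
	 * Credit the processed CQEs back to the completion queue and,
	 * if requested, re-arm it so further completions raise events.
	 */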
596 	if (num_cqe)
597 		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
598 	return (num_cqe);
599 } /* oce_process_tx_compl */
600 
601 /*
602  * function to drain a TxCQ and process its CQEs
603  *
604  * arg - pointer to the WQ whose completion queue is to be drained
605  * (passed as the void * callback argument)
606  *
607  * return the number of CQEs processed
608  */
609 uint16_t
610 oce_drain_wq_cq(void *arg)
611 {
612 	uint16_t num_cqe = 0;
613 	struct oce_dev *dev;
614 	struct oce_wq *wq;
615 
616 	wq = (struct oce_wq *)arg;
617 	dev = wq->parent;
618 
619 	/* process CQEs until we reach one that is not valid */
620 	num_cqe = oce_process_tx_compl(wq, B_FALSE);
621 
622 	/* check if we need to restart Tx */
623 	if (wq->resched && num_cqe) {
624 		wq->resched = B_FALSE;
625 		mac_tx_update(dev->mac_handle);
626 	}
627 
628 	return (num_cqe);
629 } /* oce_drain_wq_cq */
630 
631 /*
632  * function to insert vtag to packet
633  *
634  * mp - mblk pointer
635  * vlan_tag - tag to be inserted
636  *
637  * return none
638  */
639 static inline void
640 oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag)
641 {
642 	struct ether_vlan_header  *evh;
643 	(void) memmove(mp->b_rptr - VTAG_SIZE,
644 	    mp->b_rptr, 2 * ETHERADDRL);
645 	mp->b_rptr -= VTAG_SIZE;
646 	evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
647 	evh->ether_tpid = htons(VLAN_TPID);
648 	evh->ether_tci = htons(vlan_tag);
649 }
650 
651 /*
652  * function to strip  vtag from packet
653  *
654  * mp - mblk pointer
655  *
656  * return none
657  */
658 
659 static inline void
660 oce_remove_vtag(mblk_t *mp)
661 {
662 	(void) memmove(mp->b_rptr + VTAG_SIZE, mp->b_rptr,
663 	    ETHERADDRL * 2);
664 	mp->b_rptr += VTAG_SIZE;
665 }
666 
667 /*
668  * function to transmit a single packet over the wire
669  *
670  * wq - pointer to WQ
671  * mp - Pointer to packet chain
672  *
673  * return NULL if the packet was consumed, or mp if it must be retried
674  */
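/*
 * Sketch of how a GLDv3 transmit entry point is expected to use this
 * function; the real caller lives outside this file, so the snippet below
 * is illustrative only:
 *
 *	wq = oce_get_wq(dev, mp);
 *	rmp = oce_send_packet(wq, mp);
 *
 * A NULL return means the packet was consumed (sent or dropped).  A non-NULL
 * return hands the mblk back because the WQ was full; the caller should keep
 * it and retry after a Tx completion calls mac_tx_update().
 */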
675 mblk_t *
676 oce_send_packet(struct oce_wq *wq, mblk_t *mp)
677 {
678 	struct oce_nic_hdr_wqe *wqeh;
679 	struct oce_dev *dev;
680 	struct ether_header *eh;
681 	struct ether_vlan_header *evh;
682 	int32_t num_wqes;
683 	uint16_t etype;
684 	uint32_t ip_offset;
685 	uint32_t csum_flags = 0;
686 	boolean_t use_copy = B_FALSE;
687 	boolean_t tagged   = B_FALSE;
688 	uint16_t  vlan_tag;
689 	uint32_t  reg_value = 0;
690 	oce_wqe_desc_t *wqed = NULL;
691 	mblk_t *nmp = NULL;
692 	mblk_t *tmp = NULL;
693 	uint32_t pkt_len = 0;
694 	int num_mblks = 0;
695 	int ret = 0;
696 	uint32_t mss = 0;
697 	uint32_t flags = 0;
698 	int len = 0;
699 
700 	/* retrieve the adap priv struct ptr */
701 	dev = wq->parent;
702 
703 	/* check if we have enough free slots */
704 	if (wq->wq_free < dev->tx_reclaim_threshold) {
705 		(void) oce_process_tx_compl(wq, B_FALSE);
706 	}
707 	if (wq->wq_free < OCE_MAX_TX_HDL) {
708 		return (mp);
709 	}
710 
711 	/* compute packet length and fragment count to decide copy vs. map */
712 	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
713 		pkt_len += MBLKL(tmp);
714 		num_mblks++;
715 	}
716 
717 	if (pkt_len == 0 || num_mblks == 0) {
718 		freemsg(mp);
719 		return (NULL);
720 	}
721 
722 	/* retrieve LSO information */
723 	mac_lso_get(mp, &mss, &flags);
724 
725 	/* get the offload flags */
726 	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);
727 
728 	/* restrict the mapped segments to what we support */
729 	if (num_mblks  > OCE_MAX_TX_HDL) {
730 		nmp = msgpullup(mp, -1);
731 		if (nmp == NULL) {
732 			atomic_inc_32(&wq->pkt_drops);
733 			freemsg(mp);
734 			return (NULL);
735 		}
736 		/* Reset it to new collapsed mp */
737 		freemsg(mp);
738 		mp = nmp;
739 	}
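	/*
	 * The collapsed message is a single mblk, so it now fits within the
	 * OCE_MAX_TX_HDL handle limit enforced above.
	 */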
740 
741 	/* Get the packet descriptor for Tx */
742 	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
743 	if (wqed == NULL) {
744 		atomic_inc_32(&wq->pkt_drops);
745 		freemsg(mp);
746 		return (NULL);
747 	}
748 	eh = (struct ether_header *)(void *)mp->b_rptr;
749 	if (ntohs(eh->ether_type) == VLAN_TPID) {
750 		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
751 		tagged = B_TRUE;
752 		etype = ntohs(evh->ether_type);
753 		ip_offset = sizeof (struct ether_vlan_header);
754 		pkt_len -= VTAG_SIZE;
755 		vlan_tag = ntohs(evh->ether_tci);
756 		oce_remove_vtag(mp);
757 	} else {
758 		etype = ntohs(eh->ether_type);
759 		ip_offset = sizeof (struct ether_header);
760 	}
761 
762 	/* Save the WQ pointer */
763 	wqed->wq = wq;
764 	wqed->frag_idx = 1; /* index zero is always header */
765 	wqed->frag_cnt = 0;
766 	wqed->nhdl = 0;
767 	wqed->mp = NULL;
768 	OCE_LIST_LINK_INIT(&wqed->link);
769 
770 	/* If the entire packet is smaller than the copy limit, just copy it */
771 	if (pkt_len < dev->tx_bcopy_limit) {
772 		use_copy = B_TRUE;
773 		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
774 	} else {
775 		/* copy or dma map the individual fragments */
776 		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
777 			len = MBLKL(nmp);
778 			if (len == 0) {
779 				continue;
780 			}
781 			if (len < dev->tx_bcopy_limit) {
782 				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
783 			} else {
784 				ret = oce_map_wqe(wq, wqed, nmp, len);
785 			}
786 			if (ret != 0)
787 				break;
788 		}
789 	}
790 
791 	/*
792 	 * If any fragment could not be copied or mapped,
793 	 * drop the packet
794 	 */
795 	if (ret != 0) {
796 		oce_free_wqed(wq, wqed);
797 		atomic_inc_32(&wq->pkt_drops);
798 		freemsg(mp);
799 		return (NULL);
800 	}
801 
802 	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
803 	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));
804 
805 	/* fill rest of wqe header fields based on packet */
806 	if (flags & HW_LSO) {
807 		wqeh->u0.s.lso = B_TRUE;
808 		wqeh->u0.s.lso_mss = mss;
809 	}
810 	if (csum_flags & HCK_FULLCKSUM) {
811 		uint8_t *proto;
812 		if (etype == ETHERTYPE_IP) {
813 			proto = (uint8_t *)(void *)
814 			    (mp->b_rptr + ip_offset);
815 			if (proto[9] == 6)
816 				/* IPPROTO_TCP */
817 				wqeh->u0.s.tcpcs = B_TRUE;
818 			else if (proto[9] == 17)
819 				/* IPPROTO_UDP */
820 				wqeh->u0.s.udpcs = B_TRUE;
821 		}
822 	}
823 
824 	if (csum_flags & HCK_IPV4_HDRCKSUM)
825 		wqeh->u0.s.ipcs = B_TRUE;
826 	if (tagged) {
827 		wqeh->u0.s.vlan = B_TRUE;
828 		wqeh->u0.s.vlan_tag = vlan_tag;
829 	}
830 
831 	wqeh->u0.s.complete = B_TRUE;
832 	wqeh->u0.s.event = B_TRUE;
833 	wqeh->u0.s.crc = B_TRUE;
834 	wqeh->u0.s.total_length = pkt_len;
835 
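	/* the extra WQE accounts for the header descriptor kept in frag[0] */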
836 	num_wqes = wqed->frag_cnt + 1;
837 
838 	/* h/w expects even no. of WQEs */
839 	if (num_wqes & 0x1) {
840 		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
841 		num_wqes++;
842 	}
843 	wqed->wqe_cnt = (uint16_t)num_wqes;
844 	wqeh->u0.s.num_wqe = num_wqes;
845 	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
846 
847 	mutex_enter(&wq->tx_lock);
848 	if (num_wqes > wq->wq_free) {
849 		atomic_inc_32(&wq->tx_deferd);
850 		mutex_exit(&wq->tx_lock);
851 		goto wqe_fail;
852 	}
853 	atomic_add_32(&wq->wq_free, -num_wqes);
854 
855 	/* fill the wq for adapter */
856 	oce_fill_ring_descs(wq, wqed);
857 
858 	/* Set the mp pointer in the wqe descriptor */
859 	if (use_copy == B_FALSE) {
860 		wqed->mp = mp;
861 	}
862 	/* Add the packet desc to list to be retrieved during cmpl */
863 	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
864 	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
865 	    DDI_DMA_SYNC_FORDEV);
866 
867 	/* doorbell value: number of WQEs posted and the WQ id */
868 	reg_value = (num_wqes << 16) | wq->wq_id;
869 	/* Ring the doorbell */
870 	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
871 	mutex_exit(&wq->tx_lock);
872 	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
873 		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
874 	}
875 
876 	/* free mp now if its contents were copied into the Tx buffers */
877 	if (use_copy == B_TRUE) {
878 		freemsg(mp);
879 	}
880 	return (NULL);
881 
882 wqe_fail:
883 
884 	if (tagged) {
885 		oce_insert_vtag(mp, vlan_tag);
886 	}
887 	oce_free_wqed(wq, wqed);
888 	return (mp);
889 } /* oce_send_packet */
890 
891 /*
892  * function to free the WQE descriptor
893  *
894  * wq - pointer to WQ
895  * wqed - Pointer to WQE descriptor
896  *
897  * return none
898  */
899 #pragma inline(oce_free_wqed)
900 static void
901 oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
902 {
903 	int i = 0;
904 	if (wqed == NULL) {
905 		return;
906 	}
907 
908 	for (i = 0; i < wqed->nhdl; i++) {
909 		if (wqed->hdesc[i].type == COPY_WQE) {
910 			oce_wqb_free(wq, wqed->hdesc[i].hdl);
911 		} else if (wqed->hdesc[i].type == MAPPED_WQE) {
912 			oce_wqmd_free(wq, wqed->hdesc[i].hdl);
913 		}
914 	}
915 	if (wqed->mp)
916 		freemsg(wqed->mp);
917 	kmem_cache_free(wq->wqed_cache, wqed);
918 } /* oce_free_wqed */
919 
920 /*
921  * function to start the WQ
922  *
923  * wq - pointer to WQ
924  *
925  * return DDI_SUCCESS
926  */
927 
928 int
929 oce_start_wq(struct oce_wq *wq)
930 {
931 	_NOTE(ARGUNUSED(wq));
932 	return (DDI_SUCCESS);
933 } /* oce_start_wq */
934 
935 /*
936  * function to stop the WQ and drain pending transmits
937  *
938  * wq - pointer to WQ
939  *
940  * return none
941  */
942 void
943 oce_clean_wq(struct oce_wq *wq)
944 {
945 	oce_wqe_desc_t *wqed;
946 	int ti;
947 
948 	/* Wait for already posted Tx to complete */
949 
950 	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
951 		(void) oce_process_tx_compl(wq, B_FALSE);
952 		OCE_MSDELAY(1);
953 	}
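	/*
	 * Anything still outstanding after roughly DEFAULT_DRAIN_TIME
	 * milliseconds is reclaimed below without waiting for the hardware.
	 */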
954 
955 	/* Free the remaining descriptors */
956 	while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) {
957 		atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
958 		oce_free_wqed(wq, wqed);
959 	}
960 	oce_drain_eq(wq->cq->eq);
961 } /* oce_clean_wq */
962 
963 /*
964  * function to set the tx mapping handle fma attr
965  *
966  * fm_caps - capability flags
967  *
968  * return none
969  */
970 
971 void
972 oce_set_tx_map_dma_fma_flags(int fm_caps)
973 {
974 	if (fm_caps == DDI_FM_NOT_CAPABLE) {
975 		return;
976 	}
977 
978 	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
979 		tx_map_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
980 	} else {
981 		tx_map_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
982 	}
983 } /* oce_set_tx_map_dma_fma_flags */
984