/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>

extern uint32_t hxge_reclaim_pending;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern uint32_t hxge_tx_minfree;
extern uint32_t hxge_tx_intr_thres;
extern uint32_t hxge_tx_max_gathers;
extern uint32_t hxge_tx_tiny_pack;
extern uint32_t hxge_tx_use_bcopy;

static int hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp);

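/*
 * Taskq callback: reclaim completed transmit descriptors, then notify
 * the MAC layer that this ring may have room again.
 */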
void
hxge_tx_ring_task(void *arg)
{
	p_tx_ring_t ring = (p_tx_ring_t)arg;

	MUTEX_ENTER(&ring->lock);
	(void) hxge_txdma_reclaim(ring->hxgep, ring, 0);
	MUTEX_EXIT(&ring->lock);

	mac_tx_ring_update(ring->hxgep->mach, ring->ring_handle);
}

static void
hxge_tx_ring_dispatch(p_tx_ring_t ring)
{
	/*
	 * Kick the ring task to reclaim some buffers.
	 */
	(void) ddi_taskq_dispatch(ring->taskq,
	    hxge_tx_ring_task, (void *)ring, DDI_SLEEP);
}

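/*
 * GLDv3 ring transmit entry point.  Returns NULL when the packet has
 * been consumed, or mp itself when no descriptors are available, in
 * which case the reclaim task is dispatched and the framework will
 * retry the packet later.
 */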
mblk_t *
hxge_tx_ring_send(void *arg, mblk_t *mp)
{
	p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)arg;
	p_hxge_t hxgep;
	p_tx_ring_t tx_ring_p;
	int status;

	ASSERT(rhp != NULL);
	ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));

	hxgep = rhp->hxgep;
	tx_ring_p = hxgep->tx_rings->rings[rhp->index];
	ASSERT(hxgep == tx_ring_p->hxgep);

	status = hxge_start(hxgep, tx_ring_p, mp);
	if (status != 0) {
		hxge_tx_ring_dispatch(tx_ring_p);
		return (mp);
	}

	return ((mblk_t *)NULL);
}

static int
hxge_start(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
{
	int dma_status, status = 0;
	p_tx_desc_t tx_desc_ring_vp;
	hpi_handle_t hpi_desc_handle;
	hxge_os_dma_handle_t tx_desc_dma_handle;
	p_tx_desc_t tx_desc_p;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	tx_desc_t tx_desc, *tmp_desc_p;
	tx_desc_t sop_tx_desc, *sop_tx_desc_p;
	p_tx_pkt_header_t hdrp;
	p_tx_pkt_hdr_all_t pkthdrp;
	uint8_t npads = 0;
	uint64_t dma_ioaddr;
	uint32_t dma_flags;
	int last_bidx;
	uint8_t *b_rptr;
	caddr_t kaddr;
	uint32_t nmblks;
	uint32_t ngathers;
	uint32_t clen;
	int len;
	uint32_t pkt_len, pack_len, min_len;
	uint32_t bcopy_thresh;
	int i, cur_index, sop_index;
	uint16_t tail_index;
	boolean_t tail_wrap = B_FALSE;
	hxge_dma_common_t desc_area;
	hxge_os_dma_handle_t dma_handle;
	ddi_dma_cookie_t dma_cookie;
	hpi_handle_t hpi_handle;
	p_mblk_t nmp;
	p_mblk_t t_mp;
	uint32_t ncookies;
	boolean_t good_packet;
	boolean_t mark_mode = B_FALSE;
	p_hxge_stats_t statsp;
	p_hxge_tx_ring_stats_t tdc_stats;
	t_uscalar_t start_offset = 0;
	t_uscalar_t stuff_offset = 0;
	t_uscalar_t end_offset = 0;
	t_uscalar_t value = 0;
	t_uscalar_t cksum_flags = 0;
	boolean_t cksum_on = B_FALSE;
	uint32_t boff = 0;
	uint64_t tot_xfer_len = 0, tmp_len = 0;
	boolean_t header_set = B_FALSE;
	tdc_tdr_kick_t kick;
	uint32_t offset;
#ifdef HXGE_DEBUG
	p_tx_desc_t tx_desc_ring_pp;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t *save_desc_p;
	int dump_len;
	int sad_len;
	uint64_t sad;
	int xfer_len;
	uint32_t msgsize;
#endif

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: tx dma channel %d", tx_ring_p->tdc));
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: Starting tdc %d desc pending %d",
	    tx_ring_p->tdc, tx_ring_p->descs_pending));

	statsp = hxgep->statsp;

	if (hxgep->statsp->port_stats.lb_mode == hxge_lb_normal) {
		if (!statsp->mac_stats.link_up) {
			freemsg(mp);
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
			    "pkt dropped: link is down"));
			goto hxge_start_fail1;
		}
	}

	mac_hcksum_get(mp, &start_offset, &stuff_offset, &end_offset, &value,
	    &cksum_flags);
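	/*
	 * The partial-checksum start/stuff offsets from mac_hcksum_get()
	 * are relative to the start of the L3 header; add the MAC header
	 * length to make them frame-relative.
	 */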
	if (!HXGE_IS_VLAN_PACKET(mp->b_rptr)) {
		start_offset += sizeof (ether_header_t);
		stuff_offset += sizeof (ether_header_t);
	} else {
		start_offset += sizeof (struct ether_vlan_header);
		stuff_offset += sizeof (struct ether_vlan_header);
	}

	if (cksum_flags & HCK_PARTIALCKSUM) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: mp $%p len %d "
		    "cksum_flags 0x%x (partial checksum) ",
		    mp, MBLKL(mp), cksum_flags));
		cksum_on = B_TRUE;
	}

	MUTEX_ENTER(&tx_ring_p->lock);
start_again:
	ngathers = 0;
	sop_index = tx_ring_p->wr_index;
#ifdef HXGE_DEBUG
	if (tx_ring_p->descs_pending) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: desc pending %d ",
		    tx_ring_p->descs_pending));
	}

	dump_len = (int)(MBLKL(mp));
	dump_len = (dump_len > 128) ? 128 : dump_len;

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: tdc %d: dumping ...: b_rptr $%p "
	    "(Before header reserve: ORIGINAL LEN %d)",
	    tx_ring_p->tdc, mp->b_rptr, dump_len));

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: dump packets (IP ORIGINAL b_rptr $%p): %s",
	    mp->b_rptr, hxge_dump_packet((char *)mp->b_rptr, dump_len)));
#endif

	tdc_stats = tx_ring_p->tdc_stats;
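	/*
	 * When the ring is running low on free descriptors, request a
	 * completion mark for this packet; mark_mode is propagated into
	 * the SOP descriptor's mark bit below.
	 */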
	mark_mode = (tx_ring_p->descs_pending &&
	    ((tx_ring_p->tx_ring_size - tx_ring_p->descs_pending) <
	    hxge_tx_minfree));

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "TX Descriptor ring is channel %d mark mode %d",
	    tx_ring_p->tdc, mark_mode));

	if (!hxge_txdma_reclaim(hxgep, tx_ring_p, hxge_tx_minfree)) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "TX Descriptor ring is full: channel %d", tx_ring_p->tdc));
		(void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing, 0, 1);
		tdc_stats->tx_no_desc++;
		MUTEX_EXIT(&tx_ring_p->lock);
		status = 1;
		goto hxge_start_fail1;
	}

	nmp = mp;
	i = sop_index = tx_ring_p->wr_index;
	nmblks = 0;
	ngathers = 0;
	pkt_len = 0;
	pack_len = 0;
	clen = 0;
	last_bidx = -1;
	good_packet = B_TRUE;

	desc_area = tx_ring_p->tdc_desc;
	hpi_handle = desc_area.hpi_handle;
	hpi_desc_handle.regh = (hxge_os_acc_handle_t)
	    DMA_COMMON_ACC_HANDLE(desc_area);
	hpi_desc_handle.hxgep = hxgep;
	tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
#ifdef HXGE_DEBUG
	tx_desc_ring_pp = (p_tx_desc_t)DMA_COMMON_IOADDR(desc_area);
#endif
	tx_desc_dma_handle = (hxge_os_dma_handle_t)DMA_COMMON_HANDLE(desc_area);
	tx_msg_ring = tx_ring_p->tx_msg_ring;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: wr_index %d i %d",
	    sop_index, i));

#ifdef HXGE_DEBUG
	msgsize = msgdsize(nmp);
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start(1): wr_index %d i %d msgdsize %d",
	    sop_index, i, msgsize));
#endif
	/*
	 * The first 16 bytes of the premapped buffer are reserved
	 * for header. No padding will be used.
	 */
	pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
	if (hxge_tx_use_bcopy) {
		bcopy_thresh = (hxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
	} else {
		bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
	}
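	/*
	 * Copy-versus-bind strategy: fragments shorter than bcopy_thresh
	 * are bcopy'ed into the ring's preallocated, premapped buffers;
	 * longer fragments are DMA-bound in place with
	 * ddi_dma_addr_bind_handle() below.
	 */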
	while (nmp) {
		good_packet = B_TRUE;
		b_rptr = nmp->b_rptr;
		len = MBLKL(nmp);
		if (len <= 0) {
			nmp = nmp->b_cont;
			continue;
		}
		nmblks++;

		HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(1): nmblks %d "
		    "len %d pkt_len %d pack_len %d",
		    nmblks, len, pkt_len, pack_len));
		/*
		 * Hardware limits the transfer length to 4K.
		 * If len is more than 4K, we need to break
		 * nmp into two chunks: Make first chunk smaller
		 * than 4K. The second chunk will be broken into
		 * less than 4K (if needed) during the next pass.
		 */
		if (len > (TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE)) {
			if ((t_mp = dupb(nmp)) != NULL) {
				nmp->b_wptr = nmp->b_rptr +
				    (TX_MAX_TRANSFER_LENGTH -
				    TX_PKT_HEADER_SIZE);
				t_mp->b_rptr = nmp->b_wptr;
				t_mp->b_cont = nmp->b_cont;
				nmp->b_cont = t_mp;
				len = MBLKL(nmp);
			} else {
				good_packet = B_FALSE;
				goto hxge_start_fail2;
			}
		}
		tx_desc.value = 0;
		tx_desc_p = &tx_desc_ring_vp[i];
#ifdef HXGE_DEBUG
		tx_desc_pp = &tx_desc_ring_pp[i];
#endif
		tx_msg_p = &tx_msg_ring[i];
		hpi_desc_handle.regp = (uint64_t)tx_desc_p;
		if (!header_set &&
		    ((!hxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
		    (len >= bcopy_thresh))) {
			header_set = B_TRUE;
			bcopy_thresh += TX_PKT_HEADER_SIZE;
			boff = 0;
			pack_len = 0;
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			hdrp = (p_tx_pkt_header_t)kaddr;
			clen = pkt_len;
			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
			(void) ddi_dma_sync(dma_handle,
			    offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);

			tx_msg_p->flags.dma_type = USE_BCOPY;
			goto hxge_start_control_header_only;
		}

		pkt_len += len;
		pack_len += len;

		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start(3): desc entry %d DESC IOADDR $%p "
		    "desc_vp $%p tx_desc_p $%p desc_pp $%p tx_desc_pp $%p "
		    "len %d pkt_len %d pack_len %d",
		    i,
		    DMA_COMMON_IOADDR(desc_area),
		    tx_desc_ring_vp, tx_desc_p,
		    tx_desc_ring_pp, tx_desc_pp,
		    len, pkt_len, pack_len));

		if (len < bcopy_thresh) {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(4): USE BCOPY: "));
			if (hxge_tx_tiny_pack) {
				uint32_t blst = TXDMA_DESC_NEXT_INDEX(i, -1,
				    tx_ring_p->tx_wrap_mask);
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(5): pack"));
				if ((pack_len <= bcopy_thresh) &&
				    (last_bidx == blst)) {
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start: pack(6) "
					    "(pkt_len %d pack_len %d)",
					    pkt_len, pack_len));
					i = blst;
					tx_desc_p = &tx_desc_ring_vp[i];
#ifdef HXGE_DEBUG
					tx_desc_pp = &tx_desc_ring_pp[i];
#endif
					tx_msg_p = &tx_msg_ring[i];
					boff = pack_len - len;
					ngathers--;
				} else if (pack_len > bcopy_thresh &&
				    header_set) {
					pack_len = len;
					boff = 0;
					bcopy_thresh = hxge_bcopy_thresh;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start(7): > max NEW "
					    "bcopy thresh %d "
					    "pkt_len %d pack_len %d(next)",
					    bcopy_thresh, pkt_len, pack_len));
				}
				last_bidx = i;
			}
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
				hdrp = (p_tx_pkt_header_t)kaddr;
				header_set = B_TRUE;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(7_x2): "
				    "pkt_len %d pack_len %d (new hdrp $%p)",
				    pkt_len, pack_len, hdrp));
			}
			tx_msg_p->flags.dma_type = USE_BCOPY;
			kaddr += boff;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(8): USE BCOPY: before bcopy "
			    "DESC IOADDR $%p entry %d bcopy packets %d "
			    "bcopy kaddr $%p bcopy ioaddr (SAD) $%p "
			    "bcopy clen %d bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area), i,
			    tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
			    clen, boff));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: 1USE BCOPY: "));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: 2USE BCOPY: "));
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: "
			    "last USE BCOPY: copy from b_rptr $%p "
			    "to KADDR $%p (len %d offset %d)",
			    b_rptr, kaddr, len, boff));
			bcopy(b_rptr, kaddr, len);
#ifdef HXGE_DEBUG
			dump_len = (len > 128) ? 128 : len;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start: dump packets "
			    "(After BCOPY len %d)"
			    "(b_rptr $%p): %s", len, nmp->b_rptr,
			    hxge_dump_packet((char *)nmp->b_rptr,
			    dump_len)));
#endif
			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			offset = tx_msg_p->offset_index * hxge_bcopy_thresh;
			(void) ddi_dma_sync(dma_handle,
			    offset, hxge_bcopy_thresh, DDI_DMA_SYNC_FORDEV);
			clen = len + boff;
			tdc_stats->tx_hdr_pkts++;
			HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(9): "
			    "USE BCOPY: DESC IOADDR $%p entry %d "
			    "bcopy packets %d bcopy kaddr $%p "
			    "bcopy ioaddr (SAD) $%p bcopy clen %d "
			    "bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area), i,
			    tdc_stats->tx_hdr_pkts, kaddr, dma_ioaddr,
			    clen, boff));
		} else {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_start(12): USE DVMA: len %d", len));
			tx_msg_p->flags.dma_type = USE_DMA;
			dma_flags = DDI_DMA_WRITE;
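			/*
			 * Small fragments use a consistent mapping;
			 * fragments at or above hxge_dma_stream_thresh
			 * use a streaming mapping, which the DDI can
			 * optimize for large one-shot transfers.
			 */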
			if (len < hxge_dma_stream_thresh) {
				dma_flags |= DDI_DMA_CONSISTENT;
			} else {
				dma_flags |= DDI_DMA_STREAMING;
			}

			dma_handle = tx_msg_p->dma_handle;
			dma_status = ddi_dma_addr_bind_handle(dma_handle, NULL,
			    (caddr_t)b_rptr, len, dma_flags,
			    DDI_DMA_DONTWAIT, NULL,
			    &dma_cookie, &ncookies);
			if (dma_status == DDI_DMA_MAPPED) {
				dma_ioaddr = dma_cookie.dmac_laddress;
				len = (int)dma_cookie.dmac_size;
				clen = (uint32_t)dma_cookie.dmac_size;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start(12_1): "
				    "USE DVMA: len %d clen %d ngathers %d",
				    len, clen, ngathers));
				hpi_desc_handle.regp = (uint64_t)tx_desc_p;
				while (ncookies > 1) {
					ngathers++;
					/*
					 * Each additional cookie takes a
					 * gather descriptor of its own;
					 * these continuation entries do
					 * not carry the SOP bit or the
					 * other SOP-only fields.
					 */

					(void) hpi_txdma_desc_gather_set(
					    hpi_desc_handle, &tx_desc,
					    (ngathers - 1), mark_mode,
					    ngathers, dma_ioaddr, clen);
					tx_msg_p->tx_msg_size = clen;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start: DMA "
					    "ncookie %d ngathers %d "
					    "dma_ioaddr $%p len %d "
					    "desc $%p descp $%p (%d)",
					    ncookies, ngathers,
					    dma_ioaddr, clen,
					    *tx_desc_p, tx_desc_p, i));

					ddi_dma_nextcookie(dma_handle,
					    &dma_cookie);
					dma_ioaddr = dma_cookie.dmac_laddress;

					len = (int)dma_cookie.dmac_size;
					clen = (uint32_t)dma_cookie.dmac_size;
					HXGE_DEBUG_MSG((hxgep, TX_CTL,
					    "==> hxge_start(12_2): "
					    "USE DVMA: len %d clen %d ",
					    len, clen));

					i = TXDMA_DESC_NEXT_INDEX(i, 1,
					    tx_ring_p->tx_wrap_mask);
					tx_desc_p = &tx_desc_ring_vp[i];

					hpi_desc_handle.regp =
					    (uint64_t)tx_desc_p;
					tx_msg_p = &tx_msg_ring[i];
					tx_msg_p->flags.dma_type = USE_NONE;
					tx_desc.value = 0;
					ncookies--;
				}
				tdc_stats->tx_ddi_pkts++;
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_start: DMA: ddi packets %d",
				    tdc_stats->tx_ddi_pkts));
			} else {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "dma mapping failed for %d "
				    "bytes addr $%p flags %x (%d)",
				    len, b_rptr, dma_flags, dma_status));
				good_packet = B_FALSE;
				tdc_stats->tx_dma_bind_fail++;
				tx_msg_p->flags.dma_type = USE_NONE;
				status = 1;
				goto hxge_start_fail2;
			}
		} /* ddi dvma */

		nmp = nmp->b_cont;
hxge_start_control_header_only:
		hpi_desc_handle.regp = (uint64_t)tx_desc_p;
		ngathers++;

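		/*
		 * The first (SOP) descriptor is built in a local copy
		 * (sop_tx_desc) and held back; it is written to the ring
		 * last, after the rest of the gather chain is in place,
		 * so the hardware never sees a partially built packet.
		 */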
		if (ngathers == 1) {
#ifdef HXGE_DEBUG
			save_desc_p = &sop_tx_desc;
#endif
			sop_tx_desc_p = &sop_tx_desc;
			sop_tx_desc_p->value = 0;
			sop_tx_desc_p->bits.tr_len = clen;
			sop_tx_desc_p->bits.sad = dma_ioaddr >> 32;
			sop_tx_desc_p->bits.sad_l = dma_ioaddr & 0xffffffff;
		} else {
#ifdef HXGE_DEBUG
			save_desc_p = &tx_desc;
#endif
			tmp_desc_p = &tx_desc;
			tmp_desc_p->value = 0;
			tmp_desc_p->bits.tr_len = clen;
			tmp_desc_p->bits.sad = dma_ioaddr >> 32;
			tmp_desc_p->bits.sad_l = dma_ioaddr & 0xffffffff;

			tx_desc_p->value = tmp_desc_p->value;
		}

		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start(13): Desc_entry %d ngathers %d "
		    "desc_vp $%p tx_desc_p $%p "
		    "len %d clen %d pkt_len %d pack_len %d nmblks %d "
		    "dma_ioaddr (SAD) $%p mark %d",
		    i, ngathers, tx_desc_ring_vp, tx_desc_p,
		    len, clen, pkt_len, pack_len, nmblks,
		    dma_ioaddr, mark_mode));

#ifdef HXGE_DEBUG
		hpi_desc_handle.hxgep = hxgep;
		hpi_desc_handle.function.function = 0;
		hpi_desc_handle.function.instance = hxgep->instance;
		sad = save_desc_p->bits.sad;
		sad = (sad << 32) | save_desc_p->bits.sad_l;
		xfer_len = save_desc_p->bits.tr_len;

		HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
		    "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
		    "mark %d sop %d\n",
		    save_desc_p->value, sad, save_desc_p->bits.tr_len,
		    xfer_len, save_desc_p->bits.num_ptr,
		    save_desc_p->bits.mark, save_desc_p->bits.sop));

		hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, i);
#endif

		tx_msg_p->tx_msg_size = clen;
		i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
		if (ngathers > hxge_tx_max_gathers) {
			good_packet = B_FALSE;
			mac_hcksum_get(mp, &start_offset, &stuff_offset,
			    &end_offset, &value, &cksum_flags);

			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_start(14): pull msg - "
			    "len %d pkt_len %d ngathers %d",
			    len, pkt_len, ngathers));
			goto hxge_start_fail2;
		}
	} /* while (nmp) */

	tx_msg_p->tx_message = mp;
	tx_desc_p = &tx_desc_ring_vp[sop_index];
	hpi_desc_handle.regp = (uint64_t)tx_desc_p;

	pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
	pkthdrp->reserved = 0;
	hdrp->value = 0;
	(void) hxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
	    (pkt_len - TX_PKT_HEADER_SIZE), npads, pkthdrp);

	/*
	 * Hardware header should not be counted as part of the frame
	 * when determining the frame size
	 */
	if ((pkt_len - TX_PKT_HEADER_SIZE) > (STD_FRAME_SIZE - ETHERFCSL)) {
		tdc_stats->tx_jumbo_pkts++;
	}

	min_len = (hxgep->msg_min + TX_PKT_HEADER_SIZE + (npads * 2));
	if (pkt_len < min_len) {
		/* Assume we use bcopy to premapped buffers */
		kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_start(14-1): < (msg_min + 16)"
		    "len %d pkt_len %d min_len %d bzero %d ngathers %d",
		    len, pkt_len, min_len, (min_len - pkt_len), ngathers));
		bzero((kaddr + pkt_len), (min_len - pkt_len));
		pkt_len = tx_msg_p->tx_msg_size = min_len;

		sop_tx_desc_p->bits.tr_len = min_len;

		HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
		tx_desc_p->value = sop_tx_desc_p->value;

		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_start(14-2): < msg_min - "
		    "len %d pkt_len %d min_len %d ngathers %d",
		    len, pkt_len, min_len, ngathers));
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: cksum_flags 0x%x ",
	    cksum_flags));
	if (cksum_flags & HCK_PARTIALCKSUM) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: cksum_flags 0x%x (partial checksum) ",
		    cksum_flags));
		cksum_on = B_TRUE;
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: from IP cksum_flags 0x%x "
		    "(partial checksum) "
		    "start_offset %d stuff_offset %d",
		    cksum_flags, start_offset, stuff_offset));
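		/*
		 * The L4 start/stuff offsets are programmed into the
		 * packet header in 2-byte units, hence the right shift
		 * by one.
		 */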
		tmp_len = (uint64_t)(start_offset >> 1);
		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4START_SHIFT);
		tmp_len = (uint64_t)(stuff_offset >> 1);
		hdrp->value |= (tmp_len << TX_PKT_HEADER_L4STUFF_SHIFT);

		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_start: from IP cksum_flags 0x%x "
		    "(partial checksum) "
		    "after SHIFT start_offset %d stuff_offset %d",
		    cksum_flags, start_offset, stuff_offset));
	}

	/*
	 * pkt_len already includes the 16-byte header and any padding;
	 * update the total transfer length in the control header.
	 *
	 * Note that Hydra differs from Neptune, where
	 * tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE).
	 */
	tot_xfer_len = pkt_len;
	tmp_len = hdrp->value |
	    (tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start(15_x1): setting SOP "
	    "tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
	    "0x%llx hdrp->value 0x%llx",
	    tot_xfer_len, tot_xfer_len, pkt_len, tmp_len, hdrp->value));
#if defined(_BIG_ENDIAN)
	hdrp->value = ddi_swap64(tmp_len);
#else
	hdrp->value = tmp_len;
#endif
	HXGE_DEBUG_MSG((hxgep,
	    TX_CTL, "==> hxge_start(15_x2): setting SOP "
	    "after SWAP: tot_xfer_len 0x%llx pkt_len %d "
	    "tmp_len 0x%llx hdrp->value 0x%llx",
	    tot_xfer_len, pkt_len, tmp_len, hdrp->value));

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(15): setting SOP "
	    "wr_index %d tot_xfer_len (%d) pkt_len %d npads %d",
	    sop_index, tot_xfer_len, pkt_len, npads));

	sop_tx_desc_p->bits.sop = 1;
	sop_tx_desc_p->bits.mark = mark_mode;
	sop_tx_desc_p->bits.num_ptr = ngathers;

	if (mark_mode)
		tdc_stats->tx_marks++;

	HXGE_MEM_PIO_WRITE64(hpi_desc_handle, sop_tx_desc_p->value);
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start(16): set SOP done"));

#ifdef HXGE_DEBUG
	hpi_desc_handle.hxgep = hxgep;
	hpi_desc_handle.function.function = 0;
	hpi_desc_handle.function.instance = hxgep->instance;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "\n\t: value 0x%llx\n"
	    "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
	    save_desc_p->value, sad, save_desc_p->bits.tr_len,
	    xfer_len, save_desc_p->bits.num_ptr, save_desc_p->bits.mark,
	    save_desc_p->bits.sop));
	(void) hpi_txdma_dump_desc_one(hpi_desc_handle, NULL, sop_index);

	dump_len = (pkt_len > 128) ? 128 : pkt_len;
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start: dump packets(17) (after sop set, len "
	    " (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
	    "ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
	    (char *)hdrp, hxge_dump_packet((char *)hdrp, dump_len)));
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_start(18): TX desc sync: sop_index %d", sop_index));
#endif

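	/*
	 * Flush the newly built descriptors to the device.  If they are
	 * contiguous in the ring, a single sync suffices; otherwise sync
	 * the tail segment and the wrapped head segment separately.
	 */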
	if ((ngathers == 1) || (tx_ring_p->wr_index < i)) {
		(void) ddi_dma_sync(tx_desc_dma_handle,
		    sop_index * sizeof (tx_desc_t),
		    ngathers * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);

		HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(19): sync 1 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset,
		    pkt_len, ngathers, sop_index));
	} else { /* more than one descriptor and wrap around */
		uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
		(void) ddi_dma_sync(tx_desc_dma_handle,
		    sop_index * sizeof (tx_desc_t),
		    nsdescs * sizeof (tx_desc_t), DDI_DMA_SYNC_FORDEV);
		HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(20): sync 1 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset, pkt_len, ngathers, sop_index));

		(void) ddi_dma_sync(tx_desc_dma_handle, 0,
		    (ngathers - nsdescs) * sizeof (tx_desc_t),
		    DDI_DMA_SYNC_FORDEV);
		HXGE_DEBUG_MSG((hxgep, TX_CTL, "hxge_start(21): sync 2 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset,
		    pkt_len, ngathers, sop_index));
	}

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;

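	/*
	 * Advance the software write index past this packet and toggle
	 * the wrap flag whenever the index wraps around; the wrap bit is
	 * passed to the hardware in the kick register below so that it
	 * can tell a full ring from an empty one.
	 */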
	tx_ring_p->wr_index = i;
	if (tx_ring_p->wr_index <= tail_index) {
		tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
		    B_FALSE : B_TRUE);
	}

	tx_ring_p->descs_pending += ngathers;
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX kick: "
	    "channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
	    tx_ring_p->tdc, tx_ring_p->wr_index, tx_ring_p->wr_index_wrap,
	    ngathers, tx_ring_p->descs_pending));
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: TX KICKING: "));

	kick.value = 0;
	kick.bits.wrap = tx_ring_p->wr_index_wrap;
	kick.bits.tail = (uint16_t)tx_ring_p->wr_index;

	/* Hand the new tail and wrap bit to the transmit kick register. */
	TXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep),
	    TDC_TDR_KICK, (uint8_t)tx_ring_p->tdc, kick.value);
	tdc_stats->tx_starts++;
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
	return (status);

hxge_start_fail2:
	if (good_packet == B_FALSE) {
		cur_index = sop_index;
		HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_start: clean up"));
		for (i = 0; i < ngathers; i++) {
			tx_desc_p = &tx_desc_ring_vp[cur_index];
			hpi_handle.regp = (uint64_t)tx_desc_p;
			tx_msg_p = &tx_msg_ring[cur_index];
			(void) hpi_txdma_desc_set_zero(hpi_handle, 1);
			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "tx_desc_p = %X index = %d",
				    tx_desc_p, tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask)
					tx_ring_p->dvma_wr_index = 0;
				else
					tx_ring_p->dvma_wr_index++;
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type == USE_DMA) {
				if (ddi_dma_unbind_handle(
				    tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "hxge_start: "
					    "ddi_dma_unbind_handle failed");
				}
			}
			tx_msg_p->flags.dma_type = USE_NONE;
			cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
			    tx_ring_p->tx_wrap_mask);
		}
	}

	MUTEX_EXIT(&tx_ring_p->lock);

hxge_start_fail1:
	/* Add FMA to check the access handle hxge_hregh */
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_start"));
	return (status);
}