/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "lm5706.h"


#ifndef LM_NON_LEGACY_MODE_SUPPORT
/*******************************************************************************
 * Description:
 *    Post the packet fragments in 'frags' to tx chain 'chain_idx' and
 *    ring the tx doorbell.  A NULL 'packet' only rings the doorbell with
 *    the current producer index and byte sequence.
 *
 * Return:
 *    LM_STATUS_SUCCESS on success, or LM_STATUS_RESOURCE when there are
 *    not enough tx BDs left.
 ******************************************************************************/
lm_status_t
lm_send_packet(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet,
    lm_frag_list_t *frags)
{
    u16_t lso_bd_reserved;
    u16_t ipv6_ext_len;
    lm_tx_chain_t *txq;
    tx_bd_t *start_bd;
    tx_bd_t *last_bd;
    tx_bd_t *prod_bd;
    lm_frag_t *frag;
    u16_t prod_idx;
    u32_t flags;
    u32_t cnt;

    txq = &pdev->tx_info.chain[chain_idx];

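    /* Note (added commentary): a NULL 'packet' appears to be a
     * doorbell-only call - no BDs are posted; the current producer
     * index and byte sequence are simply (re)written to the chip. */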
    if(packet == NULL)
    {
        /* Hardcode the offset in the L2_ONLY case (e.g. Solaris). */
        u32_t cmd_offset = 34*sizeof(u32_t); /* == OFFSETOF(l4_context_t, l4ctx_cmd) */
        MBQ_WR16(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bidx),
            txq->prod_idx);
        MBQ_WR32(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bseq),
            txq->prod_bseq);

        return LM_STATUS_SUCCESS;
    }

#if DBG
    if(frags->cnt == 0)
    {
        DbgBreakMsg("zero frag_cnt\n");

        return LM_STATUS_INVALID_PARAMETER;
    }

    packet->u1.tx.dbg_start_bd = txq->prod_bd;
    packet->u1.tx.dbg_start_bd_idx = txq->prod_idx;
    packet->u1.tx.dbg_frag_cnt = (u16_t) frags->cnt;
#endif

    last_bd = NULL;

    if(frags->cnt > txq->bd_left)
    {
        /* The caller should have done this check before calling this
         * routine. */
        DbgBreakMsg("No tx bd left.\n");

        return LM_STATUS_RESOURCE;
    }

    txq->bd_left -= (u16_t) frags->cnt;

    packet->size = 0;
    flags = 0;

    if(packet->u1.tx.flags & LM_TX_FLAG_INSERT_VLAN_TAG)
    {
        flags |= TX_BD_FLAGS_VLAN_TAG;
    }

    if((packet->u1.tx.flags & LM_TX_FLAG_TCP_LSO_FRAME) == 0)
    {
        if(packet->u1.tx.flags & LM_TX_FLAG_COMPUTE_IP_CKSUM)
        {
            flags |= TX_BD_FLAGS_IP_CKSUM;
            LM_INC64(&pdev->tx_info.stats.ip_cso_frames, 1);
        }

        if(packet->u1.tx.flags & LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)
        {
            flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
            if(packet->u1.tx.flags & LM_TX_FLAG_IPV6_PACKET)
            {
                LM_INC64(&pdev->tx_info.stats.ipv6_tcp_udp_cso_frames, 1);
            }
            else
            {
                LM_INC64(&pdev->tx_info.stats.ipv4_tcp_udp_cso_frames, 1);
            }
        }
    }

    if(packet->u1.tx.flags & LM_TX_FLAG_DONT_COMPUTE_CRC)
    {
        flags |= TX_BD_FLAGS_DONT_GEN_CRC;
    }

    if(packet->u1.tx.flags & LM_TX_FLAG_TCP_LSO_FRAME)
    {
        if(packet->u1.tx.flags & LM_TX_FLAG_IPV6_PACKET)
        {
            /* TCP option length - bottom 4 bits of TX_BD_FLAGS_SW_OPTION_WORD
             * in terms of the number of 4-byte words.
             * IP header length - bits 1-2 of the bd flags, the upper 2 bits of
             * tx_bd_reserved, and the upper 1 bit of
             * TX_BD_FLAGS_SW_OPTION_WORD will be used for the IPV6 extension
             * header length in terms of 8-byte words.
             * The TX_BD_FLAGS_SW_FLAGS bit will be used to indicate IPV6 LSO. */
            flags |= TX_BD_FLAGS_SW_FLAGS;

            if(packet->u1.tx.flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
            {
                flags |= TX_BD_FLAGS_SW_SNAP;
            }

            DbgBreakIf(packet->u1.tx.lso_tcp_hdr_len < 20 ||
                packet->u1.tx.lso_tcp_hdr_len > 84 ||
                packet->u1.tx.lso_tcp_hdr_len % 4);

            /* TCP option length in terms of the number of 32-bit words.
             * 4 bits are used for the number of words. */
            flags |= (packet->u1.tx.lso_tcp_hdr_len - 20) << 6;

            DbgBreakIf(packet->u1.tx.lso_ip_hdr_len < 20 ||
                packet->u1.tx.lso_ip_hdr_len > 296 ||
                (packet->u1.tx.lso_ip_hdr_len - 40) % 8);

            /* IPV6 extension header length.  6 bits are used for the number
             * of 64-bit words. */
            ipv6_ext_len = packet->u1.tx.lso_ip_hdr_len - 40;

            DbgBreakIf(ipv6_ext_len & 0x7);

            /* ext_len in number of 8-byte words. */
            ipv6_ext_len >>= 3;

            flags |= (ipv6_ext_len & 0x3) << 1;             /* bits 1-0 */

            lso_bd_reserved = packet->u1.tx.lso_mss;
            lso_bd_reserved |= (ipv6_ext_len & 0xc) << 12;  /* bits 3-2 */

            flags |= (ipv6_ext_len & 0x10) << 8;            /* bit 4 */

            DbgBreakIf(ipv6_ext_len >> 5);  /* bits 5 and higher are invalid. */

            LM_INC64(&pdev->tx_info.stats.ipv6_lso_frames, 1);
        }
        else
        {
            flags |= TX_BD_FLAGS_SW_LSO;
            if(packet->u1.tx.flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
            {
                flags |= TX_BD_FLAGS_SW_SNAP;
            }

            DbgBreakIf(packet->u1.tx.lso_ip_hdr_len +
                packet->u1.tx.lso_tcp_hdr_len > 120);

            /* The size of the IP and TCP options in terms of 32-bit words. */
            flags |= (packet->u1.tx.lso_ip_hdr_len +
                packet->u1.tx.lso_tcp_hdr_len - 40) << 6;

            lso_bd_reserved = packet->u1.tx.lso_mss;

            LM_INC64(&pdev->tx_info.stats.ipv4_lso_frames, 1);
        }
    }
    else
    {
        lso_bd_reserved = 0;
    }
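    /* Worked example of the IPV6 LSO encoding above (illustrative, not
     * from the original source): with lso_tcp_hdr_len == 32 (12 option
     * bytes), (32 - 20) << 6 == 0x300 places an option word count of 3
     * at flag bits 8-11 (the length is a multiple of 4, so bits 6-7 stay
     * clear).  With lso_ip_hdr_len == 72 (40-byte base header plus 32
     * extension bytes), ipv6_ext_len == 32 >> 3 == 4 == 0b00100; only
     * the (ipv6_ext_len & 0xc) << 12 term is non-zero, setting bit 14 of
     * lso_bd_reserved.  The three masks together spread the 5-bit
     * extension word count across the bd flags and tx_bd_reserved. */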

    start_bd = txq->prod_bd;
    frag = frags->frag_arr;

    /* Get the pointer to the current BD and its index. */
    prod_idx = txq->prod_idx;
    prod_bd = txq->prod_bd;

    /* This is the number of times we cross a BD page boundary for this
     * packet.  This and the bd_used value will give us the total number
     * of BD slots needed to send this packet, which is used to determine
     * if a packet has been sent.  We only need this because, unlike L2
     * completion, LSO completion does not end at a request boundary.
     * For example, if an LSO request spans BD #100-120, we could get a
     * transmit consumer index of 115. */
    packet->u1.tx.span_pages = 0;

    /* Initialize the BDs of this packet. */
    for(cnt = 0; cnt < frags->cnt; cnt++)
    {
        DbgBreakIf(frag->size >= 0x10000 || frag->size == 0);

        prod_bd->tx_bd_haddr_lo = frag->addr.as_u32.low;
        prod_bd->tx_bd_haddr_hi = frag->addr.as_u32.high;
        prod_bd->tx_bd_nbytes = (u16_t) frag->size;
        prod_bd->tx_bd_vlan_tag = packet->u1.tx.vlan_tag;
        prod_bd->tx_bd_flags = (u16_t) flags;

        if(packet->u1.tx.flags & LM_TX_FLAG_TCP_LSO_FRAME)
        {
            prod_bd->tx_bd_reserved = lso_bd_reserved;
        }
        else if(pdev->params.test_mode & TEST_MODE_TX_BD_TAGGING)
        {
            prod_bd->tx_bd_reserved = prod_idx & 0x0fff;
            prod_bd->tx_bd_reserved |= (u16_t) (GET_CID(txq->cid_addr) << 12);
        }

        packet->size += frag->size;

        last_bd = prod_bd;
        frag++;

        /* Advance to the next BD. */
        prod_bd++;
        prod_idx++;
        if((prod_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
        {
            /* Only increment span_pages when the BDs for this request
             * cross a page boundary. */
            if(cnt+1 < frags->cnt)
            {
                packet->u1.tx.span_pages++;
            }

            prod_idx++;
            prod_bd = *((tx_bd_t **) ((tx_bd_next_t *)
                prod_bd)->tx_bd_next_reserved);
        }
    }
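    /* Note on the page-boundary skip above (added commentary): the last
     * slot of each BD page is a tx_bd_next_t pointing to the next page,
     * so when prod_idx lands on that slot it is bumped once more and
     * prod_bd is reloaded from the next-page pointer.  E.g. assuming
     * MAX_BD_PER_PAGE == 0xff, producing at index 0x2fe advances to
     * 0x2ff (the next-pointer slot) and then straight to 0x300. */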

    /* Set the bd flags of the first and last BDs. */
    flags |= TX_BD_FLAGS_END;
    if(packet->u1.tx.flags & LM_TX_FLAG_COAL_NOW)
    {
        flags |= TX_BD_FLAGS_COAL_NOW;
    }

    last_bd->tx_bd_flags |= (u16_t) flags;
    start_bd->tx_bd_flags |= TX_BD_FLAGS_START;

#if INCLUDE_OFLD_SUPPORT
    /* We need to do the padding for the catchup path. */
    if(chain_idx == pdev->tx_info.cu_idx &&
        packet->size < MIN_ETHERNET_PACKET_SIZE)
    {
        last_bd->tx_bd_nbytes +=
            (u16_t) (MIN_ETHERNET_PACKET_SIZE - packet->size);
        packet->size = MIN_ETHERNET_PACKET_SIZE;
    }
#endif

    /* Save the number of BDs used.  Later we need to add this value back
     * to txq->bd_left when the packet is sent. */
    packet->u1.tx.bd_used = (u16_t) frags->cnt;

    packet->u1.tx.next_bd_idx = prod_idx;

    txq->prod_bd = prod_bd;
    txq->prod_idx = prod_idx;
    txq->prod_bseq += packet->size;
#if (DBG)
    if (chain_idx == pdev->tx_info.cu_idx)
    {
        DbgBreakIf(packet->size > pdev->params.mtu + 4);
    }
    else
    {
        DbgBreakIf(packet->size > pdev->params.mtu &&
            (flags & (TX_BD_FLAGS_SW_LSO | TX_BD_FLAGS_SW_FLAGS)) == 0);
    }
#endif
    s_list_push_tail(&txq->active_descq, &packet->link);

    if(!(packet->u1.tx.flags & LM_TX_FLAG_SKIP_MBQ_WRITE))
    {
        /* Hardcode the offset in the L2_ONLY case (e.g. Solaris). */
        u32_t cmd_offset = 34*sizeof(u32_t); /* == OFFSETOF(l4_context_t, l4ctx_cmd) */
        MBQ_WR16(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bidx),
            txq->prod_idx);
        MBQ_WR32(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bseq),
            txq->prod_bseq);
    }

    return LM_STATUS_SUCCESS;
} /* lm_send_packet */
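
/* A minimal caller-side sketch (illustrative only; 'example_xmit' and the
 * use of chain index 0 are assumptions, not part of this driver): the
 * caller verifies BD space, then posts the DMA-mapped fragments. */
#if 0
static lm_status_t
example_xmit(lm_device_t *pdev, lm_packet_t *pkt, lm_frag_list_t *frags)
{
    u32_t chain_idx = 0;    /* first L2 tx chain (assumed) */
    lm_tx_chain_t *txq = &pdev->tx_info.chain[chain_idx];

    /* lm_send_packet() expects this check to have been done already. */
    if(frags->cnt > txq->bd_left)
    {
        return LM_STATUS_RESOURCE;
    }

    return lm_send_packet(pdev, chain_idx, pkt, frags);
}
#endif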
#else
/*******************************************************************************
 * Description:
 *    Non-legacy-mode version of lm_send_packet.  Posts the fragments in
 *    'frags' to tx chain 'chain_idx' and rings the tx doorbell; a NULL
 *    'packet' only rings the doorbell.  When the catchup rate limiter is
 *    enabled, the byte sequence for the catchup chain is written to the
 *    COM scratchpad instead of the MBQ.
 *
 * Return:
 *    LM_STATUS_SUCCESS on success, or LM_STATUS_RESOURCE when there are
 *    not enough tx BDs left.
 ******************************************************************************/
lm_status_t
lm_send_packet(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet,
    lm_frag_list_t *frags)
{
    u16_t lso_bd_reserved;
    u16_t ipv6_ext_len;
    lm_tx_chain_t *txq;
    tx_bd_t *start_bd;
    tx_bd_t *last_bd;
    tx_bd_t *prod_bd;
    lm_frag_t *frag;
    u16_t prod_idx;
    u32_t flags;
    u32_t cnt;
    lm_pkt_tx_info_t *pkt_info;

    txq = &pdev->tx_info.chain[chain_idx];

    if(packet == NULL)
    {
        /* Hardcode the offset in the L2_ONLY case (e.g. Solaris). */
        u32_t cmd_offset = 34*sizeof(u32_t); /* == OFFSETOF(l4_context_t, l4ctx_cmd) */
        MBQ_WR16(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bidx),
            txq->prod_idx);
        if(pdev->vars.enable_cu_rate_limiter &&
            txq->idx == TX_CHAIN_IDX1)
        {
            REG_WR_IND(
                pdev,
                OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_cu_host_bseq),
                txq->prod_bseq);
        }
        else
        {
            MBQ_WR32(
                pdev,
                GET_CID(txq->cid_addr),
                cmd_offset +
                    OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bseq),
                txq->prod_bseq);
        }

        return LM_STATUS_SUCCESS;
    }

#if DBG
    if(frags->cnt == 0)
    {
        DbgBreakMsg("zero frag_cnt\n");

        return LM_STATUS_INVALID_PARAMETER;
    }

    packet->u1.tx.dbg_start_bd = txq->prod_bd;
    packet->u1.tx.dbg_start_bd_idx = txq->prod_idx;
    packet->u1.tx.dbg_frag_cnt = (u16_t) frags->cnt;
#endif

    last_bd = NULL;

    if(frags->cnt > txq->bd_left)
    {
        /* The caller should have done this check before calling this
         * routine. */
        DbgBreakMsg("No tx bd left.\n");

        return LM_STATUS_RESOURCE;
    }

    txq->bd_left -= (u16_t) frags->cnt;

    pkt_info = packet->u1.tx.tx_pkt_info;
    packet->u1.tx.size = 0;
    flags = 0;

    if(pkt_info->flags & LM_TX_FLAG_INSERT_VLAN_TAG)
    {
        flags |= TX_BD_FLAGS_VLAN_TAG;
    }

    if((pkt_info->flags & LM_TX_FLAG_TCP_LSO_FRAME) == 0)
    {
        if(pkt_info->flags & LM_TX_FLAG_COMPUTE_IP_CKSUM)
        {
            flags |= TX_BD_FLAGS_IP_CKSUM;
            LM_INC64(&pdev->tx_info.stats.ip_cso_frames, 1);
        }

        if(pkt_info->flags & LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)
        {
            flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
            if(pkt_info->flags & LM_TX_FLAG_IPV6_PACKET)
            {
                LM_INC64(&pdev->tx_info.stats.ipv6_tcp_udp_cso_frames, 1);
            }
            else
            {
                LM_INC64(&pdev->tx_info.stats.ipv4_tcp_udp_cso_frames, 1);
            }
        }
    }

    if(pkt_info->flags & LM_TX_FLAG_DONT_COMPUTE_CRC)
    {
        flags |= TX_BD_FLAGS_DONT_GEN_CRC;
    }

    if(pkt_info->flags & LM_TX_FLAG_TCP_LSO_FRAME)
    {
        if(pkt_info->flags & LM_TX_FLAG_IPV6_PACKET)
        {
            /* TCP option length - bottom 4 bits of TX_BD_FLAGS_SW_OPTION_WORD
             * in terms of the number of 4-byte words.
             * IP header length - bits 1-2 of the bd flags, the upper 2 bits of
             * tx_bd_reserved, and the upper 1 bit of
             * TX_BD_FLAGS_SW_OPTION_WORD will be used for the IPV6 extension
             * header length in terms of 8-byte words.
             * The TX_BD_FLAGS_SW_FLAGS bit will be used to indicate IPV6 LSO. */
            flags |= TX_BD_FLAGS_SW_FLAGS;

            if(pkt_info->flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
            {
                flags |= TX_BD_FLAGS_SW_SNAP;
            }

            DbgBreakIf(pkt_info->lso_tcp_hdr_len < 20 ||
                pkt_info->lso_tcp_hdr_len > 84 ||
                pkt_info->lso_tcp_hdr_len % 4);

            /* TCP option length in terms of the number of 32-bit words.
             * 4 bits are used for the number of words. */
            flags |= (pkt_info->lso_tcp_hdr_len - 20) << 6;

            DbgBreakIf(pkt_info->lso_ip_hdr_len < 20 ||
                pkt_info->lso_ip_hdr_len > 296 ||
                (pkt_info->lso_ip_hdr_len - 40) % 8);

            /* IPV6 extension header length.  6 bits are used for the number
             * of 64-bit words. */
            ipv6_ext_len = pkt_info->lso_ip_hdr_len - 40;

            DbgBreakIf(ipv6_ext_len & 0x7);

            /* ext_len in number of 8-byte words. */
            ipv6_ext_len >>= 3;

            flags |= (ipv6_ext_len & 0x3) << 1;             /* bits 1-0 */

            lso_bd_reserved = pkt_info->lso_mss;
            lso_bd_reserved |= (ipv6_ext_len & 0xc) << 12;  /* bits 3-2 */

            flags |= (ipv6_ext_len & 0x10) << 8;            /* bit 4 */

            DbgBreakIf(ipv6_ext_len >> 5);  /* bits 5 and higher are invalid. */

            LM_INC64(&pdev->tx_info.stats.ipv6_lso_frames, 1);
        }
        else
        {
            flags |= TX_BD_FLAGS_SW_LSO;
            if(pkt_info->flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
            {
                flags |= TX_BD_FLAGS_SW_SNAP;
            }

            DbgBreakIf(pkt_info->lso_ip_hdr_len +
                pkt_info->lso_tcp_hdr_len > 120);

            /* The size of the IP and TCP options in terms of 32-bit words. */
            flags |= (pkt_info->lso_ip_hdr_len +
                pkt_info->lso_tcp_hdr_len - 40) << 6;

            lso_bd_reserved = pkt_info->lso_mss;

            LM_INC64(&pdev->tx_info.stats.ipv4_lso_frames, 1);
        }
    }
    else
    {
        lso_bd_reserved = 0;
    }

    start_bd = txq->prod_bd;
    frag = frags->frag_arr;

    /* Get the pointer to the current BD and its index. */
    prod_idx = txq->prod_idx;
    prod_bd = txq->prod_bd;

    /* This is the number of times we cross a BD page boundary for this
     * packet.  This and the bd_used value will give us the total number
     * of BD slots needed to send this packet, which is used to determine
     * if a packet has been sent.  We only need this because, unlike L2
     * completion, LSO completion does not end at a request boundary.
     * For example, if an LSO request spans BD #100-120, we could get a
     * transmit consumer index of 115. */
    packet->u1.tx.span_pages = 0;

    /* Initialize the BDs of this packet. */
    for(cnt = 0; cnt < frags->cnt; cnt++)
    {
        DbgBreakIf(frag->size >= 0x10000 || frag->size == 0);

        prod_bd->tx_bd_haddr_lo = frag->addr.as_u32.low;
        prod_bd->tx_bd_haddr_hi = frag->addr.as_u32.high;
        prod_bd->tx_bd_nbytes = (u16_t) frag->size;
        prod_bd->tx_bd_vlan_tag = pkt_info->vlan_tag;
        prod_bd->tx_bd_flags = (u16_t) flags;

        if(pkt_info->flags & LM_TX_FLAG_TCP_LSO_FRAME)
        {
            prod_bd->tx_bd_reserved = lso_bd_reserved;
        }
        else if(pdev->params.test_mode & TEST_MODE_TX_BD_TAGGING)
        {
            prod_bd->tx_bd_reserved = prod_idx & 0x0fff;
            prod_bd->tx_bd_reserved |= (u16_t) (GET_CID(txq->cid_addr) << 12);
        }

        packet->u1.tx.size += frag->size;

        last_bd = prod_bd;
        frag++;

        /* Advance to the next BD. */
        prod_bd++;
        prod_idx++;
        if((prod_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
        {
            /* Only increment span_pages when the BDs for this request
             * cross a page boundary. */
            if(cnt+1 < frags->cnt)
            {
                packet->u1.tx.span_pages++;
            }

            prod_idx++;
            prod_bd = *((tx_bd_t **) ((tx_bd_next_t *)
                prod_bd)->tx_bd_next_reserved);
        }
    }

    /* Set the bd flags of the first and last BDs. */
    flags |= TX_BD_FLAGS_END;
    if(pkt_info->flags & LM_TX_FLAG_COAL_NOW)
    {
        flags |= TX_BD_FLAGS_COAL_NOW;
    }

    last_bd->tx_bd_flags |= (u16_t) flags;
    start_bd->tx_bd_flags |= TX_BD_FLAGS_START;

#if INCLUDE_OFLD_SUPPORT
    /* We need to do the padding for the catchup path. */
    if(chain_idx == pdev->tx_info.cu_idx &&
        packet->u1.tx.size < MIN_ETHERNET_PACKET_SIZE)
    {
        last_bd->tx_bd_nbytes +=
            (u16_t) (MIN_ETHERNET_PACKET_SIZE - packet->u1.tx.size);
        packet->u1.tx.size = MIN_ETHERNET_PACKET_SIZE;
    }
#endif

    /* Save the number of BDs used.  Later we need to add this value back
     * to txq->bd_left when the packet is sent. */
    packet->u1.tx.bd_used = (u16_t) frags->cnt;

    packet->u1.tx.next_bd_idx = prod_idx;

    txq->prod_bd = prod_bd;
    txq->prod_idx = prod_idx;
    txq->prod_bseq += packet->u1.tx.size;
#if (DBG)
    if (chain_idx == pdev->tx_info.cu_idx)
    {
        DbgBreakIf(packet->u1.tx.size > pdev->params.mtu + 4);
    }
    else
    {
        DbgBreakIf(packet->u1.tx.size > pdev->params.mtu &&
            (flags & (TX_BD_FLAGS_SW_LSO | TX_BD_FLAGS_SW_FLAGS)) == 0);
    }
#endif
    s_list_push_tail(&txq->active_descq, &packet->link);

    if(!(pkt_info->flags & LM_TX_FLAG_SKIP_MBQ_WRITE))
    {
        /* Hardcode the offset in the L2_ONLY case (e.g. Solaris). */
        u32_t cmd_offset = 34*sizeof(u32_t); /* == OFFSETOF(l4_context_t, l4ctx_cmd) */
        MBQ_WR16(
            pdev,
            GET_CID(txq->cid_addr),
            cmd_offset +
                OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bidx),
            txq->prod_idx);
        if(pdev->vars.enable_cu_rate_limiter &&
            txq->idx == TX_CHAIN_IDX1)
        {
            REG_WR_IND(
                pdev,
                OFFSETOF(reg_space_t, com.com_scratch[0])+COM_HSI_OFFSETOFF(com_cu_host_bseq),
                txq->prod_bseq);
        }
        else
        {
            MBQ_WR32(
                pdev,
                GET_CID(txq->cid_addr),
                cmd_offset +
                    OFFSETOF(tcp_context_cmd_cell_te_t, ccell_tx_host_bseq),
                txq->prod_bseq);
        }
    }

    return LM_STATUS_SUCCESS;
} /* lm_send_packet */
#endif /* LM_NON_LEGACY_MODE_SUPPORT */


/*******************************************************************************
 * Description:
 *    Collect the packets on 'txq' that the hardware has completed, up to
 *    consumer index 'hw_con_idx', and append them to 'sent_list'.
 *
 * Return:
 *    The number of packets appended to 'sent_list'.
 ******************************************************************************/
STATIC u32_t
get_packets_sent(
    struct _lm_device_t *pdev,
    lm_tx_chain_t *txq,
    u16_t hw_con_idx,
    s_list_t *sent_list)
{
    lm_packet_t *pkt;
    u32_t pkt_cnt;

    /* The consumer index may stop at the end of a page boundary.
     * In this case, we need to advance it to the next one. */
    if((hw_con_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
    {
        hw_con_idx++;
    }

    pkt_cnt = 0;

    while(txq->con_idx != hw_con_idx)
    {
        DbgBreakIf(S16_SUB(hw_con_idx, txq->con_idx) <= 0);
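        /* Note (added commentary): the index arithmetic is mod-64K.  For
         * example, with txq->con_idx == 0xfff0 and hw_con_idx == 0x0010,
         * S16_SUB() yields +32, so the assertion above and the bd_used
         * comparison below stay correct across the 16-bit wrap. */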

        pkt = (lm_packet_t *) s_list_peek_head(&txq->active_descq);

        DbgBreakIf(pkt == NULL);

        if(!pkt)
        {
            DbgBreakIf(!s_list_is_empty(&txq->active_descq));
            break;
        }
        /* LSO requests may not complete at the request boundary.
         *
         * if(pkt->u1.tx.flags & LM_TX_FLAG_TCP_LSO_FRAME) */
        {
            if((u16_t) S16_SUB(hw_con_idx, txq->con_idx) <
                pkt->u1.tx.bd_used + pkt->u1.tx.span_pages)
            {
                break;
            }
        }

#if DBG
        DbgBreakIf(pkt->u1.tx.dbg_start_bd_idx != txq->con_idx);

        /* Make sure hw_con_idx ends at an L2 packet boundary.  For an LSO
         * request, hw_con_idx may not end at the request boundary. */
        while(pkt)
        {
            if(S16_SUB(hw_con_idx, pkt->u1.tx.next_bd_idx) <= 0)
            {
                break;
            }

            pkt = (lm_packet_t *) s_list_next_entry(&pkt->link);
        }

        DbgBreakIf(pkt == NULL);

        /* catchup workaround.
         * DbgBreakIf(
         *    !(pkt->u1.tx.flags & LM_TX_FLAG_TCP_LSO_FRAME) &&
         *    (hw_con_idx != pkt->u1.tx.next_bd_idx)); */
#endif

        pkt = (lm_packet_t *) s_list_pop_head(&txq->active_descq);

        /* Advance the txq->con_idx to the start bd_idx of the next packet. */
        txq->con_idx = pkt->u1.tx.next_bd_idx;

        pkt->status = LM_STATUS_SUCCESS;

        txq->bd_left += pkt->u1.tx.bd_used;

        s_list_push_tail(sent_list, &pkt->link);

        pkt_cnt++;
    }

    return pkt_cnt;
} /* get_packets_sent */



/*******************************************************************************
 * Description:
 *    Reclaim completed tx packets from chain 'qidx'.  If 'con_idx' is
 *    non-zero it is used as the hardware consumer index; otherwise the
 *    index is re-read through txq->hw_con_idx_ptr until no more packets
 *    are returned.
 *
 * Return:
 *    The total number of packets appended to 'sent_list'.
 ******************************************************************************/
u32_t
lm_get_packets_sent(
    struct _lm_device_t *pdev,
    u32_t qidx,
    u32_t con_idx,
    s_list_t *sent_list)
{
    lm_tx_chain_t *txq;
    u16_t hw_con_idx;
    u32_t pkts_added;
    u32_t pkt_cnt;

    txq = &pdev->tx_info.chain[qidx];

    if(con_idx)
    {
        hw_con_idx = con_idx & 0xffff;

        pkt_cnt = get_packets_sent(pdev, txq, hw_con_idx, sent_list);
    }
    else
    {
        pkt_cnt = 0;

        for(; ;)
        {
            hw_con_idx = *txq->hw_con_idx_ptr;

            pkts_added = get_packets_sent(pdev, txq, hw_con_idx, sent_list);
            if(pkts_added == 0)
            {
                break;
            }

            pkt_cnt += pkts_added;
        }
    }

    return pkt_cnt;
} /* lm_get_packets_sent */



/*******************************************************************************
 * Description:
 *    Reclaim completed tx packets from chain 'chain_idx' and indicate
 *    them to the OS-specific layer in batches of at most
 *    MAX_PACKETS_PER_INDICATION.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_service_tx_int(
    lm_device_t *pdev,
    u32_t chain_idx)
{
    lm_packet_t *pkt_arr[MAX_PACKETS_PER_INDICATION];
    lm_packet_t **pkt_arr_ptr;
    s_list_t sent_list;
    lm_packet_t *pkt;
    u32_t pkt_cnt;

    s_list_init(&sent_list, NULL, NULL, 0);

    (void) lm_get_packets_sent(pdev, chain_idx, 0, &sent_list);

    while(!s_list_is_empty(&sent_list))
    {
        pkt_arr_ptr = pkt_arr;

        for(pkt_cnt = 0; pkt_cnt < MAX_PACKETS_PER_INDICATION; pkt_cnt++)
        {
            pkt = (lm_packet_t *) s_list_pop_head(&sent_list);
            if(pkt == NULL)
            {
                break;
            }

            *pkt_arr_ptr = pkt;
            pkt_arr_ptr++;
        }

        mm_indicate_tx(pdev, chain_idx, pkt_arr, pkt_cnt);
    }
} /* lm_service_tx_int */



/*******************************************************************************
 * Description:
 *    Abort all packets still queued on tx chain 'idx'.  Each packet is
 *    completed with LM_STATUS_ABORTED and indicated to the OS-specific
 *    layer one at a time.
 *
 * Return:
 *    None.
 ******************************************************************************/
void
lm_send_abort(
    struct _lm_device_t *pdev,
    u32_t idx)
{
    lm_tx_chain_t *txq;
    lm_packet_t *pkt;

    DbgBreakIf(idx >= pdev->tx_info.num_txq);

    txq = &pdev->tx_info.chain[idx];

    for(; ;)
    {
        pkt = (lm_packet_t *) s_list_pop_head(&txq->active_descq);
        if(pkt == NULL)
        {
            break;
        }

        pkt->status = LM_STATUS_ABORTED;
        pdev->tx_info.stats.aborted++;
        txq->bd_left += pkt->u1.tx.bd_used;

        mm_indicate_tx(pdev, idx, &pkt, 1);
    }

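    /* Sanity check (added commentary): with every packet reclaimed,
     * bd_left should be back at its initial value.  Each BD page appears
     * to contribute MAX_BD_PER_PAGE usable entries (the last slot of a
     * page being the next-page pointer), with one BD always kept unused,
     * hence the "- 1" below. */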
    DbgBreakIf(txq->bd_left !=
        pdev->params.l2_tx_bd_page_cnt[txq->idx] * MAX_BD_PER_PAGE - 1);
} /* lm_send_abort */