cxgb_sge.c (fc5a2e51fb49ec3cbe8f670d6c1000abecf6df27) | cxgb_sge.c (8090c9f504c0c19831713ab2392d0993a5fc5b36) |
---|---|
1/************************************************************************** 2 3Copyright (c) 2007, Chelsio Inc. 4All rights reserved. 5 6Redistribution and use in source and binary forms, with or without 7modification, are permitted provided that the following conditions are met: 8 --- 28 unchanged lines hidden (view full) --- 37#include <sys/bus.h> 38#include <sys/conf.h> 39#include <machine/bus.h> 40#include <machine/resource.h> 41#include <sys/bus_dma.h> 42#include <sys/rman.h> 43#include <sys/queue.h> 44#include <sys/sysctl.h> | 1/************************************************************************** 2 3Copyright (c) 2007, Chelsio Inc. 4All rights reserved. 5 6Redistribution and use in source and binary forms, with or without 7modification, are permitted provided that the following conditions are met: 8 --- 28 unchanged lines hidden (view full) --- 37#include <sys/bus.h> 38#include <sys/conf.h> 39#include <machine/bus.h> 40#include <machine/resource.h> 41#include <sys/bus_dma.h> 42#include <sys/rman.h> 43#include <sys/queue.h> 44#include <sys/sysctl.h> |
45#include <sys/syslog.h> | |
46#include <sys/taskqueue.h> 47 48#include <sys/proc.h> 49#include <sys/sched.h> 50#include <sys/smp.h> 51#include <sys/systm.h> | 45#include <sys/taskqueue.h> 46 47#include <sys/proc.h> 48#include <sys/sched.h> 49#include <sys/smp.h> 50#include <sys/systm.h> |
| 51#include <sys/syslog.h> |
52 53#include <netinet/in_systm.h> 54#include <netinet/in.h> 55#include <netinet/ip.h> 56#include <netinet/tcp.h> 57 58#include <dev/pci/pcireg.h> 59#include <dev/pci/pcivar.h> 60 61#include <vm/vm.h> | 52 53#include <netinet/in_systm.h> 54#include <netinet/in.h> 55#include <netinet/ip.h> 56#include <netinet/tcp.h> 57 58#include <dev/pci/pcireg.h> 59#include <dev/pci/pcivar.h> 60 61#include <vm/vm.h> |
62#include <vm/vm_page.h> 63#include <vm/vm_map.h> | 62#include <vm/pmap.h> |
64 65#ifdef CONFIG_DEFINED 66#include <cxgb_include.h> 67#include <sys/mvec.h> 68#else 69#include <dev/cxgb/cxgb_include.h> 70#include <dev/cxgb/sys/mvec.h> 71#endif 72 | 63 64#ifdef CONFIG_DEFINED 65#include <cxgb_include.h> 66#include <sys/mvec.h> 67#else 68#include <dev/cxgb/cxgb_include.h> 69#include <dev/cxgb/sys/mvec.h> 70#endif 71 |
73uint32_t collapse_free = 0; 74uint32_t mb_free_vec_free = 0; | |
75int txq_fills = 0; | 72int txq_fills = 0; |
76int collapse_mbufs = 0; | |
77static int bogus_imm = 0; 78#ifndef DISABLE_MBUF_IOVEC 79static int recycle_enable = 1; 80#endif | 73static int bogus_imm = 0; 74#ifndef DISABLE_MBUF_IOVEC 75static int recycle_enable = 1; 76#endif |
| 77 extern int cxgb_txq_buf_ring_size; 78int cxgb_cached_allocations; 79int cxgb_cached; 80int cxgb_ext_freed; |
81 82#define USE_GTS 0 83 84#define SGE_RX_SM_BUF_SIZE 1536 85#define SGE_RX_DROP_THRES 16 86#define SGE_RX_COPY_THRES 128 87 88/* --- 40 unchanged lines hidden (view full) --- 129#define TX_SW_DESC_MAPPED (1 << 4) 130 131#define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0) 132#define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP) 133#define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP) 134#define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP) 135 136struct tx_sw_desc { /* SW state per Tx descriptor */ | 81 82#define USE_GTS 0 83 84#define SGE_RX_SM_BUF_SIZE 1536 85#define SGE_RX_DROP_THRES 16 86#define SGE_RX_COPY_THRES 128 87 88/* --- 40 unchanged lines hidden (view full) --- 129#define TX_SW_DESC_MAPPED (1 << 4) 130 131#define RSPQ_NSOP_NEOP G_RSPD_SOP_EOP(0) 132#define RSPQ_EOP G_RSPD_SOP_EOP(F_RSPD_EOP) 133#define RSPQ_SOP G_RSPD_SOP_EOP(F_RSPD_SOP) 134#define RSPQ_SOP_EOP G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP) 135 136struct tx_sw_desc { /* SW state per Tx descriptor */ |
137 struct mbuf *m; | 137 struct mbuf_iovec mi; |
138 bus_dmamap_t map; 139 int flags; 140}; 141 142struct rx_sw_desc { /* SW state per Rx descriptor */ | 138 bus_dmamap_t map; 139 int flags; 140}; 141 142struct rx_sw_desc { /* SW state per Rx descriptor */ |
143 void *cl; 144 bus_dmamap_t map; 145 int flags; | 143 caddr_t rxsd_cl; 144 uint32_t *rxsd_ref; 145 caddr_t data; 146 bus_dmamap_t map; 147 int flags; |
146}; 147 148struct txq_state { 149 unsigned int compl; 150 unsigned int gen; 151 unsigned int pidx; 152}; 153 --- 27 unchanged lines hidden (view full) --- 181# error "SGE_NUM_GENBITS must be 1 or 2" 182#endif 183}; 184 185 186static int lro_default = 0; 187int cxgb_debug = 0; 188 | 148}; 149 150struct txq_state { 151 unsigned int compl; 152 unsigned int gen; 153 unsigned int pidx; 154}; 155 --- 27 unchanged lines hidden (view full) --- 183# error "SGE_NUM_GENBITS must be 1 or 2" 184#endif 185}; 186 187 188static int lro_default = 0; 189int cxgb_debug = 0; 190 |
189static void t3_free_qset(adapter_t *sc, struct sge_qset *q); | |
190static void sge_timer_cb(void *arg); 191static void sge_timer_reclaim(void *arg, int ncount); 192static void sge_txq_reclaim_handler(void *arg, int ncount); | 191static void sge_timer_cb(void *arg); 192static void sge_timer_reclaim(void *arg, int ncount); 193static void sge_txq_reclaim_handler(void *arg, int ncount); |
193static int free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec); | |
194 195/** 196 * reclaim_completed_tx - reclaims completed Tx descriptors 197 * @adapter: the adapter 198 * @q: the Tx queue to reclaim completed descriptors from 199 * 200 * Reclaims Tx descriptors that the SGE has indicated it has processed, 201 * and frees the associated buffers if possible. Called with the Tx 202 * queue's lock held. 203 */ 204static __inline int | 194 195/** 196 * reclaim_completed_tx - reclaims completed Tx descriptors 197 * @adapter: the adapter 198 * @q: the Tx queue to reclaim completed descriptors from 199 * 200 * Reclaims Tx descriptors that the SGE has indicated it has processed, 201 * and frees the associated buffers if possible. Called with the Tx 202 * queue's lock held. 203 */ 204static __inline int |
205reclaim_completed_tx(struct sge_txq *q, int nbufs, struct mbuf **mvec) | 205reclaim_completed_tx(struct sge_txq *q) |
206{ | 206{ |
207 int reclaimed, reclaim = desc_reclaimable(q); 208 int n = 0; | 207 int reclaim = desc_reclaimable(q); |
209 210 mtx_assert(&q->lock, MA_OWNED); 211 if (reclaim > 0) { | 208 209 mtx_assert(&q->lock, MA_OWNED); 210 if (reclaim > 0) { |
212 n = free_tx_desc(q, min(reclaim, nbufs), mvec); 213 reclaimed = min(reclaim, nbufs); 214 q->cleaned += reclaimed; 215 q->in_use -= reclaimed; | 211 t3_free_tx_desc(q, reclaim); 212 q->cleaned += reclaim; 213 q->in_use -= reclaim; |
216 } | 214 } |
217 return (n); | 215 return (reclaim); |
218} 219 220/** 221 * should_restart_tx - are there enough resources to restart a Tx queue? 222 * @q: the Tx queue 223 * 224 * Checks if there are enough descriptors to restart a suspended Tx queue. 225 */ --- 67 unchanged lines hidden (view full) --- 293 * @resp: the response descriptor containing the packet data 294 * 295 * Return a packet containing the immediate data of the given response. 296 */ 297#ifdef DISABLE_MBUF_IOVEC 298static __inline int 299get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct t3_mbuf_hdr *mh) 300{ | 216} 217 218/** 219 * should_restart_tx - are there enough resources to restart a Tx queue? 220 * @q: the Tx queue 221 * 222 * Checks if there are enough descriptors to restart a suspended Tx queue. 223 */ --- 67 unchanged lines hidden (view full) --- 291 * @resp: the response descriptor containing the packet data 292 * 293 * Return a packet containing the immediate data of the given response. 294 */ 295#ifdef DISABLE_MBUF_IOVEC 296static __inline int 297get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct t3_mbuf_hdr *mh) 298{ |
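Reviewer note on the reclaim_completed_tx() change above: the old interface copied up to nbufs completed mbufs into a caller-supplied vector and left freeing to the caller; the new one frees the buffers itself through t3_free_tx_desc() and only reports how many descriptors were recovered. A minimal sketch of the expected calling pattern, assuming a hypothetical txq pointer and the lock convention stated in the function comment:

```c
/* Sketch only -- "txq" is a hypothetical struct sge_txq pointer. */
static void
example_reclaim(struct sge_txq *txq)
{
	int reclaimed;

	mtx_lock(&txq->lock);
	/* Buffers are released inside t3_free_tx_desc(); no mbuf vector needed. */
	reclaimed = reclaim_completed_tx(txq);
	mtx_unlock(&txq->lock);

	if (reclaimed > 0)
		printf("recovered %d tx descriptors\n", reclaimed);
}
```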
301 struct mbuf *m; 302 int len; 303 uint32_t flags = ntohl(resp->flags); 304 uint8_t sopeop = G_RSPD_SOP_EOP(flags); 305 306 /* 307 * would be a firmware bug 308 */ 309 if (sopeop == RSPQ_NSOP_NEOP || sopeop == RSPQ_SOP) | |
310 return (0); 311 | 299 return (0); 300 |
312 m = m_gethdr(M_NOWAIT, MT_DATA); 313 len = G_RSPD_LEN(ntohl(resp->len_cq)); | 301 m = m_gethdr(M_DONTWAIT, MT_DATA); 302 len = IMMED_PKT_SIZE; |
314 315 if (m) { | 303 304 if (m) { |
316 MH_ALIGN(m, IMMED_PKT_SIZE); | |
317 memcpy(m->m_data, resp->imm_data, IMMED_PKT_SIZE); | 305 memcpy(m->m_data, resp->imm_data, IMMED_PKT_SIZE); |
318 m->m_len = len; 319 320 switch (sopeop) { 321 case RSPQ_SOP_EOP: 322 mh->mh_head = mh->mh_tail = m; 323 m->m_pkthdr.len = len; 324 m->m_flags |= M_PKTHDR; 325 break; 326 case RSPQ_EOP: 327 m->m_flags &= ~M_PKTHDR; 328 mh->mh_head->m_pkthdr.len += len; 329 mh->mh_tail->m_next = m; 330 mh->mh_tail = m; 331 break; 332 } | 306 m->m_pkthdr.len = m->m_len = len; |
333 } 334 return (m != NULL); 335} 336 337#else 338static int 339get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m, void *cl, uint32_t flags) 340{ | 307 } 308 return (m != NULL); 309} 310 311#else 312static int 313get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m, void *cl, uint32_t flags) 314{ |
341 int len, error; 342 uint8_t sopeop = G_RSPD_SOP_EOP(flags); 343 344 /* 345 * would be a firmware bug 346 */ 347 len = G_RSPD_LEN(ntohl(resp->len_cq)); 348 if (sopeop == RSPQ_NSOP_NEOP || sopeop == RSPQ_SOP) { 349 if (cxgb_debug) 350 device_printf(sc->dev, "unexpected value sopeop=%d flags=0x%x len=%din get_imm_packet\n", sopeop, flags, len); 351 bogus_imm++; 352 return (EINVAL); 353 } 354 error = 0; 355 switch (sopeop) { 356 case RSPQ_SOP_EOP: 357 m->m_len = m->m_pkthdr.len = len; 358 memcpy(mtod(m, uint8_t *), resp->imm_data, len); 359 break; 360 case RSPQ_EOP: 361 memcpy(cl, resp->imm_data, len); 362 m_iovappend(m, cl, MSIZE, len, 0); 363 break; 364 default: 365 bogus_imm++; 366 error = EINVAL; 367 } | |
368 | 315 |
369 return (error); | 316 m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE; 317 memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE); 318 return (0); 319 |
370} 371#endif 372 373static __inline u_int 374flits_to_desc(u_int n) 375{ 376 return (flit_desc_map[n]); 377} --- 30 unchanged lines hidden (view full) --- 408 /* XXX Does ETHER_ALIGN need to be accounted for here? */ 409 p->max_pkt_size = MJUM16BYTES - sizeof(struct cpl_rx_data); 410 411 for (i = 0; i < SGE_QSETS; ++i) { 412 struct qset_params *q = p->qset + i; 413 414 q->polling = adap->params.rev > 0; 415 | 320} 321#endif 322 323static __inline u_int 324flits_to_desc(u_int n) 325{ 326 return (flit_desc_map[n]); 327} --- 30 unchanged lines hidden (view full) --- 358 /* XXX Does ETHER_ALIGN need to be accounted for here? */ 359 p->max_pkt_size = MJUM16BYTES - sizeof(struct cpl_rx_data); 360 361 for (i = 0; i < SGE_QSETS; ++i) { 362 struct qset_params *q = p->qset + i; 363 364 q->polling = adap->params.rev > 0; 365 |
416 if (adap->params.nports > 2) | 366 if (adap->params.nports > 2) { |
417 q->coalesce_nsecs = 50000; | 367 q->coalesce_nsecs = 50000; |
418 else | 368 } else { 369#ifdef INVARIANTS 370 q->coalesce_nsecs = 20000; 371#else |
419 q->coalesce_nsecs = 5000; | 372 q->coalesce_nsecs = 5000; |
420 | 373#endif 374 } |
421 q->rspq_size = RSPQ_Q_SIZE; 422 q->fl_size = FL_Q_SIZE; 423 q->jumbo_size = JUMBO_Q_SIZE; 424 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE; 425 q->txq_size[TXQ_OFLD] = 1024; 426 q->txq_size[TXQ_CTRL] = 256; 427 q->cong_thres = 0; 428 } --- 75 unchanged lines hidden (view full) --- 504void 505t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) 506{ 507 508 qs->rspq.holdoff_tmr = max(p->coalesce_nsecs/100, 1U); 509 qs->rspq.polling = 0 /* p->polling */; 510} 511 | 375 q->rspq_size = RSPQ_Q_SIZE; 376 q->fl_size = FL_Q_SIZE; 377 q->jumbo_size = JUMBO_Q_SIZE; 378 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE; 379 q->txq_size[TXQ_OFLD] = 1024; 380 q->txq_size[TXQ_CTRL] = 256; 381 q->cong_thres = 0; 382 } --- 75 unchanged lines hidden (view full) --- 458void 459t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) 460{ 461 462 qs->rspq.holdoff_tmr = max(p->coalesce_nsecs/100, 1U); 463 qs->rspq.polling = 0 /* p->polling */; 464} 465 |
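A quick worked example of what the new INVARIANTS default means for interrupt coalescing. The roughly 100ns timer unit is inferred from the /100 conversion in t3_update_qset_coalesce() above; it is not stated explicitly in this diff:

```c
/*
 * holdoff_tmr = max(coalesce_nsecs / 100, 1)
 *
 *   coalesce_nsecs =  5000 (default)     -> holdoff_tmr =  50
 *   coalesce_nsecs = 20000 (INVARIANTS)  -> holdoff_tmr = 200
 *   coalesce_nsecs = 50000 (nports > 2)  -> holdoff_tmr = 500
 *
 * i.e. debug (INVARIANTS) kernels hold off response-queue interrupts four
 * times longer than the regular default.
 */
```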
| 466#if !defined(__i386__) && !defined(__amd64__) |
512static void 513refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 514{ 515 struct refill_fl_cb_arg *cb_arg = arg; 516 517 cb_arg->error = error; 518 cb_arg->seg = segs[0]; 519 cb_arg->nseg = nseg; 520 521} | 467static void 468refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 469{ 470 struct refill_fl_cb_arg *cb_arg = arg; 471 472 cb_arg->error = error; 473 cb_arg->seg = segs[0]; 474 cb_arg->nseg = nseg; 475 476} |
522 | 477#endif |
523/** 524 * refill_fl - refill an SGE free-buffer list 525 * @sc: the controller softc 526 * @q: the free-list to refill 527 * @n: the number of new buffers to allocate 528 * 529 * (Re)populate an SGE free-buffer list with up to @n new packet buffers. 530 * The caller must assure that @n does not exceed the queue's capacity. 531 */ 532static void 533refill_fl(adapter_t *sc, struct sge_fl *q, int n) 534{ 535 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 536 struct rx_desc *d = &q->desc[q->pidx]; 537 struct refill_fl_cb_arg cb_arg; | 478/** 479 * refill_fl - refill an SGE free-buffer list 480 * @sc: the controller softc 481 * @q: the free-list to refill 482 * @n: the number of new buffers to allocate 483 * 484 * (Re)populate an SGE free-buffer list with up to @n new packet buffers. 485 * The caller must assure that @n does not exceed the queue's capacity. 486 */ 487static void 488refill_fl(adapter_t *sc, struct sge_fl *q, int n) 489{ 490 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 491 struct rx_desc *d = &q->desc[q->pidx]; 492 struct refill_fl_cb_arg cb_arg; |
538 void *cl; | 493 caddr_t cl; |
539 int err; 540 541 cb_arg.error = 0; 542 while (n--) { 543 /* 544 * We only allocate a cluster, mbuf allocation happens after rx 545 */ | 494 int err; 495 496 cb_arg.error = 0; 497 while (n--) { 498 /* 499 * We only allocate a cluster, mbuf allocation happens after rx 500 */ |
546 if ((cl = m_cljget(NULL, M_DONTWAIT, q->buf_size)) == NULL) { | 501 if ((cl = cxgb_cache_get(q->zone)) == NULL) { |
547 log(LOG_WARNING, "Failed to allocate cluster\n"); 548 goto done; 549 } | 502 log(LOG_WARNING, "Failed to allocate cluster\n"); 503 goto done; 504 } |
| 505 |
550 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) { 551 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) { 552 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err); 553 uma_zfree(q->zone, cl); 554 goto done; 555 } 556 sd->flags |= RX_SW_DESC_MAP_CREATED; 557 } | 506 if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) { 507 if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) { 508 log(LOG_WARNING, "bus_dmamap_create failed %d\n", err); 509 uma_zfree(q->zone, cl); 510 goto done; 511 } 512 sd->flags |= RX_SW_DESC_MAP_CREATED; 513 } |
558 err = bus_dmamap_load(q->entry_tag, sd->map, cl, q->buf_size, | 514#if !defined(__i386__) && !defined(__amd64__) 515 err = bus_dmamap_load(q->entry_tag, sd->map, 516 cl + sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t), q->buf_size, |
559 refill_fl_cb, &cb_arg, 0); 560 561 if (err != 0 || cb_arg.error) { 562 log(LOG_WARNING, "failure in refill_fl %d\n", cb_arg.error); 563 /* 564 * XXX free cluster 565 */ 566 return; 567 } | 517 refill_fl_cb, &cb_arg, 0); 518 519 if (err != 0 || cb_arg.error) { 520 log(LOG_WARNING, "failure in refill_fl %d\n", cb_arg.error); 521 /* 522 * XXX free cluster 523 */ 524 return; 525 } |
568 | 526#else 527 cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)(cl + sizeof(struct m_hdr) + 528 sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t))); 529#endif |
569 sd->flags |= RX_SW_DESC_INUSE; | 530 sd->flags |= RX_SW_DESC_INUSE; |
570 sd->cl = cl; | 531 sd->rxsd_cl = cl; 532 sd->rxsd_ref = (uint32_t *)(cl + sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_)); 533 sd->data = cl + sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t); |
571 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff); 572 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff); 573 d->len_gen = htobe32(V_FLD_GEN1(q->gen)); 574 d->gen2 = htobe32(V_FLD_GEN2(q->gen)); 575 576 d++; 577 sd++; 578 --- 25 unchanged lines hidden (view full) --- 604 u_int cidx = q->cidx; 605 606 while (q->credits--) { 607 struct rx_sw_desc *d = &q->sdesc[cidx]; 608 609 if (d->flags & RX_SW_DESC_INUSE) { 610 bus_dmamap_unload(q->entry_tag, d->map); 611 bus_dmamap_destroy(q->entry_tag, d->map); | 534 d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff); 535 d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff); 536 d->len_gen = htobe32(V_FLD_GEN1(q->gen)); 537 d->gen2 = htobe32(V_FLD_GEN2(q->gen)); 538 539 d++; 540 sd++; 541 --- 25 unchanged lines hidden (view full) --- 567 u_int cidx = q->cidx; 568 569 while (q->credits--) { 570 struct rx_sw_desc *d = &q->sdesc[cidx]; 571 572 if (d->flags & RX_SW_DESC_INUSE) { 573 bus_dmamap_unload(q->entry_tag, d->map); 574 bus_dmamap_destroy(q->entry_tag, d->map); |
612 uma_zfree(q->zone, d->cl); | 575 uma_zfree(q->zone, d->rxsd_cl); |
613 } | 576 } |
614 d->cl = NULL; | 577 d->rxsd_cl = NULL; |
615 if (++cidx == q->size) 616 cidx = 0; 617 } 618} 619 620static __inline void 621__refill_fl(adapter_t *adap, struct sge_fl *fl) 622{ 623 refill_fl(adap, fl, min(16U, fl->size - fl->credits)); 624} 625 | 578 if (++cidx == q->size) 579 cidx = 0; 580 } 581} 582 583static __inline void 584__refill_fl(adapter_t *adap, struct sge_fl *fl) 585{ 586 refill_fl(adap, fl, min(16U, fl->size - fl->credits)); 587} 588 |
| 589static __inline void 590__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max) 591{ 592 if ((fl->size - fl->credits) < max) 593 refill_fl(adap, fl, min(max, fl->size - fl->credits)); 594} 595 596void 597refill_fl_service(adapter_t *adap, struct sge_fl *fl) 598{ 599 __refill_fl_lt(adap, fl, 512); 600} 601 |
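The free-list refill rework above hands raw clusters from cxgb_cache_get() to the hardware and reserves space at the front of each cluster for mbuf bookkeeping plus an embedded reference count; the same offset arithmetic is repeated inline several times in refill_fl(). A hypothetical helper, not in the diff, that captures the layout the code assumes:

```c
/* Hypothetical helper, mirroring the offsets used in refill_fl(). */
#define RXSD_HDR_SPACE	(sizeof(struct m_hdr) + sizeof(struct pkthdr) + \
			 sizeof(struct m_ext_))

static __inline void
rxsd_set_pointers(struct rx_sw_desc *sd, caddr_t cl)
{
	sd->rxsd_cl  = cl;					/* base, later returned via uma_zfree() */
	sd->rxsd_ref = (uint32_t *)(cl + RXSD_HDR_SPACE);	/* embedded refcount */
	sd->data     = cl + RXSD_HDR_SPACE + sizeof(uint32_t);	/* DMA-able payload handed to the chip */
}
```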
626#ifndef DISABLE_MBUF_IOVEC 627/** 628 * recycle_rx_buf - recycle a receive buffer 629 * @adapter: the adapter 630 * @q: the SGE free list 631 * @idx: index of buffer to recycle 632 * 633 * Recycles the specified buffer on the given free list by adding it at --- 114 unchanged lines hidden (view full) --- 748 * 749 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell 750 * fifo overflows and the FW doesn't implement any recovery scheme yet. 751 */ 752static void 753sge_timer_cb(void *arg) 754{ 755 adapter_t *sc = arg; | 602#ifndef DISABLE_MBUF_IOVEC 603/** 604 * recycle_rx_buf - recycle a receive buffer 605 * @adapter: the adapter 606 * @q: the SGE free list 607 * @idx: index of buffer to recycle 608 * 609 * Recycles the specified buffer on the given free list by adding it at --- 114 unchanged lines hidden (view full) --- 724 * 725 * d) Ring doorbells for T304 tunnel queues since we have seen doorbell 726 * fifo overflows and the FW doesn't implement any recovery scheme yet. 727 */ 728static void 729sge_timer_cb(void *arg) 730{ 731 adapter_t *sc = arg; |
756 struct port_info *p; | 732#ifndef IFNET_MULTIQUEUE 733 struct port_info *pi; |
757 struct sge_qset *qs; 758 struct sge_txq *txq; 759 int i, j; 760 int reclaim_eth, reclaim_ofl, refill_rx; | 734 struct sge_qset *qs; 735 struct sge_txq *txq; 736 int i, j; 737 int reclaim_eth, reclaim_ofl, refill_rx; |
761 | 738 |
762 for (i = 0; i < sc->params.nports; i++) 763 for (j = 0; j < sc->port[i].nqsets; j++) { 764 qs = &sc->sge.qs[i + j]; 765 txq = &qs->txq[0]; 766 reclaim_eth = txq[TXQ_ETH].processed - txq[TXQ_ETH].cleaned; 767 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned; 768 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || 769 (qs->fl[1].credits < qs->fl[1].size)); 770 if (reclaim_eth || reclaim_ofl || refill_rx) { | 739 for (i = 0; i < sc->params.nports; i++) 740 for (j = 0; j < sc->port[i].nqsets; j++) { 741 qs = &sc->sge.qs[i + j]; 742 txq = &qs->txq[0]; 743 reclaim_eth = txq[TXQ_ETH].processed - txq[TXQ_ETH].cleaned; 744 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned; 745 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) || 746 (qs->fl[1].credits < qs->fl[1].size)); 747 if (reclaim_eth || reclaim_ofl || refill_rx) { |
771 p = &sc->port[i]; 772 taskqueue_enqueue(p->tq, &p->timer_reclaim_task); | 748 pi = &sc->port[i]; 749 taskqueue_enqueue(pi->tq, &pi->timer_reclaim_task); |
773 break; 774 } 775 } | 750 break; 751 } 752 } |
| 753#endif |
776 if (sc->params.nports > 2) { 777 int i; 778 779 for_each_port(sc, i) { 780 struct port_info *pi = &sc->port[i]; 781 782 t3_write_reg(sc, A_SG_KDOORBELL, 783 F_SELEGRCNTX | --- 10 unchanged lines hidden (view full) --- 794 * 795 */ 796int 797t3_sge_init_adapter(adapter_t *sc) 798{ 799 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE); 800 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc); 801 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc); | 754 if (sc->params.nports > 2) { 755 int i; 756 757 for_each_port(sc, i) { 758 struct port_info *pi = &sc->port[i]; 759 760 t3_write_reg(sc, A_SG_KDOORBELL, 761 F_SELEGRCNTX | --- 10 unchanged lines hidden (view full) --- 772 * 773 */ 774int 775t3_sge_init_adapter(adapter_t *sc) 776{ 777 callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE); 778 callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc); 779 TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc); |
| 780 mi_init(); 781 cxgb_cache_init(); |
802 return (0); 803} 804 805int | 782 return (0); 783} 784 785int |
806t3_sge_init_port(struct port_info *p) | 786t3_sge_init_port(struct port_info *pi) |
807{ | 787{ |
808 TASK_INIT(&p->timer_reclaim_task, 0, sge_timer_reclaim, p); | 788 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi); |
809 return (0); 810} 811 812void 813t3_sge_deinit_sw(adapter_t *sc) 814{ 815 int i; 816 817 callout_drain(&sc->sge_timer_ch); 818 if (sc->tq) 819 taskqueue_drain(sc->tq, &sc->slow_intr_task); 820 for (i = 0; i < sc->params.nports; i++) 821 if (sc->port[i].tq != NULL) 822 taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task); | 789 return (0); 790} 791 792void 793t3_sge_deinit_sw(adapter_t *sc) 794{ 795 int i; 796 797 callout_drain(&sc->sge_timer_ch); 798 if (sc->tq) 799 taskqueue_drain(sc->tq, &sc->slow_intr_task); 800 for (i = 0; i < sc->params.nports; i++) 801 if (sc->port[i].tq != NULL) 802 taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task); |
| 803 804 mi_deinit(); |
823} 824 825/** 826 * refill_rspq - replenish an SGE response queue 827 * @adapter: the adapter 828 * @q: the response queue to replenish 829 * @credits: how many new responses to make available 830 * --- 7 unchanged lines hidden (view full) --- 838 /* mbufs are allocated on demand when a rspq entry is processed. */ 839 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN, 840 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); 841} 842 843static __inline void 844sge_txq_reclaim_(struct sge_txq *txq) 845{ | 805} 806 807/** 808 * refill_rspq - replenish an SGE response queue 809 * @adapter: the adapter 810 * @q: the response queue to replenish 811 * @credits: how many new responses to make available 812 * --- 7 unchanged lines hidden (view full) --- 820 /* mbufs are allocated on demand when a rspq entry is processed. */ 821 t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN, 822 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); 823} 824 825static __inline void 826sge_txq_reclaim_(struct sge_txq *txq) 827{ |
846 int reclaimable, i, n; 847 struct mbuf *m_vec[TX_CLEAN_MAX_DESC]; 848 struct port_info *p; | 828 int reclaimable, n; 829 struct port_info *pi; |
849 | 830 |
850 p = txq->port; | 831 pi = txq->port; |
851reclaim_more: 852 n = 0; 853 reclaimable = desc_reclaimable(txq); 854 if (reclaimable > 0 && mtx_trylock(&txq->lock)) { | 832reclaim_more: 833 n = 0; 834 reclaimable = desc_reclaimable(txq); 835 if (reclaimable > 0 && mtx_trylock(&txq->lock)) { |
855 n = reclaim_completed_tx(txq, TX_CLEAN_MAX_DESC, m_vec); | 836 n = reclaim_completed_tx(txq); |
856 mtx_unlock(&txq->lock); 857 } | 837 mtx_unlock(&txq->lock); 838 } |
858 if (n == 0) 859 return; 860 861 for (i = 0; i < n; i++) { 862 m_freem(m_vec[i]); 863 } 864 if (p && p->ifp->if_drv_flags & IFF_DRV_OACTIVE && | 839 if (pi && pi->ifp->if_drv_flags & IFF_DRV_OACTIVE && |
865 txq->size - txq->in_use >= TX_START_MAX_DESC) { 866 txq_fills++; | 840 txq->size - txq->in_use >= TX_START_MAX_DESC) { 841 txq_fills++; |
867 p->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 868 taskqueue_enqueue(p->tq, &p->start_task); | 842 pi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 843 taskqueue_enqueue(pi->tq, &pi->start_task); |
869 } 870 871 if (n) 872 goto reclaim_more; 873} 874 875static void 876sge_txq_reclaim_handler(void *arg, int ncount) 877{ 878 struct sge_txq *q = arg; 879 880 sge_txq_reclaim_(q); 881} 882 883static void 884sge_timer_reclaim(void *arg, int ncount) 885{ | 844 } 845 846 if (n) 847 goto reclaim_more; 848} 849 850static void 851sge_txq_reclaim_handler(void *arg, int ncount) 852{ 853 struct sge_txq *q = arg; 854 855 sge_txq_reclaim_(q); 856} 857 858static void 859sge_timer_reclaim(void *arg, int ncount) 860{ |
886 struct port_info *p = arg; 887 int i, nqsets = p->nqsets; 888 adapter_t *sc = p->adapter; | 861 struct port_info *pi = arg; 862 int i, nqsets = pi->nqsets; 863 adapter_t *sc = pi->adapter; |
889 struct sge_qset *qs; 890 struct sge_txq *txq; 891 struct mtx *lock; 892 | 864 struct sge_qset *qs; 865 struct sge_txq *txq; 866 struct mtx *lock; 867 |
| 868#ifdef IFNET_MULTIQUEUE 869 panic("%s should not be called with multiqueue support\n", __FUNCTION__); 870#endif |
893 for (i = 0; i < nqsets; i++) { 894 qs = &sc->sge.qs[i]; 895 txq = &qs->txq[TXQ_ETH]; 896 sge_txq_reclaim_(txq); 897 898 txq = &qs->txq[TXQ_OFLD]; 899 sge_txq_reclaim_(txq); 900 --- 36 unchanged lines hidden (view full) --- 937 qs->rspq.cntxt_id = id; 938 qs->fl[0].cntxt_id = 2 * id; 939 qs->fl[1].cntxt_id = 2 * id + 1; 940 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; 941 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; 942 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; 943 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; 944 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; | 871 for (i = 0; i < nqsets; i++) { 872 qs = &sc->sge.qs[i]; 873 txq = &qs->txq[TXQ_ETH]; 874 sge_txq_reclaim_(txq); 875 876 txq = &qs->txq[TXQ_OFLD]; 877 sge_txq_reclaim_(txq); 878 --- 36 unchanged lines hidden (view full) --- 915 qs->rspq.cntxt_id = id; 916 qs->fl[0].cntxt_id = 2 * id; 917 qs->fl[1].cntxt_id = 2 * id + 1; 918 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; 919 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; 920 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; 921 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; 922 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; |
| 923 924 mbufq_init(&qs->txq[TXQ_ETH].sendq); 925 mbufq_init(&qs->txq[TXQ_OFLD].sendq); 926 mbufq_init(&qs->txq[TXQ_CTRL].sendq); |
945} 946 947 948static void 949txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs) 950{ 951 txq->in_use += ndesc; 952 /* --- 27 unchanged lines hidden (view full) --- 980{ 981 unsigned int flits; 982 983 if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt)) 984 return 1; 985 986 flits = sgl_len(nsegs) + 2; 987#ifdef TSO_SUPPORTED | 927} 928 929 930static void 931txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs) 932{ 933 txq->in_use += ndesc; 934 /* --- 27 unchanged lines hidden (view full) --- 962{ 963 unsigned int flits; 964 965 if (m->m_pkthdr.len <= WR_LEN - sizeof(struct cpl_tx_pkt)) 966 return 1; 967 968 flits = sgl_len(nsegs) + 2; 969#ifdef TSO_SUPPORTED |
988 if (m->m_pkthdr.csum_flags & (CSUM_TSO)) | 970 if (m->m_pkthdr.csum_flags & CSUM_TSO) |
989 flits++; 990#endif 991 return flits_to_desc(flits); 992} 993 994static unsigned int 995busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq, | 971 flits++; 972#endif 973 return flits_to_desc(flits); 974} 975 976static unsigned int 977busdma_map_mbufs(struct mbuf **m, struct sge_txq *txq, |
996 struct tx_sw_desc *stx, bus_dma_segment_t *segs, int *nsegs) | 978 struct tx_sw_desc *txsd, bus_dma_segment_t *segs, int *nsegs) |
997{ 998 struct mbuf *m0; | 979{ 980 struct mbuf *m0; |
999 int err, pktlen; | 981 int err, pktlen, pass = 0; |
1000 | 982 |
| 983retry: 984 err = 0; |
1001 m0 = *m; 1002 pktlen = m0->m_pkthdr.len; | 985 m0 = *m; 986 pktlen = m0->m_pkthdr.len; |
| 987#if defined(__i386__) || defined(__amd64__) 988 if (busdma_map_sg_collapse(m, segs, nsegs) == 0) { 989 goto done; 990 } else 991#endif 992 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, txsd->map, m0, segs, nsegs, 0); |
1003 | 993 |
1004 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, stx->map, m0, segs, nsegs, 0); 1005#ifdef DEBUG 1006 if (err) { 1007 int n = 0; 1008 struct mbuf *mtmp = m0; 1009 while(mtmp) { 1010 n++; 1011 mtmp = mtmp->m_next; 1012 } 1013 printf("map_mbufs: bus_dmamap_load_mbuf_sg failed with %d - pkthdr.len==%d nmbufs=%d\n", 1014 err, m0->m_pkthdr.len, n); | 994 if (err == 0) { 995 goto done; |
1015 } | 996 } |
1016#endif 1017 if (err == EFBIG) { | 997 if (err == EFBIG && pass == 0) { 998 pass = 1; |
1018 /* Too many segments, try to defrag */ 1019 m0 = m_defrag(m0, M_DONTWAIT); 1020 if (m0 == NULL) { 1021 m_freem(*m); 1022 *m = NULL; 1023 return (ENOBUFS); 1024 } 1025 *m = m0; | 999 /* Too many segments, try to defrag */ 1000 m0 = m_defrag(m0, M_DONTWAIT); 1001 if (m0 == NULL) { 1002 m_freem(*m); 1003 *m = NULL; 1004 return (ENOBUFS); 1005 } 1006 *m = m0; |
1026 err = bus_dmamap_load_mbuf_sg(txq->entry_tag, stx->map, m0, segs, nsegs, 0); 1027 } 1028 1029 if (err == ENOMEM) { | 1007 goto retry; 1008 } else if (err == ENOMEM) { |
1030 return (err); | 1009 return (err); |
1031 } 1032 1033 if (err) { | 1010 } if (err) { |
1034 if (cxgb_debug) 1035 printf("map failure err=%d pktlen=%d\n", err, pktlen); 1036 m_freem(m0); 1037 *m = NULL; 1038 return (err); 1039 } | 1011 if (cxgb_debug) 1012 printf("map failure err=%d pktlen=%d\n", err, pktlen); 1013 m_freem(m0); 1014 *m = NULL; 1015 return (err); 1016 } |
| 1017done: 1018#if !defined(__i386__) && !defined(__amd64__) 1019 bus_dmamap_sync(txq->entry_tag, txsd->map, BUS_DMASYNC_PREWRITE); 1020#endif 1021 txsd->flags |= TX_SW_DESC_MAPPED; |
1040 | 1022 |
1041 bus_dmamap_sync(txq->entry_tag, stx->map, BUS_DMASYNC_PREWRITE); 1042 stx->flags |= TX_SW_DESC_MAPPED; 1043 | |
1044 return (0); 1045} 1046 1047/** 1048 * make_sgl - populate a scatter/gather list for a packet 1049 * @sgp: the SGL to populate 1050 * @segs: the packet dma segments 1051 * @nsegs: the number of segments 1052 * 1053 * Generates a scatter/gather list for the buffers that make up a packet 1054 * and returns the SGL size in 8-byte words. The caller must size the SGL 1055 * appropriately. 1056 */ 1057static __inline void 1058make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs) 1059{ 1060 int i, idx; 1061 | 1023 return (0); 1024} 1025 1026/** 1027 * make_sgl - populate a scatter/gather list for a packet 1028 * @sgp: the SGL to populate 1029 * @segs: the packet dma segments 1030 * @nsegs: the number of segments 1031 * 1032 * Generates a scatter/gather list for the buffers that make up a packet 1033 * and returns the SGL size in 8-byte words. The caller must size the SGL 1034 * appropriately. 1035 */ 1036static __inline void 1037make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs) 1038{ 1039 int i, idx; 1040 |
1062 for (idx = 0, i = 0; i < nsegs; i++, idx ^= 1) { | 1041 for (idx = 0, i = 0; i < nsegs; i++) { 1042 /* 1043 * firmware doesn't like empty segments 1044 */ 1045 if (segs[i].ds_len == 0) 1046 continue; |
1063 if (i && idx == 0) 1064 ++sgp; | 1047 if (i && idx == 0) 1048 ++sgp; |
1065 | 1049 |
1066 sgp->len[idx] = htobe32(segs[i].ds_len); 1067 sgp->addr[idx] = htobe64(segs[i].ds_addr); | 1050 sgp->len[idx] = htobe32(segs[i].ds_len); 1051 sgp->addr[idx] = htobe64(segs[i].ds_addr); |
| 1052 idx ^= 1; |
1068 } 1069 1070 if (idx) 1071 sgp->len[idx] = 0; 1072} 1073 1074/** 1075 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell --- 31 unchanged lines hidden (view full) --- 1107static __inline void 1108wr_gen2(struct tx_desc *d, unsigned int gen) 1109{ 1110#if SGE_NUM_GENBITS == 2 1111 d->flit[TX_DESC_FLITS - 1] = htobe64(gen); 1112#endif 1113} 1114 | 1053 } 1054 1055 if (idx) 1056 sgp->len[idx] = 0; 1057} 1058 1059/** 1060 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell --- 31 unchanged lines hidden (view full) --- 1092static __inline void 1093wr_gen2(struct tx_desc *d, unsigned int gen) 1094{ 1095#if SGE_NUM_GENBITS == 2 1096 d->flit[TX_DESC_FLITS - 1] = htobe64(gen); 1097#endif 1098} 1099 |
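Note on the make_sgl() change above: zero-length DMA segments are now skipped entirely, since the firmware does not accept empty SGL entries. Each sg_ent packs two {len, addr} pairs into three flits, which is where the SGL flit counts used elsewhere come from; the sgl_len() helper itself is outside this hunk, so the arithmetic below is derived from the sg_ent layout, not copied from it:

```c
/*
 * Flit accounting for an SGL with nsegs non-empty segments (each full
 * sg_ent is three flits and holds two segments; an odd tail uses two):
 *
 *	sgl_flits = 3 * (nsegs / 2) + (nsegs & 1 ? 2 : 0);
 *
 * e.g. 1 segment -> 2 flits, 2 -> 3, 3 -> 5, 4 -> 6.
 */
```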
| 1100#if 0 1101static int print_wr = 0; 1102static __inline void 1103do_print_wr(struct tx_desc *d, int flits) 1104{ 1105 int i = 0; 1106 1107 if (print_wr) 1108 while (flits--) { 1109 printf("flit[%d]: 0x%016lx\n", i, d->flit[i]); 1110 i++; 1111 } 1112} 1113#endif |
1115 1116 1117/** 1118 * write_wr_hdr_sgl - write a WR header and, optionally, SGL 1119 * @ndesc: number of Tx descriptors spanned by the SGL 1120 * @txd: first Tx descriptor to be written 1121 * @txqs: txq state (generation and producer index) 1122 * @txq: the SGE Tx queue 1123 * @sgl: the SGL 1124 * @flits: number of flits to the start of the SGL in the first descriptor 1125 * @sgl_flits: the SGL size in flits 1126 * @wr_hi: top 32 bits of WR header based on WR type (big endian) 1127 * @wr_lo: low 32 bits of WR header based on WR type (big endian) 1128 * 1129 * Write a work request header and an associated SGL. If the SGL is 1130 * small enough to fit into one Tx descriptor it has already been written 1131 * and we just need to write the WR header. Otherwise we distribute the 1132 * SGL across the number of descriptors it spans. 1133 */ | 1114 1115 1116/** 1117 * write_wr_hdr_sgl - write a WR header and, optionally, SGL 1118 * @ndesc: number of Tx descriptors spanned by the SGL 1119 * @txd: first Tx descriptor to be written 1120 * @txqs: txq state (generation and producer index) 1121 * @txq: the SGE Tx queue 1122 * @sgl: the SGL 1123 * @flits: number of flits to the start of the SGL in the first descriptor 1124 * @sgl_flits: the SGL size in flits 1125 * @wr_hi: top 32 bits of WR header based on WR type (big endian) 1126 * @wr_lo: low 32 bits of WR header based on WR type (big endian) 1127 * 1128 * Write a work request header and an associated SGL. If the SGL is 1129 * small enough to fit into one Tx descriptor it has already been written 1130 * and we just need to write the WR header. Otherwise we distribute the 1131 * SGL across the number of descriptors it spans. 1132 */ |
1134 | |
1135static void 1136write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs, 1137 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits, 1138 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo) 1139{ 1140 1141 struct work_request_hdr *wrp = (struct work_request_hdr *)txd; 1142 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx]; 1143 1144 if (__predict_true(ndesc == 1)) { 1145 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1146 V_WR_SGLSFLT(flits)) | wr_hi; 1147 wmb(); 1148 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | 1149 V_WR_GEN(txqs->gen)) | wr_lo; 1150 /* XXX gen? */ 1151 wr_gen2(txd, txqs->gen); | 1133static void 1134write_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs, 1135 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits, 1136 unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo) 1137{ 1138 1139 struct work_request_hdr *wrp = (struct work_request_hdr *)txd; 1140 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx]; 1141 1142 if (__predict_true(ndesc == 1)) { 1143 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1144 V_WR_SGLSFLT(flits)) | wr_hi; 1145 wmb(); 1146 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) | 1147 V_WR_GEN(txqs->gen)) | wr_lo; 1148 /* XXX gen? */ 1149 wr_gen2(txd, txqs->gen); |
| 1150 |
1152 } else { 1153 unsigned int ogen = txqs->gen; 1154 const uint64_t *fp = (const uint64_t *)sgl; 1155 struct work_request_hdr *wp = wrp; 1156 1157 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | 1158 V_WR_SGLSFLT(flits)) | wr_hi; 1159 --- 18 unchanged lines hidden (view full) --- 1178 txsd = txq->sdesc; 1179 } 1180 1181 /* 1182 * when the head of the mbuf chain 1183 * is freed all clusters will be freed 1184 * with it 1185 */ | 1151 } else { 1152 unsigned int ogen = txqs->gen; 1153 const uint64_t *fp = (const uint64_t *)sgl; 1154 struct work_request_hdr *wp = wrp; 1155 1156 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | 1157 V_WR_SGLSFLT(flits)) | wr_hi; 1158 --- 18 unchanged lines hidden (view full) --- 1177 txsd = txq->sdesc; 1178 } 1179 1180 /* 1181 * when the head of the mbuf chain 1182 * is freed all clusters will be freed 1183 * with it 1184 */ |
1186 txsd->m = NULL; | 1185 txsd->mi.mi_base = NULL; |
1187 wrp = (struct work_request_hdr *)txd; 1188 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | 1189 V_WR_SGLSFLT(1)) | wr_hi; 1190 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, 1191 sgl_flits + 1)) | 1192 V_WR_GEN(txqs->gen)) | wr_lo; 1193 wr_gen2(txd, txqs->gen); 1194 flits = 1; 1195 } 1196 wrp->wr_hi |= htonl(F_WR_EOP); 1197 wmb(); 1198 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; 1199 wr_gen2((struct tx_desc *)wp, ogen); 1200 } 1201} 1202 | 1186 wrp = (struct work_request_hdr *)txd; 1187 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | 1188 V_WR_SGLSFLT(1)) | wr_hi; 1189 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS, 1190 sgl_flits + 1)) | 1191 V_WR_GEN(txqs->gen)) | wr_lo; 1192 wr_gen2(txd, txqs->gen); 1193 flits = 1; 1194 } 1195 wrp->wr_hi |= htonl(F_WR_EOP); 1196 wmb(); 1197 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo; 1198 wr_gen2((struct tx_desc *)wp, ogen); 1199 } 1200} 1201 |
1203 | |
1204/* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */ 1205#define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20) 1206 | 1202/* sizeof(*eh) + sizeof(*vhdr) + sizeof(*ip) + sizeof(*tcp) */ 1203#define TCPPKTHDRSIZE (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 20 + 20) 1204 |
| 1205#ifdef VLAN_SUPPORTED 1206#define GET_VTAG(cntrl, m) \ 1207do { \ 1208 if ((m)->m_flags & M_VLANTAG) \ 1209 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \ 1210} while (0) 1211 1212#define GET_VTAG_MI(cntrl, mi) \ 1213do { \ 1214 if ((mi)->mi_flags & M_VLANTAG) \ 1215 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((mi)->mi_ether_vtag); \ 1216} while (0) 1217#else 1218#define GET_VTAG(cntrl, m) 1219#define GET_VTAG_MI(cntrl, m) 1220#endif 1221 |
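Usage sketch for the GET_VTAG/GET_VTAG_MI macros added above, mirroring the non-TSO path of t3_encap() further down (the variables come from that function's context); with VLAN_SUPPORTED undefined both macros compile away to nothing:

```c
cntrl = V_TXPKT_INTF(pi->txpkt_intf);
GET_VTAG(cntrl, m0);			/* no-op unless m0 carries M_VLANTAG */
cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
cpl->cntrl = htonl(cntrl);
```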
1207int | 1222int |
1208t3_encap(struct port_info *p, struct mbuf **m, int *free) | 1223t3_encap(struct sge_qset *qs, struct mbuf **m, int count) |
1209{ 1210 adapter_t *sc; 1211 struct mbuf *m0; | 1224{ 1225 adapter_t *sc; 1226 struct mbuf *m0; |
1212 struct sge_qset *qs; | |
1213 struct sge_txq *txq; | 1227 struct sge_txq *txq; |
1214 struct tx_sw_desc *stx; | |
1215 struct txq_state txqs; | 1228 struct txq_state txqs; |
| 1229 struct port_info *pi; |
1216 unsigned int ndesc, flits, cntrl, mlen; 1217 int err, nsegs, tso_info = 0; 1218 1219 struct work_request_hdr *wrp; 1220 struct tx_sw_desc *txsd; | 1230 unsigned int ndesc, flits, cntrl, mlen; 1231 int err, nsegs, tso_info = 0; 1232 1233 struct work_request_hdr *wrp; 1234 struct tx_sw_desc *txsd; |
1221 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1]; 1222 bus_dma_segment_t segs[TX_MAX_SEGS]; | 1235 struct sg_ent *sgp, *sgl; 1236 bus_dma_segment_t *segs; |
1223 uint32_t wr_hi, wr_lo, sgl_flits; 1224 1225 struct tx_desc *txd; | 1237 uint32_t wr_hi, wr_lo, sgl_flits; 1238 1239 struct tx_desc *txd; |
1226 struct cpl_tx_pkt *cpl; 1227 1228 m0 = *m; 1229 sc = p->adapter; 1230 1231 DPRINTF("t3_encap port_id=%d qsidx=%d ", p->port_id, p->first_qset); | 1240 struct mbuf_vec *mv; 1241 struct mbuf_iovec *mi; 1242 1243 DPRINTF("t3_encap cpu=%d ", curcpu); |
1232 | 1244 |
1233 /* port_id=1 qsid=1 txpkt_intf=2 tx_chan=0 */ 1234 1235 qs = &sc->sge.qs[p->first_qset]; 1236 | 1245 pi = qs->port; 1246 sc = pi->adapter; |
1237 txq = &qs->txq[TXQ_ETH]; | 1247 txq = &qs->txq[TXQ_ETH]; |
1238 stx = &txq->sdesc[txq->pidx]; | 1248 txsd = &txq->sdesc[txq->pidx]; |
1239 txd = &txq->desc[txq->pidx]; | 1249 txd = &txq->desc[txq->pidx]; |
1240 cpl = (struct cpl_tx_pkt *)txd; 1241 mlen = m0->m_pkthdr.len; 1242 cpl->len = htonl(mlen | 0x80000000); 1243 1244 DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", mlen, p->txpkt_intf, p->tx_chan); 1245 /* 1246 * XXX handle checksum, TSO, and VLAN here 1247 * 1248 */ 1249 cntrl = V_TXPKT_INTF(p->txpkt_intf); | 1250 sgl = txq->txq_sgl; 1251 segs = txq->txq_segs; 1252 m0 = *m; 1253 DPRINTF("t3_encap port_id=%d qsidx=%d ", pi->port_id, pi->first_qset); 1254 DPRINTF("mlen=%d txpkt_intf=%d tx_chan=%d\n", m[0]->m_pkthdr.len, pi->txpkt_intf, pi->tx_chan); |
1250 | 1255 |
1251 /* 1252 * XXX need to add VLAN support for 6.x 1253 */ | 1256 cntrl = V_TXPKT_INTF(pi->txpkt_intf); 1257/* 1258 * XXX need to add VLAN support for 6.x 1259 */ |
1254#ifdef VLAN_SUPPORTED | 1260#ifdef VLAN_SUPPORTED |
1255 if (m0->m_flags & M_VLANTAG) 1256 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); | |
1257 if (m0->m_pkthdr.csum_flags & (CSUM_TSO)) 1258 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz); | 1261 if (m0->m_pkthdr.csum_flags & (CSUM_TSO)) 1262 tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz); |
1259#endif 1260 if (tso_info) { 1261 int eth_type; 1262 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *) cpl; | 1263#endif 1264 1265 if (count > 1) { 1266 if ((err = busdma_map_sg_vec(m, &m0, segs, count))) 1267 return (err); 1268 nsegs = count; 1269 } else if ((err = busdma_map_sg_collapse(&m0, segs, &nsegs))) { 1270 if (cxgb_debug) 1271 printf("failed ... err=%d\n", err); 1272 return (err); 1273 } 1274 KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d count=%d", nsegs, count)); 1275 1276 if (m0->m_type == MT_DATA) 1277 DPRINTF("mbuf type=%d tags:%d head=%p", m0->m_type, !SLIST_EMPTY(&m0->m_pkthdr.tags), 1278 SLIST_FIRST(&m0->m_pkthdr.tags)); 1279 1280 mi_collapse_mbuf(&txsd->mi, m0); 1281 mi = &txsd->mi; 1282 1283 if (count > 1) { 1284 struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd; 1285 int i, fidx; 1286 struct mbuf_iovec *batchmi; 1287 1288 mv = mtomv(m0); 1289 batchmi = mv->mv_vec; 1290 1291 wrp = (struct work_request_hdr *)txd; 1292 1293 flits = count*2 + 1; 1294 txq_prod(txq, 1, &txqs); 1295 1296 for (fidx = 1, i = 0; i < count; i++, batchmi++, fidx += 2) { 1297 struct cpl_tx_pkt_batch_entry *cbe = &cpl_batch->pkt_entry[i]; 1298 1299 cntrl = V_TXPKT_INTF(pi->txpkt_intf); 1300 GET_VTAG_MI(cntrl, batchmi); 1301 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1302 cbe->cntrl = htonl(cntrl); 1303 cbe->len = htonl(batchmi->mi_len | 0x80000000); 1304 cbe->addr = htobe64(segs[i].ds_addr); 1305 txd->flit[fidx] |= htobe64(1 << 24); 1306 } 1307 1308 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | 1309 V_WR_SGLSFLT(flits)) | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl); 1310 wmb(); 1311 wrp->wr_lo = htonl(V_WR_LEN(flits) | 1312 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token)); 1313 /* XXX gen? */ 1314 wr_gen2(txd, txqs.gen); 1315 check_ring_tx_db(sc, txq); 1316 1317 return (0); 1318 } else if (tso_info) { 1319 int undersized, eth_type; 1320 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd; |
1263 struct ip *ip; 1264 struct tcphdr *tcp; | 1321 struct ip *ip; 1322 struct tcphdr *tcp; |
1265 char *pkthdr, tmp[TCPPKTHDRSIZE]; /* is this too large for the stack? */ | 1323 char *pkthdr, tmp[TCPPKTHDRSIZE]; 1324 struct mbuf_vec *mv; 1325 struct mbuf_iovec *tmpmi; 1326 1327 mv = mtomv(m0); 1328 tmpmi = mv->mv_vec; |
1266 1267 txd->flit[2] = 0; | 1329 1330 txd->flit[2] = 0; |
| 1331 GET_VTAG_MI(cntrl, mi); |
1268 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); 1269 hdr->cntrl = htonl(cntrl); | 1332 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO); 1333 hdr->cntrl = htonl(cntrl); |
1270 1271 if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) { 1272 pkthdr = &tmp[0]; 1273 m_copydata(m0, 0, TCPPKTHDRSIZE, pkthdr); 1274 } else { 1275 pkthdr = mtod(m0, char *); 1276 } | 1334 mlen = m0->m_pkthdr.len; 1335 hdr->len = htonl(mlen | 0x80000000); |
1277 | 1336 |
| 1337 DPRINTF("tso buf len=%d\n", mlen); 1338 undersized = (((tmpmi->mi_len < TCPPKTHDRSIZE) && 1339 (m0->m_flags & M_VLANTAG)) || 1340 (tmpmi->mi_len < TCPPKTHDRSIZE - ETHER_VLAN_ENCAP_LEN)); 1341 if (__predict_false(undersized)) { 1342 pkthdr = tmp; 1343 dump_mi(mi); 1344 panic("discontig packet - fixxorz"); 1345 } else 1346 pkthdr = m0->m_data; 1347 |
1278 if (__predict_false(m0->m_flags & M_VLANTAG)) { 1279 eth_type = CPL_ETH_II_VLAN; 1280 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN + 1281 ETHER_VLAN_ENCAP_LEN); 1282 } else { 1283 eth_type = CPL_ETH_II; 1284 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN); 1285 } 1286 tcp = (struct tcphdr *)((uint8_t *)ip + 1287 sizeof(*ip)); 1288 1289 tso_info |= V_LSO_ETH_TYPE(eth_type) | 1290 V_LSO_IPHDR_WORDS(ip->ip_hl) | 1291 V_LSO_TCPHDR_WORDS(tcp->th_off); 1292 hdr->lso_info = htonl(tso_info); 1293 flits = 3; 1294 } else { | 1348 if (__predict_false(m0->m_flags & M_VLANTAG)) { 1349 eth_type = CPL_ETH_II_VLAN; 1350 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN + 1351 ETHER_VLAN_ENCAP_LEN); 1352 } else { 1353 eth_type = CPL_ETH_II; 1354 ip = (struct ip *)(pkthdr + ETHER_HDR_LEN); 1355 } 1356 tcp = (struct tcphdr *)((uint8_t *)ip + 1357 sizeof(*ip)); 1358 1359 tso_info |= V_LSO_ETH_TYPE(eth_type) | 1360 V_LSO_IPHDR_WORDS(ip->ip_hl) | 1361 V_LSO_TCPHDR_WORDS(tcp->th_off); 1362 hdr->lso_info = htonl(tso_info); 1363 flits = 3; 1364 } else { |
| 1365 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd; 1366 1367 GET_VTAG(cntrl, m0); |
1295 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1296 cpl->cntrl = htonl(cntrl); | 1368 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT); 1369 cpl->cntrl = htonl(cntrl); |
1297 | 1370 mlen = m0->m_pkthdr.len; 1371 cpl->len = htonl(mlen | 0x80000000); 1372 |
1298 if (mlen <= WR_LEN - sizeof(*cpl)) { 1299 txq_prod(txq, 1, &txqs); | 1373 if (mlen <= WR_LEN - sizeof(*cpl)) { 1374 txq_prod(txq, 1, &txqs); |
1300 txq->sdesc[txqs.pidx].m = NULL; | |
1301 | 1375 |
1302 if (m0->m_len == m0->m_pkthdr.len) 1303 memcpy(&txd->flit[2], mtod(m0, uint8_t *), mlen); 1304 else | 1376 DPRINTF("mlen==%d max=%ld\n", mlen, (WR_LEN - sizeof(*cpl))); 1377 if (mi->mi_type != MT_IOVEC && 1378 mi->mi_type != MT_CLIOVEC) 1379 memcpy(&txd->flit[2], mi->mi_data, mlen); 1380 else { 1381 /* 1382 * XXX mbuf_iovec 1383 */ 1384#if 0 |
1305 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]); | 1385 m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]); |
| 1386#endif 1387 printf("bailing on m_copydata\n"); 1388 } 1389 m_freem_iovec(&txsd->mi); 1390 txsd->mi.mi_base = NULL; |
1306 | 1391 |
1307 *free = 1; | |
1308 flits = (mlen + 7) / 8 + 2; 1309 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) | 1310 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | 1311 F_WR_SOP | F_WR_EOP | txqs.compl); 1312 wmb(); 1313 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | 1314 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token)); 1315 1316 wr_gen2(txd, txqs.gen); 1317 check_ring_tx_db(sc, txq); | 1392 flits = (mlen + 7) / 8 + 2; 1393 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) | 1394 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | 1395 F_WR_SOP | F_WR_EOP | txqs.compl); 1396 wmb(); 1397 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | 1398 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token)); 1399 1400 wr_gen2(txd, txqs.gen); 1401 check_ring_tx_db(sc, txq); |
| 1402 DPRINTF("pio buf\n"); |
1318 return (0); 1319 } | 1403 return (0); 1404 } |
| 1405 DPRINTF("regular buf\n"); |
1320 flits = 2; 1321 } | 1406 flits = 2; 1407 } |
1322 | |
1323 wrp = (struct work_request_hdr *)txd; | 1408 wrp = (struct work_request_hdr *)txd; |
1324 1325 if ((err = busdma_map_mbufs(m, txq, stx, segs, &nsegs)) != 0) { | 1409 1410#ifdef nomore 1411 /* 1412 * XXX need to move into one of the helper routines above 1413 * 1414 */ 1415 if ((err = busdma_map_mbufs(m, txq, txsd, segs, &nsegs)) != 0) |
1326 return (err); | 1416 return (err); |
1327 } | |
1328 m0 = *m; | 1417 m0 = *m; |
| 1418#endif |
1329 ndesc = calc_tx_descs(m0, nsegs); 1330 1331 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl; 1332 make_sgl(sgp, segs, nsegs); 1333 1334 sgl_flits = sgl_len(nsegs); 1335 1336 DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc); 1337 txq_prod(txq, ndesc, &txqs); | 1419 ndesc = calc_tx_descs(m0, nsegs); 1420 1421 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl; 1422 make_sgl(sgp, segs, nsegs); 1423 1424 sgl_flits = sgl_len(nsegs); 1425 1426 DPRINTF("make_sgl success nsegs==%d ndesc==%d\n", nsegs, ndesc); 1427 txq_prod(txq, ndesc, &txqs); |
1338 txsd = &txq->sdesc[txqs.pidx]; | |
1339 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl); 1340 wr_lo = htonl(V_WR_TID(txq->token)); | 1428 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl); 1429 wr_lo = htonl(V_WR_TID(txq->token)); |
1341 txsd->m = m0; 1342 m_set_priority(m0, txqs.pidx); 1343 | |
1344 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits, wr_hi, wr_lo); | 1430 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits, wr_hi, wr_lo); |
1345 check_ring_tx_db(p->adapter, txq); | 1431 check_ring_tx_db(pi->adapter, txq); |
1346 | 1432 |
| 1433 if ((m0->m_type == MT_DATA) && ((m0->m_flags & (M_EXT|M_NOFREE)) == M_EXT)) { 1434 m0->m_flags &= ~M_EXT ; 1435 m_free(m0); 1436 } 1437 |
1347 return (0); 1348} 1349 1350 1351/** 1352 * write_imm - write a packet into a Tx descriptor as immediate data 1353 * @d: the Tx descriptor to write 1354 * @m: the packet --- 7 unchanged lines hidden (view full) --- 1362 */ 1363static __inline void 1364write_imm(struct tx_desc *d, struct mbuf *m, 1365 unsigned int len, unsigned int gen) 1366{ 1367 struct work_request_hdr *from = mtod(m, struct work_request_hdr *); 1368 struct work_request_hdr *to = (struct work_request_hdr *)d; 1369 | 1438 return (0); 1439} 1440 1441 1442/** 1443 * write_imm - write a packet into a Tx descriptor as immediate data 1444 * @d: the Tx descriptor to write 1445 * @m: the packet --- 7 unchanged lines hidden (view full) --- 1453 */ 1454static __inline void 1455write_imm(struct tx_desc *d, struct mbuf *m, 1456 unsigned int len, unsigned int gen) 1457{ 1458 struct work_request_hdr *from = mtod(m, struct work_request_hdr *); 1459 struct work_request_hdr *to = (struct work_request_hdr *)d; 1460 |
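Back in the count > 1 branch of t3_encap() above, the batched tunnel-TX work request consists of one WR header flit followed by a two-flit cpl_tx_pkt_batch_entry (cntrl and len words plus a 64-bit address) per packet. A hypothetical helper, not in the diff, stating the flit budget that branch computes as flits = count*2 + 1:

```c
/* Hypothetical helper: flits used by an N-packet batched tunnel-TX WR. */
static __inline unsigned int
batch_wr_flits(unsigned int count)
{
	return (count * 2 + 1);		/* 1 header flit + 2 flits per packet entry */
}
```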
| 1461 if (len > WR_LEN) 1462 panic("len too big %d\n", len); 1463 if (len < sizeof(*from)) 1464 panic("len too small %d", len); 1465 |
1370 memcpy(&to[1], &from[1], len - sizeof(*from)); 1371 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | 1372 V_WR_BCNTLFLT(len & 7)); 1373 wmb(); 1374 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | 1375 V_WR_LEN((len + 7) / 8)); 1376 wr_gen2(d, gen); | 1466 memcpy(&to[1], &from[1], len - sizeof(*from)); 1467 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | 1468 V_WR_BCNTLFLT(len & 7)); 1469 wmb(); 1470 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | 1471 V_WR_LEN((len + 7) / 8)); 1472 wr_gen2(d, gen); |
1377 m_freem(m); | 1473 1474 /* 1475 * This check is a hack we should really fix the logic so 1476 * that this can't happen 1477 */ 1478 if (m->m_type != MT_DONTFREE) 1479 m_freem(m); 1480 |
1378} 1379 1380/** 1381 * check_desc_avail - check descriptor availability on a send queue 1382 * @adap: the adapter 1383 * @q: the TX queue 1384 * @m: the packet needing the descriptors 1385 * @ndesc: the number of Tx descriptors needed --- 22 unchanged lines hidden (view full) --- 1408 if (__predict_false(!mbufq_empty(&q->sendq))) { 1409addq_exit: mbufq_tail(&q->sendq, m); 1410 return 1; 1411 } 1412 if (__predict_false(q->size - q->in_use < ndesc)) { 1413 1414 struct sge_qset *qs = txq_to_qset(q, qid); 1415 | 1481} 1482 1483/** 1484 * check_desc_avail - check descriptor availability on a send queue 1485 * @adap: the adapter 1486 * @q: the TX queue 1487 * @m: the packet needing the descriptors 1488 * @ndesc: the number of Tx descriptors needed --- 22 unchanged lines hidden (view full) --- 1511 if (__predict_false(!mbufq_empty(&q->sendq))) { 1512addq_exit: mbufq_tail(&q->sendq, m); 1513 return 1; 1514 } 1515 if (__predict_false(q->size - q->in_use < ndesc)) { 1516 1517 struct sge_qset *qs = txq_to_qset(q, qid); 1518 |
| 1519 printf("stopping q\n"); 1520 |
1416 setbit(&qs->txq_stopped, qid); 1417 smp_mb(); 1418 1419 if (should_restart_tx(q) && 1420 test_and_clear_bit(qid, &qs->txq_stopped)) 1421 return 2; 1422 1423 q->stops++; --- 43 unchanged lines hidden (view full) --- 1467{ 1468 int ret; 1469 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *); 1470 1471 if (__predict_false(!immediate(m))) { 1472 m_freem(m); 1473 return 0; 1474 } | 1521 setbit(&qs->txq_stopped, qid); 1522 smp_mb(); 1523 1524 if (should_restart_tx(q) && 1525 test_and_clear_bit(qid, &qs->txq_stopped)) 1526 return 2; 1527 1528 q->stops++; --- 43 unchanged lines hidden (view full) --- 1572{ 1573 int ret; 1574 struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *); 1575 1576 if (__predict_false(!immediate(m))) { 1577 m_freem(m); 1578 return 0; 1579 } |
1475 | 1580 |
1476 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); 1477 wrp->wr_lo = htonl(V_WR_TID(q->token)); 1478 1479 mtx_lock(&q->lock); 1480again: reclaim_completed_tx_imm(q); 1481 1482 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL); 1483 if (__predict_false(ret)) { 1484 if (ret == 1) { 1485 mtx_unlock(&q->lock); | 1581 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP); 1582 wrp->wr_lo = htonl(V_WR_TID(q->token)); 1583 1584 mtx_lock(&q->lock); 1585again: reclaim_completed_tx_imm(q); 1586 1587 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL); 1588 if (__predict_false(ret)) { 1589 if (ret == 1) { 1590 mtx_unlock(&q->lock); |
1486 return (-1); | 1591 log(LOG_ERR, "no desc available\n"); 1592 1593 return (ENOSPC); |
1487 } 1488 goto again; 1489 } | 1594 } 1595 goto again; 1596 } |
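ctrl_xmit() now logs and returns ENOSPC instead of the old bare -1 when the control queue is out of descriptors; per the check_desc_avail() hunk earlier, the work request has in that case already been placed on the software sendq and will be pushed later by restart_ctrlq(). A hypothetical caller sketch for t3_mgmt_tx(), which simply forwards this return value:

```c
/* Hypothetical caller: the backlogged case is now an ordinary errno. */
if ((err = t3_mgmt_tx(adap, m)) == ENOSPC)
	log(LOG_WARNING, "control queue backlogged, mgmt WR deferred\n");
```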
1490 | |
1491 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen); | 1597 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen); |
1492 | 1598 |
1493 q->in_use++; 1494 if (++q->pidx >= q->size) { 1495 q->pidx = 0; 1496 q->gen ^= 1; 1497 } 1498 mtx_unlock(&q->lock); 1499 wmb(); 1500 t3_write_reg(adap, A_SG_KDOORBELL, --- 11 unchanged lines hidden (view full) --- 1512static void 1513restart_ctrlq(void *data, int npending) 1514{ 1515 struct mbuf *m; 1516 struct sge_qset *qs = (struct sge_qset *)data; 1517 struct sge_txq *q = &qs->txq[TXQ_CTRL]; 1518 adapter_t *adap = qs->port->adapter; 1519 | 1599 q->in_use++; 1600 if (++q->pidx >= q->size) { 1601 q->pidx = 0; 1602 q->gen ^= 1; 1603 } 1604 mtx_unlock(&q->lock); 1605 wmb(); 1606 t3_write_reg(adap, A_SG_KDOORBELL, --- 11 unchanged lines hidden (view full) --- 1618static void 1619restart_ctrlq(void *data, int npending) 1620{ 1621 struct mbuf *m; 1622 struct sge_qset *qs = (struct sge_qset *)data; 1623 struct sge_txq *q = &qs->txq[TXQ_CTRL]; 1624 adapter_t *adap = qs->port->adapter; 1625 |
| 1626 log(LOG_WARNING, "Restart_ctrlq in_use=%d\n", q->in_use); 1627 |
1520 mtx_lock(&q->lock); 1521again: reclaim_completed_tx_imm(q); 1522 1523 while (q->in_use < q->size && 1524 (m = mbufq_dequeue(&q->sendq)) != NULL) { 1525 1526 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen); 1527 --- 22 unchanged lines hidden (view full) --- 1550 * Send a management message through control queue 0 1551 */ 1552int 1553t3_mgmt_tx(struct adapter *adap, struct mbuf *m) 1554{ 1555 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m); 1556} 1557 | 1628 mtx_lock(&q->lock); 1629again: reclaim_completed_tx_imm(q); 1630 1631 while (q->in_use < q->size && 1632 (m = mbufq_dequeue(&q->sendq)) != NULL) { 1633 1634 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen); 1635 --- 22 unchanged lines hidden (view full) --- 1658 * Send a management message through control queue 0 1659 */ 1660int 1661t3_mgmt_tx(struct adapter *adap, struct mbuf *m) 1662{ 1663 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m); 1664} 1665 |
| 1666 |
1558/** 1559 * free_qset - free the resources of an SGE queue set 1560 * @sc: the controller owning the queue set 1561 * @q: the queue set 1562 * 1563 * Release the HW and SW resources associated with an SGE queue set, such 1564 * as HW contexts, packet buffers, and descriptor rings. Traffic to the 1565 * queue set must be quiesced prior to calling this. 1566 */ | 1667/** 1668 * free_qset - free the resources of an SGE queue set 1669 * @sc: the controller owning the queue set 1670 * @q: the queue set 1671 * 1672 * Release the HW and SW resources associated with an SGE queue set, such 1673 * as HW contexts, packet buffers, and descriptor rings. Traffic to the 1674 * queue set must be quiesced prior to calling this. 1675 */ |
1567static void | 1676void |
1568t3_free_qset(adapter_t *sc, struct sge_qset *q) 1569{ 1570 int i; | 1677t3_free_qset(adapter_t *sc, struct sge_qset *q) 1678{ 1679 int i; |
1571 | 1680 1681 t3_free_tx_desc_all(&q->txq[TXQ_ETH]); 1682 1683 for (i = 0; i < SGE_TXQ_PER_SET; i++) 1684 if (q->txq[i].txq_mr.br_ring != NULL) { 1685 free(q->txq[i].txq_mr.br_ring, M_DEVBUF); 1686 mtx_destroy(&q->txq[i].txq_mr.br_lock); 1687 } |
1572 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 1573 if (q->fl[i].desc) { 1574 mtx_lock(&sc->sge.reg_lock); 1575 t3_sge_disable_fl(sc, q->fl[i].cntxt_id); 1576 mtx_unlock(&sc->sge.reg_lock); 1577 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map); 1578 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc, 1579 q->fl[i].desc_map); --- 44 unchanged lines hidden (view full) --- 1624 * @sc: the adapter softc 1625 * 1626 * Frees resources used by the SGE queue sets. 1627 */ 1628void 1629t3_free_sge_resources(adapter_t *sc) 1630{ 1631 int i, nqsets; | 1688 for (i = 0; i < SGE_RXQ_PER_SET; ++i) { 1689 if (q->fl[i].desc) { 1690 mtx_lock(&sc->sge.reg_lock); 1691 t3_sge_disable_fl(sc, q->fl[i].cntxt_id); 1692 mtx_unlock(&sc->sge.reg_lock); 1693 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map); 1694 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc, 1695 q->fl[i].desc_map); --- 44 unchanged lines hidden (view full) --- 1740 * @sc: the adapter softc 1741 * 1742 * Frees resources used by the SGE queue sets. 1743 */ 1744void 1745t3_free_sge_resources(adapter_t *sc) 1746{ 1747 int i, nqsets; |
1632 | 1748 1749#ifdef IFNET_MULTIQUEUE 1750 panic("%s should not be called when IFNET_MULTIQUEUE is defined", __FUNCTION__); 1751#endif |
1633 for (nqsets = i = 0; i < (sc)->params.nports; i++) 1634 nqsets += sc->port[i].nqsets; | 1752 for (nqsets = i = 0; i < (sc)->params.nports; i++) 1753 nqsets += sc->port[i].nqsets; |
1635 | 1754 |
1636 for (i = 0; i < nqsets; ++i) 1637 t3_free_qset(sc, &sc->sge.qs[i]); 1638} 1639 1640/** 1641 * t3_sge_start - enable SGE 1642 * @sc: the controller softc 1643 * --- 37 unchanged lines hidden (view full) --- 1681 1682 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 1683 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 1684 } 1685} 1686 1687 1688/** | 1755 for (i = 0; i < nqsets; ++i) 1756 t3_free_qset(sc, &sc->sge.qs[i]); 1757} 1758 1759/** 1760 * t3_sge_start - enable SGE 1761 * @sc: the controller softc 1762 * --- 37 unchanged lines hidden (view full) --- 1800 1801 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 1802 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 1803 } 1804} 1805 1806 1807/** |
1689 * free_tx_desc - reclaims Tx descriptors and their buffers | 1808 * t3_free_tx_desc - reclaims Tx descriptors and their buffers |
1690 * @adapter: the adapter 1691 * @q: the Tx queue to reclaim descriptors from | 1809 * @adapter: the adapter 1810 * @q: the Tx queue to reclaim descriptors from |
1692 * @n: the number of descriptors to reclaim | 1811 * @reclaimable: the number of descriptors to reclaim 1812 * @m_vec_size: maximum number of buffers to reclaim 1813 * @desc_reclaimed: returns the number of descriptors reclaimed |
1693 * 1694 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated 1695 * Tx buffers. Called with the Tx queue lock held. | 1814 * 1815 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated 1816 * Tx buffers. Called with the Tx queue lock held. |
1817 * 1818 * Returns the number of buffers reclaimed |
|
1696 */ | 1819 */ |
1697int 1698free_tx_desc(struct sge_txq *q, int n, struct mbuf **m_vec) | 1820void 1821t3_free_tx_desc(struct sge_txq *q, int reclaimable) |
1699{ | 1822{ |
1700 struct tx_sw_desc *d; 1701 unsigned int cidx = q->cidx; 1702 int nbufs = 0; | 1823 struct tx_sw_desc *txsd; 1824 unsigned int cidx; |
1703 1704#ifdef T3_TRACE 1705 T3_TRACE2(sc->tb[q->cntxt_id & 7], | 1825 1826#ifdef T3_TRACE 1827 T3_TRACE2(sc->tb[q->cntxt_id & 7], |
1706 "reclaiming %u Tx descriptors at cidx %u", n, cidx); | 1828 "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx); |
1707#endif | 1829#endif |
1708 d = &q->sdesc[cidx]; 1709 1710 while (n-- > 0) { 1711 DPRINTF("cidx=%d d=%p\n", cidx, d); 1712 if (d->m) { 1713 if (d->flags & TX_SW_DESC_MAPPED) { 1714 bus_dmamap_unload(q->entry_tag, d->map); 1715 bus_dmamap_destroy(q->entry_tag, d->map); 1716 d->flags &= ~TX_SW_DESC_MAPPED; | 1830 cidx = q->cidx; 1831 txsd = &q->sdesc[cidx]; 1832 DPRINTF("reclaiming %d WR\n", reclaimable); 1833 while (reclaimable--) { 1834 DPRINTF("cidx=%d d=%p\n", cidx, txsd); 1835 if (txsd->mi.mi_base != NULL) { 1836 if (txsd->flags & TX_SW_DESC_MAPPED) { 1837 bus_dmamap_unload(q->entry_tag, txsd->map); 1838 txsd->flags &= ~TX_SW_DESC_MAPPED; |
1717 } | 1839 } |
1718 if (m_get_priority(d->m) == cidx) { 1719 m_vec[nbufs] = d->m; 1720 d->m = NULL; 1721 nbufs++; 1722 } else { 1723 printf("pri=%d cidx=%d\n", (int)m_get_priority(d->m), cidx); 1724 } 1725 } 1726 ++d; | 1840 m_freem_iovec(&txsd->mi); 1841 txsd->mi.mi_base = NULL; 1842 1843#if defined(DIAGNOSTIC) && 0 1844 if (m_get_priority(txsd->m[0]) != cidx) 1845 printf("pri=%d cidx=%d\n", (int)m_get_priority(txsd->m[0]), cidx); 1846#endif 1847 1848 } else 1849 q->txq_skipped++; 1850 1851 ++txsd; |
1727 if (++cidx == q->size) { 1728 cidx = 0; | 1852 if (++cidx == q->size) { 1853 cidx = 0; |
1729 d = q->sdesc; | 1854 txsd = q->sdesc; |
1730 } 1731 } 1732 q->cidx = cidx; 1733 | 1855 } 1856 } 1857 q->cidx = cidx; 1858 |
1734 return (nbufs); | |
1735} 1736 | 1859} 1860 |
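t3_free_tx_desc() is normally driven by reclaim_completed_tx(), which sits in one of the elided hunks. A sketch of that bookkeeping, assuming desc_reclaimable() returns the count of descriptors the hardware has processed but the driver has not yet cleaned (as it is used later in this file):

/*
 * Sketch only: mirrors the processed/cleaned/in_use accounting of the
 * elided reclaim_completed_tx(); not a verbatim copy of that function.
 */
static __inline void
example_reclaim_completed_tx(struct sge_txq *q)
{
        int reclaim = desc_reclaimable(q);      /* processed - cleaned */

        if (reclaim > 0) {
                t3_free_tx_desc(q, reclaim);    /* frees buffers, advances q->cidx */
                q->cleaned += reclaim;
                q->in_use -= reclaim;           /* descriptors become reusable */
        }
}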
1861void 1862t3_free_tx_desc_all(struct sge_txq *q) 1863{ 1864 int i; 1865 struct tx_sw_desc *txsd; 1866 1867 for (i = 0; i < q->size; i++) { 1868 txsd = &q->sdesc[i]; 1869 if (txsd->mi.mi_base != NULL) { 1870 if (txsd->flags & TX_SW_DESC_MAPPED) { 1871 bus_dmamap_unload(q->entry_tag, txsd->map); 1872 txsd->flags &= ~TX_SW_DESC_MAPPED; 1873 } 1874 m_freem_iovec(&txsd->mi); 1875 bzero(&txsd->mi, sizeof(txsd->mi)); 1876 } 1877 } 1878} 1879 |
|
1737/** 1738 * is_new_response - check if a response is newly written 1739 * @r: the response descriptor 1740 * @q: the response queue 1741 * 1742 * Returns true if a response descriptor contains a yet unprocessed 1743 * response. 1744 */ --- 32 unchanged lines hidden (view full) --- 1777 bus_dma_segment_t *segs, unsigned int nsegs) 1778{ 1779 unsigned int sgl_flits, flits; 1780 struct work_request_hdr *from; 1781 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1]; 1782 struct tx_desc *d = &q->desc[pidx]; 1783 struct txq_state txqs; 1784 | 1880/** 1881 * is_new_response - check if a response is newly written 1882 * @r: the response descriptor 1883 * @q: the response queue 1884 * 1885 * Returns true if a response descriptor contains a yet unprocessed 1886 * response. 1887 */ --- 32 unchanged lines hidden (view full) --- 1920 bus_dma_segment_t *segs, unsigned int nsegs) 1921{ 1922 unsigned int sgl_flits, flits; 1923 struct work_request_hdr *from; 1924 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1]; 1925 struct tx_desc *d = &q->desc[pidx]; 1926 struct txq_state txqs; 1927 |
1785 if (immediate(m)) { 1786 q->sdesc[pidx].m = NULL; | 1928 if (immediate(m) && segs == NULL) { |
1787 write_imm(d, m, m->m_len, gen); 1788 return; 1789 } 1790 1791 /* Only TX_DATA builds SGLs */ | 1929 write_imm(d, m, m->m_len, gen); 1930 return; 1931 } 1932 1933 /* Only TX_DATA builds SGLs */ |
1792 | |
1793 from = mtod(m, struct work_request_hdr *); | 1934 from = mtod(m, struct work_request_hdr *); |
1794 memcpy(&d->flit[1], &from[1], 1795 (uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *) - sizeof(*from)); | 1935 memcpy(&d->flit[1], &from[1], m->m_len - sizeof(*from)); |
1796 | 1936 |
1797 flits = ((uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *)) / 8; | 1937 flits = m->m_len / 8; |
1798 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : sgl; 1799 1800 make_sgl(sgp, segs, nsegs); 1801 sgl_flits = sgl_len(nsegs); 1802 | 1938 sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : sgl; 1939 1940 make_sgl(sgp, segs, nsegs); 1941 sgl_flits = sgl_len(nsegs); 1942 |
1803 txqs.gen = q->gen; 1804 txqs.pidx = q->pidx; 1805 txqs.compl = (q->unacked & 8) << (S_WR_COMPL - 3); | 1943 txqs.gen = gen; 1944 txqs.pidx = pidx; 1945 txqs.compl = 0; 1946 |
1806 write_wr_hdr_sgl(ndesc, d, &txqs, q, sgl, flits, sgl_flits, 1807 from->wr_hi, from->wr_lo); 1808} 1809 | 1947 write_wr_hdr_sgl(ndesc, d, &txqs, q, sgl, flits, sgl_flits, 1948 from->wr_hi, from->wr_lo); 1949} 1950 |
1951 1952 |
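write_ofld_wr() above copies the work-request header flits out of the mbuf and then appends a scatter/gather list built by make_sgl(); sgl_len() converts a segment count into flits. Each struct sg_ent carries two address/length pairs in three 8-byte flits, so the flit count follows the usual Chelsio formula, sketched here as an assumption consistent with how sgl_len() is used in this file:

/*
 * Illustrative only: the SGL sizing assumed by write_ofld_wr().  An odd
 * segment count still consumes the address flit plus half a length flit.
 */
static __inline unsigned int
example_sgl_flits(unsigned int nsegs)
{
        return ((3 * nsegs) / 2 + (nsegs & 1));
}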
|
1810/** 1811 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet 1812 * @m: the packet 1813 * 1814 * Returns the number of Tx descriptors needed for the given offload 1815 * packet. These packets are already fully constructed. 1816 */ 1817static __inline unsigned int --- 22 unchanged lines hidden (view full) --- 1840 * Send an offload packet through an SGE offload queue. 1841 */ 1842static int 1843ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m) 1844{ 1845 int ret, nsegs; 1846 unsigned int ndesc; 1847 unsigned int pidx, gen; | 1953/** 1954 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet 1955 * @m: the packet 1956 * 1957 * Returns the number of Tx descriptors needed for the given offload 1958 * packet. These packets are already fully constructed. 1959 */ 1960static __inline unsigned int --- 22 unchanged lines hidden (view full) --- 1983 * Send an offload packet through an SGE offload queue. 1984 */ 1985static int 1986ofld_xmit(adapter_t *adap, struct sge_txq *q, struct mbuf *m) 1987{ 1988 int ret, nsegs; 1989 unsigned int ndesc; 1990 unsigned int pidx, gen; |
1848 struct mbuf *m_vec[TX_CLEAN_MAX_DESC]; 1849 bus_dma_segment_t segs[TX_MAX_SEGS]; 1850 int i, cleaned; 1851 struct tx_sw_desc *stx = &q->sdesc[q->pidx]; | 1991 bus_dma_segment_t segs[TX_MAX_SEGS], *vsegs; 1992 struct tx_sw_desc *stx; |
1852 | 1993 |
1853 mtx_lock(&q->lock); 1854 if ((ret = busdma_map_mbufs(&m, q, stx, segs, &nsegs)) != 0) { 1855 mtx_unlock(&q->lock); 1856 return (ret); 1857 } | 1994 nsegs = m_get_sgllen(m); 1995 vsegs = m_get_sgl(m); |
1858 ndesc = calc_tx_descs_ofld(m, nsegs); | 1996 ndesc = calc_tx_descs_ofld(m, nsegs); |
1859again: cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec); | 1997 busdma_map_sgl(vsegs, segs, nsegs); |
1860 | 1998 |
1999 stx = &q->sdesc[q->pidx]; 2000 KASSERT(stx->mi.mi_base == NULL, ("mi_base set")); 2001 2002 mtx_lock(&q->lock); 2003again: reclaim_completed_tx(q); |
|
1861 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD); 1862 if (__predict_false(ret)) { 1863 if (ret == 1) { | 2004 ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD); 2005 if (__predict_false(ret)) { 2006 if (ret == 1) { |
2007 printf("no ofld desc avail\n"); 2008 |
|
1864 m_set_priority(m, ndesc); /* save for restart */ 1865 mtx_unlock(&q->lock); | 2009 m_set_priority(m, ndesc); /* save for restart */ 2010 mtx_unlock(&q->lock); |
1866 return EINTR; | 2011 return (EINTR); |
1867 } 1868 goto again; 1869 } 1870 1871 gen = q->gen; 1872 q->in_use += ndesc; 1873 pidx = q->pidx; 1874 q->pidx += ndesc; --- 6 unchanged lines hidden (view full) --- 1881 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u", 1882 ndesc, pidx, skb->len, skb->len - skb->data_len, 1883 skb_shinfo(skb)->nr_frags); 1884#endif 1885 mtx_unlock(&q->lock); 1886 1887 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs); 1888 check_ring_tx_db(adap, q); | 2012 } 2013 goto again; 2014 } 2015 2016 gen = q->gen; 2017 q->in_use += ndesc; 2018 pidx = q->pidx; 2019 q->pidx += ndesc; --- 6 unchanged lines hidden (view full) --- 2026 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u", 2027 ndesc, pidx, skb->len, skb->len - skb->data_len, 2028 skb_shinfo(skb)->nr_frags); 2029#endif 2030 mtx_unlock(&q->lock); 2031 2032 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs); 2033 check_ring_tx_db(adap, q); |
1889 1890 for (i = 0; i < cleaned; i++) { 1891 m_freem(m_vec[i]); 1892 } | 2034 |
1893 return (0); 1894} 1895 1896/** 1897 * restart_offloadq - restart a suspended offload queue 1898 * @qs: the queue set containing the offload queue 1899 * 1900 * Resumes transmission on a suspended Tx offload queue. 1901 */ 1902static void 1903restart_offloadq(void *data, int npending) 1904{ | 2035 return (0); 2036} 2037 2038/** 2039 * restart_offloadq - restart a suspended offload queue 2040 * @qs: the queue set containing the offload queue 2041 * 2042 * Resumes transmission on a suspended Tx offload queue. 2043 */ 2044static void 2045restart_offloadq(void *data, int npending) 2046{
1905 | |
1906 struct mbuf *m; 1907 struct sge_qset *qs = data; 1908 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 1909 adapter_t *adap = qs->port->adapter; | 2047 struct mbuf *m; 2048 struct sge_qset *qs = data; 2049 struct sge_txq *q = &qs->txq[TXQ_OFLD]; 2050 adapter_t *adap = qs->port->adapter; |
1910 struct mbuf *m_vec[TX_CLEAN_MAX_DESC]; | |
1911 bus_dma_segment_t segs[TX_MAX_SEGS]; | 2051 bus_dma_segment_t segs[TX_MAX_SEGS]; |
1912 int nsegs, i, cleaned; | |
1913 struct tx_sw_desc *stx = &q->sdesc[q->pidx]; | 2052 struct tx_sw_desc *stx = &q->sdesc[q->pidx]; |
2053 int nsegs, cleaned; |
|
1914 1915 mtx_lock(&q->lock); | 2054 2055 mtx_lock(&q->lock); |
1916again: cleaned = reclaim_completed_tx(q, TX_CLEAN_MAX_DESC, m_vec); | 2056again: cleaned = reclaim_completed_tx(q); |
1917 1918 while ((m = mbufq_peek(&q->sendq)) != NULL) { 1919 unsigned int gen, pidx; 1920 unsigned int ndesc = m_get_priority(m); 1921 1922 if (__predict_false(q->size - q->in_use < ndesc)) { 1923 setbit(&qs->txq_stopped, TXQ_OFLD); 1924 smp_mb(); --- 23 unchanged lines hidden (view full) --- 1948 mtx_unlock(&q->lock); 1949 1950#if USE_GTS 1951 set_bit(TXQ_RUNNING, &q->flags); 1952 set_bit(TXQ_LAST_PKT_DB, &q->flags); 1953#endif 1954 t3_write_reg(adap, A_SG_KDOORBELL, 1955 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | 2057 2058 while ((m = mbufq_peek(&q->sendq)) != NULL) { 2059 unsigned int gen, pidx; 2060 unsigned int ndesc = m_get_priority(m); 2061 2062 if (__predict_false(q->size - q->in_use < ndesc)) { 2063 setbit(&qs->txq_stopped, TXQ_OFLD); 2064 smp_mb(); --- 23 unchanged lines hidden (view full) --- 2088 mtx_unlock(&q->lock); 2089 2090#if USE_GTS 2091 set_bit(TXQ_RUNNING, &q->flags); 2092 set_bit(TXQ_LAST_PKT_DB, &q->flags); 2093#endif 2094 t3_write_reg(adap, A_SG_KDOORBELL, 2095 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
2096#if 0 |
|
1956 1957 for (i = 0; i < cleaned; i++) { | 2097 2098 for (i = 0; i < cleaned; i++) { |
1958 m_freem(m_vec[i]); | 2099 m_freem_vec(m_vec[i]); |
1959 } | 2100 } |
2101#endif |
|
1960} 1961 1962/** 1963 * queue_set - return the queue set a packet should use 1964 * @m: the packet 1965 * 1966 * Maps a packet to the SGE queue set it should use. The desired queue 1967 * set is carried in bits 1-3 in the packet's priority. --- 27 unchanged lines hidden (view full) --- 1995 * should be sent as regular or control, bits 1-3 select the queue set. 1996 */ 1997int 1998t3_offload_tx(struct t3cdev *tdev, struct mbuf *m) 1999{ 2000 adapter_t *adap = tdev2adap(tdev); 2001 struct sge_qset *qs = &adap->sge.qs[queue_set(m)]; 2002 | 2102} 2103 2104/** 2105 * queue_set - return the queue set a packet should use 2106 * @m: the packet 2107 * 2108 * Maps a packet to the SGE queue set it should use. The desired queue 2109 * set is carried in bits 1-3 in the packet's priority. --- 27 unchanged lines hidden (view full) --- 2137 * should be sent as regular or control, bits 1-3 select the queue set. 2138 */ 2139int 2140t3_offload_tx(struct t3cdev *tdev, struct mbuf *m) 2141{ 2142 adapter_t *adap = tdev2adap(tdev); 2143 struct sge_qset *qs = &adap->sge.qs[queue_set(m)]; 2144 |
2003 if (__predict_false(is_ctrl_pkt(m))) | 2145 if (__predict_false(is_ctrl_pkt(m))) |
2004 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m); 2005 2006 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m); 2007} 2008 2009/** 2010 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts 2011 * @tdev: the offload device that will be receiving the packets --- 14 unchanged lines hidden (view full) --- 2026 } 2027} 2028 2029static __inline int 2030rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, 2031 struct mbuf *m, struct mbuf *rx_gather[], 2032 unsigned int gather_idx) 2033{ | 2146 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m); 2147 2148 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m); 2149} 2150 2151/** 2152 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts 2153 * @tdev: the offload device that will be receiving the packets --- 14 unchanged lines hidden (view full) --- 2168 } 2169} 2170 2171static __inline int 2172rx_offload(struct t3cdev *tdev, struct sge_rspq *rq, 2173 struct mbuf *m, struct mbuf *rx_gather[], 2174 unsigned int gather_idx) 2175{ |
2176 |
|
2034 rq->offload_pkts++; 2035 m->m_pkthdr.header = mtod(m, void *); | 2177 rq->offload_pkts++; 2178 m->m_pkthdr.header = mtod(m, void *); |
2036 | |
2037 rx_gather[gather_idx++] = m; 2038 if (gather_idx == RX_BUNDLE_SIZE) { 2039 cxgb_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE); 2040 gather_idx = 0; 2041 rq->offload_bundles++; 2042 } 2043 return (gather_idx); 2044} 2045 2046static void 2047restart_tx(struct sge_qset *qs) 2048{ 2049 struct adapter *sc = qs->port->adapter; 2050 | 2179 rx_gather[gather_idx++] = m; 2180 if (gather_idx == RX_BUNDLE_SIZE) { 2181 cxgb_ofld_recv(tdev, rx_gather, RX_BUNDLE_SIZE); 2182 gather_idx = 0; 2183 rq->offload_bundles++; 2184 } 2185 return (gather_idx); 2186} 2187 2188static void 2189restart_tx(struct sge_qset *qs) 2190{ 2191 struct adapter *sc = qs->port->adapter; 2192 |
2193 |
|
2051 if (isset(&qs->txq_stopped, TXQ_OFLD) && 2052 should_restart_tx(&qs->txq[TXQ_OFLD]) && 2053 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { 2054 qs->txq[TXQ_OFLD].restarts++; | 2194 if (isset(&qs->txq_stopped, TXQ_OFLD) && 2195 should_restart_tx(&qs->txq[TXQ_OFLD]) && 2196 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { 2197 qs->txq[TXQ_OFLD].restarts++; |
2198 printf("restarting TXQ_OFLD\n"); |
|
2055 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 2056 } | 2199 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task); 2200 } |
2201 printf("stopped=0x%x restart=%d processed=%d cleaned=%d in_use=%d\n", 2202 qs->txq_stopped, should_restart_tx(&qs->txq[TXQ_CTRL]), 2203 qs->txq[TXQ_CTRL].processed, qs->txq[TXQ_CTRL].cleaned, 2204 qs->txq[TXQ_CTRL].in_use); 2205 |
|
2057 if (isset(&qs->txq_stopped, TXQ_CTRL) && 2058 should_restart_tx(&qs->txq[TXQ_CTRL]) && 2059 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { 2060 qs->txq[TXQ_CTRL].restarts++; | 2206 if (isset(&qs->txq_stopped, TXQ_CTRL) && 2207 should_restart_tx(&qs->txq[TXQ_CTRL]) && 2208 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { 2209 qs->txq[TXQ_CTRL].restarts++; |
2210 printf("restarting TXQ_CTRL\n"); |
|
2061 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 2062 } 2063} 2064 2065/** 2066 * t3_sge_alloc_qset - initialize an SGE queue set 2067 * @sc: the controller softc 2068 * @id: the queue set id --- 10 unchanged lines hidden (view full) --- 2079 */ 2080int 2081t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, 2082 const struct qset_params *p, int ntxq, struct port_info *pi) 2083{ 2084 struct sge_qset *q = &sc->sge.qs[id]; 2085 int i, ret = 0; 2086 | 2211 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task); 2212 } 2213} 2214 2215/** 2216 * t3_sge_alloc_qset - initialize an SGE queue set 2217 * @sc: the controller softc 2218 * @id: the queue set id --- 10 unchanged lines hidden (view full) --- 2229 */ 2230int 2231t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx, 2232 const struct qset_params *p, int ntxq, struct port_info *pi) 2233{ 2234 struct sge_qset *q = &sc->sge.qs[id]; 2235 int i, ret = 0; 2236 |
2237 for (i = 0; i < SGE_TXQ_PER_SET; i++) { 2238 if ((q->txq[i].txq_mr.br_ring = malloc(cxgb_txq_buf_ring_size*sizeof(struct mbuf *), 2239 M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { 2240 device_printf(sc->dev, "failed to allocate mbuf ring\n"); 2241 goto err; 2242 } 2243 q->txq[i].txq_mr.br_prod = q->txq[i].txq_mr.br_cons = 0; 2244 q->txq[i].txq_mr.br_size = cxgb_txq_buf_ring_size; 2245 mtx_init(&q->txq[i].txq_mr.br_lock, "txq mbuf ring", NULL, MTX_DEF); 2246 } 2247 |
|
2087 init_qset_cntxt(q, id); 2088 2089 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc), 2090 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr, 2091 &q->fl[0].desc, &q->fl[0].sdesc, 2092 &q->fl[0].desc_tag, &q->fl[0].desc_map, 2093 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) { 2094 printf("error %d from alloc ring fl0\n", ret); --- 55 unchanged lines hidden (view full) --- 2150 2151 q->rspq.gen = 1; 2152 q->rspq.cidx = 0; 2153 q->rspq.size = p->rspq_size; 2154 2155 q->txq[TXQ_ETH].stop_thres = nports * 2156 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3); 2157 | 2248 init_qset_cntxt(q, id); 2249 2250 if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc), 2251 sizeof(struct rx_sw_desc), &q->fl[0].phys_addr, 2252 &q->fl[0].desc, &q->fl[0].sdesc, 2253 &q->fl[0].desc_tag, &q->fl[0].desc_map, 2254 sc->rx_dmat, &q->fl[0].entry_tag)) != 0) { 2255 printf("error %d from alloc ring fl0\n", ret); --- 55 unchanged lines hidden (view full) --- 2311 2312 q->rspq.gen = 1; 2313 q->rspq.cidx = 0; 2314 q->rspq.size = p->rspq_size; 2315 2316 q->txq[TXQ_ETH].stop_thres = nports * 2317 flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3); 2318 |
2158 q->fl[0].buf_size = MCLBYTES; | 2319 q->fl[0].buf_size = (MCLBYTES - sizeof(uint32_t) - sizeof(struct m_hdr) - sizeof(struct pkthdr) - sizeof(struct m_ext_)); |
2159 q->fl[0].zone = zone_clust; 2160 q->fl[0].type = EXT_CLUSTER; | 2320 q->fl[0].zone = zone_clust; 2321 q->fl[0].type = EXT_CLUSTER; |
2161 q->fl[1].buf_size = MJUMPAGESIZE; 2162 q->fl[1].zone = zone_jumbop; 2163 q->fl[1].type = EXT_JUMBOP; 2164 | 2322#if __FreeBSD_version > 800000 2323 q->fl[1].buf_size = MJUM16BYTES - sizeof(uint32_t) - sizeof(struct m_hdr) - sizeof(struct pkthdr) - sizeof(struct m_ext_); 2324 q->fl[1].zone = zone_jumbo16; 2325 q->fl[1].type = EXT_JUMBO16; 2326#else 2327 q->fl[1].buf_size = MJUMPAGESIZE - sizeof(uint32_t) - sizeof(struct m_hdr) - sizeof(struct pkthdr) - sizeof(struct m_ext_); 2328 q->fl[1].zone = zone_jumbop; 2329 q->fl[1].type = EXT_JUMBOP; 2330#endif |
2165 q->lro.enabled = lro_default; 2166 2167 mtx_lock(&sc->sge.reg_lock); 2168 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx, 2169 q->rspq.phys_addr, q->rspq.size, 2170 q->fl[0].buf_size, 1, 0); 2171 if (ret) { 2172 printf("error %d from t3_sge_init_rspcntxt\n", ret); --- 91 unchanged lines hidden (view full) --- 2264 if (__predict_false(cpl->vlan_valid)) { 2265 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan); 2266 m->m_flags |= M_VLANTAG; 2267 } 2268#endif 2269 2270 m->m_pkthdr.rcvif = ifp; 2271 m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad; | 2331 q->lro.enabled = lro_default; 2332 2333 mtx_lock(&sc->sge.reg_lock); 2334 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx, 2335 q->rspq.phys_addr, q->rspq.size, 2336 q->fl[0].buf_size, 1, 0); 2337 if (ret) { 2338 printf("error %d from t3_sge_init_rspcntxt\n", ret); --- 91 unchanged lines hidden (view full) --- 2430 if (__predict_false(cpl->vlan_valid)) { 2431 m->m_pkthdr.ether_vtag = ntohs(cpl->vlan); 2432 m->m_flags |= M_VLANTAG; 2433 } 2434#endif 2435 2436 m->m_pkthdr.rcvif = ifp; 2437 m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad; |
2438#ifndef DISABLE_MBUF_IOVEC |
|
2272 m_explode(m); | 2439 m_explode(m); |
2440#endif |
|
2273 /* 2274 * adjust after conversion to mbuf chain 2275 */ | 2441 /* 2442 * adjust after conversion to mbuf chain 2443 */ |
2276 m_adj(m, sizeof(*cpl) + ethpad); | 2444 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad); 2445 m->m_len -= (sizeof(*cpl) + ethpad); 2446 m->m_data += (sizeof(*cpl) + ethpad); |
2277 2278 (*ifp->if_input)(ifp, m); 2279} 2280 2281/** 2282 * get_packet - return the next ingress packet buffer from a free list 2283 * @adap: the adapter that received the packet 2284 * @drop_thres: # of remaining buffers before we start dropping packets --- 17 unchanged lines hidden (view full) --- 2302{ 2303 2304 unsigned int len_cq = ntohl(r->len_cq); 2305 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2306 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2307 uint32_t len = G_RSPD_LEN(len_cq); 2308 uint32_t flags = ntohl(r->flags); 2309 uint8_t sopeop = G_RSPD_SOP_EOP(flags); | 2447 2448 (*ifp->if_input)(ifp, m); 2449} 2450 2451/** 2452 * get_packet - return the next ingress packet buffer from a free list 2453 * @adap: the adapter that received the packet 2454 * @drop_thres: # of remaining buffers before we start dropping packets --- 17 unchanged lines hidden (view full) --- 2472{ 2473 2474 unsigned int len_cq = ntohl(r->len_cq); 2475 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2476 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2477 uint32_t len = G_RSPD_LEN(len_cq); 2478 uint32_t flags = ntohl(r->flags); 2479 uint8_t sopeop = G_RSPD_SOP_EOP(flags); |
2480 uint32_t *ref; |
|
2310 int ret = 0; 2311 | 2481 int ret = 0; 2482 |
2312 prefetch(sd->cl); | 2483 prefetch(sd->rxsd_cl); |
2313 2314 fl->credits--; 2315 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD); 2316 bus_dmamap_unload(fl->entry_tag, sd->map); 2317 | 2484 2485 fl->credits--; 2486 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD); 2487 bus_dmamap_unload(fl->entry_tag, sd->map); 2488 |
2318 m_cljset(m, sd->cl, fl->type); | 2489 ref = sd->rxsd_ref; 2490 m_cljset(m, sd->rxsd_cl, fl->type, sd->rxsd_ref); 2491 *ref = 1; |
2319 m->m_len = len; | 2492 m->m_len = len; |
2320 | 2493 /* 2494 * bump past the refcnt address 2495 */ 2496 m->m_data = sd->data; 2497 |
2321 switch(sopeop) { 2322 case RSPQ_SOP_EOP: 2323 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m)); 2324 mh->mh_head = mh->mh_tail = m; 2325 m->m_pkthdr.len = len; 2326 m->m_flags |= M_PKTHDR; 2327 ret = 1; 2328 break; --- 29 unchanged lines hidden (view full) --- 2358 } 2359 if (++fl->cidx == fl->size) 2360 fl->cidx = 0; 2361 2362 return (ret); 2363} 2364 2365#else | 2498 switch(sopeop) { 2499 case RSPQ_SOP_EOP: 2500 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m)); 2501 mh->mh_head = mh->mh_tail = m; 2502 m->m_pkthdr.len = len; 2503 m->m_flags |= M_PKTHDR; 2504 ret = 1; 2505 break; --- 29 unchanged lines hidden (view full) --- 2535 } 2536 if (++fl->cidx == fl->size) 2537 fl->cidx = 0; 2538 2539 return (ret); 2540} 2541 2542#else |
2543static void 2544ext_free_handler(void *cl, void * arg) 2545{ 2546 uintptr_t type = (uintptr_t)arg; 2547 uma_zone_t zone; 2548 struct mbuf *m; 2549 2550 m = cl; 2551 zone = m_getzonefromtype(type); 2552 m->m_ext.ext_type = (int)type; 2553 cxgb_ext_freed++; 2554 cxgb_cache_put(zone, cl); 2555} 2556 2557static void 2558init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone) 2559{ 2560 struct mbuf *m; 2561 int header_size; 2562 2563 header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t); 2564 2565 bzero(cl, header_size); 2566 m = (struct mbuf *)cl; 2567 2568 SLIST_INIT(&m->m_pkthdr.tags); 2569 m->m_type = MT_DATA; 2570 m->m_flags = flags | M_NOFREE | M_EXT; 2571 m->m_data = cl + header_size; 2572 m->m_ext.ext_buf = cl; 2573 m->m_ext.ref_cnt = (uint32_t *)(cl + header_size - sizeof(uint32_t)); 2574 m->m_ext.ext_size = m_getsizefromtype(type); 2575 m->m_ext.ext_free = ext_free_handler; 2576 m->m_ext.ext_args = (void *)(uintptr_t)type; 2577 m->m_ext.ext_type = EXT_EXTREF; 2578 *(m->m_ext.ref_cnt) = 1; 2579 DPRINTF("data=%p ref_cnt=%p\n", m->m_data, m->m_ext.ref_cnt); 2580} 2581 |
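The new receive path embeds the mbuf header and the external-buffer reference count at the front of every free-list cluster, which is why the fl[].buf_size initializers earlier subtract sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t) from the raw cluster size. A sketch of the resulting payload capacity (hypothetical helper; the header layout is as set up by init_cluster_mbuf() above):

/*
 * Sketch of the cluster layout produced by init_cluster_mbuf():
 *
 *   cl -> [ mbuf header (m_hdr + pkthdr + m_ext_) | uint32_t refcnt | payload ]
 *
 * The value returned below is what q->fl[0].buf_size is initialized to.
 */
static __inline int
example_cluster_payload(void)
{
        int header_size;

        header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) +
            sizeof(struct m_ext_) + sizeof(uint32_t);
        return (MCLBYTES - header_size);
}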
|
2366static int 2367get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs, | 2582static int 2583get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs, |
2368 struct mbuf *m, struct rsp_desc *r) | 2584 struct mbuf **m, struct rsp_desc *r) |
2369{ 2370 2371 unsigned int len_cq = ntohl(r->len_cq); 2372 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2373 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2374 uint32_t len = G_RSPD_LEN(len_cq); 2375 uint32_t flags = ntohl(r->flags); 2376 uint8_t sopeop = G_RSPD_SOP_EOP(flags); 2377 void *cl; 2378 int ret = 0; | 2585{ 2586 2587 unsigned int len_cq = ntohl(r->len_cq); 2588 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; 2589 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2590 uint32_t len = G_RSPD_LEN(len_cq); 2591 uint32_t flags = ntohl(r->flags); 2592 uint8_t sopeop = G_RSPD_SOP_EOP(flags); 2593 void *cl; 2594 int ret = 0; |
2379 2380 prefetch(sd->cl); 2381 | 2595 struct mbuf *m0; 2596#if 0 2597 if ((sd + 1 )->rxsd_cl) 2598 prefetch((sd + 1)->rxsd_cl); 2599 if ((sd + 2)->rxsd_cl) 2600 prefetch((sd + 2)->rxsd_cl); 2601#endif 2602 DPRINTF("rx cpu=%d\n", curcpu); |
2382 fl->credits--; 2383 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD); 2384 2385 if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) { | 2603 fl->credits--; 2604 bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD); 2605 2606 if (recycle_enable && len <= SGE_RX_COPY_THRES && sopeop == RSPQ_SOP_EOP) { |
2386 cl = mtod(m, void *); 2387 memcpy(cl, sd->cl, len); | 2607 if ((m0 = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL) 2608 goto skip_recycle; 2609 cl = mtod(m0, void *); 2610 memcpy(cl, sd->data, len); |
2388 recycle_rx_buf(adap, fl, fl->cidx); | 2611 recycle_rx_buf(adap, fl, fl->cidx); |
2612 *m = m0; |
|
2389 } else { | 2613 } else { |
2390 cl = sd->cl; | 2614 skip_recycle: |
2391 bus_dmamap_unload(fl->entry_tag, sd->map); | 2615 bus_dmamap_unload(fl->entry_tag, sd->map); |
2616 cl = sd->rxsd_cl; 2617 *m = m0 = (struct mbuf *)cl; |
|
2392 } | 2618 } |
2619 |
|
2393 switch(sopeop) { 2394 case RSPQ_SOP_EOP: 2395 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m)); | 2620 switch(sopeop) { 2621 case RSPQ_SOP_EOP: 2622 DBG(DBG_RX, ("get_packet: SOP-EOP m %p\n", m)); |
2396 if (cl == sd->cl) 2397 m_cljset(m, cl, fl->type); 2398 m->m_len = m->m_pkthdr.len = len; | 2623 if (cl == sd->rxsd_cl) 2624 init_cluster_mbuf(cl, M_PKTHDR, fl->type, fl->zone); 2625 m0->m_len = m0->m_pkthdr.len = len; |
2399 ret = 1; 2400 goto done; 2401 break; 2402 case RSPQ_NSOP_NEOP: 2403 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m)); | 2626 ret = 1; 2627 goto done; 2628 break; 2629 case RSPQ_NSOP_NEOP: 2630 DBG(DBG_RX, ("get_packet: NO_SOP-NO_EOP m %p\n", m)); |
2631 panic("chaining unsupported"); |
|
2404 ret = 0; 2405 break; 2406 case RSPQ_SOP: 2407 DBG(DBG_RX, ("get_packet: SOP m %p\n", m)); | 2632 ret = 0; 2633 break; 2634 case RSPQ_SOP: 2635 DBG(DBG_RX, ("get_packet: SOP m %p\n", m)); |
2408 m_iovinit(m); | 2636 panic("chaining unsupported"); 2637 m_iovinit(m0); |
2409 ret = 0; 2410 break; 2411 case RSPQ_EOP: 2412 DBG(DBG_RX, ("get_packet: EOP m %p\n", m)); | 2638 ret = 0; 2639 break; 2640 case RSPQ_EOP: 2641 DBG(DBG_RX, ("get_packet: EOP m %p\n", m)); |
2642 panic("chaining unsupported"); |
|
2413 ret = 1; 2414 break; 2415 } | 2643 ret = 1; 2644 break; 2645 } |
2416 m_iovappend(m, cl, fl->buf_size, len, 0); 2417 | 2646 panic("append not supported"); 2647#if 0 2648 m_iovappend(m0, cl, fl->buf_size, len, sizeof(uint32_t), sd->rxsd_ref); 2649#endif |
2418done: 2419 if (++fl->cidx == fl->size) 2420 fl->cidx = 0; 2421 2422 return (ret); 2423} 2424#endif 2425/** --- 12 unchanged lines hidden (view full) --- 2438 2439#if USE_GTS 2440 if (flags & F_RSPD_TXQ0_GTS) 2441 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); 2442#endif 2443 credits = G_RSPD_TXQ0_CR(flags); 2444 if (credits) { 2445 qs->txq[TXQ_ETH].processed += credits; | 2650done: 2651 if (++fl->cidx == fl->size) 2652 fl->cidx = 0; 2653 2654 return (ret); 2655} 2656#endif 2657/** --- 12 unchanged lines hidden (view full) --- 2670 2671#if USE_GTS 2672 if (flags & F_RSPD_TXQ0_GTS) 2673 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); 2674#endif 2675 credits = G_RSPD_TXQ0_CR(flags); 2676 if (credits) { 2677 qs->txq[TXQ_ETH].processed += credits; |
2678#ifndef IFNET_MULTIQUEUE |
|
2446 if (desc_reclaimable(&qs->txq[TXQ_ETH]) > TX_START_MAX_DESC) 2447 taskqueue_enqueue(qs->port->adapter->tq, 2448 &qs->port->timer_reclaim_task); | 2679 if (desc_reclaimable(&qs->txq[TXQ_ETH]) > TX_START_MAX_DESC) 2680 taskqueue_enqueue(qs->port->adapter->tq, 2681 &qs->port->timer_reclaim_task); |
2682#endif |
|
2449 } 2450 2451 credits = G_RSPD_TXQ2_CR(flags); 2452 if (credits) 2453 qs->txq[TXQ_CTRL].processed += credits; 2454 2455# if USE_GTS 2456 if (flags & F_RSPD_TXQ1_GTS) 2457 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); 2458# endif 2459 credits = G_RSPD_TXQ1_CR(flags); 2460 if (credits) 2461 qs->txq[TXQ_OFLD].processed += credits; | 2683 } 2684 2685 credits = G_RSPD_TXQ2_CR(flags); 2686 if (credits) 2687 qs->txq[TXQ_CTRL].processed += credits; 2688 2689# if USE_GTS 2690 if (flags & F_RSPD_TXQ1_GTS) 2691 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); 2692# endif 2693 credits = G_RSPD_TXQ1_CR(flags); 2694 if (credits) 2695 qs->txq[TXQ_OFLD].processed += credits; |
2696 |
|
2462} 2463 2464static void 2465check_ring_db(adapter_t *adap, struct sge_qset *qs, 2466 unsigned int sleeping) 2467{ 2468 ; 2469} --- 8 unchanged lines hidden (view full) --- 2478 * Responses include received packets as well as credits and other events 2479 * for the queues that belong to the response queue's queue set. 2480 * A negative budget is effectively unlimited. 2481 * 2482 * Additionally choose the interrupt holdoff time for the next interrupt 2483 * on this queue. If the system is under memory shortage use a fairly 2484 * long delay to help recovery. 2485 */ | 2697} 2698 2699static void 2700check_ring_db(adapter_t *adap, struct sge_qset *qs, 2701 unsigned int sleeping) 2702{ 2703 ; 2704} --- 8 unchanged lines hidden (view full) --- 2713 * Responses include received packets as well as credits and other events 2714 * for the queues that belong to the response queue's queue set. 2715 * A negative budget is effectively unlimited. 2716 * 2717 * Additionally choose the interrupt holdoff time for the next interrupt 2718 * on this queue. If the system is under memory shortage use a fairly 2719 * long delay to help recovery. 2720 */ |
2486static int | 2721int |
2487process_responses(adapter_t *adap, struct sge_qset *qs, int budget) 2488{ 2489 struct sge_rspq *rspq = &qs->rspq; 2490 struct rsp_desc *r = &rspq->desc[rspq->cidx]; 2491 int budget_left = budget; 2492 unsigned int sleeping = 0; 2493 int lro = qs->lro.enabled; 2494 struct mbuf *offload_mbufs[RX_BUNDLE_SIZE]; --- 6 unchanged lines hidden (view full) --- 2501 } 2502#endif 2503 rspq->next_holdoff = rspq->holdoff_tmr; 2504 2505 while (__predict_true(budget_left && is_new_response(r, rspq))) { 2506 int eth, eop = 0, ethpad = 0; 2507 uint32_t flags = ntohl(r->flags); 2508 uint32_t rss_csum = *(const uint32_t *)r; | 2722process_responses(adapter_t *adap, struct sge_qset *qs, int budget) 2723{ 2724 struct sge_rspq *rspq = &qs->rspq; 2725 struct rsp_desc *r = &rspq->desc[rspq->cidx]; 2726 int budget_left = budget; 2727 unsigned int sleeping = 0; 2728 int lro = qs->lro.enabled; 2729 struct mbuf *offload_mbufs[RX_BUNDLE_SIZE]; --- 6 unchanged lines hidden (view full) --- 2736 } 2737#endif 2738 rspq->next_holdoff = rspq->holdoff_tmr; 2739 2740 while (__predict_true(budget_left && is_new_response(r, rspq))) { 2741 int eth, eop = 0, ethpad = 0; 2742 uint32_t flags = ntohl(r->flags); 2743 uint32_t rss_csum = *(const uint32_t *)r; |
2509 uint32_t rss_hash = r->rss_hdr.rss_hash_val; | 2744 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val); |
2510 2511 eth = (r->rss_hdr.opcode == CPL_RX_PKT); 2512 2513 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) { 2514 /* XXX */ 2515 printf("async notification\n"); 2516 2517 } else if (flags & F_RSPD_IMM_DATA_VALID) { 2518#ifdef DISABLE_MBUF_IOVEC 2519 | 2745 2746 eth = (r->rss_hdr.opcode == CPL_RX_PKT); 2747 2748 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) { 2749 /* XXX */ 2750 printf("async notification\n"); 2751 2752 } else if (flags & F_RSPD_IMM_DATA_VALID) { 2753#ifdef DISABLE_MBUF_IOVEC 2754 |
2520 if (cxgb_debug) 2521 printf("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n", r->rss_hdr.opcode, rspq->cidx); | 2755 DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n", r->rss_hdr.opcode, rspq->cidx); |
2522 2523 if(get_imm_packet(adap, r, &rspq->rspq_mh) == 0) { 2524 rspq->next_holdoff = NOMEM_INTR_DELAY; 2525 budget_left--; 2526 break; 2527 } else { 2528 eop = 1; 2529 } 2530#else 2531 struct mbuf *m = NULL; | 2756 2757 if(get_imm_packet(adap, r, &rspq->rspq_mh) == 0) { 2758 rspq->next_holdoff = NOMEM_INTR_DELAY; 2759 budget_left--; 2760 break; 2761 } else { 2762 eop = 1; 2763 } 2764#else 2765 struct mbuf *m = NULL; |
2532 | 2766 2767 DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n", r->rss_hdr.opcode, rspq->cidx); |
2533 if (rspq->rspq_mbuf == NULL) 2534 rspq->rspq_mbuf = m_gethdr(M_DONTWAIT, MT_DATA); | 2768 if (rspq->rspq_mbuf == NULL) 2769 rspq->rspq_mbuf = m_gethdr(M_DONTWAIT, MT_DATA); |
2535 else | 2770 else |
2536 m = m_gethdr(M_DONTWAIT, MT_DATA); 2537 2538 /* 2539 * XXX revisit me 2540 */ 2541 if (rspq->rspq_mbuf == NULL && m == NULL) { 2542 rspq->next_holdoff = NOMEM_INTR_DELAY; 2543 budget_left--; 2544 break; 2545 } | 2771 m = m_gethdr(M_DONTWAIT, MT_DATA); 2772 2773 /* 2774 * XXX revisit me 2775 */ 2776 if (rspq->rspq_mbuf == NULL && m == NULL) { 2777 rspq->next_holdoff = NOMEM_INTR_DELAY; 2778 budget_left--; 2779 break; 2780 } |
2546 if (get_imm_packet(adap, r, rspq->rspq_mbuf, m, flags)) 2547 goto skip; | 2781 get_imm_packet(adap, r, rspq->rspq_mbuf, m, flags); 2782 |
2548 eop = 1; | 2783 eop = 1; |
2549#endif | |
2550 rspq->imm_data++; | 2784 rspq->imm_data++; |
2785#endif |
|
2551 } else if (r->len_cq) { 2552 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0; 2553 2554#ifdef DISABLE_MBUF_IOVEC 2555 struct mbuf *m; | 2786 } else if (r->len_cq) { 2787 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0; 2788 2789#ifdef DISABLE_MBUF_IOVEC 2790 struct mbuf *m; |
2556 m = m_gethdr(M_NOWAIT, MT_DATA); | 2791 m = m_gethdr(M_DONTWAIT, MT_DATA); |
2557 2558 if (m == NULL) { 2559 log(LOG_WARNING, "failed to get mbuf for packet\n"); 2560 break; | 2792 2793 if (m == NULL) { 2794 log(LOG_WARNING, "failed to get mbuf for packet\n"); 2795 break; |
2796 } else { 2797 m->m_next = m->m_nextpkt = NULL; |
|
2561 } | 2798 } |
2562 | 2799 |
2563 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r, m); 2564#else | 2800 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r, m); 2801#else |
2565 if (rspq->rspq_mbuf == NULL) 2566 rspq->rspq_mbuf = m_gethdr(M_DONTWAIT, MT_DATA); 2567 if (rspq->rspq_mbuf == NULL) { 2568 log(LOG_WARNING, "failed to get mbuf for packet\n"); 2569 break; 2570 } 2571 eop = get_packet(adap, drop_thresh, qs, rspq->rspq_mbuf, r); | 2802 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r); 2803#ifdef IFNET_MULTIQUEUE 2804 rspq->rspq_mbuf->m_pkthdr.rss_hash = rss_hash; 2805#endif |
2572#endif 2573 ethpad = 2; 2574 } else { 2575 DPRINTF("pure response\n"); 2576 rspq->pure_rsps++; 2577 } | 2806#endif 2807 ethpad = 2; 2808 } else { 2809 DPRINTF("pure response\n"); 2810 rspq->pure_rsps++; 2811 } |
2578 | |
2579 if (flags & RSPD_CTRL_MASK) { 2580 sleeping |= flags & RSPD_GTS_MASK; 2581 handle_rsp_cntrl_info(qs, flags); 2582 } | 2812 if (flags & RSPD_CTRL_MASK) { 2813 sleeping |= flags & RSPD_GTS_MASK; 2814 handle_rsp_cntrl_info(qs, flags); 2815 } |
2583#ifndef DISABLE_MBUF_IOVEC 2584 skip: 2585#endif | 2816 |
2586 r++; 2587 if (__predict_false(++rspq->cidx == rspq->size)) { 2588 rspq->cidx = 0; 2589 rspq->gen ^= 1; 2590 r = rspq->desc; 2591 } | 2817 r++; 2818 if (__predict_false(++rspq->cidx == rspq->size)) { 2819 rspq->cidx = 0; 2820 rspq->gen ^= 1; 2821 r = rspq->desc; 2822 } |
2592 | |
2593 prefetch(r); 2594 if (++rspq->credits >= (rspq->size / 4)) { 2595 refill_rspq(adap, rspq, rspq->credits); 2596 rspq->credits = 0; 2597 } | 2823 prefetch(r); 2824 if (++rspq->credits >= (rspq->size / 4)) { 2825 refill_rspq(adap, rspq, rspq->credits); 2826 rspq->credits = 0; 2827 } |
2598 2599 if (eop) { | 2828 DPRINTF("eth=%d eop=%d flags=0x%x\n", eth, eop, flags); 2829 2830 if (!eth && eop) { 2831 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum; 2832 /* 2833 * XXX size mismatch 2834 */ 2835 m_set_priority(rspq->rspq_mh.mh_head, rss_hash); 2836 2837 ngathered = rx_offload(&adap->tdev, rspq, 2838 rspq->rspq_mh.mh_head, offload_mbufs, ngathered); 2839 rspq->rspq_mh.mh_head = NULL; 2840 DPRINTF("received offload packet\n"); 2841 2842 } else if (eth && eop) { |
2600 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *)); | 2843 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *)); |
2601 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES); | 2844 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES); |
2602 | 2845 |
2603 if (eth) { 2604 t3_rx_eth_lro(adap, rspq, rspq->rspq_mh.mh_head, ethpad, 2605 rss_hash, rss_csum, lro); 2606 | 2846 t3_rx_eth_lro(adap, rspq, rspq->rspq_mh.mh_head, ethpad, 2847 rss_hash, rss_csum, lro); 2848 DPRINTF("received tunnel packet\n"); |
2607 rspq->rspq_mh.mh_head = NULL; | 2849 rspq->rspq_mh.mh_head = NULL; |
2608 } else { 2609 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum; 2610 /* 2611 * XXX size mismatch 2612 */ 2613 m_set_priority(rspq->rspq_mh.mh_head, rss_hash); 2614 2615 ngathered = rx_offload(&adap->tdev, rspq, 2616 rspq->rspq_mh.mh_head, offload_mbufs, ngathered); 2617 } 2618 __refill_fl(adap, &qs->fl[0]); 2619 __refill_fl(adap, &qs->fl[1]); | |
2620 2621 } | 2850 2851 } |
2852 __refill_fl_lt(adap, &qs->fl[0], 32); 2853 __refill_fl_lt(adap, &qs->fl[1], 32); |
|
2622 --budget_left; 2623 } 2624 2625 deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered); 2626 t3_lro_flush(adap, qs, &qs->lro); 2627 2628 if (sleeping) 2629 check_ring_db(adap, qs, sleeping); 2630 2631 smp_mb(); /* commit Tx queue processed updates */ | 2854 --budget_left; 2855 } 2856 2857 deliver_partial_bundle(&adap->tdev, rspq, offload_mbufs, ngathered); 2858 t3_lro_flush(adap, qs, &qs->lro); 2859 2860 if (sleeping) 2861 check_ring_db(adap, qs, sleeping); 2862 2863 smp_mb(); /* commit Tx queue processed updates */ |
2632 if (__predict_false(qs->txq_stopped != 0)) | 2864 if (__predict_false(qs->txq_stopped != 0)) { 2865 printf("restarting tx on %p\n", qs); 2866 |
2633 restart_tx(qs); | 2867 restart_tx(qs); |
2634 | 2868 } 2869 2870 __refill_fl_lt(adap, &qs->fl[0], 512); 2871 __refill_fl_lt(adap, &qs->fl[1], 512); |
2635 budget -= budget_left; 2636 return (budget); 2637} 2638 2639/* 2640 * A helper function that processes responses and issues GTS. 2641 */ 2642static __inline int --- 70 unchanged lines hidden (view full) --- 2713 2714void 2715t3_intr_msix(void *data) 2716{ 2717 struct sge_qset *qs = data; 2718 adapter_t *adap = qs->port->adapter; 2719 struct sge_rspq *rspq = &qs->rspq; 2720 | 2872 budget -= budget_left; 2873 return (budget); 2874} 2875 2876/* 2877 * A helper function that processes responses and issues GTS. 2878 */ 2879static __inline int --- 70 unchanged lines hidden (view full) --- 2950 2951void 2952t3_intr_msix(void *data) 2953{ 2954 struct sge_qset *qs = data; 2955 adapter_t *adap = qs->port->adapter; 2956 struct sge_rspq *rspq = &qs->rspq; 2957 |
2721 mtx_lock(&rspq->lock); 2722 if (process_responses_gts(adap, rspq) == 0) 2723 rspq->unhandled_irqs++; 2724 mtx_unlock(&rspq->lock); | 2958 if (mtx_trylock(&rspq->lock)) { 2959 if (process_responses_gts(adap, rspq) == 0) 2960 rspq->unhandled_irqs++; 2961 mtx_unlock(&rspq->lock); 2962 } |
2725} 2726 2727/* 2728 * broken by recent mbuf changes 2729 */ 2730static int 2731t3_lro_enable(SYSCTL_HANDLER_ARGS) 2732{ --- 27 unchanged lines hidden (view full) --- 2760t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS) 2761{ 2762 adapter_t *sc = arg1; 2763 struct qset_params *qsp = &sc->params.sge.qset[0]; 2764 int coalesce_nsecs; 2765 struct sge_qset *qs; 2766 int i, j, err, nqsets = 0; 2767 struct mtx *lock; | 2963} 2964 2965/* 2966 * broken by recent mbuf changes 2967 */ 2968static int 2969t3_lro_enable(SYSCTL_HANDLER_ARGS) 2970{ --- 27 unchanged lines hidden (view full) --- 2998t3_set_coalesce_nsecs(SYSCTL_HANDLER_ARGS) 2999{ 3000 adapter_t *sc = arg1; 3001 struct qset_params *qsp = &sc->params.sge.qset[0]; 3002 int coalesce_nsecs; 3003 struct sge_qset *qs; 3004 int i, j, err, nqsets = 0; 3005 struct mtx *lock; |
2768 | 3006 3007 if ((sc->flags & FULL_INIT_DONE) == 0) 3008 return (ENXIO); 3009 |
2769 coalesce_nsecs = qsp->coalesce_nsecs; 2770 err = sysctl_handle_int(oidp, &coalesce_nsecs, arg2, req); 2771 2772 if (err != 0) { 2773 return (err); 2774 } 2775 if (coalesce_nsecs == qsp->coalesce_nsecs) 2776 return (0); --- 19 unchanged lines hidden (view full) --- 2796 mtx_unlock(lock); 2797 } 2798 2799 return (0); 2800} 2801 2802 2803void | 3010 coalesce_nsecs = qsp->coalesce_nsecs; 3011 err = sysctl_handle_int(oidp, &coalesce_nsecs, arg2, req); 3012 3013 if (err != 0) { 3014 return (err); 3015 } 3016 if (coalesce_nsecs == qsp->coalesce_nsecs) 3017 return (0); --- 19 unchanged lines hidden (view full) --- 3037 mtx_unlock(lock); 3038 } 3039 3040 return (0); 3041} 3042 3043 3044void |
2804t3_add_sysctls(adapter_t *sc) | 3045t3_add_attach_sysctls(adapter_t *sc) |
2805{ 2806 struct sysctl_ctx_list *ctx; 2807 struct sysctl_oid_list *children; | 3046{ 3047 struct sysctl_ctx_list *ctx; 3048 struct sysctl_oid_list *children; |
2808 | 3049 |
2809 ctx = device_get_sysctl_ctx(sc->dev); 2810 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2811 2812 /* random information */ 2813 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 2814 "firmware_version", 2815 CTLFLAG_RD, &sc->fw_version, 2816 0, "firmware version"); 2817 2818 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 2819 "enable_lro", 2820 CTLTYPE_INT|CTLFLAG_RW, sc, 2821 0, t3_lro_enable, 2822 "I", "enable large receive offload"); 2823 | 3050 ctx = device_get_sysctl_ctx(sc->dev); 3051 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 3052 3053 /* random information */ 3054 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 3055 "firmware_version", 3056 CTLFLAG_RD, &sc->fw_version, 3057 0, "firmware version"); 3058 3059 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 3060 "enable_lro", 3061 CTLTYPE_INT|CTLFLAG_RW, sc, 3062 0, t3_lro_enable, 3063 "I", "enable large receive offload"); 3064 |
2824 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 2825 "intr_coal", 2826 CTLTYPE_INT|CTLFLAG_RW, sc, 2827 0, t3_set_coalesce_nsecs, 2828 "I", "interrupt coalescing timer (ns)"); | |
2829 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 2830 "enable_debug", 2831 CTLFLAG_RW, &cxgb_debug, 2832 0, "enable verbose debugging output"); | 3065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3066 "enable_debug", 3067 CTLFLAG_RW, &cxgb_debug, 3068 0, "enable verbose debugging output"); |
2833 | 3069 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tunq_coalesce", 3070 CTLFLAG_RD, &sc->tunq_coalesce, 3071 "#tunneled packets freed"); |
2834 SYSCTL_ADD_INT(ctx, children, OID_AUTO, | 3072 SYSCTL_ADD_INT(ctx, children, OID_AUTO, |
2835 "collapse_free", 2836 CTLFLAG_RD, &collapse_free, 2837 0, "frees during collapse"); 2838 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 2839 "mb_free_vec_free", 2840 CTLFLAG_RD, &mb_free_vec_free, 2841 0, "frees during mb_free_vec"); 2842 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 2843 "collapse_mbufs", 2844 CTLFLAG_RW, &collapse_mbufs, 2845 0, "collapse mbuf chains into iovecs"); 2846 SYSCTL_ADD_INT(ctx, children, OID_AUTO, | |
2847 "txq_overrun", 2848 CTLFLAG_RD, &txq_fills, 2849 0, "#times txq overrun"); 2850 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 2851 "bogus_imm", 2852 CTLFLAG_RD, &bogus_imm, 2853 0, "#times a bogus immediate response was seen"); | 3073 "txq_overrun", 3074 CTLFLAG_RD, &txq_fills, 3075 0, "#times txq overrun"); 3076 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3077 "bogus_imm", 3078 CTLFLAG_RD, &bogus_imm, 3079 0, "#times a bogus immediate response was seen"); |
3080 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3081 "cache_alloc", 3082 CTLFLAG_RD, &cxgb_cached_allocations, 3083 0, "#times a cluster was allocated from cache"); 3084 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3085 "cached", 3086 CTLFLAG_RD, &cxgb_cached, 3087 0, "#times a cluster was cached"); 3088 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 3089 "ext_freed", 3090 CTLFLAG_RD, &cxgb_ext_freed, 3091 0, "#times a cluster was freed through ext_free"); 3092 |
|
2854} 2855 | 3093} 3094 |
3095void 3096t3_add_configured_sysctls(adapter_t *sc) 3097{ 3098 struct sysctl_ctx_list *ctx; 3099 struct sysctl_oid_list *children; 3100 int i, j; 3101 3102 ctx = device_get_sysctl_ctx(sc->dev); 3103 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 3104 3105 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 3106 "intr_coal", 3107 CTLTYPE_INT|CTLFLAG_RW, sc, 3108 0, t3_set_coalesce_nsecs, 3109 "I", "interrupt coalescing timer (ns)"); 3110 3111 for (i = 0; i < sc->params.nports; i++) { 3112 struct port_info *pi = &sc->port[i]; 3113 struct sysctl_oid *poid; 3114 struct sysctl_oid_list *poidlist; 3115 3116 snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i); 3117 poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, 3118 pi->namebuf, CTLFLAG_RD, NULL, "port statistics"); 3119 poidlist = SYSCTL_CHILDREN(poid); 3120 SYSCTL_ADD_INT(ctx, poidlist, OID_AUTO, 3121 "nqsets", CTLFLAG_RD, &pi->nqsets, 3122 0, "#queue sets"); 3123 3124 for (j = 0; j < pi->nqsets; j++) { 3125 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j]; 3126 struct sysctl_oid *qspoid; 3127 struct sysctl_oid_list *qspoidlist; 3128 struct sge_txq *txq = &qs->txq[TXQ_ETH]; 3129 3130 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j); 3131 3132 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, 3133 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics"); 3134 qspoidlist = SYSCTL_CHILDREN(qspoid); 3135 3136 SYSCTL_ADD_INT(ctx, qspoidlist, OID_AUTO, "dropped", 3137 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops, 3138 0, "#tunneled packets dropped"); 3139 SYSCTL_ADD_INT(ctx, qspoidlist, OID_AUTO, "sendqlen", 3140 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen, 3141 0, "#tunneled packets waiting to be sent"); 3142 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "queue_pidx", 3143 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod, 3144 0, "#tunneled packets queue producer index"); 3145 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "queue_cidx", 3146 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons, 3147 0, "#tunneled packets queue consumer index"); 3148 SYSCTL_ADD_INT(ctx, qspoidlist, OID_AUTO, "processed", 3149 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed, 3150 0, "#tunneled packets processed by the card"); 3151 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "cleaned", 3152 CTLFLAG_RD, &txq->cleaned, 3153 0, "#tunneled packets cleaned"); 3154 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "in_use", 3155 CTLFLAG_RD, &txq->in_use, 3156 0, "#tunneled packet slots in use"); 3157 SYSCTL_ADD_ULONG(ctx, qspoidlist, OID_AUTO, "frees", 3158 CTLFLAG_RD, &txq->txq_frees, 3159 "#tunneled packets freed"); 3160 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "skipped", 3161 CTLFLAG_RD, &txq->txq_skipped, 3162 0, "#tunneled packet descriptors skipped"); 3163 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "coalesced", 3164 CTLFLAG_RD, &txq->txq_coalesced, 3165 0, "#tunneled packets coalesced"); 3166 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "enqueued", 3167 CTLFLAG_RD, &txq->txq_enqueued, 3168 0, "#tunneled packets enqueued to hardware"); 3169 SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "stopped_flags", 3170 CTLFLAG_RD, &qs->txq_stopped, 3171 0, "tx queues stopped"); 3172 3173 } 3174 } 3175} 3176 |
|
2856/** 2857 * t3_get_desc - dump an SGE descriptor for debugging purposes 2858 * @qs: the queue set 2859 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx) 2860 * @idx: the descriptor index in the queue 2861 * @data: where to dump the descriptor contents 2862 * 2863 * Dumps the contents of a HW descriptor of an SGE queue. Returns the --- 29 unchanged lines hidden --- | 3177/** 3178 * t3_get_desc - dump an SGE descriptor for debugging purposes 3179 * @qs: the queue set 3180 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx) 3181 * @idx: the descriptor index in the queue 3182 * @data: where to dump the descriptor contents 3183 * 3184 * Dumps the contents of a HW descriptor of an SGE queue. Returns the --- 29 unchanged lines hidden --- |
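A small debugging sketch for t3_get_desc(); the body sits in the elided hunk, so the signature and the assumption that a successful call returns the number of descriptor bytes copied are taken from the parameter list in the comment above and are not guaranteed by this diff:

/*
 * Hedged example: hex-dump one Ethernet Tx descriptor (queue 0 of the set).
 * example_dump_desc() is illustrative only.
 */
static void
example_dump_desc(const struct sge_qset *qs, unsigned int idx)
{
        unsigned char data[256];
        int i, len;

        len = t3_get_desc(qs, 0 /* TXQ_ETH */, idx, data);
        for (i = 0; i < len && i < (int)sizeof(data); i++)
                printf("%02x%c", data[i], ((i & 15) == 15) ? '\n' : ' ');
        printf("\n");
}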