// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
 *
 *	(C)Copyright 1998,1999 SysKonnect,
 *	a business unit of Schneider & Koch & Co. Datensysteme GmbH.
 *
 *	See the file "skfddi.c" for further information.
 *
 *	The information in this file is provided "AS IS" without warranty.
 *
 ******************************************************************************/

#ifndef	lint
static char const ID_sccs[] = "@(#)hwmtm.c	1.40 99/05/31 (C) SK" ;
#endif

#define	HWMTM

#ifndef	FDDI
#define	FDDI
#endif

#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#include "h/skfbiinc.h"

/*
	-------------------------------------------------------------
	DOCUMENTATION
	-------------------------------------------------------------
	BEGIN_MANUAL_ENTRY(DOCUMENTATION)

			T B D

	END_MANUAL_ENTRY
*/
/*
	-------------------------------------------------------------
	LOCAL VARIABLES:
	-------------------------------------------------------------
*/
#ifdef	COMMON_MB_POOL
static	SMbuf *mb_start = 0 ;
static	SMbuf *mb_free = 0 ;
static	int mb_init = FALSE ;
static	int call_count = 0 ;
#endif

/*
	-------------------------------------------------------------
	EXTERNAL VARIABLES:
	-------------------------------------------------------------
*/

#ifdef	DEBUG
#ifndef	DEBUG_BRD
extern	struct smt_debug debug ;
#endif
#endif

#ifdef	NDIS_OS2
extern	u_char	offDepth ;
extern	u_char	force_irq_pending ;
#endif

/*
	-------------------------------------------------------------
	LOCAL FUNCTIONS:
	-------------------------------------------------------------
*/

static void queue_llc_rx(struct s_smc *smc, SMbuf *mb);
static void smt_to_llc(struct s_smc *smc, SMbuf *mb);
static void init_txd_ring(struct s_smc *smc);
static void init_rxd_ring(struct s_smc *smc);
static void queue_txd_mb(struct s_smc *smc, SMbuf *mb);
static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count);
static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
static SMbuf* get_llc_rx(struct s_smc *smc);
static SMbuf* get_txd_mb(struct s_smc *smc);
static void mac_drv_clear_txd(struct s_smc *smc);

/*
	-------------------------------------------------------------
	EXTERNAL FUNCTIONS:
	-------------------------------------------------------------
*/
/*	The external SMT functions are listed in cmtdef.h */

extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size);
extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size);
extern void mac_drv_fill_rxd(struct s_smc *smc);
extern void mac_drv_tx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_txd *txd);
extern void mac_drv_rx_complete(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count, int len);
extern void mac_drv_requeue_rxd(struct s_smc *smc,
				volatile struct s_smt_fp_rxd *rxd,
				int frag_count);
extern void mac_drv_clear_rxd(struct s_smc *smc,
			      volatile struct s_smt_fp_rxd *rxd,
			      int frag_count);

#ifdef	USE_OS_CPY
extern void hwm_cpy_rxd2mb(void);
extern void hwm_cpy_txd2mb(void);
#endif

#ifdef	ALL_RX_COMPLETE
extern void mac_drv_all_receives_complete(void);
#endif

extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt);
extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag);

#ifdef	NDIS_OS2
extern void post_proc(void);
#else
extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
			 int flag);
#endif

extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
			   int la_len);

/*
	-------------------------------------------------------------
	PUBLIC FUNCTIONS:
	-------------------------------------------------------------
*/
void process_receive(struct s_smc *smc);
void fddi_isr(struct s_smc *smc);
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb);
void init_driver_fplus(struct s_smc *smc);
void mac_drv_rx_mode(struct s_smc *smc, int mode);
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
void mac_drv_clear_tx_queue(struct s_smc *smc);
void mac_drv_clear_rx_queue(struct s_smc *smc);
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status);

int mac_drv_init(struct s_smc *smc);
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status);

u_int mac_drv_check_space(void);

SMbuf* smt_get_mbuf(struct s_smc *smc);

#ifdef	DEBUG
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
#endif

/*
	-------------------------------------------------------------
	MACROS:
	-------------------------------------------------------------
*/
#ifndef	UNUSED
#ifdef	lint
#define UNUSED(x)	(x) = (x)
#else
#define UNUSED(x)
#endif
#endif

#ifdef	USE_CAN_ADDR
#define MA		smc->hw.fddi_canon_addr.a
#define	GROUP_ADDR_BIT	0x01
#else
#define	MA		smc->hw.fddi_home_addr.a
#define	GROUP_ADDR_BIT	0x80
#endif

#define RXD_TXD_COUNT	(HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\
			SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT)

#ifdef	MB_OUTSIDE_SMC
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\
			MAX_MBUF*sizeof(SMbuf))
#define	EXT_VIRT_MEM_2	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#else
#define	EXT_VIRT_MEM	((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd))
#endif

/*
 * define critical read for 16 Bit drivers
 */
#if	defined(NDIS_OS2) || defined(ODI2)
#define CR_READ(var)	((var) & 0xffff0000 | ((var) & 0xffff))
#else
#define CR_READ(var)	(__le32)(var)
#endif

#define IMASK_SLOW	(IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
			 IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \
			 IS_R1_C | IS_XA_C | IS_XS_C)

/*
	-------------------------------------------------------------
	INIT- AND SMT FUNCTIONS:
	-------------------------------------------------------------
*/
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_check_space)
 *	u_int mac_drv_check_space()
 *
 * function	DOWNCALL	(drvsr.c)
 *		This function calculates the non-virtual memory needed by
 *		the driver for MBufs, RxD and TxD descriptors etc.
 *
 * return	u_int	memory in bytes
 *
 * END_MANUAL_ENTRY
 */
u_int mac_drv_check_space(void)
{
#ifdef	MB_OUTSIDE_SMC
#ifdef	COMMON_MB_POOL
	call_count++ ;
	if (call_count == 1) {
		return EXT_VIRT_MEM;
	}
	else {
		return EXT_VIRT_MEM_2;
	}
#else
	return EXT_VIRT_MEM;
#endif
#else
	return 0;
#endif
}

/*
 * BEGIN_MANUAL_ENTRY(mac_drv_init)
 *	void mac_drv_init(smc)
 *
 * function	DOWNCALL	(drvsr.c)
 *		In this function the hardware module allocates its
 *		memory.
 *		The operating system dependent module should call
 *		mac_drv_init once, after the adapter is detected.
 * END_MANUAL_ENTRY
 */
int mac_drv_init(struct s_smc *smc)
{
	if (sizeof(struct s_smt_fp_rxd) % 16) {
		SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ;
	}
	if (sizeof(struct s_smt_fp_txd) % 16) {
		SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ;
	}

	/*
	 * get the required memory for the RxDs and TxDs
	 */
	if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
		mac_drv_get_desc_mem(smc,(u_int)
		(RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
		return 1;	/* no space: the hwm module can't work */
	}

	/*
	 * get the memory for the SMT MBufs
	 */
#ifndef	MB_OUTSIDE_SMC
	smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ;
#else
#ifndef	COMMON_MB_POOL
	if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
		MAX_MBUF*sizeof(SMbuf)))) {
		return 1;	/* no space: the hwm module can't work */
	}
#else
	if (!mb_start) {
		if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
			MAX_MBUF*sizeof(SMbuf)))) {
			return 1;	/* no space: the hwm module can't work */
		}
	}
#endif
#endif
	return 0;
}
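
/*
 * Usage sketch (illustrative only, compiled out): how an OS-specific
 * module might combine the two downcalls above during adapter attach.
 * The function name attach_adapter() is hypothetical and not part of
 * this driver; mac_drv_get_space()/mac_drv_get_desc_mem() must be able
 * to satisfy requests up to the size reported by mac_drv_check_space().
 */
#if 0
static int attach_adapter(struct s_smc *smc)
{
	u_int	needed ;

	needed = mac_drv_check_space() ;	/* bytes the HWM will request */
	DB_GEN(3, "HWM needs %u bytes", needed);
	if (mac_drv_init(smc) != 0) {
		return 1 ;	/* descriptor or MBuf allocation failed */
	}
	return 0 ;
}
#endif	/* usage sketch */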
/*
 * BEGIN_MANUAL_ENTRY(init_driver_fplus)
 *	init_driver_fplus(smc)
 *
 * Sets hardware module specific values for the mode register 2
 * (e.g. the byte alignment for the received frames, the position of the
 * least significant byte etc.)
 * END_MANUAL_ENTRY
 */
void init_driver_fplus(struct s_smc *smc)
{
	smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ;

#ifdef	PCI
	smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ;
#endif
	smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ;

#ifdef	USE_CAN_ADDR
	/* enable address bit swapping */
	smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ;
#endif
}

static u_long init_descr_ring(struct s_smc *smc,
			      union s_fp_descr volatile *start,
			      int count)
{
	int i ;
	union s_fp_descr volatile *d1 ;
	union s_fp_descr volatile *d2 ;
	u_long	phys ;

	DB_GEN(3, "descr ring starts at = %p", start);
	for (i=count-1, d1=start; i ; i--) {
		d2 = d1 ;
		d1++ ;		/* descr is owned by the host */
		d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
		d2->r.rxd_next = &d1->r ;
		phys = mac_drv_virt2phys(smc,(void *)d1) ;
		d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
	}
	DB_GEN(3, "descr ring ends at = %p", d1);
	d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
	d1->r.rxd_next = &start->r ;
	phys = mac_drv_virt2phys(smc,(void *)start) ;
	d1->r.rxd_nrdadr = cpu_to_le32(phys) ;

	for (i=count, d1=start; i ; i--) {
		DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
		d1++;
	}
	return phys;
}

static void init_txd_ring(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *ds ;
	struct s_smt_tx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the transmit descriptors
	 */
	ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
		SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_ASYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	ds-- ;
	queue->tx_free = HWM_ASYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XA_DA),phys) ;

	ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
		HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
	queue = smc->hw.fp.tx[QUEUE_S] ;
	DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		HWM_SYNC_TXD_COUNT) ;
	phys = le32_to_cpu(ds->txd_ntdadr) ;
	ds++ ;
	queue->tx_curr_put = queue->tx_curr_get = ds ;
	queue->tx_free = HWM_SYNC_TXD_COUNT ;
	queue->tx_used = 0 ;
	outpd(ADDR(B5_XS_DA),phys) ;
}

static void init_rxd_ring(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *ds ;
	struct s_smt_rx_queue *queue ;
	u_long	phys ;

	/*
	 * initialize the receive descriptors
	 */
	ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
	(void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
		SMT_R1_RXD_COUNT) ;
	phys = le32_to_cpu(ds->rxd_nrdadr) ;
	ds++ ;
	queue->rx_curr_put = queue->rx_curr_get = ds ;
	queue->rx_free = SMT_R1_RXD_COUNT ;
	queue->rx_used = 0 ;
	outpd(ADDR(B4_R1_DA),phys) ;
}
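
/*
 * Usage sketch (illustrative only, compiled out): init_descr_ring()
 * links the descriptors into a circular list via rxd_next and stores
 * each successor's physical address in rxd_nrdadr. The helper below,
 * ring_length(), is hypothetical; it only demonstrates the resulting
 * ring shape by walking rxd_next until it returns to the start.
 */
#if 0
static int ring_length(union s_fp_descr volatile *start)
{
	union s_fp_descr volatile *d = start ;
	int n = 0 ;

	do {
		d = (union s_fp_descr volatile *) d->r.rxd_next ;
		n++ ;
	} while (d != start) ;	/* ring is circular by construction */
	return n ;
}
#endif	/* usage sketch */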
/*
 * BEGIN_MANUAL_ENTRY(init_fddi_driver)
 *	void init_fddi_driver(smc,mac_addr)
 *
 * initializes the driver and its variables
 *
 * END_MANUAL_ENTRY
 */
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
{
	SMbuf	*mb ;
	int	i ;

	init_board(smc,mac_addr) ;
	(void)init_fplus(smc) ;

	/*
	 * initialize the SMbufs for the SMT
	 */
#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_start ;
	smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ;
	for (i = 0; i < MAX_MBUF; i++) {
		mb->sm_use_count = 1 ;
		smt_free_mbuf(smc,mb) ;
		mb++ ;
	}
#else
	mb = mb_start ;
	if (!mb_init) {
		mb_free = 0 ;
		for (i = 0; i < MAX_MBUF; i++) {
			mb->sm_use_count = 1 ;
			smt_free_mbuf(smc,mb) ;
			mb++ ;
		}
		mb_init = TRUE ;
	}
#endif

	/*
	 * initialize the other variables
	 */
	smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ;
	smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ;
	smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ;
	smc->os.hwm.pass_llc_promisc = TRUE ;
	smc->os.hwm.queued_rx_frames = smc->os.hwm.queued_txd_mb = 0 ;
	smc->os.hwm.detec_count = 0 ;
	smc->os.hwm.rx_break = 0 ;
	smc->os.hwm.rx_len_error = 0 ;
	smc->os.hwm.isr_flag = FALSE ;

	/*
	 * make sure that the start pointer is 16 byte aligned
	 */
	i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
	if (i != 16) {
		DB_GEN(3, "i = %d", i);
		smc->os.hwm.descr_p = (union s_fp_descr volatile *)
			((char *)smc->os.hwm.descr_p+i) ;
	}
	DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);

	init_txd_ring(smc) ;
	init_rxd_ring(smc) ;
	mac_drv_fill_rxd(smc) ;

	init_plc(smc) ;
}


SMbuf *smt_get_mbuf(struct s_smc *smc)
{
	register SMbuf	*mb ;

#ifndef	COMMON_MB_POOL
	mb = smc->os.hwm.mbuf_pool.mb_free ;
#else
	mb = mb_free ;
#endif
	if (mb) {
#ifndef	COMMON_MB_POOL
		smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ;
#else
		mb_free = mb->sm_next ;
#endif
		mb->sm_off = 8 ;
		mb->sm_use_count = 1 ;
	}
	DB_GEN(3, "get SMbuf: mb = %p", mb);
	return mb;	/* May be NULL */
}

void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{

	if (mb) {
		mb->sm_use_count-- ;
		DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
		/*
		 * If the use_count is != zero the MBuf is queued
		 * more than once and must not be queued into the
		 * free MBuf queue
		 */
		if (!mb->sm_use_count) {
			DB_GEN(3, "free SMbuf: mb = %p", mb);
#ifndef	COMMON_MB_POOL
			mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
			smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
			mb->sm_next = mb_free ;
			mb_free = mb ;
#endif
		}
	}
	else
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
}
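
/*
 * Usage sketch (illustrative only, compiled out): the SMbuf pool is a
 * simple free list with a per-buffer use count. smt_get_mbuf() may
 * return NULL, and every successful get must be balanced by an
 * smt_free_mbuf() once the buffer is no longer queued anywhere. The
 * wrapper name mbuf_roundtrip() is hypothetical.
 */
#if 0
static void mbuf_roundtrip(struct s_smc *smc)
{
	SMbuf	*mb ;

	if (!(mb = smt_get_mbuf(smc))) {
		return ;		/* pool exhausted, caller must cope */
	}
	/* fill mb->sm_data / mb->sm_len here */
	smt_free_mbuf(smc,mb) ;	/* use count drops to 0: back on free list */
}
#endif	/* usage sketch */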
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr)
 *	void mac_drv_repair_descr(smc)
 *
 * function	called from SMT	(HWM / hwmtm.c)
 *		The BMU is idle when this function is called.
 *		Mac_drv_repair_descr sets up the physical address
 *		for all receive and transmit queues where the BMU
 *		should continue.
 *		It may be that the BMU was reset during a fragmented
 *		transfer. In this case some fragments will never be
 *		completed by the BMU. The OWN bit of these fragments
 *		must be switched so that they are owned by the host.
 *
 *		Give a start command to the receive BMU.
 *		Start the transmit BMUs if transmit frames are pending.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair rx queues
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}

static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
{
	int i ;
	int tx_used ;
	u_long phys ;
	u_long tbctrl ;
	struct s_smt_fp_txd volatile *t ;

	SK_UNUSED(smc) ;

	t = queue->tx_curr_get ;
	tx_used = queue->tx_used ;
	for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
		t = t->txd_next ;
	}
	phys = le32_to_cpu(t->txd_ntdadr) ;

	t = queue->tx_curr_get ;
	while (tx_used) {
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
		tbctrl = le32_to_cpu(t->txd_tbctrl) ;

		if (tbctrl & BMU_OWN) {
			if (tbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(t->txd_ntdadr) ;
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		t = t->txd_next ;
		tx_used-- ;
	}
	return phys;
}

/*
 * Repairs the receive descriptor ring and returns the physical address
 * where the BMU should continue working.
 *
 *	o The physical address where the BMU was stopped has to be
 *	  determined. This is the next RxD after rx_curr_get with an OWN
 *	  bit set.
 *	o The BMU should start working at the beginning of the next frame.
 *	  RxDs with an OWN bit set but with a reset STF bit should be
 *	  skipped and owned by the driver (OWN = 0).
 */
static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
{
	int i ;
	int rx_used ;
	u_long phys ;
	u_long rbctrl ;
	struct s_smt_fp_rxd volatile *r ;

	SK_UNUSED(smc) ;

	r = queue->rx_curr_get ;
	rx_used = queue->rx_used ;
	for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
		r = r->rxd_next ;
	}
	phys = le32_to_cpu(r->rxd_nrdadr) ;

	r = queue->rx_curr_get ;
	while (rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		rbctrl = le32_to_cpu(r->rxd_rbctrl) ;

		if (rbctrl & BMU_OWN) {
			if (rbctrl & BMU_STF) {
				break ;		/* exit the loop */
			}
			else {
				/*
				 * repair the descriptor
				 */
				r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			}
		}
		phys = le32_to_cpu(r->rxd_nrdadr) ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		rx_used-- ;
	}
	return phys;
}
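
/*
 * Decision sketch (informal summary of the two repair loops above):
 * for each in-use descriptor, the OWN and STF bits select one of
 * three actions.
 *
 *	OWN=0		-> descriptor already completed, keep walking
 *	OWN=1, STF=0	-> orphaned fragment: clear OWN (host reclaims it)
 *	OWN=1, STF=1	-> start of the next frame: stop; the BMU will
 *			   continue at this descriptor's physical address
 */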

/*
	-------------------------------------------------------------
	INTERRUPT SERVICE ROUTINE:
	-------------------------------------------------------------
*/

/*
 * BEGIN_MANUAL_ENTRY(fddi_isr)
 *	void fddi_isr(smc)
 *
 * function	DOWNCALL	(drvsr.c)
 *		interrupt service routine, handles the interrupt requests
 *		generated by the FDDI adapter.
 *
 * NOTE:	The operating system dependent module must guarantee that the
 *		interrupts of the adapter are disabled when it calls fddi_isr.
 *
 *	About the USE_BREAK_ISR mechanism:
 *
 *	The main requirement of this mechanism is to force a timer IRQ when
 *	leaving process_receive() with leave_isr set. process_receive() may
 *	be called at any time from anywhere!
 *	To be sure we don't miss such an event we set 'force_irq' by default.
 *	We have to force a timer IRQ if 'smc->os.hwm.leave_isr' AND
 *	'force_irq' are set. 'force_irq' may be reset if a receive complete
 *	IRQ is pending.
 *
 * END_MANUAL_ENTRY
 */
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* ISR source */
	u_short		stu, stl ;
	SMbuf		*mb ;

#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN(7, "ISA = 0x%lx", is);

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ STU1(U/L) */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN(6, "Slow transmit complete");
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ STU2(U/L) */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN(6, "Slow receive complete");
				DB_GEN(7, "stl = %x : stu = %x", stl, stu);
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ STU3(U/L) */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN(6, "FORMAC Mode Register 3");
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* Timer 82C54-2 */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* Restricted Token Monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* Parity error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* Encoding error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* Encoding error async tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* Encoding error sync tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * Fast Tx complete Async/Sync Queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN(6, "Fast tx complete queue");
			/*
			 * clear IRQ, Note: no IRQ is lost, because
			 * we always service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * Fast Rx Complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN(6, "Fast receive complete");
			/* clear IRQ */
#ifndef	USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
#endif

		/* NOTE: when the isr is left, no rx is pending */
	}	/* end of interrupt source polling loop */

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}


/*
	-------------------------------------------------------------
	RECEIVE FUNCTIONS:
	-------------------------------------------------------------
*/
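
/*
 * Usage sketch (illustrative only, compiled out): the OS-specific
 * interrupt handler is expected to mask the adapter's interrupts
 * before calling fddi_isr() and to unmask them afterwards, as required
 * by the NOTE in the fddi_isr() manual entry. os_isr() is hypothetical;
 * CLI_FBI()/STI_FBI() stand for whatever masking primitives the port
 * provides.
 */
#if 0
static void os_isr(struct s_smc *smc)
{
	CLI_FBI() ;		/* disable adapter IRQs */
	fddi_isr(smc) ;		/* poll and service all IRQ sources */
	STI_FBI() ;		/* re-enable adapter IRQs */
}
#endif	/* usage sketch */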

#ifndef	NDIS_OS2
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode)
 *	void mac_drv_rx_mode(smc,mode)
 *
 * function	DOWNCALL	(fplus.c)
 *		Corresponding to the parameter mode, the operating system
 *		dependent module can activate several receive modes.
 *
 * para	mode	= 1:	RX_ENABLE_ALLMULTI	enable all multicasts
 *		= 2:	RX_DISABLE_ALLMULTI	disable "enable all multicasts"
 *		= 3:	RX_ENABLE_PROMISC	enable promiscuous
 *		= 4:	RX_DISABLE_PROMISC	disable promiscuous
 *		= 5:	RX_ENABLE_NSA		enable reception of all NSA frames
 *			(disabled after 'driver reset' & 'set station address')
 *		= 6:	RX_DISABLE_NSA		disable reception of all NSA frames
 *
 *		= 21:	RX_ENABLE_PASS_SMT	( see description )
 *		= 22:	RX_DISABLE_PASS_SMT	(  "       "      )
 *		= 23:	RX_ENABLE_PASS_NSA	(  "       "      )
 *		= 24:	RX_DISABLE_PASS_NSA	(  "       "      )
 *		= 25:	RX_ENABLE_PASS_DB	(  "       "      )
 *		= 26:	RX_DISABLE_PASS_DB	(  "       "      )
 *		= 27:	RX_DISABLE_PASS_ALL	(  "       "      )
 *		= 28:	RX_DISABLE_LLC_PROMISC	(  "       "      )
 *		= 29:	RX_ENABLE_LLC_PROMISC	(  "       "      )
 *
 *
 *		RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT
 *
 *		If the operating system dependent module activates the
 *		mode RX_ENABLE_PASS_SMT, the hardware module
 *		duplicates all SMT frames with the frame control
 *		FC_SMT_INFO and passes them to the LLC receive channel
 *		by calling mac_drv_rx_init.
 *		The SMT frames which are sent by the local SMT and the NSA
 *		frames whose A- and C-Indicator is not set are also duplicated
 *		and passed.
 *		The receive mode RX_DISABLE_PASS_SMT disables the passing
 *		of SMT frames.
 *
 *		RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA
 *
 *		If the operating system dependent module activates the
 *		mode RX_ENABLE_PASS_NSA, the hardware module
 *		duplicates all NSA frames with frame control FC_SMT_NSA
 *		and a set A-Indicator and passes them to the LLC
 *		receive channel by calling mac_drv_rx_init.
 *		All NSA frames which are sent by the local SMT
 *		are also duplicated and passed.
 *		The receive mode RX_DISABLE_PASS_NSA disables the passing
 *		of NSA frames with the A- or C-Indicator set.
 *
 * NOTE:	To ensure that the hardware module receives NSA frames with
 *		a reset A-Indicator, the operating system dependent module
 *		has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA
 *		before activating the RX_ENABLE_PASS_NSA mode and after every
 *		'driver reset' and 'set station address'.
 *
 *		RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB
 *
 *		If the operating system dependent module activates the
 *		mode RX_ENABLE_PASS_DB, direct BEACON frames
 *		(FC_BEACON frame control) are passed to the LLC receive
 *		channel by mac_drv_rx_init.
 *		The receive mode RX_DISABLE_PASS_DB disables the passing
 *		of direct BEACON frames.
 *
 *		RX_DISABLE_PASS_ALL
 *
 *		Disables all special receive modes. It is equivalent to
 *		calling mac_drv_set_rx_mode successively with the
 *		parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT,
 *		RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB.
 *
 *		RX_ENABLE_LLC_PROMISC
 *
 *		(default) all received LLC frames and all SMT/NSA/DBEACON
 *		frames depending on the setting of the flags
 *		PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the
 *		LLC layer
 *
 *		RX_DISABLE_LLC_PROMISC
 *
 *		all received SMT/NSA/DBEACON frames depending on the
 *		setting of the flags PASS_SMT/PASS_NSA/PASS_DBEACON
 *		will be delivered to the LLC layer.
 *		all received LLC frames with a directed address, Multicast
 *		or Broadcast address will be delivered to the LLC
 *		layer too.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_rx_mode(struct s_smc *smc, int mode)
{
	switch(mode) {
	case RX_ENABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = TRUE ;
		break ;
	case RX_DISABLE_PASS_SMT:
		smc->os.hwm.pass_SMT = FALSE ;
		break ;
	case RX_ENABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = TRUE ;
		break ;
	case RX_DISABLE_PASS_NSA:
		smc->os.hwm.pass_NSA = FALSE ;
		break ;
	case RX_ENABLE_PASS_DB:
		smc->os.hwm.pass_DB = TRUE ;
		break ;
	case RX_DISABLE_PASS_DB:
		smc->os.hwm.pass_DB = FALSE ;
		break ;
	case RX_DISABLE_PASS_ALL:
		smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ;
		smc->os.hwm.pass_DB = FALSE ;
		smc->os.hwm.pass_llc_promisc = TRUE ;
		mac_set_rx_mode(smc,RX_DISABLE_NSA) ;
		break ;
	case RX_DISABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = FALSE ;
		break ;
	case RX_ENABLE_LLC_PROMISC:
		smc->os.hwm.pass_llc_promisc = TRUE ;
		break ;
	case RX_ENABLE_ALLMULTI:
	case RX_DISABLE_ALLMULTI:
	case RX_ENABLE_PROMISC:
	case RX_DISABLE_PROMISC:
	case RX_ENABLE_NSA:
	case RX_DISABLE_NSA:
	default:
		mac_set_rx_mode(smc,mode) ;
		break ;
	}
}
#endif	/* ifndef NDIS_OS2 */
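
/*
 * Usage sketch (illustrative only, compiled out): enabling the
 * pass-NSA mode as described in the manual entry above. Per the NOTE,
 * RX_ENABLE_NSA must be activated first so that NSA frames with a
 * reset A-Indicator reach the hardware module at all. The wrapper
 * name enable_nsa_snooping() is hypothetical.
 */
#if 0
static void enable_nsa_snooping(struct s_smc *smc)
{
	mac_drv_rx_mode(smc,RX_ENABLE_NSA) ;	  /* receive all NSA frames */
	mac_drv_rx_mode(smc,RX_ENABLE_PASS_NSA) ; /* duplicate them to LLC */
}
#endif	/* usage sketch */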

/*
 * process receive queue
 */
void process_receive(struct s_smc *smc)
{
	int i ;
	int n ;
	int frag_count ;	/* number of RxDs of the curr rx buf */
	int used_frags ;	/* number of RxDs of the curr frame */
	struct s_smt_rx_queue *queue ;	/* points to the queue ctl struct */
	struct s_smt_fp_rxd volatile *r ;	/* rxd pointer */
	struct s_smt_fp_rxd volatile *rxd ;	/* first rxd of rx frame */
	u_long rbctrl ;		/* receive buffer control word */
	u_long rfsw ;		/* receive frame status word */
	u_short rx_used ;
	u_char far *virt ;
	char far *data ;
	SMbuf *mb ;
	u_char fc ;		/* Frame control */
	int len ;		/* Frame length */

	smc->os.hwm.detec_count = 0 ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	NDD_TRACE("RHxB",0,0,0) ;
	for ( ; ; ) {
		r = queue->rx_curr_get ;
		rx_used = queue->rx_used ;
		frag_count = 0 ;

#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			goto rx_end ;
		}
#endif
#ifdef	NDIS_OS2
		if (offDepth) {
			smc->os.hwm.rx_break = 1 ;
			goto rx_end ;
		}
		smc->os.hwm.rx_break = 0 ;
#endif
#ifdef	ODI2
		if (smc->os.hwm.rx_break) {
			goto rx_end ;
		}
#endif
		n = 0 ;
		do {
			DB_RX(5, "Check RxD %p for OWN and EOF", r);
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

			if (rbctrl & BMU_OWN) {
				NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
				DB_RX(4, "End of RxDs");
				goto rx_end ;
			}
			/*
			 * out of RxD detection
			 */
			if (!rx_used) {
				SK_BREAK() ;
				SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
				/* Either we don't have an RxD or all
				 * RxDs are filled. Therefore it's allowed
				 * to set the STOPPED flag */
				smc->hw.hw_state = STOPPED ;
				mac_drv_clear_rx_queue(smc) ;
				smc->hw.hw_state = STARTED ;
				mac_drv_fill_rxd(smc) ;
				smc->os.hwm.detec_count = 0 ;
				goto rx_end ;
			}
			rfsw = le32_to_cpu(r->rxd_rfsw) ;
			if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
				/*
				 * The BMU_STF bit is deleted, 1 frame is
				 * placed into more than 1 rx buffer
				 *
				 * skip frame by setting the rx len to 0
				 *
				 * if fragment count == 0
				 *	The missing STF bit belongs to the
				 *	current frame, search for the
				 *	EOF bit to complete the frame
				 * else
				 *	the fragment belongs to the next frame,
				 *	exit the loop and process the frame
				 */
				SK_BREAK() ;
				rfsw = 0 ;
				if (frag_count) {
					break ;
				}
			}
			n += rbctrl & 0xffff ;
			r = r->rxd_next ;
			frag_count++ ;
			rx_used-- ;
		} while (!(rbctrl & BMU_EOF)) ;
		used_frags = frag_count ;
		DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);

		/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
		/* BMU_ST_BUF will not be changed by the ASIC */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
			rx_used-- ;
		}
		DB_RX(5, "STF bit found");

		/*
		 * The received frame is finished for the process receive
		 */
		rxd = queue->rx_curr_get ;
		queue->rx_curr_get = r ;
		queue->rx_free += frag_count ;
		queue->rx_used = rx_used ;

		/*
		 * ASIC Errata no. 7 (STF - Bit Bug)
		 */
		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		smc->hw.fp.err_stats.err_valid++ ;
		smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

		/* the length of the data including the FC */
		len = (rfsw & RD_LENGTH) - 4 ;

		DB_RX(4, "frame length = %d", len);
		/*
		 * check the frame_length and all error flags
		 */
		if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
			if (rfsw & RD_S_MSRABT) {
				DB_RX(2, "Frame aborted by the FORMAC");
				smc->hw.fp.err_stats.err_abort++ ;
			}
			/*
			 * check frame status
			 */
			if (rfsw & RD_S_SEAC2) {
				DB_RX(2, "E-Indicator set");
				smc->hw.fp.err_stats.err_e_indicator++ ;
			}
			if (rfsw & RD_S_SFRMERR) {
				DB_RX(2, "CRC error");
				smc->hw.fp.err_stats.err_crc++ ;
			}
			if (rfsw & RX_FS_IMPL) {
				DB_RX(2, "Implementer frame");
				smc->hw.fp.err_stats.err_imp_frame++ ;
			}
			goto abort_frame ;
		}
		if (len > FDDI_RAW_MTU-4) {
			DB_RX(2, "Frame too long error");
			smc->hw.fp.err_stats.err_too_long++ ;
			goto abort_frame ;
		}
		/*
		 * SUPERNET 3 Bug: FORMAC delivers status words
		 * of aborted frames to the BMU
		 */
		if (len <= 4) {
			DB_RX(2, "Frame length = 0");
			goto abort_frame ;
		}

		if (len != (n-4)) {
			DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
			smc->os.hwm.rx_len_error++ ;
			goto abort_frame ;
		}

		/*
		 * Check SA == MA
		 */
		virt = (u_char far *) rxd->rxd_virt ;
		DB_RX(2, "FC = %x", *virt);
		if (virt[12] == MA[5] &&
		    virt[11] == MA[4] &&
		    virt[10] == MA[3] &&
		    virt[9] == MA[2] &&
		    virt[8] == MA[1] &&
		    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
			goto abort_frame ;
		}

		/*
		 * test if LLC frame
		 */
		if (rfsw & RX_FS_LLC) {
			/*
			 * if pass_llc_promisc is disabled and
			 * DA != Multicast and DA != Broadcast and DA != MA
			 * abort the frame
			 */
			if (!smc->os.hwm.pass_llc_promisc) {
				if(!(virt[1] & GROUP_ADDR_BIT)) {
					if (virt[6] != MA[5] ||
					    virt[5] != MA[4] ||
					    virt[4] != MA[3] ||
					    virt[3] != MA[2] ||
					    virt[2] != MA[1] ||
					    virt[1] != MA[0]) {
						DB_RX(2, "DA != MA and not multi- or broadcast");
						goto abort_frame ;
					}
				}
			}

			/*
			 * LLC frame received
			 */
			DB_RX(4, "LLC - receive");
			mac_drv_rx_complete(smc,rxd,frag_count,len) ;
		}
		else {
			if (!(mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_RX(4, "No SMbuf; receive terminated");
				goto abort_frame ;
			}
			data = smtod(mb,char *) - 1 ;

			/*
			 * copy the frame into a SMT_MBuf
			 */
#ifdef	USE_OS_CPY
			hwm_cpy_rxd2mb(rxd,data,len) ;
#else
			for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
				n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
				DB_RX(6, "cp SMT frame to mb: len = %d", n);
				memcpy(data,r->rxd_virt,n) ;
				data += n ;
			}
			data = smtod(mb,char *) - 1 ;
#endif
			fc = *(char *)mb->sm_data = *data ;
			mb->sm_len = len - 1 ;		/* len - fc */
			data++ ;

			/*
			 * SMT frame received
			 */
			switch(fc) {
			case FC_SMT_INFO :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				if (smc->os.hwm.pass_SMT) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
							    frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_SMT_NSA :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX(5, "SMT frame received");

				/* if pass_NSA is set, pass the NSA frame; or */
				/* if pass_SMT is set and the A-Indicator     */
				/* is not set, pass the NSA frame	      */
				if (smc->os.hwm.pass_NSA ||
					(smc->os.hwm.pass_SMT &&
					!(rfsw & A_INDIC))) {
					DB_RX(5, "pass SMT frame");
					mac_drv_rx_complete(smc, rxd,
							    frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_BEACON :
				if (smc->os.hwm.pass_DB) {
					DB_RX(5, "pass DB frame");
					mac_drv_rx_complete(smc, rxd,
							    frag_count,len) ;
				}
				else {
					DB_RX(5, "requeue RxD");
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}
				smt_free_mbuf(smc,mb) ;
				break ;
			default :
				/*
				 * unknown FC: abort the frame
				 */
				DB_RX(2, "unknown FC error");
				smt_free_mbuf(smc,mb) ;
				DB_RX(5, "requeue RxD");
				mac_drv_requeue_rxd(smc,rxd,frag_count) ;
				if ((fc & 0xf0) == FC_MAC)
					smc->hw.fp.err_stats.err_mac_frame++ ;
				else
					smc->hw.fp.err_stats.err_imp_frame++ ;

				break ;
			}
		}

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

		continue ;
/*--------------------------------------------------------------------*/
abort_frame:
		DB_RX(5, "requeue RxD");
		mac_drv_requeue_rxd(smc,rxd,frag_count) ;

		DB_RX(3, "next RxD is %p", queue->rx_curr_get);
		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
	}
rx_end:
#ifdef	ALL_RX_COMPLETE
	mac_drv_all_receives_complete(smc) ;
#endif
	return ;	/* lint bug: needs return to detect end of function */
}

static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
{
	u_char	fc ;

	DB_RX(4, "send a queued frame to the llc layer");
	smc->os.hwm.r.len = mb->sm_len ;
	smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
	fc = *smc->os.hwm.r.mb_pos ;
	(void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc,
		smc->os.hwm.r.mb_pos,(int)mb->sm_len) ;
	smt_free_mbuf(smc,mb) ;
}

/*
 * BEGIN_MANUAL_ENTRY(hwm_rx_frag)
 *	void hwm_rx_frag(smc,virt,phys,len,frame_status)
 *
 * function	MACRO		(hardware module, hwmtm.h)
 *		This function calls dma_master for preparing the
 *		system hardware for the DMA transfer and initializes
 *		the current RxD with the length and the physical and
 *		virtual address of the fragment. Furthermore, it sets the
 *		STF and EOF bits depending on the frame status byte,
 *		switches the OWN flag of the RxD, so that it is owned by
 *		the adapter, and issues an rx_start.
 *
 * para	virt	virtual pointer to the fragment
 *	len	the length of the fragment
 *	frame_status	status of the frame, see design description
 *
 * NOTE:	It is possible to call this function with a fragment length
 *		of zero.
 *
 * END_MANUAL_ENTRY
 */
void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_rxd volatile *r ;
	__le32	rbctrl;

	NDD_TRACE("RHfB",virt,len,frame_status) ;
	DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
	r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
	r->rxd_virt = virt ;
	r->rxd_rbadr = cpu_to_le32(phys) ;
	rbctrl = cpu_to_le32( (((__u32)frame_status &
		(FIRST_FRAG|LAST_FRAG))<<26) |
		(((u_long) frame_status & FIRST_FRAG) << 21) |
		BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
	r->rxd_rbctrl = rbctrl ;

	DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
	smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
	NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
}
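
/*
 * Usage sketch (illustrative only, compiled out): how an OS-specific
 * mac_drv_fill_rxd() implementation might post one single-fragment
 * receive buffer per free RxD. fill_one_rxd() and its parameters are
 * hypothetical; mac_drv_virt2phys() and hwm_rx_frag() are the routines
 * used by this module.
 */
#if 0
static void fill_one_rxd(struct s_smc *smc, char far *buf, int size)
{
	u_long	phys ;

	phys = mac_drv_virt2phys(smc,(void *)buf) ;
	/* one buffer = first and last fragment of its frame */
	hwm_rx_frag(smc,buf,phys,size,FIRST_FRAG|LAST_FRAG) ;
}
#endif	/* usage sketch */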

/*
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_rx_queue)
 *
 *	void mac_drv_clear_rx_queue(smc)
 *	struct s_smc *smc ;
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		mac_drv_clear_rx_queue is called by the OS-specific module
 *		after it has issued a card_stop.
 *		In this case, the frames in the receive queue are obsolete and
 *		should be removed. For removing mac_drv_clear_rx_queue
 *		calls dma_complete for each RxD and mac_drv_clear_rxd for each
 *		receive buffer.
 *
 * NOTE:	calling sequence card_stop:
 *		CLI_FBI(), card_stop(),
 *		mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
 *
 * NOTE:	The caller is responsible that the BMUs are idle
 *		when this function is called.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int frag_count ;
	int i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}

	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX(5, "clear_rx_queue");

	/*
	 * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX(5, "switch OWN bit of RxD 0x%p", r);
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX(5, "Check STF bit in %p", r);
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX(5, "STF bit found");
		next_rxd = r ;

		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX(5, "dma_complete for RxD %p", r);
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}

		DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
		      queue->rx_curr_get, frag_count);
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}


/*
	-------------------------------------------------------------
	SEND FUNCTIONS:
	-------------------------------------------------------------
*/

/*
 * BEGIN_MANUAL_ENTRY(hwm_tx_init)
 *	int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status)
 *
 * function	DOWN_CALL	(hardware module, hwmtm.c)
 *		hwm_tx_init checks if the frame can be sent through the
 *		corresponding send queue.
 *
 * para	fc	the frame control. Determines through which
 *		send queue the frame should be transmitted:
 *		0x50 - 0x57:	asynchronous LLC frame
 *		0xD0 - 0xD7:	synchronous LLC frame
 *		0x41, 0x4F:	SMT frame to the network
 *		0x42:		SMT frame to the network and to the local SMT
 *		0x43:		SMT frame to the local SMT
 *	frag_count	count of the fragments for this frame
 *	frame_len	length of the frame
 *	frame_status	status of the frame, the send queue bit is already
 *			specified
 *
 * return		frame_status
 *
 * END_MANUAL_ENTRY
 */
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;
	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	smc->os.hwm.tx_len = frame_len ;
	DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
		frame_status |= LAN_TX ;
	}
	else {
		switch (fc) {
		case FC_SMT_INFO :
		case FC_SMT_NSA :
			frame_status |= LAN_TX ;
			break ;
		case FC_SMT_LOC :
			frame_status |= LOC_TX ;
			break ;
		case FC_SMT_LAN_LOC :
			frame_status |= LAN_TX | LOC_TX ;
			break ;
		default :
			SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;
		}
	}
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX(2, "Ring is down: terminate LAN_TX");
	}
	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX(2, "Out of TxDs, terminate LAN_TX");
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX(2, "Out of TxDs, terminate LAN_TX");
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}
	DB_TX(3, "frame_status = %x", frame_status);
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return frame_status;
}
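
/*
 * Usage sketch (illustrative only, compiled out): the documented send
 * protocol is hwm_tx_init() once per frame, then hwm_tx_frag() once
 * per fragment. The single-fragment transmit below assumes the frame
 * sits in one buffer; os_xmit(), the QUEUE_A0 queue selection and the
 * use of mac_drv_virt2phys() for the physical address are illustrative
 * choices, not requirements of this module.
 */
#if 0
static int os_xmit(struct s_smc *smc, char far *frame, int len)
{
	u_char	fc = *frame ;		/* frame control is the 1st byte */
	int	frame_status ;

	frame_status = hwm_tx_init(smc,fc,1,len,QUEUE_A0) ;
	if (frame_status & (RING_DOWN|OUT_OF_TXD)) {
		return 1 ;		/* cannot send now */
	}
	hwm_tx_frag(smc,frame,mac_drv_virt2phys(smc,(void *)frame),len,
		frame_status | FIRST_FRAG | LAST_FRAG) ;
	return 0 ;
}
#endif	/* usage sketch */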

/*
 * BEGIN_MANUAL_ENTRY(hwm_tx_frag)
 *	void hwm_tx_frag(smc,virt,phys,len,frame_status)
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		If the frame should be sent to the LAN, this function calls
 *		dma_master, fills the current TxD with the virtual and the
 *		physical address, sets the STF and EOF bits dependent on
 *		the frame status, and requests the BMU to start the
 *		transmit.
 *		If the frame should be sent to the local SMT, an SMT_MBuf
 *		is allocated if the FIRST_FRAG bit is set in the frame_status.
 *		The fragment of the frame is copied into the SMT MBuf.
 *		The function smt_received_pack is called if the LAST_FRAG
 *		bit is set in the frame_status word.
 *
 * para	virt	virtual pointer to the fragment
 *	len	the length of the fragment
 *	frame_status	status of the frame, see design description
 *
 * return	nothing returned, no parameter is modified
 *
 * NOTE:	It is possible to invoke this macro with a fragment length
 *		of zero.
 *
 * END_MANUAL_ENTRY
 */
void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
		 int frame_status)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	__le32	tbctrl ;

	queue = smc->os.hwm.tx_p ;

	NDD_TRACE("THfB",virt,len,frame_status) ;
	/* Bug fix: AF / May 31 1999 (#missing)
	 * snmpinfo problem reported by IBM is caused by invalid
	 * t-pointer (txd) if LAN_TX is not set but LOC_TX only.
	 * Set: t = queue->tx_curr_put  here !
	 */
	t = queue->tx_curr_put ;

	DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
	if (frame_status & LAN_TX) {
		/* '*t' is already defined */
		DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
		t->txd_virt = virt ;
		t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
		t->txd_tbadr = cpu_to_le32(phys) ;
		tbctrl = cpu_to_le32((((__u32)frame_status &
			(FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
			BMU_OWN|BMU_CHECK |len) ;
		t->txd_tbctrl = tbctrl ;

#ifndef	AIX
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		outpd(queue->tx_bmu_ctl,CSR_START) ;
#else	/* ifndef AIX */
		DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
		if (frame_status & QUEUE_A0) {
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
		}
		else {
			outpd(ADDR(B0_XS_CSR),CSR_START) ;
		}
#endif
		queue->tx_free-- ;
		queue->tx_used++ ;
		queue->tx_curr_put = t->txd_next ;
		if (frame_status & LAST_FRAG) {
			smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		}
	}
	if (frame_status & LOC_TX) {
		DB_TX(3, "LOC_TX:");
		if (frame_status & FIRST_FRAG) {
			if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_TX(4, "No SMbuf; transmit terminated");
			}
			else {
				smc->os.hwm.tx_data =
					smtod(smc->os.hwm.tx_mb,char *) - 1 ;
#ifdef	USE_OS_CPY
#ifdef	PASS_1ST_TXD_2_TX_COMP
				hwm_cpy_txd2mb(t,smc->os.hwm.tx_data,
					smc->os.hwm.tx_len) ;
#endif
#endif
			}
		}
		if (smc->os.hwm.tx_mb) {
#ifndef	USE_OS_CPY
			DB_TX(3, "copy fragment into MBuf");
			memcpy(smc->os.hwm.tx_data,virt,len) ;
			smc->os.hwm.tx_data += len ;
#endif
			if (frame_status & LAST_FRAG) {
#ifdef	USE_OS_CPY
#ifndef	PASS_1ST_TXD_2_TX_COMP
				/*
				 * hwm_cpy_txd2mb(txd,data,len) copies 'len'
				 * bytes from the virtual pointer in 'rxd'
				 * to 'data'. The virtual pointer of the
				 * os-specific tx-buffer should be written
				 * in the LAST txd.
1705 */ 1706 hwm_cpy_txd2mb(t,smc->os.hwm.tx_data, 1707 smc->os.hwm.tx_len) ; 1708 #endif /* nPASS_1ST_TXD_2_TX_COMP */ 1709 #endif /* USE_OS_CPY */ 1710 smc->os.hwm.tx_data = 1711 smtod(smc->os.hwm.tx_mb,char *) - 1 ; 1712 *(char *)smc->os.hwm.tx_mb->sm_data = 1713 *smc->os.hwm.tx_data ; 1714 smc->os.hwm.tx_data++ ; 1715 smc->os.hwm.tx_mb->sm_len = 1716 smc->os.hwm.tx_len - 1 ; 1717 DB_TX(3, "pass LLC frame to SMT"); 1718 smt_received_pack(smc,smc->os.hwm.tx_mb, 1719 RD_FS_LOCAL) ; 1720 } 1721 } 1722 } 1723 NDD_TRACE("THfE",t,queue->tx_free,0) ; 1724 } 1725 1726 1727 /* 1728 * queues a receive for later send 1729 */ 1730 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb) 1731 { 1732 DB_GEN(4, "queue_llc_rx: mb = %p", mb); 1733 smc->os.hwm.queued_rx_frames++ ; 1734 mb->sm_next = (SMbuf *)NULL ; 1735 if (smc->os.hwm.llc_rx_pipe == NULL) { 1736 smc->os.hwm.llc_rx_pipe = mb ; 1737 } 1738 else { 1739 smc->os.hwm.llc_rx_tail->sm_next = mb ; 1740 } 1741 smc->os.hwm.llc_rx_tail = mb ; 1742 1743 /* 1744 * force an timer IRQ to receive the data 1745 */ 1746 if (!smc->os.hwm.isr_flag) { 1747 smt_force_irq(smc) ; 1748 } 1749 } 1750 1751 /* 1752 * get a SMbuf from the llc_rx_queue 1753 */ 1754 static SMbuf *get_llc_rx(struct s_smc *smc) 1755 { 1756 SMbuf *mb ; 1757 1758 if ((mb = smc->os.hwm.llc_rx_pipe)) { 1759 smc->os.hwm.queued_rx_frames-- ; 1760 smc->os.hwm.llc_rx_pipe = mb->sm_next ; 1761 } 1762 DB_GEN(4, "get_llc_rx: mb = 0x%p", mb); 1763 return mb; 1764 } 1765 1766 /* 1767 * queues a transmit SMT MBuf during the time were the MBuf is 1768 * queued the TxD ring 1769 */ 1770 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb) 1771 { 1772 DB_GEN(4, "_rx: queue_txd_mb = %p", mb); 1773 smc->os.hwm.queued_txd_mb++ ; 1774 mb->sm_next = (SMbuf *)NULL ; 1775 if (smc->os.hwm.txd_tx_pipe == NULL) { 1776 smc->os.hwm.txd_tx_pipe = mb ; 1777 } 1778 else { 1779 smc->os.hwm.txd_tx_tail->sm_next = mb ; 1780 } 1781 smc->os.hwm.txd_tx_tail = mb ; 1782 } 1783 1784 /* 1785 * get a SMbuf from the txd_tx_queue 1786 */ 1787 static SMbuf *get_txd_mb(struct s_smc *smc) 1788 { 1789 SMbuf *mb ; 1790 1791 if ((mb = smc->os.hwm.txd_tx_pipe)) { 1792 smc->os.hwm.queued_txd_mb-- ; 1793 smc->os.hwm.txd_tx_pipe = mb->sm_next ; 1794 } 1795 DB_GEN(4, "get_txd_mb: mb = 0x%p", mb); 1796 return mb; 1797 } 1798 1799 /* 1800 * SMT Send function 1801 */ 1802 void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) 1803 { 1804 char far *data ; 1805 int len ; 1806 int n ; 1807 int i ; 1808 int frag_count ; 1809 int frame_status ; 1810 SK_LOC_DECL(char far,*virt[3]) ; 1811 int frag_len[3] ; 1812 struct s_smt_tx_queue *queue ; 1813 struct s_smt_fp_txd volatile *t ; 1814 u_long phys ; 1815 __le32 tbctrl; 1816 1817 NDD_TRACE("THSB",mb,fc,0) ; 1818 DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc); 1819 1820 mb->sm_off-- ; /* set to fc */ 1821 mb->sm_len++ ; /* + fc */ 1822 data = smtod(mb,char *) ; 1823 *data = fc ; 1824 if (fc == FC_SMT_LOC) 1825 *data = FC_SMT_INFO ; 1826 1827 /* 1828 * determine the frag count and the virt addresses of the frags 1829 */ 1830 frag_count = 0 ; 1831 len = mb->sm_len ; 1832 while (len) { 1833 n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ; 1834 if (n >= len) { 1835 n = len ; 1836 } 1837 DB_TX(5, "frag: virt/len = 0x%p/%d", data, n); 1838 virt[frag_count] = data ; 1839 frag_len[frag_count] = n ; 1840 frag_count++ ; 1841 len -= n ; 1842 data += n ; 1843 } 1844 1845 /* 1846 * determine the frame status 1847 */ 1848 queue = smc->hw.fp.tx[QUEUE_A0] ; 1849 if (fc == FC_BEACON || fc == FC_SMT_LOC) { 

/*
 * SMT Send function
 */
void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
{
	char far *data ;
	int	len ;
	int	n ;
	int	i ;
	int	frag_count ;
	int	frame_status ;
	SK_LOC_DECL(char far,*virt[3]) ;
	int	frag_len[3] ;
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t ;
	u_long	phys ;
	__le32	tbctrl;

	NDD_TRACE("THSB",mb,fc,0) ;
	DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);

	mb->sm_off-- ;	/* set to fc */
	mb->sm_len++ ;	/* + fc */
	data = smtod(mb,char *) ;
	*data = fc ;
	if (fc == FC_SMT_LOC)
		*data = FC_SMT_INFO ;

	/*
	 * determine the frag count and the virt addresses of the frags
	 */
	frag_count = 0 ;
	len = mb->sm_len ;
	while (len) {
		n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ;
		if (n >= len) {
			n = len ;
		}
		DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
		virt[frag_count] = data ;
		frag_len[frag_count] = n ;
		frag_count++ ;
		len -= n ;
		data += n ;
	}

	/*
	 * determine the frame status
	 */
	queue = smc->hw.fp.tx[QUEUE_A0] ;
	if (fc == FC_BEACON || fc == FC_SMT_LOC) {
		frame_status = LOC_TX ;
	}
	else {
		frame_status = LAN_TX ;
		if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) ||
		    (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO)))
			frame_status |= LOC_TX ;
	}

	if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
		frame_status &= ~LAN_TX;
		if (frame_status) {
			DB_TX(2, "Ring is down: terminate LAN_TX");
		}
		else {
			DB_TX(2, "Ring is down: terminate transmission");
			smt_free_mbuf(smc,mb) ;
			return ;
		}
	}
	DB_TX(5, "frame_status = 0x%x", frame_status);

	if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
		mb->sm_use_count = 2 ;
	}

	if (frame_status & LAN_TX) {
		t = queue->tx_curr_put ;
		frame_status |= FIRST_FRAG ;
		for (i = 0; i < frag_count; i++) {
			DB_TX(5, "init TxD = 0x%p", t);
			if (i == frag_count-1) {
				frame_status |= LAST_FRAG ;
				t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
					(((__u32)(mb->sm_len-1)&3) << 27)) ;
			}
			t->txd_virt = virt[i] ;
			phys = dma_master(smc, (void far *)virt[i],
				frag_len[i], DMA_RD|SMT_BUF) ;
			t->txd_tbadr = cpu_to_le32(phys) ;
			tbctrl = cpu_to_le32((((__u32)frame_status &
				(FIRST_FRAG|LAST_FRAG)) << 26) |
				BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
			t->txd_tbctrl = tbctrl ;
#ifndef	AIX
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(queue->tx_bmu_ctl,CSR_START) ;
#else
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			outpd(ADDR(B0_XA_CSR),CSR_START) ;
#endif
			frame_status &= ~FIRST_FRAG ;
			queue->tx_curr_put = t = t->txd_next ;
			queue->tx_free-- ;
			queue->tx_used++ ;
		}
		smc->mib.m[MAC0].fddiMACTransmit_Ct++ ;
		queue_txd_mb(smc,mb) ;
	}

	if (frame_status & LOC_TX) {
		DB_TX(5, "pass Mbuf to LLC queue");
		queue_llc_rx(smc,mb) ;
	}

	/*
	 * We need to unqueue the free SMT_MBUFs here, because it may
	 * be that the SMT wants to send more than one frame for one
	 * down call
	 */
	mac_drv_clear_txd(smc) ;
	NDD_TRACE("THSE",t,queue->tx_free,frag_count) ;
}

/* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd)
 *	void mac_drv_clear_txd(smc)
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		mac_drv_clear_txd searches in both send queues for TxDs
 *		which were finished by the adapter. It calls dma_complete
 *		for each TxD. If the last fragment of an LLC frame is
 *		reached, it calls mac_drv_tx_complete to release the
 *		send buffer.
 *
 * return	nothing
 *
 * END_MANUAL_ENTRY
 */
static void mac_drv_clear_txd(struct s_smc *smc)
{
	struct s_smt_tx_queue *queue ;
	struct s_smt_fp_txd volatile *t1 ;
	struct s_smt_fp_txd volatile *t2 = NULL ;
	SMbuf *mb ;
	u_long	tbctrl ;
	int i ;
	int frag_count ;
	int n ;

	NDD_TRACE("THcB",0,0,0) ;
	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t1 = queue->tx_curr_get ;
		DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);

		for ( ; ; ) {
			frag_count = 0 ;

			do {
				DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
				DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
				tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));

				if (tbctrl & BMU_OWN || !queue->tx_used){
					DB_TX(4, "End of TxDs queue %d", i);
					goto free_next_queue ;	/* next queue */
				}
				t1 = t1->txd_next ;
				frag_count++ ;
			} while (!(tbctrl & BMU_EOF)) ;

			t1 = queue->tx_curr_get ;
			for (n = frag_count; n; n--) {
				tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
				dma_complete(smc,
					(union s_fp_descr volatile *) t1,
					(int) (DMA_RD |
					((tbctrl & BMU_SMT_TX) >> 18))) ;
				t2 = t1 ;
				t1 = t1->txd_next ;
			}

			if (tbctrl & BMU_SMT_TX) {
				mb = get_txd_mb(smc) ;
				smt_free_mbuf(smc,mb) ;
			}
			else {
#ifndef	PASS_1ST_TXD_2_TX_COMP
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
				mac_drv_tx_complete(smc,t2) ;
#else
				DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
				      queue->tx_curr_get);
				mac_drv_tx_complete(smc,queue->tx_curr_get) ;
#endif
			}
			queue->tx_curr_get = t1 ;
			queue->tx_free += frag_count ;
			queue->tx_used -= frag_count ;
		}
free_next_queue: ;
	}
	NDD_TRACE("THcE",0,0,0) ;
}
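
/*
 * Usage sketch (illustrative only, compiled out): the calling sequence
 * after a card_stop, as given in the NOTEs of the clear-queue manual
 * entries in this file. stop_adapter() is a hypothetical wrapper name.
 */
#if 0
static void stop_adapter(struct s_smc *smc)
{
	CLI_FBI() ;			/* disable adapter IRQs */
	card_stop(smc) ;		/* BMUs must be idle afterwards */
	mac_drv_clear_tx_queue(smc) ;	/* drop obsolete send frames */
	mac_drv_clear_rx_queue(smc) ;	/* drop obsolete receive frames */
}
#endif	/* usage sketch */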

/*
 * BEGIN_MANUAL_ENTRY(mac_drv_clear_tx_queue)
 *
 *	void mac_drv_clear_tx_queue(smc)
 *	struct s_smc *smc ;
 *
 * function	DOWNCALL	(hardware module, hwmtm.c)
 *		mac_drv_clear_tx_queue is called from the SMT when
 *		the RMT state machine has entered the ISOLATE state.
 *		This function is also called by the os-specific module
 *		after it has called the function card_stop().
 *		In this case, the frames in the send queues are obsolete and
 *		should be removed.
 *
 * note		calling sequence:
 *		CLI_FBI(), card_stop(),
 *		mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(),
 *
 * NOTE:	The caller is responsible that the BMUs are idle
 *		when this function is called.
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_clear_tx_queue(struct s_smc *smc)
{
	struct s_smt_fp_txd volatile *t ;
	struct s_smt_tx_queue *queue ;
	int		tx_used ;
	int		i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ;
		return ;
	}

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);

		/*
		 * switch the OWN bit of all pending frames to the host
		 */
		t = queue->tx_curr_get ;
		tx_used = queue->tx_used ;
		while (tx_used) {
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
			DB_TX(5, "switch OWN bit of TxD 0x%p", t);
			t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
			t = t->txd_next ;
			tx_used-- ;
		}
	}

	/*
	 * release all TxDs for both send queues
	 */
	mac_drv_clear_txd(smc) ;

	for (i = QUEUE_S; i <= QUEUE_A0; i++) {
		queue = smc->hw.fp.tx[i] ;
		t = queue->tx_curr_get ;

		/*
		 * write the phys pointer of the NEXT descriptor into the
		 * BMU's current address descriptor pointer and set
		 * tx_curr_get and tx_curr_put to this position
		 */
		if (i == QUEUE_S) {
			outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}
		else {
			outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
		}

		queue->tx_curr_put = queue->tx_curr_get->txd_next ;
		queue->tx_curr_get = queue->tx_curr_put ;
	}
}


/*
	-------------------------------------------------------------
	TEST FUNCTIONS:
	-------------------------------------------------------------
*/

#ifdef	DEBUG
/*
 * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev)
 *	void mac_drv_debug_lev(smc,flag,lev)
 *
 * function	DOWNCALL	(drvsr.c)
 *		To get special debug info the user can assign a debug level
 *		to any debug flag.
 *
 * para	flag	debug flag, possible values are:
 *		= 0:	reset all debug flags (the defined level is
 *			ignored)
 *		= 1:	debug.d_smtf
 *		= 2:	debug.d_smt
 *		= 3:	debug.d_ecm
 *		= 4:	debug.d_rmt
 *		= 5:	debug.d_cfm
 *		= 6:	debug.d_pcm
 *
 *		= 10:	debug.d_os.hwm_rx	(hardware module receive path)
 *		= 11:	debug.d_os.hwm_tx	(hardware module transmit path)
 *		= 12:	debug.d_os.hwm_gen	(hardware module general flag)
 *
 *	lev	debug level
 *
 * END_MANUAL_ENTRY
 */
void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev)
{
	switch(flag) {
	case (int)NULL:
		DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ;
		DB_P.d_cfm = 0 ;
		DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ;
#ifdef	SBA
		DB_P.d_sba = 0 ;
#endif
#ifdef	ESS
		DB_P.d_ess = 0 ;
#endif
		break ;
	case DEBUG_SMTF:
		DB_P.d_smtf = lev ;
		break ;
	case DEBUG_SMT:
		DB_P.d_smt = lev ;
		break ;
	case DEBUG_ECM:
		DB_P.d_ecm = lev ;
		break ;
	case DEBUG_RMT:
		DB_P.d_rmt = lev ;
		break ;
	case DEBUG_CFM:
		DB_P.d_cfm = lev ;
		break ;
	case DEBUG_PCM:
		DB_P.d_pcm = lev ;
		break ;
	case DEBUG_SBA:
#ifdef	SBA
		DB_P.d_sba = lev ;
#endif
		break ;
	case DEBUG_ESS:
#ifdef	ESS
		DB_P.d_ess = lev ;
#endif
		break ;
	case DB_HWM_RX:
		DB_P.d_os.hwm_rx = lev ;
		break ;
	case DB_HWM_TX:
		DB_P.d_os.hwm_tx = lev ;
		break ;
	case DB_HWM_GEN:
		DB_P.d_os.hwm_gen = lev ;
		break ;
	default:
		break ;
	}
}
#endif
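
/*
 * Usage sketch (illustrative only, compiled out): raising the receive
 * path verbosity of the hardware module at run time, as described in
 * the manual entry above. trace_rx_path() is a hypothetical wrapper.
 */
#if 0
static void trace_rx_path(struct s_smc *smc)
{
	mac_drv_debug_lev(smc,DB_HWM_RX,5) ;	/* verbose rx debugging */
	mac_drv_debug_lev(smc,DB_HWM_GEN,3) ;	/* moderate general output */
}
#endif	/* usage sketch */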