/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at available
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _LM5706_H
#define _LM5706_H


#include "bcmtype.h"
#include "debug.h"
#include "5706_reg.h"
#include "l2_defs.h"
#include "l5_defs.h"
#ifndef EXCLUDE_KQE_SUPPORT
#include "l4_kqe.h"
#endif
#ifndef L2_ONLY
#include "status_code.h"
#endif
#include "shmem.h"
#include "lm_desc.h"
#include "listq.h"
#include "lm.h"
#include "mm.h"
#ifndef L2_ONLY
#include "toe_ctx.h"
#endif
#ifdef UEFI
#include "5706_efi.h"
#endif
#ifdef SOLARIS
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif

#ifdef LINUX /*lediag*/
#include "../../mpd_driver_hybrid/pal2.h"
#endif

/* Firmware version record. */
typedef struct fw_version
{
    u8_t name[11];          /* Firmware name string. */
    u8_t namez;             /* Terminator byte for 'name'. */
    u32_t version;          /* Packed version number. */
} fw_version_t;

#ifndef PRIVATE_HSI_HEADER
#include "rxp_hsi.h"
#include "com_hsi.h"
#include "cp_hsi.h"
#include "txp_hsi.h"
#include "tpat_hsi.h"
#else
#include "hsi.h"
#endif

/*******************************************************************************
 * Constants.
 ******************************************************************************/

/* Hardware limits on the number of tx/rx chains. */
#define MAX_TX_CHAIN                12
#define MAX_RX_CHAIN                12
#define FIRST_RSS_RXQ               4

#ifndef NUM_RX_CHAIN
#define NUM_RX_CHAIN                1
#endif

#ifndef NUM_TX_CHAIN
#define NUM_TX_CHAIN                1
#endif

#if NUM_TX_CHAIN > MAX_TX_CHAIN
#error Exceeded maximum number of tx chains.
#endif

#if NUM_RX_CHAIN > MAX_RX_CHAIN
#error Exceeded maximum number of rx chains.
#endif

/* Number of bits must be 10 to 25. */
#ifndef LM_PAGE_BITS
#define LM_PAGE_BITS                12      /* 4K page. */
#endif

#define LM_PAGE_SIZE                (1 << LM_PAGE_BITS)
#define LM_PAGE_MASK                (LM_PAGE_SIZE - 1)


#ifndef CACHE_LINE_SIZE_MASK
#define CACHE_LINE_SIZE_MASK        0x3f
#endif


/* Number of packets per indication in calls to mm_indicate_rx/tx. */
#ifndef MAX_PACKETS_PER_INDICATION
#define MAX_PACKETS_PER_INDICATION  50
#endif


#ifndef MAX_FRAG_CNT
#define MAX_FRAG_CNT                33
#endif

/* The maximum is actually 0xffff which can be described by a BD. */
#define MAX_FRAGMENT_SIZE           0xf000


/* Context size.  Conversions between a context id (cid) and the
 * corresponding context memory address. */
#define CTX_SHIFT                   7
#define CTX_SIZE                    (1 << CTX_SHIFT)
#define CTX_MASK                    (CTX_SIZE - 1)
#define GET_CID_ADDR(_cid)          ((_cid) << CTX_SHIFT)
#define GET_CID(_cid_addr)          ((_cid_addr) >> CTX_SHIFT)

#define PHY_CTX_SHIFT               6
#define PHY_CTX_SIZE                (1 << PHY_CTX_SHIFT)
#define PHY_CTX_MASK                (PHY_CTX_SIZE - 1)
#define GET_PCID_ADDR(_pcid)        ((_pcid) << PHY_CTX_SHIFT)
#define GET_PCID(_pcid_addr)        ((_pcid_addr) >> PHY_CTX_SHIFT)

#define MB_KERNEL_CTX_SHIFT         8
#define MB_KERNEL_CTX_SIZE          (1 << MB_KERNEL_CTX_SHIFT)
#define MB_KERNEL_CTX_MASK          (MB_KERNEL_CTX_SIZE - 1)
/* #define MB_GET_CID_ADDR(_cid)    (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT)) */
#define MB_GET_CID_ADDR(_p, _c)     lm_mb_get_cid_addr(_p, _c)

#define MAX_CID_CNT                 0x4000
#define MAX_CID_ADDR                (GET_CID_ADDR(MAX_CID_CNT))
#define INVALID_CID_ADDR            0xffffffff


/* The size of the GRC window that appears in 32k-64k. */
#define GRC_WINDOW_BASE             0x8000
#define GRC_WINDOW_SIZE             0x8000


/* L2 rx frame header size.
 */
#define L2RX_FRAME_HDR_LEN          (sizeof(l2_fhdr_t)+2)


/* The number of bd's per page including the last bd which is used as
 * a pointer to the next bd page. */
#define BD_PER_PAGE                 (LM_PAGE_SIZE/sizeof(tx_bd_t))

/* The number of usable bd's per page. This number does not include
 * the last bd at the end of the page. */
#define MAX_BD_PER_PAGE             ((u32_t) (BD_PER_PAGE-1))


/* Buffer size of the statistics block, rounded up to a whole number of
 * cache lines. */
#define CHIP_STATS_BUFFER_SIZE      ((sizeof(statistics_block_t) + \
                                        CACHE_LINE_SIZE_MASK) & \
                                        ~CACHE_LINE_SIZE_MASK)

/* Buffer size of the status block, rounded up to a whole number of
 * cache lines. */
#define STATUS_BLOCK_BUFFER_SIZE    ((sizeof(status_blk_combined_t) + \
                                        CACHE_LINE_SIZE_MASK) & \
                                        ~CACHE_LINE_SIZE_MASK)


#define RSS_INDIRECTION_TABLE_SIZE  0x80        /* Maximum indirection table. */
#define RSS_HASH_KEY_SIZE           0x40        /* Maximum key size. */
#ifndef RSS_LOOKUP_TABLE_WA
#define RSS_LOOKUP_TABLE_WA         (4*12*256)  /* 0 to disable workaround. */
#endif


/* Quick context assignments. */
#define L2RX_CID_BASE               0           /* 0-15 */
#define L2TX_CID_BASE               16          /* 16-23 */
#define KWQ_CID                     24
#define KCQ_CID                     25
#define HCOPY_CID                   26          /* 26-27 */
#define GEN_CHAIN_CID               29

/* Xinan definitions. */
#define L2TX_TSS_CID_BASE           32          /* 32-43 */

/* MSIX definitions. */
#define IRQ_MODE_UNKNOWN            0
#define IRQ_MODE_LINE_BASED         1
#define IRQ_MODE_SIMD               2
#define IRQ_MODE_MSI_BASED          3
#define IRQ_MODE_MSIX_BASED         4
#define MAX_MSIX_HW_VEC             9
#define PCI_GRC_WINDOW2_BASE        0xc000
#define PCI_GRC_WINDOW3_BASE        0xe000
#define MSIX_TABLE_ADDR             0x318000
#define MSIX_PBA_ADDR               0x31c000

/*******************************************************************************
 * Macros.
 ******************************************************************************/

/* These macros have been moved to bcmtype.h.
 */
#if 0
/* Signed subtraction macros with no sign extending. */
#define S64_SUB(_a, _b)     ((s64_t) ((s64_t) (_a) - (s64_t) (_b)))
#define u64_SUB(_a, _b)     ((u64_t) ((s64_t) (_a) - (s64_t) (_b)))
#define S32_SUB(_a, _b)     ((s32_t) ((s32_t) (_a) - (s32_t) (_b)))
#define uS32_SUB(_a, _b)    ((u32_t) ((s32_t) (_a) - (s32_t) (_b)))
#define S16_SUB(_a, _b)     ((s16_t) ((s16_t) (_a) - (s16_t) (_b)))
#define u16_SUB(_a, _b)     ((u16_t) ((s16_t) (_a) - (s16_t) (_b)))
#define PTR_SUB(_a, _b)     ((u8_t *) (_a) - (u8_t *) (_b))
#endif

#ifndef OFFSETOF
#define OFFSETOF(_s, _m)    ((u32_t) PTR_SUB(&((_s *) 0)->_m, (u8_t *) 0))
#endif
#define WORD_ALIGNED_OFFSETOF(_s, _m)   (OFFSETOF(_s, _m) & ~0x03)


/* Compute the sets of attention bits that were asserted and deasserted
 * since the last acknowledgment, from the status block.
 *
 * STATIC void
 * get_attn_chng_bits(
 *     lm_device_t *pdev,
 *     u32_t *asserted_attns,
 *     u32_t *deasserted_attns); */
#define GET_ATTN_CHNG_BITS(_pdev, _asserted_attns_ptr, _deasserted_attns_ptr) \
    { \
        u32_t attn_chng; \
        u32_t attn_bits; \
        u32_t attn_ack; \
        \
        attn_bits = (_pdev)->vars.status_virt->deflt.status_attn_bits; \
        attn_ack = (_pdev)->vars.status_virt->deflt.status_attn_bits_ack; \
        \
        attn_chng = attn_bits ^ attn_ack; \
        \
        *(_asserted_attns_ptr) = attn_bits & attn_chng; \
        *(_deasserted_attns_ptr) = ~attn_bits & attn_chng; \
    }



/*******************************************************************************
 * Statistics.
 ******************************************************************************/

/* Driver-maintained transmit statistics. */
typedef struct _lm_tx_statistics_t
{
    lm_u64_t ipv4_lso_frames;
    lm_u64_t ipv6_lso_frames;
    lm_u64_t ip_cso_frames;
    lm_u64_t ipv4_tcp_udp_cso_frames;
    lm_u64_t ipv6_tcp_udp_cso_frames;
    u32_t aborted;
    u32_t no_bd;
    u32_t no_desc;
    u32_t no_coalesce_buf;
    u32_t no_map_reg;
} lm_tx_stats_t;


/* Driver-maintained receive error/drop statistics. */
typedef struct _lm_rx_statistics_t
{
    u32_t aborted;
    u32_t err;
    u32_t crc;
    u32_t phy_err;
    u32_t alignment;
    u32_t short_packet;
    u32_t giant_packet;
} lm_rx_stats_t;



/*******************************************************************************
 * Packet descriptor.
 ******************************************************************************/
#if defined(LM_NON_LEGACY_MODE_SUPPORT)
typedef struct _lm_packet_t
{
    /* Must be the first entry in this structure. */
    s_list_entry_t link;

    lm_status_t status;

    union _lm_pkt_info_t
    {
        struct _tx_pkt_info_t
        {
            lm_pkt_tx_info_t *tx_pkt_info;
            u16_t next_bd_idx;
            u16_t bd_used;
            u8_t span_pages;
            u8_t pad;
            u16_t pad1;
            u32_t size;
#if DBG
            tx_bd_t *dbg_start_bd;
            u16_t dbg_start_bd_idx;
            u16_t dbg_frag_cnt;
#endif
        } tx;

        struct _rx_pkt_info_t
        {
            lm_pkt_rx_info_t *rx_pkt_info;
            u16_t next_bd_idx;
            u16_t pad;
            u32_t hash_value;       /* RSS hash value. */
#if DBG
            rx_bd_t *dbg_bd;
            rx_bd_t *dbg_bd1;       /* when vmq header split is enabled */
#endif
        } rx;
    } u1;
} lm_packet_t;
#else
typedef struct _lm_packet_t
{
    /* Must be the first entry in this structure.
 */
    s_list_entry_t link;

    lm_status_t status;
    u32_t size;

    union _lm_pkt_info_t
    {
        struct _lm_tx_pkt_info_t
        {
            lm_tx_flag_t flags;

            u16_t vlan_tag;
            u16_t next_bd_idx;
            u16_t bd_used;
            u8_t span_pages;
            u8_t _pad;

            u16_t lso_mss;
            u16_t _pad2;

            u16_t lso_ip_hdr_len;
            u16_t lso_tcp_hdr_len;

#if DBG
            tx_bd_t *dbg_start_bd;
            u16_t dbg_start_bd_idx;
            u16_t dbg_frag_cnt;
#endif
        } tx;

        struct _lm_rx_pkt_info_t
        {
            lm_rx_flag_t flags;

            u16_t vlan_tag;
            u16_t ip_cksum;
            u16_t tcp_or_udp_cksum;
            u16_t next_bd_idx;

            u8_t *mem_virt;
            lm_address_t mem_phy;
            u32_t buf_size;

            u32_t hash_value;       /* RSS hash value. */

#if DBG
            rx_bd_t *dbg_bd;
#endif
        } rx;
    } u1;
} lm_packet_t;
#endif

DECLARE_FRAG_LIST_BUFFER_TYPE(lm_packet_frag_list_t, MAX_FRAG_CNT);



/*******************************************************************************
 * Configurable parameters for the hardware dependent module.
 ******************************************************************************/

typedef struct _lm_params_t
{
    /* This value is used by the upper module to inform the protocol
     * of the maximum transmit/receive packet size. Packet size
     * ranges from 1514-9014 bytes. This value does not include CRC32 and
     * VLAN tag. */
    u32_t mtu;

    /* Current node address. The MAC address is initially set to the
     * hardware address. This entry can be modified to allow the driver
     * to override the default MAC address. The new MAC address takes
     * effect after a driver reset.
 */
    u8_t mac_addr[8];

    /* Ring sizing, per chain. */
    u32_t l2_rx_desc_cnt[MAX_RX_CHAIN];
    u32_t l2_tx_bd_page_cnt[MAX_TX_CHAIN];
    u32_t l2_rx_bd_page_cnt[MAX_RX_CHAIN];

    u32_t l4_tx_bd_page_cnt;
    u32_t limit_l4_tx_bd_cnt;
    u32_t l4_rx_bd_page_cnt;
    u32_t limit_l4_rx_bd_cnt;

#ifndef EXCLUDE_KQE_SUPPORT
    /* Kernel work/completion queue sizing. */
    u32_t kwq_page_cnt;
    u32_t kcq_page_cnt;
    u32_t kcq_history_size;
    u32_t con_kcqe_history_size;
    u32_t con_kwqe_history_size;
#endif

    u32_t gen_bd_page_cnt;
    u32_t max_gen_buf_cnt;
    u32_t gen_buf_per_alloc;

    /* This parameter controls whether the buffered data (generic buffers)
     * should be copied to a staging buffer for indication. */
    u32_t copy_buffered_data;

    /* All the L2 receive buffers start at a cache line size aligned
     * address. This value determines the location of the L2 frame header
     * from the beginning of the receive buffer. The value must be a
     * multiple of 4. */
    u32_t rcv_buffer_offset;

    /* Enable a separate receive queue for receiving packets with
     * TCP SYN bit set. */
    u32_t enable_syn_rcvq;

    /* Buffer of hcopy descriptor to allocate for a connection. When
     * this value is 0, hcopy is disabled. */
    u32_t hcopy_desc_cnt;

    /* Number of pages used for the hcopy bd chain. */
    u32_t hcopy_bd_page_cnt;

    /* This parameter is only valid when enable_hcopy is enabled.
     * When enable_hcopy is enabled, a given connection will not
     * be able to process subsequent kcqe's after the copy_gen kcqe
     * until the hcopy request (for the copy_gen) has completed.
     * The subsequent kcqe's will be copied to a per-connection kcq
     * buffer. The parameter controls the size of this buffer. */
    u32_t buffered_kcqe_cnt;

    /* Size of the deferred kcqe queue. */
    u32_t deferred_kcqe_cnt;

    /* Various test/debug modes. Any validation failure will cause the
     * driver to write to misc.swap_diag0 with the corresponding flag.
     * The intention is to trigger the bus analyzer. */
    u32_t test_mode;
    #define TEST_MODE_DISABLED                  0x00
    #define TEST_MODE_OBSOLETE_0                0x01    /* was TEST_MODE_IKOS */
    #define TEST_MODE_OBSOLETE_1                0x02    /* was TEST_MODE_FPGA */
    #define TEST_MODE_VERIFY_RX_CRC             0x10
    #define TEST_MODE_RX_BD_TAGGING             0x20
    #define TEST_MODE_TX_BD_TAGGING             0x40
    #define TEST_MODE_LOG_REG_ACCESS            0x80
    #define TEST_MODE_SAVE_DUMMY_DMA_DATA       0x0100
    #define TEST_MODE_INIT_GEN_BUF_DATA         0x0200
    #define TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE 0x0400
    #define TEST_MODE_IGNORE_SHMEM_SIGNATURE    0x0800
    #define TEST_MODE_XDIAG_ISCSI               0x1000

    /* Advertised capabilities and requested medium. */
    lm_offload_t ofld_cap;
    lm_wake_up_mode_t wol_cap;
    lm_flow_control_t flow_ctrl_cap;
    lm_medium_t req_medium;

    u32_t selective_autoneg;
    #define SELECTIVE_AUTONEG_OFF                   0
    #define SELECTIVE_AUTONEG_SINGLE_SPEED          1
    #define SELECTIVE_AUTONEG_ENABLE_SLOWER_SPEEDS  2

    u32_t wire_speed;       /* Not valid on SERDES. */
    u32_t phy_addr;         /* PHY address. */

    /* Ways for the MAC to determine a link change. */
    u32_t phy_int_mode;
    #define PHY_INT_MODE_AUTO                   0
    #define PHY_INT_MODE_MI_INTERRUPT           1
    #define PHY_INT_MODE_LINK_READY             2
    #define PHY_INT_MODE_AUTO_POLLING           3

    /* Ways for the driver to get the link change event. */
    u32_t link_chng_mode;
    #define LINK_CHNG_MODE_AUTO                 0
    #define LINK_CHNG_MODE_USE_STATUS_REG       1
    #define LINK_CHNG_MODE_USE_STATUS_BLOCK     2

    /* Coalescing parameters.
 */
    u32_t hc_timer_mode;
    #define HC_COLLECT_MODE         0x0000
    #define HC_RX_TIMER_MODE        0x0001
    #define HC_TX_TIMER_MODE        0x0002
    #define HC_COM_TIMER_MODE       0x0004
    #define HC_CMD_TIMER_MODE       0x0008
    #define HC_TIMER_MODE           0x000f

    /* Host coalescing trip points and tick values. */
    u32_t ind_comp_limit;
    u32_t tx_quick_cons_trip;
    u32_t tx_quick_cons_trip_int;
    u32_t rx_quick_cons_trip;
    u32_t rx_quick_cons_trip_int;
    u32_t comp_prod_trip;
    u32_t comp_prod_trip_int;
    u32_t tx_ticks;
    u32_t tx_ticks_int;
    u32_t com_ticks;
    u32_t com_ticks_int;
    u32_t cmd_ticks;
    u32_t cmd_ticks_int;
    u32_t rx_ticks;
    u32_t rx_ticks_int;
    u32_t stats_ticks;

    /* Xinan per-processor HC configuration. */
    u32_t psb_tx_cons_trip;
    u32_t psb_tx_ticks;
    u32_t psb_rx_cons_trip;
    u32_t psb_rx_ticks;
    u32_t psb_comp_prod_trip;
    u32_t psb_com_ticks;
    u32_t psb_cmd_ticks;
    u32_t psb_period_ticks;

    u32_t enable_fir;
    u32_t num_rchans;
    u32_t num_wchans;
    u32_t one_tdma;
    u32_t ping_pong_dma;
    u32_t serdes_pre_emphasis;
    u32_t tmr_reload_value1;

    u32_t keep_vlan_tag;

    /* Remote phy configuration. */
    u32_t enable_remote_phy;
    u32_t rphy_req_medium;
    u32_t rphy_flow_ctrl_cap;
    u32_t rphy_selective_autoneg;
    u32_t rphy_wire_speed;

    u32_t bin_mq_mode;
    u32_t validate_l4_data;

    /* disable PCIe non-FATAL error reporting */
    u32_t disable_pcie_nfr;

    // setting for L2 flow control 0 for disable 1 for enable:
    u32_t fw_flow_control;
    // This parameter dictates how long to wait before dropping L2 packet
    // due to insufficient posted buffers
    // 0 mean no waiting before dropping, 0xFFFF means maximum wait
    u32_t fw_flow_control_wait;
    // 8 lsb represents watermark for flow control, 0 is disable
    u32_t fw_flow_control_watermarks;

    u32_t ena_large_grc_timeout;

    /* 0 causes the driver to report the current flow control configuration.
     * 1 causes the driver to report the flow control autoneg result. */
    u32_t flow_control_reporting_mode;
} lm_params_t;



/*******************************************************************************
 * Device NVM info -- The native strapping does not support the new parts, the
 * software needs to reconfigure for them.
 ******************************************************************************/

/* Flash device geometry/access parameters. */
typedef struct _flash_spec_t
{
    u32_t buffered;
    u32_t shift_bits;
    u32_t page_size;
    u32_t addr_mask;
    u32_t total_size;
} flash_spec_t;


/*******************************************************************************
 * Device info.
 ******************************************************************************/

typedef struct _lm_hardware_info_t
{
    /* PCI info. */
    u16_t vid;
    u16_t did;
    u16_t ssid;
    u16_t svid;

    u8_t irq;
    u8_t int_pin;
    u8_t latency_timer;
    u8_t cache_line_size;
    u8_t rev_id;
    u8_t _pad[3];

    u8_t mac_id;            /* 5709 function 0 or 1. */
    u8_t bin_size;          /* 5709 bin size in term of context pages. */
    u16_t first_l4_l5_bin;  /* 5709 first bin. */

    lm_address_t mem_base;
    u32_t bar_size;

    /* Device info. */
    u32_t phy_id;               /* (phy_reg2 << 16) | phy_reg3 */
    u8_t mac_addr[8];           /* Hardware MAC address. */
    u8_t iscsi_mac_addr[8];     /* Hardware MAC address for iSCSI. */

    u32_t shmem_base;           /* Firmware share memory base addr.
                                 */

    u32_t chip_id;      /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
    #define CHIP_NUM(_p)            (((_p)->hw_info.chip_id) & 0xffff0000)
    #define CHIP_NUM_5706           0x57060000
    #define CHIP_NUM_5708           0x57080000
    #define CHIP_NUM_5709           0x57090000
    #define CHIP_NUM_57728          0x00000000

    #define CHIP_REV(_p)            (((_p)->hw_info.chip_id) & 0x0000f000)
    #define CHIP_REV_Ax             0x00000000
    #define CHIP_REV_Bx             0x00001000
    #define CHIP_REV_Cx             0x00002000
    #define CHIP_REV_FPGA           0x0000f000
    #define CHIP_REV_IKOS           0x0000e000

    #define CHIP_METAL(_p)          (((_p)->hw_info.chip_id) & 0x00000ff0)
    #define CHIP_BONDING(_p)        (((_p)->hw_info.chip_id) & 0x0000000f)

    #define CHIP_ID(_p)             (((_p)->hw_info.chip_id) & 0xfffffff0)
    #define CHIP_ID_5706_A0         0x57060000
    #define CHIP_ID_5706_A1         0x57060010
    #define CHIP_ID_5706_FPGA       0x5706f000
    #define CHIP_ID_5706_IKOS       0x5706e000
    #define CHIP_ID_5708_A0         0x57080000
    #define CHIP_ID_5708_B0         0x57081000
    #define CHIP_ID_5708_B1         0x57081010
    #define CHIP_ID_5708_FPGA       0x5708f000
    #define CHIP_ID_5708_IKOS       0x5708e000
    #define CHIP_ID_5709_A0         0x57090000
    #define CHIP_ID_5709_A1         0x57090010
    #define CHIP_ID_5709_B0         0x57091000
    #define CHIP_ID_5709_B1         0x57091010
    #define CHIP_ID_5709_B2         0x57091020
    #define CHIP_ID_5709_FPGA       0x5709f000
    #define CHIP_ID_5709_IKOS       0x5709e000

    #define CHIP_BOND_ID(_p)        (((_p)->hw_info.chip_id) & 0xf)

    /* A serdes chip will have the first bit of the bond id set. */
    #define CHIP_BOND_ID_SERDES_BIT 0x01

    /* HW config from nvram. */
    u32_t nvm_hw_config;

    /* Offload connection limits. */
    u32_t max_toe_conn;
    u32_t max_iscsi_conn;
    u32_t max_iscsi_pending_tasks;

    /* Bus info.
 */
    u8_t bus_mode;
    #define BUS_MODE_PCI            0
    #define BUS_MODE_PCIX           1
    #define BUS_MODE_PCIE           2

    u8_t bus_width;
    #define BUS_WIDTH_32_BIT        32
    #define BUS_WIDTH_64_BIT        64

    u16_t bus_speed;
    #define BUS_SPEED_33_MHZ        33
    #define BUS_SPEED_50_MHZ        50
    #define BUS_SPEED_66_MHZ        66
    #define BUS_SPEED_100_MHZ       100
    #define BUS_SPEED_133_MHZ       133

    /* EPB info. Only valid for 5708. */
    u8_t pcie_bus_num;

    u8_t pcie_max_width;
    u8_t pcie_width;
    #define PCIE_WIDTH_1            1
    #define PCIE_WIDTH_2            2
    #define PCIE_WIDTH_4            4
    #define PCIE_WIDTH_8            8
    #define PCIE_WIDTH_16           16
    #define PCIE_WIDTH_32           32

    u8_t _unused_;

    u16_t pcie_max_speed;
    u16_t pcie_speed;
    #define PCIE_SPEED_2_5_G        25
    #define PCIE_SPEED_5_G          50

    /* Flash info. */
    flash_spec_t flash_spec;
} lm_hardware_info_t;



/*******************************************************************************
 * Device state variables.
 ******************************************************************************/

/* One physically-contiguous memory block (virtual + physical address). */
typedef struct _phy_mem_block_t
{
    lm_address_t start_phy;
    u8_t *start;
    u32_t size;
} phy_mem_block_t;


typedef struct _lm_variables_t
{
#ifdef SOLARIS
    ddi_acc_handle_t dmaRegAccHandle;
#endif
    volatile reg_space_t *regview;

    volatile status_blk_combined_t *status_virt;
    lm_address_t status_phy;

    lm_status_t link_status;
    lm_medium_t medium;
    lm_flow_control_t flow_control;

    /* remote phy status. */
    u8_t rphy_status;
    #define RPHY_STATUS_ACTIVE          0x01
    #define RPHY_STATUS_MODULE_PRESENT  0x02

    u8_t enable_cu_rate_limiter;

    u16_t bcm5706s_tx_drv_cur;

    volatile statistics_block_t *stats_virt;
    lm_address_t stats_phy;

    u16_t fw_wr_seq;
    u8_t fw_timed_out;

    /* Serdes autonegotiation fallback.
     * For a serdes medium,
     * if we cannot get link via autonegotiation, we'll force
     * the speed to get link. */
    u8_t serdes_fallback_select;
    u8_t serdes_fallback_status;
    #define SERDES_FALLBACK_NONE        0
    #define SERDES_FALLBACK_1G          1
    #define SERDES_FALLBACK_2_5G        2

    /* This flag is set if the cable is attached when there
     * is no link. The upper module could check this flag to
     * determine if there is a need to wait for link. */
    u8_t cable_is_attached;

    /* Write sequence for driver pulse. */
    u16_t drv_pulse_wr_seq;

    /* 5708 pre-emphasis. */
    u32_t serdes_pre_emphasis;

    u32_t interrupt_mode;

    u32_t cu_mbuf_cnt;      /* 5709 only */

    u32_t hw_filter_ctx_offset;

    /* 5709 backing store context memory. */
    #ifndef MAX_CTX
    #define MAX_CTX             (16 * 1024)
    #endif
    #define ONE_CTX_SIZE        0x80
    #define NUM_CTX_MBLKS       16
    #define CTX_MBLK_SIZE       (128 * 1024)
    phy_mem_block_t ctx_mem[NUM_CTX_MBLKS];
} lm_variables_t;



/*******************************************************************************
 * Transmit info.
 ******************************************************************************/

typedef struct _lm_tx_chain_t
{
    u32_t idx;
    #define TX_CHAIN_IDX0       0
    #define TX_CHAIN_IDX1       1
    #define TX_CHAIN_IDX2       2
    #define TX_CHAIN_IDX3       3
    #define TX_CHAIN_IDX4       4
    #define TX_CHAIN_IDX5       5
    #define TX_CHAIN_IDX6       6
    #define TX_CHAIN_IDX7       7
    #define TX_CHAIN_IDX8       8
    #define TX_CHAIN_IDX9       9
    #define TX_CHAIN_IDX10      10
    #define TX_CHAIN_IDX11      11

    u8_t cpu_num;
    u8_t cpu_num_valid;
    u16_t reserve2;
    /* This is a contiguous memory block of params.l2_tx_bd_page_cnt pages
     * used for L2 tx_bd chain. The BD chain is arranged as a circular
     * chain where the last BD entry of a page points to the next page,
     * and the last BD entry of the last page points to the first.
 */
    tx_bd_t *bd_chain_virt;
    lm_address_t bd_chain_phy;

    u32_t cid_addr;                     /* Context address of this chain. */
    u16_t prod_idx;                     /* Driver producer index. */
    u16_t con_idx;                      /* Driver consumer index. */
    tx_bd_t *prod_bd;                   /* BD at the current producer index. */
    u32_t prod_bseq;                    /* Running byte sequence of produced data. */
    volatile u16_t *hw_con_idx_ptr;     /* Hardware consumer index in status block. */
    u16_t bd_left;                      /* Number of free BDs in the chain. */

    s_list_t active_descq;              /* Packets posted and awaiting completion. */
} lm_tx_chain_t;


typedef struct _lm_tx_info_t
{
    lm_tx_chain_t chain[MAX_TX_CHAIN];

    u32_t num_txq;
    u32_t cu_idx;

    lm_tx_stats_t stats;
} lm_tx_info_t;



/*******************************************************************************
 * Receive info.
 ******************************************************************************/

typedef struct _lm_rx_chain_t
{
    u32_t idx;
    #define RX_CHAIN_IDX0       0
    #define RX_CHAIN_IDX1       1
    #define RX_CHAIN_IDX2       2
    #define RX_CHAIN_IDX3       3
    #define RX_CHAIN_IDX4       4
    #define RX_CHAIN_IDX5       5
    #define RX_CHAIN_IDX6       6
    #define RX_CHAIN_IDX7       7
    #define RX_CHAIN_IDX8       8
    #define RX_CHAIN_IDX9       9
    #define RX_CHAIN_IDX10      10
    #define RX_CHAIN_IDX11      11
    #define RX_CHAIN_IDX12      12
    #define RX_CHAIN_IDX13      13
    #define RX_CHAIN_IDX14      14
    #define RX_CHAIN_IDX15      15

    u8_t cpu_num;       /* place holder for cpu affinity(msix) */
    u8_t cpu_num_valid;
    u16_t max_pkt_len;
    /* This is a contiguous memory block of params.l2_rx_bd_page_cnt pages
     * used for rx completion. The BD chain is arranged as a circular
     * chain where the last BD entry of a page points to the next page,
     * and the last BD entry of the last page points to the first.
 */
    rx_bd_t *bd_chain_virt;
    lm_address_t bd_chain_phy;

    u32_t cid_addr;                     /* Context address of this chain. */
    u16_t prod_idx;                     /* Driver producer index. */
    u16_t con_idx;                      /* Driver consumer index. */
    u16_t hw_con_idx;
    u16_t _pad;

    rx_bd_t *prod_bd;                   /* BD at the current producer index. */
    u32_t prod_bseq;                    /* Running byte sequence of produced data. */
    volatile u16_t *hw_con_idx_ptr;     /* Hardware consumer index in status block. */
    u16_t bd_left;                      /* Number of free BDs in the chain. */

    u32_t vmq_lookahead_size;
    s_list_t free_descq;                /* legacy mode variable */
    s_list_t active_descq;
} lm_rx_chain_t;


typedef struct _lm_rx_info_t
{
    lm_rx_chain_t chain[MAX_RX_CHAIN];

    u32_t num_rxq;

    #define RX_FILTER_USER_IDX0     0
    #define RX_FILTER_USER_IDX1     1
    #define RX_FILTER_USER_IDX2     2
    #define RX_FILTER_USER_IDX3     3
    #define MAX_RX_FILTER_USER_CNT  4
    lm_rx_mask_t mask[MAX_RX_FILTER_USER_CNT];

    lm_rx_stats_t stats;

#ifndef EXCLUDE_RSS_SUPPORT
    u32_t rss_tbl_size;
    u8_t *rss_ind_table_virt;
    lm_address_t rss_ind_table_phy;
#endif
} lm_rx_info_t;



#ifndef EXCLUDE_KQE_SUPPORT
/*******************************************************************************
 * Kernel work and completion queue info.
 ******************************************************************************/

typedef struct _lm_kq_info_t
{
    u32_t kwq_cid_addr;
    u32_t kcq_cid_addr;

    /* Kernel work queue (driver -> firmware). */
    kwqe_t *kwq_virt;
    kwqe_t *kwq_prod_qe;
    kwqe_t *kwq_con_qe;
    kwqe_t *kwq_last_qe;
    u16_t kwq_prod_idx;
    u16_t kwq_con_idx;
    u32_t kwqe_left;

    /* Kernel completion queue (firmware -> driver). */
    kcqe_t *kcq_virt;
    kcqe_t *kcq_con_qe;
    kcqe_t *kcq_last_qe;
    u16_t kcq_con_idx;
    u16_t history_kcq_con_idx;
    kcqe_t *history_kcq_con_qe;

    void *kwq_pgtbl_virt;
    lm_address_t kwq_pgtbl_phy;
    lm_address_t kwq_phy;

    void *kcq_pgtbl_virt;
    lm_address_t kcq_pgtbl_phy;
    lm_address_t kcq_phy;

    /* Statistics.
 */
    u32_t no_kwq_bd_left;
} lm_kq_info_t;
#endif /* EXCLUDE_KQE_SUPPORT */



/*******************************************************************************
 * Include the l4 offload header file.
 ******************************************************************************/

#if INCLUDE_OFLD_SUPPORT
#include "lm_ofld.h"
#else
/* This structure is only used as a placeholder and it is not referenced. */
typedef struct _lm_offload_info_t
{
    void *unused;
} lm_offload_info_t;
#endif



/*******************************************************************************
 * Main device block.
 ******************************************************************************/

typedef enum
{
    OS_TYPE_UNKNOWN = 0,
    OS_TYPE_W2K     = 1,
    OS_TYPE_WXP     = 2,
    OS_TYPE_W2K3    = 3,
    OS_TYPE_VISTA   = 4,
    OS_TYPE_W2K8    = 5,
    OS_TYPE_WIN7    = 6,
    OS_TYPE_WIN8    = 7,
} lm_os_type_t;


/* Top-level per-device state, aggregating all of the above. */
typedef struct _lm_device_t
{
    d_list_entry_t link;    /* Link for the device list. */

    u32_t ver_num;          /* major:8 minor:8 rel:8 fix:8 */
    u8_t ver_str[32];       /* null terminated version string. */

    lm_os_type_t os_type;

    lm_variables_t vars;
    lm_tx_info_t tx_info;
    lm_rx_info_t rx_info;
#ifndef EXCLUDE_KQE_SUPPORT
    lm_kq_info_t kq_info;
#endif
    lm_offload_info_t ofld;
    lm_hardware_info_t hw_info;
    lm_params_t params;
    lm_mc_table_t mc_table;
    lm_nwuf_list_t nwuf_list;

#ifdef UEFI
    EFI_PCI_IO_PROTOCOL *PciIoFuncs;
#endif

    /* Statistics. */
    u32_t chip_reset_cnt;
    u32_t fw_timed_out_cnt;
} lm_device_t;



/*******************************************************************************
 * Functions exported between file modules.
 ******************************************************************************/

/* Write a PHY register via the MII interface. */
lm_status_t
lm_mwrite(
    lm_device_t *pdev,
    u32_t phy_addr,
    u32_t phy_reg,
    u32_t val);

/* Read a PHY register via the MII interface. */
lm_status_t
lm_mread(
    lm_device_t *pdev,
    u32_t phy_addr,
    u32_t phy_reg,
    u32_t *ret_val);

u32_t
lm_nvram_query(
    lm_device_t *pdev,
    u8_t reset_flash_block,
    u8_t no_hw_mod);

void
lm_nvram_init(
    lm_device_t *pdev,
    u8_t reset_flash_block);

lm_status_t
lm_nvram_read(
    lm_device_t *pdev,
    u32_t offset,
    u32_t *ret_buf,
    u32_t buf_size);        /* Must be a multiple of 4. */

lm_status_t
lm_nvram_write(
    lm_device_t *pdev,
    u32_t offset,
    u32_t *data_buf,
    u32_t buf_size);        /* Must be a multiple of 4. */

/* Load firmware and start the selected on-chip processors. */
void
lm_init_cpus(
    lm_device_t *pdev,
    u32_t cpu_mask);
#define CPU_RV2P_1      0x00000001
#define CPU_RV2P_2      0x00000002
#define CPU_RXP         0x00000004
#define CPU_TXP         0x00000008
#define CPU_TPAT        0x00000010
#define CPU_COM         0x00000020
#define CPU_CP          0x00000040
#define CPU_ALL         0xffffffff

/* Indirect register read/write. */
void
lm_reg_rd_ind(
    lm_device_t *pdev,
    u32_t offset,
    u32_t *ret);

void
lm_reg_wr_ind(
    lm_device_t *pdev,
    u32_t offset,
    u32_t val);

/* Context memory read/write. */
void
lm_ctx_wr(
    lm_device_t *pdev,
    u32_t cid_addr,
    u32_t offset,
    u32_t val);

u32_t
lm_ctx_rd(
    lm_device_t *pdev,
    u32_t cid_addr,
    u32_t offset);

void
lm_setup_bd_chain_ring(
    u8_t *mem_virt,
    lm_address_t mem_phy,
    u32_t page_cnt);

lm_status_t
lm_init_remote_phy(
    lm_device_t *pdev,
    lm_link_settings_t *local_link,
    lm_link_settings_t *rphy_link);

lm_status_t
lm_init_mac_link(
    lm_device_t *pdev);

#ifndef EXCLUDE_KQE_SUPPORT
u32_t
lm_submit_kernel_wqes(
    lm_device_t *pdev,
    kwqe_t *wqes[],
    u32_t num_wqes);

u32_t
lm_get_kernel_cqes(
    lm_device_t *pdev,
    kcqe_t *cqe_ptr[],
    u32_t ptr_cnt);

u8_t
lm_ack_kernel_cqes(
    lm_device_t *pdev,
    u32_t num_cqes);

void
lm_ack_completed_wqes(
    lm_device_t *pdev);
#endif /* EXCLUDE_KQE_SUPPORT */

/* Reset the chip and wait for firmware acknowledgment. */
u8_t
fw_reset_sync(
    lm_device_t *pdev,
    lm_reason_t reason,
    u32_t msg_data,
    u32_t fw_ack_timeout_us);   /* timeout in microseconds. */

/* Block register read/write, direct and indirect. */
void
lm_reg_rd_blk(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *buf_ptr,
    u32_t u32t_cnt);

void
lm_reg_rd_blk_ind(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *buf_ptr,
    u32_t u32t_cnt);

void
lm_reg_wr_blk(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *data_ptr,
    u32_t u32t_cnt);

void
lm_reg_wr_blk_ind(
    lm_device_t *pdev,
    u32_t reg_offset,
    u32_t *data_ptr,
    u32_t u32t_cnt);

lm_status_t
lm_submit_fw_cmd(
    lm_device_t *pdev,
    u32_t drv_msg);

lm_status_t
lm_last_fw_cmd_status(
    lm_device_t *pdev);

#ifndef EXCLUDE_RSS_SUPPORT

#if defined(LM_NON_LEGACY_MODE_SUPPORT)
lm_status_t
lm_enable_rss(
    lm_device_t *pdev,
    lm_rss_hash_t hash_type,
    PROCESSOR_NUMBER *indirection_table,
    u32_t table_size,
    u8_t *hash_key,
    u32_t key_size,
    u8_t *cpu_tbl,
    u8_t *rss_qidx_tbl);
#else
lm_status_t
lm_enable_rss(
    lm_device_t *pdev,
    lm_rss_hash_t hash_type,
    u8_t *indirection_table,
    u32_t table_size,
    u8_t *hash_key,
    u32_t key_size);
#endif

lm_status_t
lm_disable_rss(
    lm_device_t *pdev);
#endif /* EXCLUDE_RSS_SUPPORT */

lm_medium_t
lm_get_medium(
    lm_device_t *pdev);

u32_t
lm_mb_get_cid_addr(
    lm_device_t *pdev,
u32_t cid); 1220 1221 u32_t 1222 lm_mb_get_bypass_addr( 1223 lm_device_t *pdev, 1224 u32_t cid); 1225 1226 void 1227 lm_set_pcie_nfe_report( 1228 lm_device_t *pdev); 1229 1230 void 1231 lm_clear_coalescing_ticks( 1232 lm_device_t *pdev); 1233 1234 void 1235 lm_post_rx_bd( 1236 lm_device_t *pdev, 1237 lm_rx_chain_t *rxq 1238 ); 1239 1240 void 1241 lm_create_q_group( 1242 lm_device_t *pdev, 1243 u32_t q_group_id, 1244 u32_t lookahead_sz 1245 ); 1246 1247 lm_status_t 1248 lm_destroy_q_group( 1249 lm_device_t *pdev, 1250 u32_t q_group_id, 1251 u32_t num_queues 1252 ); 1253 1254 void 1255 lm_update_defq_filter_ctx( 1256 lm_device_t *pdev, 1257 u8_t valid 1258 ); 1259 1260 lm_status_t 1261 lm_chng_q_group_filter( 1262 lm_device_t *pdev, 1263 u32_t q_group_id, 1264 u8_t *dest_mac, 1265 u16_t *vlan_ptr, 1266 u32_t filter_id 1267 ); 1268 1269 #ifndef EXCLUDE_KQE_SUPPORT 1270 u32_t 1271 lm_service_l2_kcqes( 1272 struct _lm_device_t *pdev, 1273 kcqe_t *cqe_ptr[], 1274 u32_t num_cqes); 1275 #endif 1276 1277 /******************************************************************************* 1278 * Register access macros. 
1279 ******************************************************************************/ 1280 1281 #if DBG && LOG_REG_ACCESS 1282 1283 #define LOG_REG_RD(_pdev, _offset, _val) \ 1284 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1285 { \ 1286 DbgMessage2(_pdev, INFORM, "rd 0x%04x = 0x%08x\n", _offset, _val); \ 1287 } 1288 1289 #define LOG_REG_WR(_pdev, _offset, _val) \ 1290 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1291 { \ 1292 DbgMessage2(_pdev, INFORM, "wr 0x%04x 0x%08x\n", _offset, _val); \ 1293 } 1294 1295 #define LOG_MBQ_WR32(_pdev, _cid, _offset, _val) \ 1296 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1297 { \ 1298 DbgMessage3(_pdev, INFORM, "mbq_wr32 (0x%04x,0x%02x) = 0x%08x\n", \ 1299 _cid, _offset, _val); \ 1300 } 1301 1302 #define LOG_MBQ_WR32(_pdev, _cid, _offset, _val) \ 1303 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1304 { \ 1305 DbgMessage3(_pdev, INFORM, "mbq_wr32 (0x%04x,0x%02x) = 0x%08x\n", \ 1306 _cid, _offset, _val); \ 1307 } 1308 1309 #define LOG_MBQ_WR16(_pdev, _cid, _offset, _val) \ 1310 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1311 { \ 1312 DbgMessage3(_pdev, INFORM, "mbq_wr16 (0x%04x,0x%02x) = 0x%04x\n", \ 1313 _cid, _offset, _val); \ 1314 } 1315 1316 #define LOG_MBQ_WR8(_pdev, _cid, _offset, _val) \ 1317 if((_pdev)->params.test_mode & TEST_MODE_LOG_REG_ACCESS) \ 1318 { \ 1319 DbgMessage3(_pdev, INFORM, "mbq_wr8 (0x%04x,0x%02x) = 0x%02x\n", \ 1320 _cid, _offset, _val); \ 1321 } 1322 1323 #else 1324 #define LOG_REG_RD(_pdev, _offset, _val) 1325 #define LOG_REG_WR(_pdev, _offset, _val) 1326 #define LOG_MBQ_WR32(_pdev, _cid, _offset, _val) 1327 #define LOG_MBQ_WR16(_pdev, _cid, _offset, _val) 1328 #define LOG_MBQ_WR8(_pdev, _cid, _offset, _val) 1329 #endif 1330 1331 /* Indirect register access. 
 */
#define REG_RD_IND(_pdev, _offset, _ret)  lm_reg_rd_ind(_pdev, _offset, _ret)
#define REG_WR_IND(_pdev, _offset, _val)  lm_reg_wr_ind(_pdev, _offset, _val)

#ifdef CONFIG_PPC64

/* PPC64 path: all MMIO goes through the pal_readl/pal_writel wrappers
 * (declared in pal2.h, included above for the LINUX/lediag build).
 * Reads are preceded by mm_read_barrier(); writes are followed by
 * mm_write_barrier() to order the access against surrounding memory
 * operations. */

/* Register access via register name. */
#define REG_RD(_pdev, _name, _ret) \
    mm_read_barrier(); \
    *(_ret) = pal_readl(&((_pdev)->vars.regview->_name)); \
    LOG_REG_RD( \
        _pdev, \
        OFFSETOF(reg_space_t, _name), \
        (_pdev)->vars.regview->_name)

#define REG_WR(_pdev, _name, _val) \
    LOG_REG_WR(_pdev, OFFSETOF(reg_space_t, _name), _val); \
    pal_writel((_val), &((_pdev)->vars.regview->_name)); \
    mm_write_barrier()


/* Register access via register offset (byte offset into the mapped
 * register window, 32-bit access only). */
#define REG_RD_OFFSET(_pdev, _offset, _ret) \
    mm_read_barrier(); \
    *(_ret) = pal_readl((volatile u32_t *) ((u8_t *) (_pdev)->vars.regview + (_offset))); \
    LOG_REG_RD( \
        _pdev, \
        _offset, \
        *((volatile u32_t *) ((u8_t *) (_pdev)->vars.regview + (_offset))))

#define REG_WR_OFFSET(_pdev, _offset, _val) \
    LOG_REG_WR(_pdev, _offset, _val); \
    pal_writel((_val), (volatile u32_t *) ((u8_t *) (_pdev)->vars.regview + (_offset))); \
    mm_write_barrier()


/* Context write via mailbox queue.
 */
/* Mailbox-queue context writes (PPC64 flavor).  The target address is the
 * per-CID mailbox region (MB_GET_CID_ADDR) plus 'offset'.  On IKOS
 * emulation (CHIP_REV_IKOS) an extra mm_wait(pdev, 1) is inserted after
 * the write — presumably to pace the emulator; confirm against mm_wait's
 * definition. */
#define MBQ_WR32(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR32(_pdev, _cid, _offset, _val); \
    pal_writel((_val), (volatile u32_t *) ((u8_t *) (_pdev)->vars.regview + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR16(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR16(_pdev, _cid, _offset, _val); \
    pal_writew((_val), (volatile u16_t *) ((u8_t *) (_pdev)->vars.regview + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR8(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR8(_pdev, _cid, _offset, _val); \
    pal_writeb((_val), (volatile u8_t *) ((u8_t *) (_pdev)->vars.regview + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#else /* CONFIG_PPC64 */

#ifdef SOLARIS

/* Register access via register name.
 */
/* Solaris path: MMIO goes through the DDI access functions using the
 * handle in vars.dmaRegAccHandle.  Registers whose structure offset is
 * not 4-byte aligned are accessed 16 bits at a time (ddi_get16/ddi_put16);
 * everything else uses 32-bit access.  The alignment test is on a
 * compile-time constant, so the dead branch folds away. */
#define REG_RD(_pdev, _name, _ret) \
    mm_read_barrier(); \
    if ((OFFSETOF(reg_space_t, _name) % 4) == 0) \
    { \
        *(_ret) = \
            ddi_get32((_pdev)->vars.dmaRegAccHandle, \
                (u32_t *)&(_pdev)->vars.regview->_name); \
    } \
    else \
    { \
        *(_ret) = \
            ddi_get16((_pdev)->vars.dmaRegAccHandle, \
                (u16_t *)&(_pdev)->vars.regview->_name); \
    } \
    LOG_REG_RD(_pdev, OFFSETOF(reg_space_t, _name), *(_ret))

#define REG_WR(_pdev, _name, _val) \
    LOG_REG_WR(_pdev, OFFSETOF(reg_space_t, _name), _val); \
    if ((OFFSETOF(reg_space_t, _name) % 4) == 0) \
    { \
        ddi_put32((_pdev)->vars.dmaRegAccHandle, \
            (u32_t *)&(_pdev)->vars.regview->_name, \
            (_val)); \
    } \
    else \
    { \
        ddi_put16((_pdev)->vars.dmaRegAccHandle, \
            (u16_t *)&(_pdev)->vars.regview->_name, \
            (u16_t)(_val)); \
    } \
    mm_write_barrier()

/* Register access via register offset (32-bit access only; offsets used
 * this way are assumed 4-byte aligned — TODO confirm at call sites). */
#define REG_RD_OFFSET(_pdev, _offset, _ret) \
    mm_read_barrier(); \
    *(_ret) = ddi_get32((_pdev)->vars.dmaRegAccHandle, \
        (u32_t *)((u8_t *)(_pdev)->vars.regview + (_offset))); \
    LOG_REG_RD(_pdev, _offset, *(_ret))

#define REG_WR_OFFSET(_pdev, _offset, _val) \
    LOG_REG_WR(_pdev, _offset, _val); \
    ddi_put32((_pdev)->vars.dmaRegAccHandle, \
        (u32_t *)((u8_t *)(_pdev)->vars.regview + (_offset)), \
        (_val)); \
    mm_write_barrier()

/* Context write via mailbox queue.
 */
/* Mailbox-queue context writes (Solaris flavor), via DDI accessors.
 * Target address: per-CID mailbox region (MB_GET_CID_ADDR) + offset.
 * IKOS emulation gets an extra mm_wait after each write. */
#define MBQ_WR32(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR32(_pdev, _cid, _offset, _val); \
    ddi_put32((_pdev)->vars.dmaRegAccHandle, \
        (u32_t *)((u8_t *)(_pdev)->vars.regview + \
            MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
        (_val)); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR16(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR16(_pdev, _cid, _offset, _val); \
    ddi_put16((_pdev)->vars.dmaRegAccHandle, \
        (u16_t *)((u8_t *)(_pdev)->vars.regview + \
            MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
        (u16_t)(_val)); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR8(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR8(_pdev, _cid, _offset, _val); \
    ddi_put8((_pdev)->vars.dmaRegAccHandle, \
        (u8_t *)((u8_t *)(_pdev)->vars.regview + \
            MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
        (u8_t)(_val)); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#elif !defined(UEFI)

/* Default (non-PPC64, non-Solaris, non-UEFI) path: plain volatile
 * dereferences of the mapped register window, bracketed by the mm_*
 * barrier macros. */

/* Register access via register name. */
#define REG_RD(_pdev, _name, _ret) \
    mm_read_barrier(); \
    *(_ret) = ((_pdev)->vars.regview->_name); \
    LOG_REG_RD( \
        _pdev, \
        OFFSETOF(reg_space_t, _name), \
        (_pdev)->vars.regview->_name)

#define REG_WR(_pdev, _name, _val) \
    LOG_REG_WR(_pdev, OFFSETOF(reg_space_t, _name), _val); \
    (_pdev)->vars.regview->_name = (_val); \
    mm_write_barrier()


/* Register access via register offset.
 */
/* Offset-based access, default path: 32-bit volatile load/store at a byte
 * offset into the register window. */
#define REG_RD_OFFSET(_pdev, _offset, _ret) \
    mm_read_barrier(); \
    *(_ret) = *((volatile u32_t *) ((u8_t *) (_pdev)->vars.regview+(_offset)));\
    LOG_REG_RD( \
        _pdev, \
        _offset, \
        *((volatile u32_t *) ((u8_t *) (_pdev)->vars.regview + (_offset))))

#define REG_WR_OFFSET(_pdev, _offset, _val) \
    LOG_REG_WR(_pdev, _offset, _val); \
    *((volatile u32_t *) ((u8_t *) (_pdev)->vars.regview+(_offset)))=(_val); \
    mm_write_barrier()


/* Context write via mailbox queue (default path).  Target address:
 * per-CID mailbox region (MB_GET_CID_ADDR) + offset.  IKOS emulation gets
 * an extra mm_wait after each write. */
#define MBQ_WR32(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR32(_pdev, _cid, _offset, _val); \
    *((volatile u32_t *) (((u8_t *) (_pdev)->vars.regview) + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))) = (_val); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR16(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR16(_pdev, _cid, _offset, _val); \
    *((volatile u16_t *) (((u8_t *) (_pdev)->vars.regview) + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))) = (u16_t) (_val); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#define MBQ_WR8(_pdev, _cid, _offset, _val) \
    LOG_MBQ_WR8(_pdev, _cid, _offset, _val); \
    *((volatile u8_t *) (((u8_t *) (_pdev)->vars.regview) + \
        MB_GET_CID_ADDR(_pdev, _cid) + (_offset))) = (u8_t) (_val); \
    mm_write_barrier(); \
    if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
    { \
        mm_wait(_pdev, 1); \
    }

#else //UEFI

/* Register access via register name.
 */
/* UEFI path: all access goes through the EFI PCI I/O protocol
 * (PciIoFuncs->Mem.Read/Write on BAR 0).  Width is 32-bit for 4-byte
 * aligned register offsets, 16-bit otherwise, mirroring the Solaris
 * logic.  No explicit mm_* barriers here — presumably the protocol
 * implementation provides ordering; confirm against the UEFI spec. */
#define REG_RD(_pdev, _name, _ret) \
    if ((OFFSETOF(reg_space_t, _name) % 4) == 0) \
    { \
        (_pdev)->PciIoFuncs->Mem.Read( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint32, \
            0, \
            (UINT64)(OFFSETOF(reg_space_t, _name)), \
            1, \
            _ret); \
    } \
    else \
    { \
        (_pdev)->PciIoFuncs->Mem.Read( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint16, \
            0, \
            (UINT64)(OFFSETOF(reg_space_t, _name)), \
            1, \
            _ret); \
    }

/* A local w_val temporary is used so that '_val' expressions (which may
 * not be addressable) can be passed by pointer to Mem.Write. */
#define REG_WR(_pdev, _name, _val) \
    if ((OFFSETOF(reg_space_t, _name) % 4) == 0) \
    { \
        { \
            u32_t w_val; \
            w_val = _val; \
            (_pdev)->PciIoFuncs->Mem.Write( \
                (_pdev)->PciIoFuncs, \
                EfiPciIoWidthUint32, \
                0, \
                (UINT64)(OFFSETOF(reg_space_t, _name)), \
                1, \
                &w_val); \
        } \
    } \
    else \
    { \
        { \
            u16_t w_val; \
            w_val = (u16_t)_val; \
            (_pdev)->PciIoFuncs->Mem.Write( \
                (_pdev)->PciIoFuncs, \
                EfiPciIoWidthUint16, \
                0, \
                (UINT64)(OFFSETOF(reg_space_t, _name)), \
                1, \
                &w_val); \
        } \
    }


/* Register access via register offset (32-bit only). */
#define REG_RD_OFFSET(_pdev, _offset, _ret) \
    (_pdev)->PciIoFuncs->Mem.Read( \
        (_pdev)->PciIoFuncs, \
        EfiPciIoWidthUint32, \
        0, \
        (UINT64)(_offset), \
        1, \
        _ret)

#define REG_WR_OFFSET(_pdev, _offset, _val) \
    { \
        u32_t w_val; \
        w_val = _val; \
        (_pdev)->PciIoFuncs->Mem.Write( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint32, \
            0, \
            (UINT64)(_offset), \
            1, \
            &w_val); \
    }


/* Context write via mailbox queue.
 */
/* Mailbox-queue context writes (UEFI flavor).  Same w_val-temporary
 * pattern as REG_WR above; IKOS emulation gets an extra mm_wait. */
#define MBQ_WR32(_pdev, _cid, _offset, _val) \
    { \
        u32_t w_val; \
        w_val = _val; \
        (_pdev)->PciIoFuncs->Mem.Write( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint32, \
            0, \
            (UINT64)(MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
            1, \
            &w_val); \
        if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
        { \
            mm_wait(_pdev, 1); \
        } \
    }

#define MBQ_WR16(_pdev, _cid, _offset, _val) \
    { \
        u16_t w_val; \
        w_val = _val; \
        (_pdev)->PciIoFuncs->Mem.Write( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint16, \
            0, \
            (UINT64)(MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
            1, \
            &w_val); \
        if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
        { \
            mm_wait(_pdev, 1); \
        } \
    }

#define MBQ_WR8(_pdev, _cid, _offset, _val) \
    { \
        u8_t w_val; \
        w_val = _val; \
        (_pdev)->PciIoFuncs->Mem.Write( \
            (_pdev)->PciIoFuncs, \
            EfiPciIoWidthUint8, \
            0, \
            (UINT64)(MB_GET_CID_ADDR(_pdev, _cid) + (_offset)), \
            1, \
            &w_val); \
        if(CHIP_REV(_pdev) == CHIP_REV_IKOS) \
        { \
            mm_wait(_pdev, 1); \
        } \
    }

#endif //!UEFI

#endif /* CONFIG_PPC64 */

/* Indirect context access.  Unlike the MBQ_WR* macros, these will not
 * trigger a chip event. */
#define CTX_WR(_pdev, _cid_addr, _offset, _val) \
    lm_ctx_wr(_pdev, _cid_addr, _offset, _val)

#define CTX_RD(_pdev, _cid_addr, _offset) \
    lm_ctx_rd(_pdev, _cid_addr, _offset)


/* A way to trigger the bus analyzer: write to the misc_id register. */
#define TRIGGER(_pdev, _val)            REG_WR(_pdev, misc.misc_id, _val)



#endif /* _LM5706_H */