1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * Copyright (c) 2002-2006 Neterion, Inc. 22 */ 23 24 #include "xgehal-device.h" 25 #include "xgehal-channel.h" 26 #include "xgehal-fifo.h" 27 #include "xgehal-ring.h" 28 #include "xgehal-driver.h" 29 #include "xgehal-mgmt.h" 30 31 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL 32 #define END_SIGN 0x0 33 34 #ifdef XGE_HAL_HERC_EMULATION 35 #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR 36 #endif 37 38 /* 39 * Jenkins hash key length(in bytes) 40 */ 41 #define XGE_HAL_JHASH_MSG_LEN 50 42 43 /* 44 * mix(a,b,c) used in Jenkins hash algorithm 45 */ 46 #define mix(a,b,c) { \ 47 a -= b; a -= c; a ^= (c>>13); \ 48 b -= c; b -= a; b ^= (a<<8); \ 49 c -= a; c -= b; c ^= (b>>13); \ 50 a -= b; a -= c; a ^= (c>>12); \ 51 b -= c; b -= a; b ^= (a<<16); \ 52 c -= a; c -= b; c ^= (b>>5); \ 53 a -= b; a -= c; a ^= (c>>3); \ 54 b -= c; b -= a; b ^= (a<<10); \ 55 c -= a; c -= b; c ^= (b>>15); \ 56 } 57 58 59 /* 60 * __hal_device_event_queued 61 * @data: pointer to xge_hal_device_t structure 62 * 63 * Will be called when new event succesfully queued. 64 */ 65 void 66 __hal_device_event_queued(void *data, int event_type) 67 { 68 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC); 69 if (g_xge_hal_driver->uld_callbacks.event_queued) { 70 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type); 71 } 72 } 73 74 /* 75 * __hal_pio_mem_write32_upper 76 * 77 * Endiann-aware implementation of xge_os_pio_mem_write32(). 78 * Since Xframe has 64bit registers, we differintiate uppper and lower 79 * parts. 80 */ 81 void 82 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) 83 { 84 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) 85 xge_os_pio_mem_write32(pdev, regh, val, addr); 86 #else 87 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4)); 88 #endif 89 } 90 91 /* 92 * __hal_pio_mem_write32_upper 93 * 94 * Endiann-aware implementation of xge_os_pio_mem_write32(). 95 * Since Xframe has 64bit registers, we differintiate uppper and lower 96 * parts. 
97 */ 98 void 99 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, 100 void *addr) 101 { 102 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) 103 xge_os_pio_mem_write32(pdev, regh, val, 104 (void *) ((char *)addr + 4)); 105 #else 106 xge_os_pio_mem_write32(pdev, regh, val, addr); 107 #endif 108 } 109 110 /* 111 * __hal_device_register_poll 112 * @hldev: pointer to xge_hal_device_t structure 113 * @reg: register to poll for 114 * @op: 0 - bit reset, 1 - bit set 115 * @mask: mask for logical "and" condition based on %op 116 * @max_millis: maximum time to try to poll in milliseconds 117 * 118 * Will poll certain register for specified amount of time. 119 * Will poll until masked bit is not cleared. 120 */ 121 xge_hal_status_e 122 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, 123 int op, u64 mask, int max_millis) 124 { 125 u64 val64; 126 int i = 0; 127 xge_hal_status_e ret = XGE_HAL_FAIL; 128 129 xge_os_udelay(10); 130 131 do { 132 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); 133 if (op == 0 && !(val64 & mask)) 134 return XGE_HAL_OK; 135 else if (op == 1 && (val64 & mask) == mask) 136 return XGE_HAL_OK; 137 xge_os_udelay(100); 138 } while (++i <= 9); 139 140 do { 141 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); 142 if (op == 0 && !(val64 & mask)) 143 return XGE_HAL_OK; 144 else if (op == 1 && (val64 & mask) == mask) 145 return XGE_HAL_OK; 146 xge_os_udelay(1000); 147 } while (++i < max_millis); 148 149 return ret; 150 } 151 152 /* 153 * __hal_device_wait_quiescent 154 * @hldev: the device 155 * @hw_status: hw_status in case of error 156 * 157 * Will wait until device is quiescent for some blocks. 158 */ 159 static xge_hal_status_e 160 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status) 161 { 162 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 163 164 /* poll and wait first */ 165 #ifdef XGE_HAL_HERC_EMULATION 166 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, 167 (XGE_HAL_ADAPTER_STATUS_TDMA_READY | 168 XGE_HAL_ADAPTER_STATUS_RDMA_READY | 169 XGE_HAL_ADAPTER_STATUS_PFC_READY | 170 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | 171 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | 172 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | 173 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | 174 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK), 175 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); 176 #else 177 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, 178 (XGE_HAL_ADAPTER_STATUS_TDMA_READY | 179 XGE_HAL_ADAPTER_STATUS_RDMA_READY | 180 XGE_HAL_ADAPTER_STATUS_PFC_READY | 181 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | 182 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | 183 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | 184 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | 185 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK | 186 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK), 187 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); 188 #endif 189 190 return xge_hal_device_status(hldev, hw_status); 191 } 192 193 /** 194 * xge_hal_device_is_slot_freeze 195 * @devh: the device 196 * 197 * Returns non-zero if the slot is freezed. 198 * The determination is made based on the adapter_status 199 * register which will never give all FFs, unless PCI read 200 * cannot go through. 
201 */ 202 int 203 xge_hal_device_is_slot_freeze(xge_hal_device_h devh) 204 { 205 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 206 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 207 u16 device_id; 208 u64 adapter_status = 209 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 210 &bar0->adapter_status); 211 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 212 xge_offsetof(xge_hal_pci_config_le_t, device_id), 213 &device_id); 214 #ifdef TX_DEBUG 215 if (adapter_status == XGE_HAL_ALL_FOXES) 216 { 217 u64 dummy; 218 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 219 &bar0->pcc_enable); 220 printf(">>> Slot is frozen!\n"); 221 brkpoint(0); 222 } 223 #endif 224 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff)); 225 } 226 227 228 /* 229 * __hal_device_led_actifity_fix 230 * @hldev: pointer to xge_hal_device_t structure 231 * 232 * SXE-002: Configure link and activity LED to turn it off 233 */ 234 static void 235 __hal_device_led_actifity_fix(xge_hal_device_t *hldev) 236 { 237 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 238 u16 subid; 239 u64 val64; 240 241 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 242 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid); 243 244 /* 245 * In the case of Herc, there is a new register named beacon control 246 * is added which was not present in Xena. 247 * Beacon control register in Herc is at the same offset as 248 * gpio control register in Xena. It means they are one and same in 249 * the case of Xena. Also, gpio control register offset in Herc and 250 * Xena is different. 251 * The current register map represents Herc(It means we have 252 * both beacon and gpio control registers in register map). 253 * WRT transition from Xena to Herc, all the code in Xena which was 254 * using gpio control register for LED handling would have to 255 * use beacon control register in Herc and the rest of the code 256 * which uses gpio control in Xena would use the same register 257 * in Herc. 258 * WRT LED handling(following code), In the case of Herc, beacon 259 * control register has to be used. This is applicable for Xena also, 260 * since it represents the gpio control register in Xena. 261 */ 262 if ((subid & 0xFF) >= 0x07) { 263 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 264 &bar0->beacon_control); 265 val64 |= 0x0000800000000000ULL; 266 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 267 val64, &bar0->beacon_control); 268 val64 = 0x0411040400000000ULL; 269 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 270 (void *) ((u8 *)bar0 + 0x2700)); 271 } 272 } 273 274 /* Constants for Fixing the MacAddress problem seen mostly on 275 * Alpha machines. 276 */ 277 static u64 xena_fix_mac[] = { 278 0x0060000000000000ULL, 0x0060600000000000ULL, 279 0x0040600000000000ULL, 0x0000600000000000ULL, 280 0x0020600000000000ULL, 0x0060600000000000ULL, 281 0x0020600000000000ULL, 0x0060600000000000ULL, 282 0x0020600000000000ULL, 0x0060600000000000ULL, 283 0x0020600000000000ULL, 0x0060600000000000ULL, 284 0x0020600000000000ULL, 0x0060600000000000ULL, 285 0x0020600000000000ULL, 0x0060600000000000ULL, 286 0x0020600000000000ULL, 0x0060600000000000ULL, 287 0x0020600000000000ULL, 0x0060600000000000ULL, 288 0x0020600000000000ULL, 0x0060600000000000ULL, 289 0x0020600000000000ULL, 0x0060600000000000ULL, 290 0x0020600000000000ULL, 0x0000600000000000ULL, 291 0x0040600000000000ULL, 0x0060600000000000ULL, 292 END_SIGN 293 }; 294 295 /* 296 * __hal_device_fix_mac 297 * @hldev: HAL device handle. 
298 * 299 * Fix for all "FFs" MAC address problems observed on Alpha platforms. 300 */ 301 static void 302 __hal_device_xena_fix_mac(xge_hal_device_t *hldev) 303 { 304 int i = 0; 305 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 306 307 /* 308 * In the case of Herc, there is a new register named beacon control 309 * is added which was not present in Xena. 310 * Beacon control register in Herc is at the same offset as 311 * gpio control register in Xena. It means they are one and same in 312 * the case of Xena. Also, gpio control register offset in Herc and 313 * Xena is different. 314 * The current register map represents Herc(It means we have 315 * both beacon and gpio control registers in register map). 316 * WRT transition from Xena to Herc, all the code in Xena which was 317 * using gpio control register for LED handling would have to 318 * use beacon control register in Herc and the rest of the code 319 * which uses gpio control in Xena would use the same register 320 * in Herc. 321 * In the following code(xena_fix_mac), beacon control register has 322 * to be used in the case of Xena, since it represents gpio control 323 * register. In the case of Herc, there is no change required. 324 */ 325 while (xena_fix_mac[i] != END_SIGN) { 326 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 327 xena_fix_mac[i++], &bar0->beacon_control); 328 xge_os_mdelay(1); 329 } 330 } 331 332 /* 333 * xge_hal_device_bcast_enable 334 * @hldev: HAL device handle. 335 * 336 * Enable receiving broadcasts. 337 * The host must first write RMAC_CFG_KEY "key" 338 * register, and then - MAC_CFG register. 339 */ 340 void 341 xge_hal_device_bcast_enable(xge_hal_device_h devh) 342 { 343 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 344 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 345 u64 val64; 346 347 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 348 &bar0->mac_cfg); 349 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE; 350 351 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 352 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 353 354 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 355 (u32)(val64 >> 32), &bar0->mac_cfg); 356 357 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", 358 (unsigned long long)val64, 359 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); 360 } 361 362 /* 363 * xge_hal_device_bcast_disable 364 * @hldev: HAL device handle. 365 * 366 * Disable receiving broadcasts. 367 * The host must first write RMAC_CFG_KEY "key" 368 * register, and then - MAC_CFG register. 369 */ 370 void 371 xge_hal_device_bcast_disable(xge_hal_device_h devh) 372 { 373 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 374 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 375 u64 val64; 376 377 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 378 &bar0->mac_cfg); 379 380 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE); 381 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 382 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 383 384 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 385 (u32)(val64 >> 32), &bar0->mac_cfg); 386 387 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", 388 (unsigned long long)val64, 389 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); 390 } 391 392 /* 393 * __hal_device_shared_splits_configure 394 * @hldev: HAL device handle. 
395 * 396 * TxDMA will stop Read request if the number of read split had exceeded 397 * the limit set by shared_splits 398 */ 399 static void 400 __hal_device_shared_splits_configure(xge_hal_device_t *hldev) 401 { 402 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 403 u64 val64; 404 405 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 406 &bar0->pic_control); 407 val64 |= 408 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits); 409 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 410 &bar0->pic_control); 411 xge_debug_device(XGE_TRACE, "%s", "shared splits configured"); 412 } 413 414 /* 415 * __hal_device_rmac_padding_configure 416 * @hldev: HAL device handle. 417 * 418 * Configure RMAC frame padding. Depends on configuration, it 419 * can be send to host or removed by MAC. 420 */ 421 static void 422 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev) 423 { 424 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 425 u64 val64; 426 427 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 428 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 429 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 430 &bar0->mac_cfg); 431 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE ); 432 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); 433 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD; 434 435 /* 436 * If the RTH enable bit is not set, strip the FCS 437 */ 438 if (!hldev->config.rth_en || 439 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 440 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { 441 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; 442 } 443 444 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD ); 445 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM; 446 447 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 448 (u32)(val64 >> 32), (char*)&bar0->mac_cfg); 449 xge_os_mdelay(1); 450 451 xge_debug_device(XGE_TRACE, 452 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured", 453 (unsigned long long)val64); 454 } 455 456 /* 457 * __hal_device_pause_frames_configure 458 * @hldev: HAL device handle. 459 * 460 * Set Pause threshold. 
461 * 462 * Pause frame is generated if the amount of data outstanding 463 * on any queue exceeded the ratio of 464 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 465 */ 466 static void 467 __hal_device_pause_frames_configure(xge_hal_device_t *hldev) 468 { 469 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 470 int i; 471 u64 val64; 472 473 switch (hldev->config.mac.media) { 474 case XGE_HAL_MEDIA_SR: 475 case XGE_HAL_MEDIA_SW: 476 val64=0xfffbfffbfffbfffbULL; 477 break; 478 case XGE_HAL_MEDIA_LR: 479 case XGE_HAL_MEDIA_LW: 480 val64=0xffbbffbbffbbffbbULL; 481 break; 482 case XGE_HAL_MEDIA_ER: 483 case XGE_HAL_MEDIA_EW: 484 default: 485 val64=0xffbbffbbffbbffbbULL; 486 break; 487 } 488 489 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 490 val64, &bar0->mc_pause_thresh_q0q3); 491 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 492 val64, &bar0->mc_pause_thresh_q4q7); 493 494 /* Set the time value to be inserted in the pause frame generated 495 * by Xframe */ 496 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 497 &bar0->rmac_pause_cfg); 498 if (hldev->config.mac.rmac_pause_gen_en) 499 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; 500 else 501 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN); 502 if (hldev->config.mac.rmac_pause_rcv_en) 503 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; 504 else 505 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN); 506 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff)); 507 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time); 508 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 509 &bar0->rmac_pause_cfg); 510 511 val64 = 0; 512 for (i = 0; i<4; i++) { 513 val64 |= 514 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3) 515 <<(i*2*8)); 516 } 517 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 518 &bar0->mc_pause_thresh_q0q3); 519 520 val64 = 0; 521 for (i = 0; i<4; i++) { 522 val64 |= 523 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7) 524 <<(i*2*8)); 525 } 526 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 527 &bar0->mc_pause_thresh_q4q7); 528 xge_debug_device(XGE_TRACE, "%s", "pause frames configured"); 529 } 530 531 /* 532 * Herc's clock rate doubled, unless the slot is 33MHz. 533 */ 534 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev, 535 unsigned int time_ival) 536 { 537 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 538 return time_ival; 539 540 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC); 541 542 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN && 543 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ) 544 time_ival *= 2; 545 546 return time_ival; 547 } 548 549 550 /* 551 * __hal_device_bus_master_disable 552 * @hldev: HAL device handle. 553 * 554 * Disable bus mastership. 555 */ 556 static void 557 __hal_device_bus_master_disable (xge_hal_device_t *hldev) 558 { 559 u16 cmd; 560 u16 bus_master = 4; 561 562 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 563 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 564 cmd &= ~bus_master; 565 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 566 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 567 } 568 569 /* 570 * __hal_device_bus_master_enable 571 * @hldev: HAL device handle. 572 * 573 * Disable bus mastership. 574 */ 575 static void 576 __hal_device_bus_master_enable (xge_hal_device_t *hldev) 577 { 578 u16 cmd; 579 u16 bus_master = 4; 580 581 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 582 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 583 584 /* already enabled? 
do nothing */ 585 if (cmd & bus_master) 586 return; 587 588 cmd |= bus_master; 589 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 590 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 591 } 592 /* 593 * __hal_device_intr_mgmt 594 * @hldev: HAL device handle. 595 * @mask: mask indicating which Intr block must be modified. 596 * @flag: if true - enable, otherwise - disable interrupts. 597 * 598 * Disable or enable device interrupts. Mask is used to specify 599 * which hardware blocks should produce interrupts. For details 600 * please refer to Xframe User Guide. 601 */ 602 static void 603 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag) 604 { 605 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 606 u64 val64 = 0, temp64 = 0; 607 u64 gim, gim_saved; 608 609 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev, 610 hldev->regh0, &bar0->general_int_mask); 611 612 /* Top level interrupt classification */ 613 /* PIC Interrupts */ 614 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) { 615 /* Enable PIC Intrs in the general intr mask register */ 616 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/; 617 if (flag) { 618 gim &= ~((u64) val64); 619 temp64 = xge_os_pio_mem_read64(hldev->pdev, 620 hldev->regh0, &bar0->pic_int_mask); 621 622 temp64 &= ~XGE_HAL_PIC_INT_TX; 623 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 624 if (xge_hal_device_check_id(hldev) == 625 XGE_HAL_CARD_HERC) { 626 temp64 &= ~XGE_HAL_PIC_INT_MISC; 627 } 628 #endif 629 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 630 temp64, &bar0->pic_int_mask); 631 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 632 if (xge_hal_device_check_id(hldev) == 633 XGE_HAL_CARD_HERC) { 634 /* 635 * Unmask only Link Up interrupt 636 */ 637 temp64 = xge_os_pio_mem_read64(hldev->pdev, 638 hldev->regh0, &bar0->misc_int_mask); 639 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 640 xge_os_pio_mem_write64(hldev->pdev, 641 hldev->regh0, temp64, 642 &bar0->misc_int_mask); 643 xge_debug_device(XGE_TRACE, 644 "unmask link up flag "XGE_OS_LLXFMT, 645 (unsigned long long)temp64); 646 } 647 #endif 648 } else { /* flag == 0 */ 649 650 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 651 if (xge_hal_device_check_id(hldev) == 652 XGE_HAL_CARD_HERC) { 653 /* 654 * Mask both Link Up and Down interrupts 655 */ 656 temp64 = xge_os_pio_mem_read64(hldev->pdev, 657 hldev->regh0, &bar0->misc_int_mask); 658 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 659 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 660 xge_os_pio_mem_write64(hldev->pdev, 661 hldev->regh0, temp64, 662 &bar0->misc_int_mask); 663 xge_debug_device(XGE_TRACE, 664 "mask link up/down flag "XGE_OS_LLXFMT, 665 (unsigned long long)temp64); 666 } 667 #endif 668 /* Disable PIC Intrs in the general intr mask 669 * register */ 670 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 671 XGE_HAL_ALL_INTRS_DIS, 672 &bar0->pic_int_mask); 673 gim |= val64; 674 } 675 } 676 677 /* DMA Interrupts */ 678 /* Enabling/Disabling Tx DMA interrupts */ 679 if (mask & XGE_HAL_TX_DMA_INTR) { 680 /* Enable TxDMA Intrs in the general intr mask register */ 681 val64 = XGE_HAL_TXDMA_INT_M; 682 if (flag) { 683 gim &= ~((u64) val64); 684 /* Enable all TxDMA interrupts */ 685 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 686 0x0, &bar0->txdma_int_mask); 687 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 688 0x0, &bar0->pfc_err_mask); 689 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 690 0x0, &bar0->tda_err_mask); 691 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 692 0x0, &bar0->pcc_err_mask); 693 
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 694 0x0, &bar0->tti_err_mask); 695 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 696 0x0, &bar0->lso_err_mask); 697 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 698 0x0, &bar0->tpa_err_mask); 699 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 700 0x0, &bar0->sm_err_mask); 701 702 } else { /* flag == 0 */ 703 704 /* Disable TxDMA Intrs in the general intr mask 705 * register */ 706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 707 XGE_HAL_ALL_INTRS_DIS, 708 &bar0->txdma_int_mask); 709 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 710 XGE_HAL_ALL_INTRS_DIS, 711 &bar0->pfc_err_mask); 712 713 gim |= val64; 714 } 715 } 716 717 /* Enabling/Disabling Rx DMA interrupts */ 718 if (mask & XGE_HAL_RX_DMA_INTR) { 719 /* Enable RxDMA Intrs in the general intr mask register */ 720 val64 = XGE_HAL_RXDMA_INT_M; 721 if (flag) { 722 723 gim &= ~((u64) val64); 724 /* All RxDMA block interrupts are disabled for now 725 * TODO */ 726 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 727 XGE_HAL_ALL_INTRS_DIS, 728 &bar0->rxdma_int_mask); 729 730 } else { /* flag == 0 */ 731 732 /* Disable RxDMA Intrs in the general intr mask 733 * register */ 734 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 735 XGE_HAL_ALL_INTRS_DIS, 736 &bar0->rxdma_int_mask); 737 738 gim |= val64; 739 } 740 } 741 742 /* MAC Interrupts */ 743 /* Enabling/Disabling MAC interrupts */ 744 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) { 745 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M; 746 if (flag) { 747 748 gim &= ~((u64) val64); 749 750 /* All MAC block error inter. are disabled for now. */ 751 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 752 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 753 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 754 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 755 756 } else { /* flag == 0 */ 757 758 /* Disable MAC Intrs in the general intr mask 759 * register */ 760 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 761 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 762 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 763 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 764 765 gim |= val64; 766 } 767 } 768 769 /* XGXS Interrupts */ 770 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) { 771 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M; 772 if (flag) { 773 774 gim &= ~((u64) val64); 775 /* All XGXS block error interrupts are disabled for now 776 * TODO */ 777 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 778 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 779 780 } else { /* flag == 0 */ 781 782 /* Disable MC Intrs in the general intr mask register */ 783 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 784 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 785 786 gim |= val64; 787 } 788 } 789 790 /* Memory Controller(MC) interrupts */ 791 if (mask & XGE_HAL_MC_INTR) { 792 val64 = XGE_HAL_MC_INT_M; 793 if (flag) { 794 795 gim &= ~((u64) val64); 796 797 /* Enable all MC blocks error interrupts */ 798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 799 0x0ULL, &bar0->mc_int_mask); 800 801 } else { /* flag == 0 */ 802 803 /* Disable MC Intrs in the general intr mask 804 * register */ 805 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 806 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask); 807 808 gim |= val64; 809 } 810 } 811 812 813 /* Tx traffic interrupts */ 814 if (mask & XGE_HAL_TX_TRAFFIC_INTR) { 815 val64 = XGE_HAL_TXTRAFFIC_INT_M; 816 if (flag) { 817 818 gim &= ~((u64) val64); 819 820 /* Enable all the 
Tx side interrupts */ 821 /* '0' Enables all 64 TX interrupt levels. */ 822 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 823 &bar0->tx_traffic_mask); 824 825 } else { /* flag == 0 */ 826 827 /* Disable Tx Traffic Intrs in the general intr mask 828 * register. */ 829 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 830 XGE_HAL_ALL_INTRS_DIS, 831 &bar0->tx_traffic_mask); 832 gim |= val64; 833 } 834 } 835 836 /* Rx traffic interrupts */ 837 if (mask & XGE_HAL_RX_TRAFFIC_INTR) { 838 val64 = XGE_HAL_RXTRAFFIC_INT_M; 839 if (flag) { 840 gim &= ~((u64) val64); 841 /* '0' Enables all 8 RX interrupt levels. */ 842 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 843 &bar0->rx_traffic_mask); 844 845 } else { /* flag == 0 */ 846 847 /* Disable Rx Traffic Intrs in the general intr mask 848 * register. 849 */ 850 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 851 XGE_HAL_ALL_INTRS_DIS, 852 &bar0->rx_traffic_mask); 853 854 gim |= val64; 855 } 856 } 857 858 /* Sched Timer interrupt */ 859 if (mask & XGE_HAL_SCHED_INTR) { 860 if (flag) { 861 temp64 = xge_os_pio_mem_read64(hldev->pdev, 862 hldev->regh0, &bar0->txpic_int_mask); 863 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR; 864 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 865 temp64, &bar0->txpic_int_mask); 866 867 xge_hal_device_sched_timer(hldev, 868 hldev->config.sched_timer_us, 869 hldev->config.sched_timer_one_shot); 870 } else { 871 temp64 = xge_os_pio_mem_read64(hldev->pdev, 872 hldev->regh0, &bar0->txpic_int_mask); 873 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR; 874 875 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 876 temp64, &bar0->txpic_int_mask); 877 878 xge_hal_device_sched_timer(hldev, 879 XGE_HAL_SCHED_TIMER_DISABLED, 880 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE); 881 } 882 } 883 884 if (gim != gim_saved) { 885 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim, 886 &bar0->general_int_mask); 887 xge_debug_device(XGE_TRACE, "general_int_mask updated " 888 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT, 889 (unsigned long long)gim_saved, (unsigned long long)gim); 890 } 891 } 892 893 /* 894 * __hal_device_bimodal_configure 895 * @hldev: HAL device handle. 896 * 897 * Bimodal parameters initialization. 898 */ 899 static void 900 __hal_device_bimodal_configure(xge_hal_device_t *hldev) 901 { 902 int i; 903 904 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 905 xge_hal_tti_config_t *tti; 906 xge_hal_rti_config_t *rti; 907 908 if (!hldev->config.ring.queue[i].configured) 909 continue; 910 rti = &hldev->config.ring.queue[i].rti; 911 tti = &hldev->bimodal_tti[i]; 912 913 tti->enabled = 1; 914 tti->urange_a = hldev->bimodal_urange_a_en * 10; 915 tti->urange_b = 20; 916 tti->urange_c = 30; 917 tti->ufc_a = hldev->bimodal_urange_a_en * 8; 918 tti->ufc_b = 16; 919 tti->ufc_c = 32; 920 tti->ufc_d = 64; 921 tti->timer_val_us = hldev->bimodal_timer_val_us; 922 tti->timer_ac_en = 1; 923 tti->timer_ci_en = 0; 924 925 rti->urange_a = 10; 926 rti->urange_b = 20; 927 rti->urange_c = 30; 928 rti->ufc_a = 1; /* <= for netpipe type of tests */ 929 rti->ufc_b = 4; 930 rti->ufc_c = 4; 931 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ 932 rti->timer_ac_en = 1; 933 rti->timer_val_us = 5; /* for optimal bus efficiency usage */ 934 } 935 } 936 937 /* 938 * __hal_device_tti_apply 939 * @hldev: HAL device handle. 940 * 941 * apply TTI configuration. 
942 */ 943 static xge_hal_status_e 944 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti, 945 int num, int runtime) 946 { 947 u64 val64, data1 = 0, data2 = 0; 948 xge_hal_pci_bar0_t *bar0; 949 950 if (runtime) 951 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 952 else 953 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 954 955 if (tti->timer_val_us) { 956 unsigned int tx_interval; 957 958 if (hldev->config.pci_freq_mherz) { 959 tx_interval = hldev->config.pci_freq_mherz * 960 tti->timer_val_us / 64; 961 tx_interval = 962 __hal_fix_time_ival_herc(hldev, 963 tx_interval); 964 } else { 965 tx_interval = tti->timer_val_us; 966 } 967 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); 968 if (tti->timer_ac_en) { 969 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; 970 } 971 if (tti->timer_ci_en) { 972 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; 973 } 974 975 if (!runtime) { 976 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s", 977 num, tx_interval, tti->timer_ci_en ? 978 "enabled": "disabled"); 979 } 980 } 981 982 if (tti->urange_a || 983 tti->urange_b || 984 tti->urange_c || 985 tti->ufc_a || 986 tti->ufc_b || 987 tti->ufc_c || 988 tti->ufc_d ) { 989 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | 990 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | 991 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); 992 993 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | 994 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | 995 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | 996 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); 997 } 998 999 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1000 data1, &bar0->tti_data1_mem); 1001 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1002 &bar0->tti_data1_mem); 1003 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1004 data2, &bar0->tti_data2_mem); 1005 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1006 &bar0->tti_data2_mem); 1007 xge_os_wmb(); 1008 1009 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD | 1010 XGE_HAL_TTI_CMD_MEM_OFFSET(num); 1011 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1012 &bar0->tti_command_mem); 1013 1014 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem, 1015 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, 1016 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1017 /* upper layer may require to repeat */ 1018 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1019 } 1020 1021 if (!runtime) { 1022 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x" 1023 XGE_OS_LLXFMT, num, 1024 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, 1025 hldev->regh0, &bar0->tti_data1_mem)); 1026 } 1027 1028 return XGE_HAL_OK; 1029 } 1030 1031 /* 1032 * __hal_device_tti_configure 1033 * @hldev: HAL device handle. 1034 * 1035 * TTI Initialization. 1036 * Initialize Transmit Traffic Interrupt Scheme. 1037 */ 1038 static xge_hal_status_e 1039 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime) 1040 { 1041 int i; 1042 1043 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { 1044 int j; 1045 1046 if (!hldev->config.fifo.queue[i].configured) 1047 continue; 1048 1049 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { 1050 xge_hal_status_e status; 1051 1052 if (!hldev->config.fifo.queue[i].tti[j].enabled) 1053 continue; 1054 1055 /* at least some TTI enabled. Record it. 
*/ 1056 hldev->tti_enabled = 1; 1057 1058 status = __hal_device_tti_apply(hldev, 1059 &hldev->config.fifo.queue[i].tti[j], 1060 i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime); 1061 if (status != XGE_HAL_OK) 1062 return status; 1063 } 1064 } 1065 1066 /* processing bimodal TTIs */ 1067 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 1068 xge_hal_status_e status; 1069 1070 if (!hldev->bimodal_tti[i].enabled) 1071 continue; 1072 1073 /* at least some bimodal TTI enabled. Record it. */ 1074 hldev->tti_enabled = 1; 1075 1076 status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i], 1077 XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime); 1078 if (status != XGE_HAL_OK) 1079 return status; 1080 1081 } 1082 1083 return XGE_HAL_OK; 1084 } 1085 1086 /* 1087 * __hal_device_rti_configure 1088 * @hldev: HAL device handle. 1089 * 1090 * RTI Initialization. 1091 * Initialize Receive Traffic Interrupt Scheme. 1092 */ 1093 xge_hal_status_e 1094 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime) 1095 { 1096 xge_hal_pci_bar0_t *bar0; 1097 u64 val64, data1 = 0, data2 = 0; 1098 int i; 1099 1100 if (runtime) { 1101 /* 1102 * we don't want to re-configure RTI in case when 1103 * bimodal interrupts are in use. Instead reconfigure TTI 1104 * with new RTI values. 1105 */ 1106 if (hldev->config.bimodal_interrupts) { 1107 __hal_device_bimodal_configure(hldev); 1108 return __hal_device_tti_configure(hldev, 1); 1109 } 1110 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 1111 } else 1112 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1113 1114 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 1115 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti; 1116 1117 if (!hldev->config.ring.queue[i].configured) 1118 continue; 1119 1120 if (rti->timer_val_us) { 1121 unsigned int rx_interval; 1122 1123 if (hldev->config.pci_freq_mherz) { 1124 rx_interval = hldev->config.pci_freq_mherz * 1125 rti->timer_val_us / 8; 1126 rx_interval = 1127 __hal_fix_time_ival_herc(hldev, 1128 rx_interval); 1129 } else { 1130 rx_interval = rti->timer_val_us; 1131 } 1132 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval); 1133 if (rti->timer_ac_en) { 1134 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN; 1135 } 1136 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN; 1137 } 1138 1139 if (rti->urange_a || 1140 rti->urange_b || 1141 rti->urange_c || 1142 rti->ufc_a || 1143 rti->ufc_b || 1144 rti->ufc_c || 1145 rti->ufc_d) { 1146 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) | 1147 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) | 1148 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c); 1149 1150 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) | 1151 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) | 1152 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) | 1153 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d); 1154 } 1155 1156 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1157 data1, &bar0->rti_data1_mem); 1158 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1159 &bar0->rti_data1_mem); 1160 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1161 data2, &bar0->rti_data2_mem); 1162 (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1163 &bar0->rti_data2_mem); 1164 1165 xge_os_wmb(); 1166 1167 val64 = XGE_HAL_RTI_CMD_MEM_WE | 1168 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD; 1169 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i); 1170 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1171 &bar0->rti_command_mem); 1172 1173 if (!runtime && __hal_device_register_poll(hldev, 1174 &bar0->rti_command_mem, 0, 1175 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD, 1176 
XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1177 /* upper layer may require to repeat */ 1178 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1179 } 1180 1181 if (!runtime) { 1182 xge_debug_device(XGE_TRACE, 1183 "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT, 1184 i, 1185 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, 1186 hldev->regh0, &bar0->rti_data1_mem)); 1187 } 1188 } 1189 1190 return XGE_HAL_OK; 1191 } 1192 1193 1194 /* Constants to be programmed into the Xena's registers to configure 1195 * the XAUI. */ 1196 static u64 default_xena_mdio_cfg[] = { 1197 /* Reset PMA PLL */ 1198 0xC001010000000000ULL, 0xC0010100000000E0ULL, 1199 0xC0010100008000E4ULL, 1200 /* Remove Reset from PMA PLL */ 1201 0xC001010000000000ULL, 0xC0010100000000E0ULL, 1202 0xC0010100000000E4ULL, 1203 END_SIGN 1204 }; 1205 1206 static u64 default_herc_mdio_cfg[] = { 1207 END_SIGN 1208 }; 1209 1210 static u64 default_xena_dtx_cfg[] = { 1211 0x8000051500000000ULL, 0x80000515000000E0ULL, 1212 0x80000515D93500E4ULL, 0x8001051500000000ULL, 1213 0x80010515000000E0ULL, 0x80010515001E00E4ULL, 1214 0x8002051500000000ULL, 0x80020515000000E0ULL, 1215 0x80020515F21000E4ULL, 1216 /* Set PADLOOPBACKN */ 1217 0x8002051500000000ULL, 0x80020515000000E0ULL, 1218 0x80020515B20000E4ULL, 0x8003051500000000ULL, 1219 0x80030515000000E0ULL, 0x80030515B20000E4ULL, 1220 0x8004051500000000ULL, 0x80040515000000E0ULL, 1221 0x80040515B20000E4ULL, 0x8005051500000000ULL, 1222 0x80050515000000E0ULL, 0x80050515B20000E4ULL, 1223 SWITCH_SIGN, 1224 /* Remove PADLOOPBACKN */ 1225 0x8002051500000000ULL, 0x80020515000000E0ULL, 1226 0x80020515F20000E4ULL, 0x8003051500000000ULL, 1227 0x80030515000000E0ULL, 0x80030515F20000E4ULL, 1228 0x8004051500000000ULL, 0x80040515000000E0ULL, 1229 0x80040515F20000E4ULL, 0x8005051500000000ULL, 1230 0x80050515000000E0ULL, 0x80050515F20000E4ULL, 1231 END_SIGN 1232 }; 1233 1234 /* 1235 static u64 default_herc_dtx_cfg[] = { 1236 0x80000515BA750000ULL, 0x80000515BA7500E0ULL, 1237 0x80000515BA750004ULL, 0x80000515BA7500E4ULL, 1238 0x80010515003F0000ULL, 0x80010515003F00E0ULL, 1239 0x80010515003F0004ULL, 0x80010515003F00E4ULL, 1240 0x80020515F2100000ULL, 0x80020515F21000E0ULL, 1241 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 1242 END_SIGN 1243 }; 1244 */ 1245 1246 static u64 default_herc_dtx_cfg[] = { 1247 0x8000051536750000ULL, 0x80000515367500E0ULL, 1248 0x8000051536750004ULL, 0x80000515367500E4ULL, 1249 1250 0x80010515003F0000ULL, 0x80010515003F00E0ULL, 1251 0x80010515003F0004ULL, 0x80010515003F00E4ULL, 1252 1253 0x801205150D440000ULL, 0x801205150D4400E0ULL, 1254 0x801205150D440004ULL, 0x801205150D4400E4ULL, 1255 1256 0x80020515F2100000ULL, 0x80020515F21000E0ULL, 1257 0x80020515F2100004ULL, 0x80020515F21000E4ULL, 1258 END_SIGN 1259 }; 1260 1261 1262 void 1263 __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg) 1264 { 1265 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 1266 (u32)(value>>32), reg); 1267 xge_os_wmb(); 1268 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, 1269 (u32)value, reg); 1270 xge_os_wmb(); 1271 xge_os_mdelay(1); 1272 } 1273 1274 u64 1275 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg) 1276 { 1277 u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1278 reg); 1279 xge_os_mdelay(1); 1280 return val64; 1281 } 1282 1283 /* 1284 * __hal_device_xaui_configure 1285 * @hldev: HAL device handle. 1286 * 1287 * Configure XAUI Interface of Xena. 
1288 * 1289 * To Configure the Xena's XAUI, one has to write a series 1290 * of 64 bit values into two registers in a particular 1291 * sequence. Hence a macro 'SWITCH_SIGN' has been defined 1292 * which will be defined in the array of configuration values 1293 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 1294 * to switch writing from one regsiter to another. We continue 1295 * writing these values until we encounter the 'END_SIGN' macro. 1296 * For example, After making a series of 21 writes into 1297 * dtx_control register the 'SWITCH_SIGN' appears and hence we 1298 * start writing into mdio_control until we encounter END_SIGN. 1299 */ 1300 static void 1301 __hal_device_xaui_configure(xge_hal_device_t *hldev) 1302 { 1303 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1304 int mdio_cnt = 0, dtx_cnt = 0; 1305 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL; 1306 1307 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 1308 default_dtx_cfg = default_xena_dtx_cfg; 1309 default_mdio_cfg = default_xena_mdio_cfg; 1310 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 1311 default_dtx_cfg = default_herc_dtx_cfg; 1312 default_mdio_cfg = default_herc_mdio_cfg; 1313 } else { 1314 xge_assert(default_dtx_cfg); 1315 return; 1316 } 1317 1318 do { 1319 dtx_cfg: 1320 while (default_dtx_cfg[dtx_cnt] != END_SIGN) { 1321 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { 1322 dtx_cnt++; 1323 goto mdio_cfg; 1324 } 1325 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt], 1326 &bar0->dtx_control); 1327 dtx_cnt++; 1328 } 1329 mdio_cfg: 1330 while (default_mdio_cfg[mdio_cnt] != END_SIGN) { 1331 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { 1332 mdio_cnt++; 1333 goto dtx_cfg; 1334 } 1335 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt], 1336 &bar0->mdio_control); 1337 mdio_cnt++; 1338 } 1339 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) && 1340 (default_mdio_cfg[mdio_cnt] == END_SIGN)) ); 1341 1342 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured"); 1343 } 1344 1345 /* 1346 * __hal_device_mac_link_util_set 1347 * @hldev: HAL device handle. 1348 * 1349 * Set sampling rate to calculate link utilization. 1350 */ 1351 static void 1352 __hal_device_mac_link_util_set(xge_hal_device_t *hldev) 1353 { 1354 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1355 u64 val64; 1356 1357 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL( 1358 hldev->config.mac.tmac_util_period) | 1359 XGE_HAL_MAC_RX_LINK_UTIL_VAL( 1360 hldev->config.mac.rmac_util_period); 1361 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1362 &bar0->mac_link_util); 1363 xge_debug_device(XGE_TRACE, "%s", 1364 "bandwidth link utilization configured"); 1365 } 1366 1367 /* 1368 * __hal_device_set_swapper 1369 * @hldev: HAL device handle. 1370 * 1371 * Set the Xframe's byte "swapper" in accordance with 1372 * endianness of the host. 1373 */ 1374 xge_hal_status_e 1375 __hal_device_set_swapper(xge_hal_device_t *hldev) 1376 { 1377 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1378 u64 val64; 1379 1380 /* 1381 * from 32bit errarta: 1382 * 1383 * The SWAPPER_CONTROL register determines how the adapter accesses 1384 * host memory as well as how it responds to read and write requests 1385 * from the host system. Writes to this register should be performed 1386 * carefully, since the byte swappers could reverse the order of bytes. 
1387 * When configuring this register keep in mind that writes to the PIF 1388 * read and write swappers could reverse the order of the upper and 1389 * lower 32-bit words. This means that the driver may have to write 1390 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to 1391 * configure the entire register. */ 1392 1393 /* 1394 * The device by default set to a big endian format, so a big endian 1395 * driver need not set anything. 1396 */ 1397 1398 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER) 1399 1400 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1401 0xffffffffffffffffULL, &bar0->swapper_ctrl); 1402 1403 val64 = XGE_HAL_CUSTOM_HW_SWAPPER; 1404 1405 xge_os_wmb(); 1406 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1407 &bar0->swapper_ctrl); 1408 1409 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT, 1410 (unsigned long long)val64); 1411 1412 #elif !defined(XGE_OS_HOST_BIG_ENDIAN) 1413 1414 /* 1415 * Initially we enable all bits to make it accessible by the driver, 1416 * then we selectively enable only those bits that we want to set. 1417 * i.e. force swapper to swap for the first time since second write 1418 * will overwrite with the final settings. 1419 * 1420 * Use only for little endian platforms. 1421 */ 1422 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1423 0xffffffffffffffffULL, &bar0->swapper_ctrl); 1424 xge_os_wmb(); 1425 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE | 1426 XGE_HAL_SWAPPER_CTRL_PIF_R_SE | 1427 XGE_HAL_SWAPPER_CTRL_PIF_W_FE | 1428 XGE_HAL_SWAPPER_CTRL_PIF_W_SE | 1429 XGE_HAL_SWAPPER_CTRL_RTH_FE | 1430 XGE_HAL_SWAPPER_CTRL_RTH_SE | 1431 XGE_HAL_SWAPPER_CTRL_TXP_FE | 1432 XGE_HAL_SWAPPER_CTRL_TXP_SE | 1433 XGE_HAL_SWAPPER_CTRL_TXD_R_FE | 1434 XGE_HAL_SWAPPER_CTRL_TXD_R_SE | 1435 XGE_HAL_SWAPPER_CTRL_TXD_W_FE | 1436 XGE_HAL_SWAPPER_CTRL_TXD_W_SE | 1437 XGE_HAL_SWAPPER_CTRL_TXF_R_FE | 1438 XGE_HAL_SWAPPER_CTRL_RXD_R_FE | 1439 XGE_HAL_SWAPPER_CTRL_RXD_R_SE | 1440 XGE_HAL_SWAPPER_CTRL_RXD_W_FE | 1441 XGE_HAL_SWAPPER_CTRL_RXD_W_SE | 1442 XGE_HAL_SWAPPER_CTRL_RXF_W_FE | 1443 XGE_HAL_SWAPPER_CTRL_XMSI_FE | 1444 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE); 1445 1446 /* 1447 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 1448 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE; 1449 } */ 1450 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64, 1451 &bar0->swapper_ctrl); 1452 xge_os_wmb(); 1453 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32), 1454 &bar0->swapper_ctrl); 1455 xge_os_wmb(); 1456 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32), 1457 &bar0->swapper_ctrl); 1458 xge_debug_device(XGE_TRACE, "%s", "using little endian set"); 1459 #endif 1460 1461 /* Verifying if endian settings are accurate by reading a feedback 1462 * register. */ 1463 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1464 &bar0->pif_rd_swapper_fb); 1465 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) { 1466 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT, 1467 (unsigned long long) val64); 1468 return XGE_HAL_ERR_SWAPPER_CTRL; 1469 } 1470 1471 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled"); 1472 1473 return XGE_HAL_OK; 1474 } 1475 1476 /* 1477 * __hal_device_rts_mac_configure - Configure RTS steering based on 1478 * destination mac address. 1479 * @hldev: HAL device handle. 
1480 * 1481 */ 1482 xge_hal_status_e 1483 __hal_device_rts_mac_configure(xge_hal_device_t *hldev) 1484 { 1485 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1486 u64 val64; 1487 1488 if (!hldev->config.rts_mac_en) { 1489 return XGE_HAL_OK; 1490 } 1491 1492 /* 1493 * Set the receive traffic steering mode from default(classic) 1494 * to enhanced. 1495 */ 1496 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1497 &bar0->rts_ctrl); 1498 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1499 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1500 val64, &bar0->rts_ctrl); 1501 return XGE_HAL_OK; 1502 } 1503 1504 /* 1505 * __hal_device_rts_port_configure - Configure RTS steering based on 1506 * destination or source port number. 1507 * @hldev: HAL device handle. 1508 * 1509 */ 1510 xge_hal_status_e 1511 __hal_device_rts_port_configure(xge_hal_device_t *hldev) 1512 { 1513 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1514 u64 val64; 1515 int rnum; 1516 1517 if (!hldev->config.rts_port_en) { 1518 return XGE_HAL_OK; 1519 } 1520 1521 /* 1522 * Set the receive traffic steering mode from default(classic) 1523 * to enhanced. 1524 */ 1525 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1526 &bar0->rts_ctrl); 1527 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1528 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1529 val64, &bar0->rts_ctrl); 1530 1531 /* 1532 * Initiate port steering according to per-ring configuration 1533 */ 1534 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { 1535 int pnum; 1536 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; 1537 1538 if (!queue->configured || queue->rts_port_en) 1539 continue; 1540 1541 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { 1542 xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; 1543 1544 /* 1545 * Skip and clear empty ports 1546 */ 1547 if (!port->num) { 1548 /* 1549 * Clear CAM memory 1550 */ 1551 xge_os_pio_mem_write64(hldev->pdev, 1552 hldev->regh0, 0ULL, 1553 &bar0->rts_pn_cam_data); 1554 1555 val64 = BIT(7) | BIT(15); 1556 } else { 1557 /* 1558 * Assign new Port values according 1559 * to configuration 1560 */ 1561 val64 = vBIT(port->num,8,16) | 1562 vBIT(rnum,37,3) | BIT(63); 1563 if (port->src) 1564 val64 = BIT(47); 1565 if (!port->udp) 1566 val64 = BIT(7); 1567 xge_os_pio_mem_write64(hldev->pdev, 1568 hldev->regh0, val64, 1569 &bar0->rts_pn_cam_data); 1570 1571 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8); 1572 } 1573 1574 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1575 val64, &bar0->rts_pn_cam_ctrl); 1576 1577 /* poll until done */ 1578 if (__hal_device_register_poll(hldev, 1579 &bar0->rts_pn_cam_ctrl, 0, 1580 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, 1581 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != 1582 XGE_HAL_OK) { 1583 /* upper layer may require to repeat */ 1584 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1585 } 1586 } 1587 } 1588 return XGE_HAL_OK; 1589 } 1590 1591 /* 1592 * __hal_device_rts_qos_configure - Configure RTS steering based on 1593 * qos. 1594 * @hldev: HAL device handle. 
1595 * 1596 */ 1597 xge_hal_status_e 1598 __hal_device_rts_qos_configure(xge_hal_device_t *hldev) 1599 { 1600 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1601 u64 val64; 1602 int j, rx_ring_num; 1603 1604 if (!hldev->config.rts_qos_en) { 1605 return XGE_HAL_OK; 1606 } 1607 1608 /* First clear the RTS_DS_MEM_DATA */ 1609 val64 = 0; 1610 for (j = 0; j < 64; j++ ) 1611 { 1612 /* First clear the value */ 1613 val64 = XGE_HAL_RTS_DS_MEM_DATA(0); 1614 1615 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1616 &bar0->rts_ds_mem_data); 1617 1618 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | 1619 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | 1620 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); 1621 1622 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1623 &bar0->rts_ds_mem_ctrl); 1624 1625 1626 /* poll until done */ 1627 if (__hal_device_register_poll(hldev, 1628 &bar0->rts_ds_mem_ctrl, 0, 1629 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, 1630 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1631 /* upper layer may require to repeat */ 1632 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1633 } 1634 1635 } 1636 1637 rx_ring_num = 0; 1638 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) { 1639 if (hldev->config.ring.queue[j].configured) 1640 rx_ring_num++; 1641 } 1642 1643 switch (rx_ring_num) { 1644 case 1: 1645 val64 = 0x0; 1646 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1647 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1648 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1649 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1650 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1651 break; 1652 case 2: 1653 val64 = 0x0001000100010001ULL; 1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1655 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1656 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1657 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1658 val64 = 0x0001000100000000ULL; 1659 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1660 break; 1661 case 3: 1662 val64 = 0x0001020001020001ULL; 1663 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1664 val64 = 0x0200010200010200ULL; 1665 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1666 val64 = 0x0102000102000102ULL; 1667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1668 val64 = 0x0001020001020001ULL; 1669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1670 val64 = 0x0200010200000000ULL; 1671 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1672 break; 1673 case 4: 1674 val64 = 0x0001020300010203ULL; 1675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1676 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1678 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1679 val64 = 0x0001020300000000ULL; 1680 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
&bar0->rx_w_round_robin_4); 1681 break; 1682 case 5: 1683 val64 = 0x0001020304000102ULL; 1684 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1685 val64 = 0x0304000102030400ULL; 1686 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1687 val64 = 0x0102030400010203ULL; 1688 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1689 val64 = 0x0400010203040001ULL; 1690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1691 val64 = 0x0203040000000000ULL; 1692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1693 break; 1694 case 6: 1695 val64 = 0x0001020304050001ULL; 1696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1697 val64 = 0x0203040500010203ULL; 1698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1699 val64 = 0x0405000102030405ULL; 1700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1701 val64 = 0x0001020304050001ULL; 1702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1703 val64 = 0x0203040500000000ULL; 1704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1705 break; 1706 case 7: 1707 val64 = 0x0001020304050600ULL; 1708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1709 val64 = 0x0102030405060001ULL; 1710 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1711 val64 = 0x0203040506000102ULL; 1712 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1713 val64 = 0x0304050600010203ULL; 1714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1715 val64 = 0x0405060000000000ULL; 1716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1717 break; 1718 case 8: 1719 val64 = 0x0001020304050607ULL; 1720 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1721 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1722 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1723 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1724 val64 = 0x0001020300000000ULL; 1725 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1726 break; 1727 } 1728 1729 return XGE_HAL_OK; 1730 } 1731 1732 /* 1733 * xge__hal_device_rts_mac_enable 1734 * 1735 * @devh: HAL device handle. 1736 * @index: index number where the MAC addr will be stored 1737 * @macaddr: MAC address 1738 * 1739 * - Enable RTS steering for the given MAC address. This function has to be 1740 * called with lock acquired. 1741 * 1742 * NOTE: 1743 * 1. ULD has to call this function with the index value which 1744 * statisfies the following condition: 1745 * ring_num = (index % 8) 1746 * 2.ULD also needs to make sure that the index is not 1747 * occupied by any MAC address. If that index has any MAC address 1748 * it will be overwritten and HAL will not check for it. 
1749 * 1750 */ 1751 xge_hal_status_e 1752 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr) 1753 { 1754 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; 1755 xge_hal_status_e status; 1756 1757 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 1758 1759 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 1760 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; 1761 1762 if ( index >= max_addr ) 1763 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 1764 1765 /* 1766 * Set the MAC address at the given location marked by index. 1767 */ 1768 status = xge_hal_device_macaddr_set(hldev, index, macaddr); 1769 if (status != XGE_HAL_OK) { 1770 xge_debug_device(XGE_ERR, "%s", 1771 "Not able to set the mac addr"); 1772 return status; 1773 } 1774 1775 return xge_hal_device_rts_section_enable(hldev, index); 1776 } 1777 1778 /* 1779 * xge__hal_device_rts_mac_disable 1780 * @hldev: HAL device handle. 1781 * @index: index number where to disable the MAC addr 1782 * 1783 * Disable RTS Steering based on the MAC address. 1784 * This function should be called with lock acquired. 1785 * 1786 */ 1787 xge_hal_status_e 1788 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index) 1789 { 1790 xge_hal_status_e status; 1791 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 1792 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; 1793 1794 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 1795 1796 xge_debug_ll(XGE_TRACE, "the index value is %d ", index); 1797 1798 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 1799 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; 1800 1801 if ( index >= max_addr ) 1802 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 1803 1804 /* 1805 * Disable MAC address @ given index location 1806 */ 1807 status = xge_hal_device_macaddr_set(hldev, index, macaddr); 1808 if (status != XGE_HAL_OK) { 1809 xge_debug_device(XGE_ERR, "%s", 1810 "Not able to set the mac addr"); 1811 return status; 1812 } 1813 1814 return XGE_HAL_OK; 1815 } 1816 1817 1818 /* 1819 * __hal_device_rth_configure - Configure RTH for the device 1820 * @hldev: HAL device handle. 1821 * 1822 * Using IT (Indirection Table). 1823 */ 1824 xge_hal_status_e 1825 __hal_device_rth_it_configure(xge_hal_device_t *hldev) 1826 { 1827 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1828 u64 val64; 1829 int rings[XGE_HAL_MAX_RING_NUM]={0}; 1830 int rnum; 1831 int rmax; 1832 int buckets_num; 1833 int bucket; 1834 1835 if (!hldev->config.rth_en) { 1836 return XGE_HAL_OK; 1837 } 1838 1839 /* 1840 * Set the receive traffic steering mode from default(classic) 1841 * to enhanced. 
1842 */ 1843 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1844 &bar0->rts_ctrl); 1845 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1846 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1847 val64, &bar0->rts_ctrl); 1848 1849 buckets_num = (1 << hldev->config.rth_bucket_size); 1850 1851 rmax=0; 1852 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { 1853 if (hldev->config.ring.queue[rnum].configured && 1854 hldev->config.ring.queue[rnum].rth_en) 1855 rings[rmax++] = rnum; 1856 } 1857 1858 rnum = 0; 1859 /* for starters: fill in all the buckets with rings "equally" */ 1860 for (bucket = 0; bucket < buckets_num; bucket++) { 1861 1862 if (rnum == rmax) 1863 rnum = 0; 1864 1865 /* write data */ 1866 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | 1867 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]); 1868 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1869 &bar0->rts_rth_map_mem_data); 1870 1871 /* execute */ 1872 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | 1873 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | 1874 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket); 1875 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1876 &bar0->rts_rth_map_mem_ctrl); 1877 1878 /* poll until done */ 1879 if (__hal_device_register_poll(hldev, 1880 &bar0->rts_rth_map_mem_ctrl, 0, 1881 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, 1882 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1883 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1884 } 1885 1886 rnum++; 1887 } 1888 1889 val64 = XGE_HAL_RTS_RTH_EN; 1890 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size); 1891 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN | 1892 XGE_HAL_RTS_RTH_TCP_IPV6_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN | 1893 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN; 1894 1895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1896 &bar0->rts_rth_cfg); 1897 1898 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d", 1899 hldev->config.rth_bucket_size); 1900 1901 return XGE_HAL_OK; 1902 } 1903 1904 1905 /* 1906 * __hal_spdm_entry_add - Add a new entry to the SPDM table. 1907 * 1908 * Add a new entry to the adapter SPDM table and mirror it in the local 1909 * copy kept in hldev->spdm_table, so that later lookups do not have to 1910 * search the adapter table. 1911 * 1912 * Note: 1913 * This function should be called with the spdm_lock held. 1914 * 1915 * See also: xge_hal_spdm_entry_add, xge_hal_spdm_entry_remove. 1916 */ 1917 static xge_hal_status_e 1918 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip, 1919 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp, 1920 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry) 1921 { 1922 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1923 u64 val64; 1924 u64 spdm_line_arr[8]; 1925 u8 line_no; 1926 1927 /* 1928 * Clear the SPDM READY bit 1929 */ 1930 val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY; 1931 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1932 &bar0->rxpic_int_reg); 1933 1934 xge_debug_device(XGE_TRACE, 1935 "L4 SP %x:DP %x: hash %x tgt_queue %d ", 1936 l4_sp, l4_dp, jhash_value, tgt_queue); 1937 1938 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr)); 1939 1940 /* 1941 * Construct the SPDM entry.
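 * The entry occupies eight 64-bit words in the SPDM memory: word 0
 * carries the L4 source/destination ports, the target queue and the
 * is_tcp/is_ipv4 flags; word 1 carries both IPv4 addresses (for IPv6,
 * words 1-4 carry the source and destination addresses); words 5-6
 * stay zero; word 7 carries the Jenkins hash value and the
 * entry-enable bit.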
1942 */ 1943 spdm_line_arr[0] = vBIT(l4_sp,0,16) | 1944 vBIT(l4_dp,16,32) | 1945 vBIT(tgt_queue,53,3) | 1946 vBIT(is_tcp,59,1) | 1947 vBIT(is_ipv4,63,1); 1948 1949 1950 if (is_ipv4) { 1951 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) | 1952 vBIT(dst_ip->ipv4.addr,32,32); 1953 1954 } else { 1955 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8); 1956 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8); 1957 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8); 1958 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8); 1959 } 1960 1961 spdm_line_arr[7] = vBIT(jhash_value,0,32) | 1962 BIT(63); /* entry enable bit */ 1963 1964 /* 1965 * Add the entry to the SPDM table 1966 */ 1967 for(line_no = 0; line_no < 8; line_no++) { 1968 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1969 spdm_line_arr[line_no], 1970 (void *)((char *)hldev->spdm_mem_base + 1971 (spdm_entry * 64) + 1972 (line_no * 8))); 1973 } 1974 1975 /* 1976 * Wait for the operation to be completed. 1977 */ 1978 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, 1979 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 1980 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1981 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1982 } 1983 1984 /* 1985 * Add this information to the local SPDM table as well. The local 1986 * copy is maintained so that entry lookups do not require a search 1987 * of the adapter SPDM table, which is very costly in terms of 1988 * time. 1989 */ 1990 hldev->spdm_table[spdm_entry]->in_use = 1; 1991 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip, 1992 sizeof(xge_hal_ipaddr_t)); 1993 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip, 1994 sizeof(xge_hal_ipaddr_t)); 1995 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp; 1996 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp; 1997 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp; 1998 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4; 1999 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue; 2000 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value; 2001 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry; 2002 2003 return XGE_HAL_OK; 2004 } 2005 2006 /* 2007 * __hal_device_rth_spdm_configure - Configure RTH for the device 2008 * @hldev: HAL device handle. 2009 * 2010 * Using SPDM (Socket-Pair Direct Match). 2011 */ 2012 xge_hal_status_e 2013 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev) 2014 { 2015 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 2016 u64 val64; 2017 u8 spdm_bar_num; 2018 u32 spdm_bar_offset; 2019 int spdm_table_size; 2020 int i; 2021 2022 if (!hldev->config.rth_spdm_en) { 2023 return XGE_HAL_OK; 2024 } 2025 2026 /* 2027 * Retrieve the base address of SPDM Table. 2028 */ 2029 val64 = xge_os_pio_mem_read64(hldev->pdev, 2030 hldev->regh0, &bar0->spdm_bir_offset); 2031 2032 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64); 2033 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64); 2034 2035 2036 /* 2037 * spdm_bar_num specifies the PCI BAR used to 2038 * address the memory space. spdm_bar_offset specifies the offset 2039 * of the SPDM memory within that BAR's memory space.
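 * For example, when spdm_bar_num is 0 the SPDM memory is reached
 * through BAR0 at (spdm_bar_offset * 8) bytes from the BAR0 base;
 * the switch statement below handles the BAR0 and BAR1 cases.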
2040 */ 2041 switch (spdm_bar_num) { 2042 case 0: 2043 { 2044 hldev->spdm_mem_base = (char *)bar0 + 2045 (spdm_bar_offset * 8); 2046 break; 2047 } 2048 case 1: 2049 { 2050 char *bar1 = (char *)hldev->bar1; 2051 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8); 2052 break; 2053 } 2054 default: 2055 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1))); 2056 } 2057 2058 /* 2059 * Retrieve the size of SPDM table (number of entries). 2060 */ 2061 val64 = xge_os_pio_mem_read64(hldev->pdev, 2062 hldev->regh0, &bar0->spdm_structure); 2063 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64); 2064 2065 2066 spdm_table_size = hldev->spdm_max_entries * 2067 sizeof(xge_hal_spdm_entry_t); 2068 if (hldev->spdm_table == NULL) { 2069 void *mem; 2070 2071 /* 2072 * Allocate memory to hold the copy of SPDM table. 2073 */ 2074 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **) 2075 xge_os_malloc( 2076 hldev->pdev, 2077 (sizeof(xge_hal_spdm_entry_t *) * 2078 hldev->spdm_max_entries))) == NULL) { 2079 return XGE_HAL_ERR_OUT_OF_MEMORY; 2080 } 2081 2082 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) 2083 { 2084 xge_os_free(hldev->pdev, hldev->spdm_table, 2085 (sizeof(xge_hal_spdm_entry_t *) * 2086 hldev->spdm_max_entries)); 2087 return XGE_HAL_ERR_OUT_OF_MEMORY; 2088 } 2089 2090 xge_os_memzero(mem, spdm_table_size); 2091 for (i = 0; i < hldev->spdm_max_entries; i++) { 2092 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *) 2093 ((char *)mem + 2094 i * sizeof(xge_hal_spdm_entry_t)); 2095 } 2096 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev); 2097 } else { 2098 /* 2099 * We are here because the host driver tries to 2100 * do a soft reset on the device. 2101 * Since the device soft reset clears the SPDM table, copy 2102 * the entries from the local SPDM table to the actual one. 2103 */ 2104 xge_os_spin_lock(&hldev->spdm_lock); 2105 for (i = 0; i < hldev->spdm_max_entries; i++) { 2106 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i]; 2107 2108 if (spdm_entry->in_use) { 2109 if (__hal_spdm_entry_add(hldev, 2110 &spdm_entry->src_ip, 2111 &spdm_entry->dst_ip, 2112 spdm_entry->l4_sp, 2113 spdm_entry->l4_dp, 2114 spdm_entry->is_tcp, 2115 spdm_entry->is_ipv4, 2116 spdm_entry->tgt_queue, 2117 spdm_entry->jhash_value, 2118 spdm_entry->spdm_entry) 2119 != XGE_HAL_OK) { 2120 /* Log a warning */ 2121 xge_debug_device(XGE_ERR, 2122 "SPDM table update from local" 2123 " memory failed"); 2124 } 2125 } 2126 } 2127 xge_os_spin_unlock(&hldev->spdm_lock); 2128 } 2129 2130 /* 2131 * Set the receive traffic steering mode from default(classic) 2132 * to enhanced. 2133 */ 2134 val64 = xge_os_pio_mem_read64(hldev->pdev, 2135 hldev->regh0, &bar0->rts_ctrl); 2136 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 2137 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2138 val64, &bar0->rts_ctrl); 2139 2140 /* 2141 * We may not need to configure rts_rth_jhash_cfg register as the 2142 * default values are good enough to calculate the hash. 2143 */ 2144 2145 /* 2146 * As of now, set all the rth mask registers to zero. TODO.
2147 */ 2148 for(i = 0; i < 5; i++) { 2149 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2150 0, &bar0->rts_rth_hash_mask[i]); 2151 } 2152 2153 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2154 0, &bar0->rts_rth_hash_mask_5); 2155 2156 if (hldev->config.rth_spdm_use_l4) { 2157 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4; 2158 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2159 val64, &bar0->rts_rth_status); 2160 } 2161 2162 val64 = XGE_HAL_RTS_RTH_EN; 2163 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN; 2164 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2165 &bar0->rts_rth_cfg); 2166 2167 2168 return XGE_HAL_OK; 2169 } 2170 2171 /* 2172 * __hal_device_pci_init 2173 * @hldev: HAL device handle. 2174 * 2175 * Initialize certain PCI/PCI-X configuration registers 2176 * with recommended values. Save config space for future hw resets. 2177 */ 2178 static void 2179 __hal_device_pci_init(xge_hal_device_t *hldev) 2180 { 2181 int i, pcisize = 0; 2182 u16 cmd = 0; 2183 u8 val; 2184 2185 /* Store PCI device ID and revision for future references wherein we 2186 * decide the Xena revision using the PCI subsystem ID */ 2187 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 2188 xge_offsetof(xge_hal_pci_config_le_t, device_id), 2189 &hldev->device_id); 2190 xge_os_pci_read8(hldev->pdev,hldev->cfgh, 2191 xge_offsetof(xge_hal_pci_config_le_t, revision), 2192 &hldev->revision); 2193 2194 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 2195 pcisize = XGE_HAL_PCISIZE_HERC; 2196 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 2197 pcisize = XGE_HAL_PCISIZE_XENA; 2198 2199 /* save original PCI config space to restore it on device_terminate() */ 2200 for (i = 0; i < pcisize; i++) { 2201 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, 2202 (u32*)&hldev->pci_config_space_bios + i); 2203 } 2204 2205 /* Set the PErr Response bit and SERR in PCI command register. */ 2206 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2207 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 2208 cmd |= 0x140; 2209 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2210 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 2211 2212 /* Set user-specified value for the PCI Latency Timer */ 2213 if (hldev->config.latency_timer && 2214 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) { 2215 xge_os_pci_write8(hldev->pdev, hldev->cfgh, 2216 xge_offsetof(xge_hal_pci_config_le_t, 2217 latency_timer), 2218 (u8)hldev->config.latency_timer); 2219 } 2220 /* Read back latency timer to reflect it into user level */ 2221 xge_os_pci_read8(hldev->pdev, hldev->cfgh, 2222 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val); 2223 hldev->config.latency_timer = val; 2224 2225 /* Enable Data Parity Error Recovery in PCI-X command register. */ 2226 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2227 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2228 cmd |= 1; 2229 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2230 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); 2231 2232 /* Set MMRB count in PCI-X command register.
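 * (The MMRB field occupies bits 3:2 of the PCI-X command register,
 * hence the 0xFFF3/0x000C masks and the shift by 2 below.)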
*/ 2233 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) { 2234 cmd &= 0xFFF3; 2235 cmd |= hldev->config.mmrb_count << 2; 2236 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2237 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2238 cmd); 2239 } 2240 /* Read back MMRB count to reflect it into user level */ 2241 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2242 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2243 &cmd); 2244 cmd &= 0x000C; 2245 hldev->config.mmrb_count = cmd>>2; 2246 2247 /* Setting Maximum outstanding splits based on system type. */ 2248 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) { 2249 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2250 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2251 &cmd); 2252 cmd &= 0xFF8F; 2253 cmd |= hldev->config.max_splits_trans << 4; 2254 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2255 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2256 cmd); 2257 } 2258 2259 /* Read back max split trans to reflect it into user level */ 2260 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2261 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2262 cmd &= 0x0070; 2263 hldev->config.max_splits_trans = cmd>>4; 2264 2265 /* Forcibly disabling relaxed ordering capability of the card. */ 2266 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2267 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2268 cmd &= 0xFFFD; 2269 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2270 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); 2271 2272 /* save PCI config space for future resets */ 2273 for (i = 0; i < pcisize; i++) { 2274 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, 2275 (u32*)&hldev->pci_config_space + i); 2276 } 2277 } 2278 2279 /* 2280 * __hal_device_pci_info_get - Get PCI bus information such as width, frequency 2281 * and mode. 2282 * @devh: HAL device handle. 2283 * @pci_mode: pointer to a variable of enumerated type 2284 * xge_hal_pci_mode_e{}. 2285 * @bus_frequency: pointer to a variable of enumerated type 2286 * xge_hal_pci_bus_frequency_e{}. 2287 * @bus_width: pointer to a variable of enumerated type 2288 * xge_hal_pci_bus_width_e{}. 2289 * 2290 * Get PCI mode, bus frequency, and PCI bus width. 2291 * 2292 * Returns: one of the xge_hal_status_e{} enumerated types. 2293 * XGE_HAL_OK - for success. 2294 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card. 2295 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card. 2296 * 2297 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e, xge_hal_pci_bus_width_e.
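 *
 * Example (sketch of the call made later from __hal_device_hw_initialize()):
 *
 *	if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
 *	    &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK) {
 *		// fall back to "unknown" bus parameters
 *	}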
2298 */ 2299 static xge_hal_status_e 2300 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 2301 xge_hal_pci_bus_frequency_e *bus_frequency, 2302 xge_hal_pci_bus_width_e *bus_width) 2303 { 2304 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 2305 xge_hal_status_e rc_status = XGE_HAL_OK; 2306 xge_hal_card_e card_id = xge_hal_device_check_id (devh); 2307 2308 #ifdef XGE_HAL_HERC_EMULATION 2309 hldev->config.pci_freq_mherz = 2310 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2311 *bus_frequency = 2312 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2313 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2314 #else 2315 if (card_id == XGE_HAL_CARD_HERC) { 2316 xge_hal_pci_bar0_t *bar0 = 2317 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2318 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2319 &bar0->pci_info); 2320 if (XGE_HAL_PCI_32_BIT & pci_info) 2321 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT; 2322 else 2323 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; 2324 switch((pci_info & XGE_HAL_PCI_INFO)>>60) 2325 { 2326 case XGE_HAL_PCI_33MHZ_MODE: 2327 *bus_frequency = 2328 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ; 2329 *pci_mode = XGE_HAL_PCI_33MHZ_MODE; 2330 break; 2331 case XGE_HAL_PCI_66MHZ_MODE: 2332 *bus_frequency = 2333 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2334 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2335 break; 2336 case XGE_HAL_PCIX_M1_66MHZ_MODE: 2337 *bus_frequency = 2338 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2339 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE; 2340 break; 2341 case XGE_HAL_PCIX_M1_100MHZ_MODE: 2342 *bus_frequency = 2343 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ; 2344 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE; 2345 break; 2346 case XGE_HAL_PCIX_M1_133MHZ_MODE: 2347 *bus_frequency = 2348 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2349 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE; 2350 break; 2351 case XGE_HAL_PCIX_M2_66MHZ_MODE: 2352 *bus_frequency = 2353 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2354 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE; 2355 break; 2356 case XGE_HAL_PCIX_M2_100MHZ_MODE: 2357 *bus_frequency = 2358 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ; 2359 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE; 2360 break; 2361 case XGE_HAL_PCIX_M2_133MHZ_MODE: 2362 *bus_frequency = 2363 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ; 2364 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE; 2365 break; 2366 case XGE_HAL_PCIX_M1_RESERVED: 2367 case XGE_HAL_PCIX_M1_66MHZ_NS: 2368 case XGE_HAL_PCIX_M1_100MHZ_NS: 2369 case XGE_HAL_PCIX_M1_133MHZ_NS: 2370 case XGE_HAL_PCIX_M2_RESERVED: 2371 case XGE_HAL_PCIX_533_RESERVED: 2372 default: 2373 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO; 2374 xge_debug_device(XGE_ERR, 2375 "invalid pci info "XGE_OS_LLXFMT, 2376 (unsigned long long)pci_info); 2377 break; 2378 } 2379 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO) 2380 xge_debug_device(XGE_TRACE, "PCI info: mode %d width " 2381 "%d frequency %d", *pci_mode, *bus_width, 2382 *bus_frequency); 2383 if (hldev->config.pci_freq_mherz == 2384 XGE_HAL_DEFAULT_USE_HARDCODE) { 2385 hldev->config.pci_freq_mherz = *bus_frequency; 2386 } 2387 } 2388 /* for XENA, we report PCI mode, only. 
PCI bus frequency, and bus width 2389 * are set to unknown */ 2390 else if (card_id == XGE_HAL_CARD_XENA) { 2391 u32 pcix_status; 2392 u8 dev_num, bus_num; 2393 /* initialize defaults for XENA */ 2394 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2395 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2396 xge_os_pci_read32(hldev->pdev, hldev->cfgh, 2397 xge_offsetof(xge_hal_pci_config_le_t, pcix_status), 2398 &pcix_status); 2399 dev_num = (u8)((pcix_status & 0xF8) >> 3); 2400 bus_num = (u8)((pcix_status & 0xFF00) >> 8); 2401 if (dev_num == 0 && bus_num == 0) 2402 *pci_mode = XGE_HAL_PCI_BASIC_MODE; 2403 else 2404 *pci_mode = XGE_HAL_PCIX_BASIC_MODE; 2405 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode); 2406 if (hldev->config.pci_freq_mherz == 2407 XGE_HAL_DEFAULT_USE_HARDCODE) { 2408 /* 2409 * There is no way to detect BUS frequency on Xena, 2410 * so, in case of automatic configuration we hopelessly 2411 * assume 133MHZ. 2412 */ 2413 hldev->config.pci_freq_mherz = 2414 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2415 } 2416 } else if (card_id == XGE_HAL_CARD_TITAN) { 2417 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; 2418 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ; 2419 if (hldev->config.pci_freq_mherz == 2420 XGE_HAL_DEFAULT_USE_HARDCODE) { 2421 hldev->config.pci_freq_mherz = *bus_frequency; 2422 } 2423 } else{ 2424 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID; 2425 xge_debug_device(XGE_ERR, "invalid device id %d", card_id); 2426 } 2427 #endif 2428 2429 return rc_status; 2430 } 2431 2432 /* 2433 * __hal_device_handle_link_up_ind 2434 * @hldev: HAL device handle. 2435 * 2436 * Link up indication handler. The function is invoked by HAL when 2437 * Xframe indicates that the link is up for programmable amount of time. 2438 */ 2439 static int 2440 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev) 2441 { 2442 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2443 u64 val64; 2444 2445 /* 2446 * If the previous link state is not down, return. 
2447 */ 2448 if (hldev->link_state == XGE_HAL_LINK_UP) { 2449 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2450 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2451 val64 = xge_os_pio_mem_read64( 2452 hldev->pdev, hldev->regh0, 2453 &bar0->misc_int_mask); 2454 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2455 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2456 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2457 val64, &bar0->misc_int_mask); 2458 } 2459 #endif 2460 xge_debug_device(XGE_TRACE, 2461 "link up indication while link is up, ignoring.."); 2462 return 0; 2463 } 2464 2465 /* Now re-enable it as due to noise, hardware turned it off */ 2466 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2467 &bar0->adapter_control); 2468 val64 |= XGE_HAL_ADAPTER_CNTL_EN; 2469 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ 2470 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2471 &bar0->adapter_control); 2472 2473 /* Turn on the Laser */ 2474 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2475 &bar0->adapter_control); 2476 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON | 2477 XGE_HAL_ADAPTER_LED_ON); 2478 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2479 &bar0->adapter_control); 2480 2481 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2482 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2483 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2484 &bar0->adapter_status); 2485 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2486 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) { 2487 xge_debug_device(XGE_TRACE, "%s", 2488 "fail to transition link to up..."); 2489 return 0; 2490 } 2491 else { 2492 /* 2493 * Mask the Link Up interrupt and unmask the Link Down 2494 * interrupt. 2495 */ 2496 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2497 &bar0->misc_int_mask); 2498 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2499 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2500 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2501 &bar0->misc_int_mask); 2502 xge_debug_device(XGE_TRACE, "calling link up.."); 2503 hldev->link_state = XGE_HAL_LINK_UP; 2504 2505 /* notify ULD */ 2506 if (g_xge_hal_driver->uld_callbacks.link_up) { 2507 g_xge_hal_driver->uld_callbacks.link_up( 2508 hldev->upper_layer_info); 2509 } 2510 return 1; 2511 } 2512 } 2513 #endif 2514 xge_os_mdelay(1); 2515 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2516 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2517 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2518 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2519 2520 /* notify ULD */ 2521 (void) xge_queue_produce_context(hldev->queueh, 2522 XGE_HAL_EVENT_LINK_IS_UP, hldev); 2523 /* link is up after been enabled */ 2524 return 1; 2525 } else { 2526 xge_debug_device(XGE_TRACE, "%s", 2527 "fail to transition link to up..."); 2528 return 0; 2529 } 2530 } 2531 2532 /* 2533 * __hal_device_handle_link_down_ind 2534 * @hldev: HAL device handle. 2535 * 2536 * Link down indication handler. The function is invoked by HAL when 2537 * Xframe indicates that the link is down. 2538 */ 2539 static int 2540 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev) 2541 { 2542 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2543 u64 val64; 2544 2545 /* 2546 * If the previous link state is not up, return. 
2547 */ 2548 if (hldev->link_state == XGE_HAL_LINK_DOWN) { 2549 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2550 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2551 val64 = xge_os_pio_mem_read64( 2552 hldev->pdev, hldev->regh0, 2553 &bar0->misc_int_mask); 2554 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2555 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2556 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2557 val64, &bar0->misc_int_mask); 2558 } 2559 #endif 2560 xge_debug_device(XGE_TRACE, 2561 "link down indication while link is down, ignoring.."); 2562 return 0; 2563 } 2564 xge_os_mdelay(1); 2565 2566 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2567 &bar0->adapter_control); 2568 2569 /* try to debounce the link only if the adapter is enabled. */ 2570 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) { 2571 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2572 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2573 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2574 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2575 xge_debug_device(XGE_TRACE, 2576 "link is actually up (possible noisy link?), ignoring."); 2577 return(0); 2578 } 2579 } 2580 2581 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2582 &bar0->adapter_control); 2583 /* turn off LED */ 2584 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 2585 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2586 &bar0->adapter_control); 2587 2588 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2589 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2590 /* 2591 * Mask the Link Down interrupt and unmask the Link up 2592 * interrupt 2593 */ 2594 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2595 &bar0->misc_int_mask); 2596 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2597 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2598 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2599 &bar0->misc_int_mask); 2600 2601 /* link is down */ 2602 xge_debug_device(XGE_TRACE, "calling link down.."); 2603 hldev->link_state = XGE_HAL_LINK_DOWN; 2604 2605 /* notify ULD */ 2606 if (g_xge_hal_driver->uld_callbacks.link_down) { 2607 g_xge_hal_driver->uld_callbacks.link_down( 2608 hldev->upper_layer_info); 2609 } 2610 return 1; 2611 } 2612 #endif 2613 /* notify ULD */ 2614 (void) xge_queue_produce_context(hldev->queueh, 2615 XGE_HAL_EVENT_LINK_IS_DOWN, hldev); 2616 /* link is down */ 2617 return 1; 2618 } 2619 /* 2620 * __hal_device_handle_link_state_change 2621 * @hldev: HAL device handle. 2622 * 2623 * Link state change handler. The function is invoked by HAL when 2624 * Xframe indicates link state change condition. The code here makes sure to 2625 * 1) ignore redundant state change indications; 2626 * 2) execute link-up sequence, and handle the failure to bring the link up; 2627 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by 2628 * upper-layer driver (ULD). 2629 */ 2630 static int 2631 __hal_device_handle_link_state_change(xge_hal_device_t *hldev) 2632 { 2633 u64 hw_status; 2634 int hw_link_state; 2635 int retcode; 2636 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2637 u64 val64; 2638 int i = 0; 2639 2640 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2641 &bar0->adapter_control); 2642 2643 /* If the adapter is not enabled but the hal thinks we are in the up 2644 * state then transition to the down state. 
2645 */ 2646 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) && 2647 (hldev->link_state == XGE_HAL_LINK_UP) ) { 2648 return(__hal_device_handle_link_down_ind(hldev)); 2649 } 2650 2651 do { 2652 xge_os_mdelay(1); 2653 (void) xge_hal_device_status(hldev, &hw_status); 2654 hw_link_state = (hw_status & 2655 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2656 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ? 2657 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP; 2658 2659 /* check if the current link state is still considered 2660 * to be changed. This way we will make sure that this is 2661 * not a noise which needs to be filtered out */ 2662 if (hldev->link_state == hw_link_state) 2663 break; 2664 } while (i++ < hldev->config.link_valid_cnt); 2665 2666 /* If the current link state is same as previous, just return */ 2667 if (hldev->link_state == hw_link_state) 2668 retcode = 0; 2669 /* detected state change */ 2670 else if (hw_link_state == XGE_HAL_LINK_UP) 2671 retcode = __hal_device_handle_link_up_ind(hldev); 2672 else 2673 retcode = __hal_device_handle_link_down_ind(hldev); 2674 return retcode; 2675 } 2676 2677 /* 2678 * 2679 */ 2680 static void 2681 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value) 2682 { 2683 hldev->stats.sw_dev_err_stats.serr_cnt++; 2684 if (hldev->config.dump_on_serr) { 2685 #ifdef XGE_HAL_USE_MGMT_AUX 2686 (void) xge_hal_aux_device_dump(hldev); 2687 #endif 2688 } 2689 2690 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev, 2691 1, sizeof(u64), (void *)&value); 2692 2693 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2694 (unsigned long long) value); 2695 } 2696 2697 /* 2698 * 2699 */ 2700 static void 2701 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value) 2702 { 2703 if (hldev->config.dump_on_eccerr) { 2704 #ifdef XGE_HAL_USE_MGMT_AUX 2705 (void) xge_hal_aux_device_dump(hldev); 2706 #endif 2707 } 2708 2709 /* Herc smart enough to recover on its own! */ 2710 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 2711 (void) xge_queue_produce(hldev->queueh, 2712 XGE_HAL_EVENT_ECCERR, hldev, 2713 1, sizeof(u64), (void *)&value); 2714 } 2715 2716 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2717 (unsigned long long) value); 2718 } 2719 2720 /* 2721 * 2722 */ 2723 static void 2724 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value) 2725 { 2726 if (hldev->config.dump_on_parityerr) { 2727 #ifdef XGE_HAL_USE_MGMT_AUX 2728 (void) xge_hal_aux_device_dump(hldev); 2729 #endif 2730 } 2731 (void) xge_queue_produce_context(hldev->queueh, 2732 XGE_HAL_EVENT_PARITYERR, hldev); 2733 2734 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2735 (unsigned long long) value); 2736 } 2737 2738 /* 2739 * 2740 */ 2741 static void 2742 __hal_device_handle_targetabort(xge_hal_device_t *hldev) 2743 { 2744 (void) xge_queue_produce_context(hldev->queueh, 2745 XGE_HAL_EVENT_TARGETABORT, hldev); 2746 } 2747 2748 2749 /* 2750 * __hal_device_hw_initialize 2751 * @hldev: HAL device handle. 2752 * 2753 * Initialize Xframe hardware. 2754 */ 2755 static xge_hal_status_e 2756 __hal_device_hw_initialize(xge_hal_device_t *hldev) 2757 { 2758 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2759 xge_hal_status_e status; 2760 u64 val64; 2761 2762 /* Set proper endian settings and verify the same by reading the PIF 2763 * Feed-back register. 
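 * (The swapper programming done here is cross-checked again in
 * __hal_device_reset(), which compares pif_rd_swapper_fb against
 * XGE_HAL_IF_RD_SWAPPER_FB.)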
*/ 2764 status = __hal_device_set_swapper(hldev); 2765 if (status != XGE_HAL_OK) { 2766 return status; 2767 } 2768 2769 /* update the pci mode, frequency, and width */ 2770 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode, 2771 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){ 2772 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE; 2773 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2774 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2775 /* 2776 * FIXME: this cannot happen. 2777 * But if it happens we cannot continue just like that 2778 */ 2779 xge_debug_device(XGE_ERR, "unable to get pci info"); 2780 } 2781 2782 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) || 2783 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || 2784 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { 2785 /* PCI optimization: set TxReqTimeOut 2786 * register (0x800+0x120) to 0x1ff or 2787 * something close to this. 2788 * Note: not to be used for PCI-X! */ 2789 2790 val64 = XGE_HAL_TXREQTO_VAL(0x1FF); 2791 val64 |= XGE_HAL_TXREQTO_EN; 2792 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2793 &bar0->txreqtimeout); 2794 2795 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2796 &bar0->read_retry_delay); 2797 2798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2799 &bar0->write_retry_delay); 2800 2801 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); 2802 } 2803 2804 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ || 2805 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) { 2806 2807 /* Optimizing for PCI-X 266/250 */ 2808 2809 val64 = XGE_HAL_TXREQTO_VAL(0x7F); 2810 val64 |= XGE_HAL_TXREQTO_EN; 2811 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2812 &bar0->txreqtimeout); 2813 2814 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes"); 2815 } 2816 2817 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2818 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2819 &bar0->read_retry_delay); 2820 2821 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2822 &bar0->write_retry_delay); 2823 } 2824 2825 /* added this to set the no of bytes used to update lso_bytes_sent 2826 returned TxD0 */ 2827 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2828 &bar0->pic_control_2); 2829 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2); 2830 val64 |= XGE_HAL_TXD_WRITE_BC(0x4); 2831 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2832 &bar0->pic_control_2); 2833 /* added this to clear the EOI_RESET field while leaving XGXS_RESET 2834 * in reset, then a 1-second delay */ 2835 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2836 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); 2837 xge_os_mdelay(1000); 2838 2839 /* Clear the XGXS_RESET field of the SW_RESET register in order to 2840 * release the XGXS from reset. Its reset value is 0xA5; write 0x00 2841 * to activate the XGXS. 
The core requires a minimum 500 us reset.*/ 2842 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); 2843 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2844 &bar0->sw_reset); 2845 xge_os_mdelay(1); 2846 2847 /* read registers in all blocks */ 2848 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2849 &bar0->mac_int_mask); 2850 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2851 &bar0->mc_int_mask); 2852 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2853 &bar0->xgxs_int_mask); 2854 2855 /* set default MTU and steer based on length */ 2856 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Always set 22 bytes extra for steering to work 2857 2858 if (hldev->config.mac.rmac_bcast_en) { 2859 xge_hal_device_bcast_enable(hldev); 2860 } else { 2861 xge_hal_device_bcast_disable(hldev); 2862 } 2863 2864 #ifndef XGE_HAL_HERC_EMULATION 2865 __hal_device_xaui_configure(hldev); 2866 #endif 2867 __hal_device_mac_link_util_set(hldev); 2868 2869 __hal_device_mac_link_util_set(hldev); 2870 2871 /* 2872 * Keep its PCI REQ# line asserted during a write 2873 * transaction up to the end of the transaction 2874 */ 2875 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2876 &bar0->misc_control); 2877 2878 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN; 2879 2880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2881 val64, &bar0->misc_control); 2882 2883 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2884 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2885 &bar0->misc_control); 2886 2887 val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT; 2888 2889 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2890 val64, &bar0->misc_control); 2891 } 2892 2893 /* 2894 * In bimodal interrupts mode all Rx traffic interrupts 2895 * go to TTI, so we need to adjust the RTI settings and 2896 * use an adaptive TTI timer. We need to make sure RTI is 2897 * properly configured to a sane value which will not 2898 * disrupt the bimodal behavior. 2899 */ 2900 if (hldev->config.bimodal_interrupts) { 2901 int i; 2902 2903 /* force polling_cnt to be "0", otherwise 2904 * IRQ workload statistics will be screwed. This could 2905 * be worked out in TXPIC handler later.
*/ 2906 hldev->config.isr_polling_cnt = 0; 2907 hldev->config.sched_timer_us = 10000; 2908 2909 /* disable all TTI < 56 */ 2910 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { 2911 int j; 2912 if (!hldev->config.fifo.queue[i].configured) 2913 continue; 2914 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { 2915 if (hldev->config.fifo.queue[i].tti[j].enabled) 2916 hldev->config.fifo.queue[i].tti[j].enabled = 0; 2917 } 2918 } 2919 2920 /* now configure bimodal interrupts */ 2921 __hal_device_bimodal_configure(hldev); 2922 } 2923 2924 status = __hal_device_tti_configure(hldev, 0); 2925 if (status != XGE_HAL_OK) 2926 return status; 2927 2928 status = __hal_device_rti_configure(hldev, 0); 2929 if (status != XGE_HAL_OK) 2930 return status; 2931 2932 status = __hal_device_rth_it_configure(hldev); 2933 if (status != XGE_HAL_OK) 2934 return status; 2935 2936 status = __hal_device_rth_spdm_configure(hldev); 2937 if (status != XGE_HAL_OK) 2938 return status; 2939 2940 status = __hal_device_rts_mac_configure(hldev); 2941 if (status != XGE_HAL_OK) { 2942 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed "); 2943 return status; 2944 } 2945 2946 status = __hal_device_rts_port_configure(hldev); 2947 if (status != XGE_HAL_OK) { 2948 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed "); 2949 return status; 2950 } 2951 2952 status = __hal_device_rts_qos_configure(hldev); 2953 if (status != XGE_HAL_OK) { 2954 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed "); 2955 return status; 2956 } 2957 2958 __hal_device_pause_frames_configure(hldev); 2959 __hal_device_rmac_padding_configure(hldev); 2960 __hal_device_shared_splits_configure(hldev); 2961 2962 /* make sure all interrupts going to be disabled at the moment */ 2963 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 2964 2965 /* SXE-008 Transmit DMA arbitration issue */ 2966 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 2967 hldev->revision < 4) { 2968 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, 2969 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, 2970 &bar0->pcc_enable); 2971 } 2972 __hal_fifo_hw_initialize(hldev); 2973 __hal_ring_hw_initialize(hldev); 2974 2975 if (__hal_device_wait_quiescent(hldev, &val64)) { 2976 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2977 } 2978 2979 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 2980 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 2981 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 2982 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 2983 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2984 } 2985 2986 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent", 2987 (unsigned long long)(ulong_t)hldev); 2988 2989 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX || 2990 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) { 2991 /* 2992 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL 2993 * is disabled. 2994 */ 2995 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2996 &bar0->pic_control); 2997 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT); 2998 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2999 &bar0->pic_control); 3000 } 3001 3002 hldev->hw_is_initialized = 1; 3003 hldev->terminating = 0; 3004 return XGE_HAL_OK; 3005 } 3006 3007 /* 3008 * __hal_device_reset - Reset device only. 3009 * @hldev: HAL device handle. 3010 * 3011 * Reset the device, and subsequently restore 3012 * the previously saved PCI configuration space. 
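 * The sequence below is: save the MSI-X vector table (Herc only),
 * write SW_RESET_ALL in the byte order the adapter expects, rewrite
 * the saved PCI config space until the device id reads back correctly,
 * restore the MSI-X vector table, and finally verify that sw_reset
 * reads back the raw value expected for the card.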
3013 */ 3014 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50 3015 static xge_hal_status_e 3016 __hal_device_reset(xge_hal_device_t *hldev) 3017 { 3018 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3019 int i, j, swap_done, pcisize = 0; 3020 u64 val64, rawval = 0ULL; 3021 3022 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 3023 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3024 if ( hldev->bar2 ) { 3025 u64 *msix_vetor_table = (u64 *)hldev->bar2; 3026 3027 // 2 64bit words for each entry 3028 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; 3029 i++) { 3030 hldev->msix_vector_table[i] = 3031 xge_os_pio_mem_read64(hldev->pdev, 3032 hldev->regh2, &msix_vetor_table[i]); 3033 } 3034 } 3035 } 3036 } 3037 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3038 &bar0->pif_rd_swapper_fb); 3039 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB); 3040 3041 if (swap_done) { 3042 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 3043 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset); 3044 } else { 3045 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32); 3046 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN) 3047 /* swap it */ 3048 val = (((val & (u32)0x000000ffUL) << 24) | 3049 ((val & (u32)0x0000ff00UL) << 8) | 3050 ((val & (u32)0x00ff0000UL) >> 8) | 3051 ((val & (u32)0xff000000UL) >> 24)); 3052 #endif 3053 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val, 3054 &bar0->sw_reset); 3055 } 3056 3057 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? 3058 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; 3059 3060 xge_os_mdelay(20); /* Wait for 20 ms after reset */ 3061 3062 { 3063 /* Poll for no more than 1 second */ 3064 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++) 3065 { 3066 for (j = 0; j < pcisize; j++) { 3067 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, 3068 *((u32*)&hldev->pci_config_space + j)); 3069 } 3070 3071 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 3072 xge_offsetof(xge_hal_pci_config_le_t, device_id), 3073 &hldev->device_id); 3074 3075 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN) 3076 break; 3077 xge_os_mdelay(20); 3078 } 3079 } 3080 3081 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN) 3082 { 3083 xge_debug_device(XGE_ERR, "device reset failed"); 3084 return XGE_HAL_ERR_RESET_FAILED; 3085 } 3086 3087 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3088 int cnt = 0; 3089 3090 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC; 3091 pcisize = XGE_HAL_PCISIZE_HERC; 3092 xge_os_mdelay(1); 3093 do { 3094 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3095 &bar0->sw_reset); 3096 if (val64 != rawval) { 3097 break; 3098 } 3099 cnt++; 3100 xge_os_mdelay(1); /* Wait for 1ms before retry */ 3101 } while(cnt < 20); 3102 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 3103 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA; 3104 pcisize = XGE_HAL_PCISIZE_XENA; 3105 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS); 3106 } 3107 3108 /* Restore MSI-X vector table */ 3109 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 3110 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3111 if ( hldev->bar2 ) { 3112 /* 3113 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 ) 3114 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 ) 3115 */ 3116 u64 *msix_vetor_table = (u64 *)hldev->bar2; 3117 3118 /* 2 64bit words for each entry */ 3119 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; 3120 i++) { 3121 xge_os_pio_mem_write64(hldev->pdev, 3122 hldev->regh2, 3123 
hldev->msix_vector_table[i], 3124 &msix_vetor_table[i]); 3125 } 3126 } 3127 } 3128 } 3129 3130 hldev->link_state = XGE_HAL_LINK_DOWN; 3131 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3132 &bar0->sw_reset); 3133 3134 if (val64 != rawval) { 3135 xge_debug_device(XGE_ERR, "device has not been reset " 3136 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT, 3137 (unsigned long long)val64, (unsigned long long)rawval); 3138 return XGE_HAL_ERR_RESET_FAILED; 3139 } 3140 3141 hldev->hw_is_initialized = 0; 3142 return XGE_HAL_OK; 3143 } 3144 3145 /* 3146 * __hal_device_poll - General private routine to poll the device. 3147 * @hldev: HAL device handle. 3148 * 3149 * Returns: one of the xge_hal_status_e{} enumerated types. 3150 * XGE_HAL_OK - for success. 3151 * XGE_HAL_ERR_CRITICAL - when encounters critical error. 3152 */ 3153 static xge_hal_status_e 3154 __hal_device_poll(xge_hal_device_t *hldev) 3155 { 3156 xge_hal_pci_bar0_t *bar0; 3157 u64 err_reg; 3158 3159 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3160 3161 /* Handling SERR errors by forcing a H/W reset. */ 3162 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3163 &bar0->serr_source); 3164 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) { 3165 __hal_device_handle_serr(hldev, "serr_source", err_reg); 3166 return XGE_HAL_ERR_CRITICAL; 3167 } 3168 3169 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3170 &bar0->misc_int_reg); 3171 3172 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) { 3173 hldev->stats.sw_dev_err_stats.parity_err_cnt++; 3174 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); 3175 return XGE_HAL_ERR_CRITICAL; 3176 } 3177 3178 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 3179 if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) || 3180 (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)) 3181 #endif 3182 { 3183 3184 /* Handling link status change error Intr */ 3185 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3186 &bar0->mac_rmac_err_reg); 3187 if (__hal_device_handle_link_state_change(hldev)) 3188 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3189 err_reg, &bar0->mac_rmac_err_reg); 3190 } 3191 3192 if (hldev->inject_serr != 0) { 3193 err_reg = hldev->inject_serr; 3194 hldev->inject_serr = 0; 3195 __hal_device_handle_serr(hldev, "inject_serr", err_reg); 3196 return XGE_HAL_ERR_CRITICAL; 3197 } 3198 3199 if (hldev->inject_ecc != 0) { 3200 err_reg = hldev->inject_ecc; 3201 hldev->inject_ecc = 0; 3202 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 3203 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); 3204 return XGE_HAL_ERR_CRITICAL; 3205 } 3206 3207 if (hldev->inject_bad_tcode != 0) { 3208 u8 t_code = hldev->inject_bad_tcode; 3209 xge_hal_channel_t channel; 3210 xge_hal_fifo_txd_t txd; 3211 xge_hal_ring_rxd_1_t rxd; 3212 3213 channel.devh = hldev; 3214 3215 if (hldev->inject_bad_tcode_for_chan_type == 3216 XGE_HAL_CHANNEL_TYPE_FIFO) { 3217 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; 3218 3219 } else { 3220 channel.type = XGE_HAL_CHANNEL_TYPE_RING; 3221 } 3222 3223 hldev->inject_bad_tcode = 0; 3224 3225 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) 3226 return xge_hal_device_handle_tcode(&channel, &txd, 3227 t_code); 3228 else 3229 return xge_hal_device_handle_tcode(&channel, &rxd, 3230 t_code); 3231 } 3232 3233 return XGE_HAL_OK; 3234 } 3235 3236 /* 3237 * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not 3238 * @hldev: HAL device handle. 3239 * @adp_status: Adapter Status value 3240 * Usage: See xge_hal_device_enable{}. 
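 * For Xena revisions below 4 only four PCCs are enabled (SXE-008,
 * Transmit DMA arbitration issue), so only the RMAC_PCC_4_IDLE subset
 * of the adapter status is checked; all other cards are checked
 * against RMAC_PCC_IDLE.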
3241 */ 3242 xge_hal_status_e 3243 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status) 3244 { 3245 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 3246 hldev->revision < 4) { 3247 /* 3248 * For Xena 1,2,3 we enable only 4 PCCs Due to 3249 * SXE-008 (Transmit DMA arbitration issue) 3250 */ 3251 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) 3252 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { 3253 xge_debug_device(XGE_TRACE, "%s", 3254 "PCC is not IDLE after adapter enabled!"); 3255 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3256 } 3257 } else { 3258 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != 3259 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { 3260 xge_debug_device(XGE_TRACE, "%s", 3261 "PCC is not IDLE after adapter enabled!"); 3262 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3263 } 3264 } 3265 return XGE_HAL_OK; 3266 } 3267 3268 static void 3269 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) 3270 { 3271 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist; 3272 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg; 3273 int iwl_cnt, i; 3274 3275 #define _HIST_SIZE 50 /* 0.5 sec history */ 3276 #define _HIST_ADJ_TIMER 1 3277 #define _STEP 2 3278 3279 static int bytes_avg_history[_HIST_SIZE] = {0}; 3280 static int d_avg_history[_HIST_SIZE] = {0}; 3281 static int history_idx = 0; 3282 static int pstep = 1; 3283 static int hist_adj_timer = 0; 3284 3285 /* 3286 * tval - current value of this bimodal timer 3287 */ 3288 tval = hldev->bimodal_tti[ring_no].timer_val_us; 3289 3290 /* 3291 * d - how many interrupts we were getting since last 3292 * bimodal timer tick. 3293 */ 3294 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt - 3295 hldev->bimodal_intr_cnt; 3296 3297 /* advance bimodal interrupt counter */ 3298 hldev->bimodal_intr_cnt = 3299 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; 3300 3301 /* 3302 * iwl_cnt - how many interrupts we've got since last 3303 * bimodal timer tick. 3304 */ 3305 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ? 3306 hldev->irq_workload_rxcnt[ring_no] : 1); 3307 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ? 3308 hldev->irq_workload_txcnt[ring_no] : 1); 3309 iwl_cnt = iwl_rxcnt + iwl_txcnt; 3310 iwl_cnt = iwl_cnt; /* just to remove the lint warning */ 3311 3312 /* 3313 * we need to take hldev->config.isr_polling_cnt into account 3314 * but for some reason this line causing GCC to produce wrong 3315 * code on Solaris. As of now, if bimodal_interrupts is configured 3316 * hldev->config.isr_polling_cnt is forced to be "0". 3317 * 3318 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */ 3319 3320 /* 3321 * iwl_avg - how many RXDs on avarage been processed since 3322 * last bimodal timer tick. This indirectly includes 3323 * CPU utilizations. 3324 */ 3325 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt; 3326 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt; 3327 iwl_avg = iwl_rxavg + iwl_txavg; 3328 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg; 3329 3330 /* 3331 * len_avg - how many bytes on avarage been processed since 3332 * last bimodal timer tick. i.e. avarage frame size. 3333 */ 3334 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] / 3335 (hldev->irq_workload_rxd[ring_no] ? 3336 hldev->irq_workload_rxd[ring_no] : 1); 3337 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] / 3338 (hldev->irq_workload_txd[ring_no] ? 
3339 hldev->irq_workload_txd[ring_no] : 1); 3340 len_avg = len_rxavg + len_txavg; 3341 if (len_avg < 60) 3342 len_avg = 60; 3343 3344 /* align on low boundary */ 3345 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us) 3346 tval = hldev->config.bimodal_timer_lo_us; 3347 3348 /* reset faster */ 3349 if (iwl_avg == 1) { 3350 tval = hldev->config.bimodal_timer_lo_us; 3351 /* reset history */ 3352 for (i = 0; i < _HIST_SIZE; i++) 3353 bytes_avg_history[i] = d_avg_history[i] = 0; 3354 history_idx = 0; 3355 pstep = 1; 3356 hist_adj_timer = 0; 3357 } 3358 3359 /* always try to ajust timer to the best throughput value */ 3360 bytes_avg = iwl_avg * len_avg; 3361 history_idx %= _HIST_SIZE; 3362 bytes_avg_history[history_idx] = bytes_avg; 3363 d_avg_history[history_idx] = d; 3364 history_idx++; 3365 d_hist = bytes_hist = 0; 3366 for (i = 0; i < _HIST_SIZE; i++) { 3367 /* do not re-configure until history is gathered */ 3368 if (!bytes_avg_history[i]) { 3369 tval = hldev->config.bimodal_timer_lo_us; 3370 goto _end; 3371 } 3372 bytes_hist += bytes_avg_history[i]; 3373 d_hist += d_avg_history[i]; 3374 } 3375 bytes_hist /= _HIST_SIZE; 3376 d_hist /= _HIST_SIZE; 3377 3378 // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d", 3379 // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg, 3380 // d_hist*bytes_hist, pstep); 3381 3382 /* make an adaptive step */ 3383 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) { 3384 pstep = !pstep; 3385 hist_adj_timer = 0; 3386 } 3387 3388 if (pstep && 3389 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) { 3390 tval += _STEP; 3391 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++; 3392 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) { 3393 tval -= _STEP; 3394 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++; 3395 } 3396 3397 /* enable TTI range A for better latencies */ 3398 hldev->bimodal_urange_a_en = 0; 3399 if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2) 3400 hldev->bimodal_urange_a_en = 1; 3401 3402 _end: 3403 /* reset workload statistics counters */ 3404 hldev->irq_workload_rxcnt[ring_no] = 0; 3405 hldev->irq_workload_rxd[ring_no] = 0; 3406 hldev->irq_workload_rxlen[ring_no] = 0; 3407 hldev->irq_workload_txcnt[ring_no] = 0; 3408 hldev->irq_workload_txd[ring_no] = 0; 3409 hldev->irq_workload_txlen[ring_no] = 0; 3410 3411 /* reconfigure TTI56 + ring_no with new timer value */ 3412 hldev->bimodal_timer_val_us = tval; 3413 (void) __hal_device_rti_configure(hldev, 1); 3414 } 3415 3416 static void 3417 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no) 3418 { 3419 int ufc, ic, i; 3420 3421 ufc = hldev->config.ring.queue[ring_no].rti.ufc_a; 3422 ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt; 3423 3424 /* urange_a adaptive coalescing */ 3425 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) { 3426 if (ic > hldev->rxufca_intr_thres) { 3427 if (ufc < hldev->config.rxufca_hi_lim) { 3428 ufc += 1; 3429 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 3430 hldev->config.ring.queue[i].rti.ufc_a = ufc; 3431 (void) __hal_device_rti_configure(hldev, 1); 3432 hldev->stats.sw_dev_info_stats.rxufca_hi_adjust_cnt++; 3433 } 3434 hldev->rxufca_intr_thres = ic + 3435 hldev->config.rxufca_intr_thres; /* def: 30 */ 3436 } else { 3437 if (ufc > hldev->config.rxufca_lo_lim) { 3438 ufc -= 1; 3439 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 3440 hldev->config.ring.queue[i].rti.ufc_a = ufc; 3441 (void) __hal_device_rti_configure(hldev, 1); 3442 
hldev->stats.sw_dev_info_stats.rxufca_lo_adjust_cnt++; 3443 } 3444 } 3445 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + 3446 hldev->config.rxufca_lbolt_period; 3447 } 3448 hldev->rxufca_lbolt++; 3449 } 3450 3451 /* 3452 * __hal_device_handle_mc - Handle MC interrupt reason 3453 * @hldev: HAL device handle. 3454 * @reason: interrupt reason 3455 */ 3456 xge_hal_status_e 3457 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason) 3458 { 3459 xge_hal_pci_bar0_t *isrbar0 = 3460 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3461 u64 val64; 3462 3463 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3464 &isrbar0->mc_int_status); 3465 if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT)) 3466 return XGE_HAL_OK; 3467 3468 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3469 &isrbar0->mc_err_reg); 3470 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3471 val64, &isrbar0->mc_err_reg); 3472 3473 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L || 3474 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U || 3475 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 || 3476 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 || 3477 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA && 3478 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L || 3479 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U || 3480 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L || 3481 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) { 3482 hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++; 3483 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 3484 } 3485 3486 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L || 3487 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U || 3488 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 || 3489 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 || 3490 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA && 3491 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L || 3492 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U || 3493 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L || 3494 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) { 3495 hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++; 3496 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 3497 } 3498 3499 if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) { 3500 hldev->stats.sw_dev_err_stats.sm_err_cnt++; 3501 } 3502 3503 /* those two should result in device reset */ 3504 if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 || 3505 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) { 3506 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64); 3507 return XGE_HAL_ERR_CRITICAL; 3508 } 3509 3510 return XGE_HAL_OK; 3511 } 3512 3513 /* 3514 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason 3515 * @hldev: HAL device handle. 
3516 * @reason: interrupt reason 3517 */ 3518 xge_hal_status_e 3519 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason) 3520 { 3521 xge_hal_pci_bar0_t *isrbar0 = 3522 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3523 u64 val64; 3524 3525 if (reason & XGE_HAL_PIC_INT_FLSH) { 3526 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3527 &isrbar0->flsh_int_reg); 3528 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3529 val64, &isrbar0->flsh_int_reg); 3530 /* FIXME: handle register */ 3531 } 3532 if (reason & XGE_HAL_PIC_INT_MDIO) { 3533 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3534 &isrbar0->mdio_int_reg); 3535 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3536 val64, &isrbar0->mdio_int_reg); 3537 /* FIXME: handle register */ 3538 } 3539 if (reason & XGE_HAL_PIC_INT_IIC) { 3540 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3541 &isrbar0->iic_int_reg); 3542 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3543 val64, &isrbar0->iic_int_reg); 3544 /* FIXME: handle register */ 3545 } 3546 if (reason & XGE_HAL_PIC_INT_MISC) { 3547 val64 = xge_os_pio_mem_read64(hldev->pdev, 3548 hldev->regh0, &isrbar0->misc_int_reg); 3549 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 3550 if ((xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) && 3551 (hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)) { 3552 /* Check for Link interrupts. If both Link Up/Down 3553 * bits are set, clear both and check adapter status 3554 */ 3555 if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) && 3556 (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) { 3557 u64 temp64; 3558 3559 xge_debug_device(XGE_TRACE, 3560 "both link up and link down detected "XGE_OS_LLXFMT, 3561 (unsigned long long)val64); 3562 3563 temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT | 3564 XGE_HAL_MISC_INT_REG_LINK_UP_INT); 3565 xge_os_pio_mem_write64(hldev->pdev, 3566 hldev->regh0, temp64, 3567 &isrbar0->misc_int_reg); 3568 } 3569 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) { 3570 xge_debug_device(XGE_TRACE, 3571 "link up call request, misc_int "XGE_OS_LLXFMT, 3572 (unsigned long long)val64); 3573 __hal_device_handle_link_up_ind(hldev); 3574 } 3575 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){ 3576 xge_debug_device(XGE_TRACE, 3577 "link down request, misc_int "XGE_OS_LLXFMT, 3578 (unsigned long long)val64); 3579 __hal_device_handle_link_down_ind(hldev); 3580 } 3581 } else 3582 #endif 3583 { 3584 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3585 val64, &isrbar0->misc_int_reg); 3586 } 3587 } 3588 3589 return XGE_HAL_OK; 3590 } 3591 3592 /* 3593 * __hal_device_handle_txpic - Handle TxPIC interrupt reason 3594 * @hldev: HAL device handle. 
3595 * @reason: interrupt reason
3596 */
3597 xge_hal_status_e
3598 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3599 {
3600 xge_hal_status_e status = XGE_HAL_OK;
3601 xge_hal_pci_bar0_t *isrbar0 =
3602 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3603 volatile u64 val64;
3604
3605 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3606 &isrbar0->pic_int_status);
3607 if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3608 XGE_HAL_PIC_INT_MDIO |
3609 XGE_HAL_PIC_INT_IIC |
3610 XGE_HAL_PIC_INT_MISC) ) {
3611 status = __hal_device_handle_pic(hldev, val64);
3612 xge_os_wmb();
3613 }
3614
3615 if (!(val64 & XGE_HAL_PIC_INT_TX))
3616 return status;
3617
3618 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3619 &isrbar0->txpic_int_reg);
3620 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3621 val64, &isrbar0->txpic_int_reg);
3622 xge_os_wmb();
3623
3624 if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
3625 int i;
3626
3627 if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3628 g_xge_hal_driver->uld_callbacks.sched_timer(
3629 hldev, hldev->upper_layer_info);
3630 /*
3631 * This feature implements adaptive receive interrupt
3632 * coalescing. It is disabled by default. To enable it,
3633 * set hldev->config.rxufca_lo_lim to be not equal to
3634 * hldev->config.rxufca_hi_lim.
3635 *
3636 * We are using a HW timer for this feature, so the
3637 * user needs to configure hldev->config.rxufca_lbolt_period,
3638 * which is essentially a time slice of the timer.
3639 *
3640 * For those familiar with Linux, lbolt is the analog of
3641 * jiffies for this timer, i.e. the timer tick.
3642 */
3643 if (hldev->config.rxufca_lo_lim !=
3644 hldev->config.rxufca_hi_lim &&
3645 hldev->config.rxufca_lo_lim != 0) {
3646 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3647 if (!hldev->config.ring.queue[i].configured)
3648 continue;
3649 if (hldev->config.ring.queue[i].rti.urange_a)
3650 __hal_update_rxufca(hldev, i);
3651 }
3652 }
3653
3654 /*
3655 * This feature implements adaptive TTI timer re-calculation
3656 * based on host utilization, number of interrupts processed,
3657 * number of RXDs per tick and average length of packets per
3658 * tick.
3659 */
3660 if (hldev->config.bimodal_interrupts) {
3661 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3662 if (!hldev->config.ring.queue[i].configured)
3663 continue;
3664 if (hldev->bimodal_tti[i].enabled)
3665 __hal_update_bimodal(hldev, i);
3666 }
3667 }
3668 }
3669
3670 return XGE_HAL_OK;
3671 }
3672
3673 /*
3674 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3675 * @hldev: HAL device handle.
3676 * @reason: interrupt reason 3677 */ 3678 xge_hal_status_e 3679 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason) 3680 { 3681 xge_hal_pci_bar0_t *isrbar0 = 3682 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3683 u64 val64, temp64, err; 3684 3685 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3686 &isrbar0->txdma_int_status); 3687 if (val64 & XGE_HAL_TXDMA_PFC_INT) { 3688 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3689 &isrbar0->pfc_err_reg); 3690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3691 err, &isrbar0->pfc_err_reg); 3692 hldev->stats.sw_dev_info_stats.pfc_err_cnt++; 3693 temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM 3694 |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR 3695 |XGE_HAL_PFC_PCIX_ERR; 3696 if (val64 & temp64) 3697 goto reset; 3698 } 3699 if (val64 & XGE_HAL_TXDMA_TDA_INT) { 3700 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3701 &isrbar0->tda_err_reg); 3702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3703 err, &isrbar0->tda_err_reg); 3704 hldev->stats.sw_dev_info_stats.tda_err_cnt++; 3705 temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM 3706 |XGE_HAL_TDA_SM1_ERR_ALARM; 3707 if (val64 & temp64) 3708 goto reset; 3709 } 3710 if (val64 & XGE_HAL_TXDMA_PCC_INT) { 3711 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3712 &isrbar0->pcc_err_reg); 3713 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3714 err, &isrbar0->pcc_err_reg); 3715 hldev->stats.sw_dev_info_stats.pcc_err_cnt++; 3716 temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR 3717 |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM 3718 |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR 3719 |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR 3720 |XGE_HAL_PCC_7_LSO_OV_ERR; 3721 if (val64 & temp64) 3722 goto reset; 3723 } 3724 if (val64 & XGE_HAL_TXDMA_TTI_INT) { 3725 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3726 &isrbar0->tti_err_reg); 3727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3728 err, &isrbar0->tti_err_reg); 3729 hldev->stats.sw_dev_info_stats.tti_err_cnt++; 3730 temp64 = XGE_HAL_TTI_SM_ERR_ALARM; 3731 if (val64 & temp64) 3732 goto reset; 3733 } 3734 if (val64 & XGE_HAL_TXDMA_LSO_INT) { 3735 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3736 &isrbar0->lso_err_reg); 3737 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3738 err, &isrbar0->lso_err_reg); 3739 hldev->stats.sw_dev_info_stats.lso_err_cnt++; 3740 temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT 3741 |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM; 3742 if (val64 & temp64) 3743 goto reset; 3744 } 3745 if (val64 & XGE_HAL_TXDMA_TPA_INT) { 3746 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3747 &isrbar0->tpa_err_reg); 3748 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3749 err, &isrbar0->tpa_err_reg); 3750 hldev->stats.sw_dev_info_stats.tpa_err_cnt++; 3751 temp64 = XGE_HAL_TPA_SM_ERR_ALARM; 3752 if (val64 & temp64) 3753 goto reset; 3754 } 3755 if (val64 & XGE_HAL_TXDMA_SM_INT) { 3756 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3757 &isrbar0->sm_err_reg); 3758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3759 err, &isrbar0->sm_err_reg); 3760 hldev->stats.sw_dev_info_stats.sm_err_cnt++; 3761 temp64 = XGE_HAL_SM_SM_ERR_ALARM; 3762 if (val64 & temp64) 3763 goto reset; 3764 } 3765 3766 return XGE_HAL_OK; 3767 3768 reset : (void) xge_hal_device_reset(hldev); 3769 (void) xge_hal_device_enable(hldev); 3770 xge_hal_device_intr_enable(hldev); 3771 return XGE_HAL_OK; 3772 } 3773 3774 /* 3775 * 
__hal_device_handle_txmac - Handle TxMAC interrupt reason 3776 * @hldev: HAL device handle. 3777 * @reason: interrupt reason 3778 */ 3779 xge_hal_status_e 3780 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason) 3781 { 3782 xge_hal_pci_bar0_t *isrbar0 = 3783 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3784 u64 val64, temp64; 3785 3786 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3787 &isrbar0->mac_int_status); 3788 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT)) 3789 return XGE_HAL_OK; 3790 3791 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3792 &isrbar0->mac_tmac_err_reg); 3793 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3794 val64, &isrbar0->mac_tmac_err_reg); 3795 hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++; 3796 temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR; 3797 if (val64 & temp64) { 3798 (void) xge_hal_device_reset(hldev); 3799 (void) xge_hal_device_enable(hldev); 3800 xge_hal_device_intr_enable(hldev); 3801 } 3802 3803 return XGE_HAL_OK; 3804 } 3805 3806 /* 3807 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason 3808 * @hldev: HAL device handle. 3809 * @reason: interrupt reason 3810 */ 3811 xge_hal_status_e 3812 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason) 3813 { 3814 xge_hal_pci_bar0_t *isrbar0 = 3815 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3816 u64 val64, temp64; 3817 3818 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3819 &isrbar0->xgxs_int_status); 3820 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS)) 3821 return XGE_HAL_OK; 3822 3823 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3824 &isrbar0->xgxs_txgxs_err_reg); 3825 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3826 val64, &isrbar0->xgxs_txgxs_err_reg); 3827 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++; 3828 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR; 3829 if (val64 & temp64) { 3830 (void) xge_hal_device_reset(hldev); 3831 (void) xge_hal_device_enable(hldev); 3832 xge_hal_device_intr_enable(hldev); 3833 } 3834 3835 return XGE_HAL_OK; 3836 } 3837 3838 /* 3839 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason 3840 * @hldev: HAL device handle. 3841 * @reason: interrupt reason 3842 */ 3843 xge_hal_status_e 3844 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason) 3845 { 3846 /* FIXME: handle register */ 3847 3848 return XGE_HAL_OK; 3849 } 3850 3851 /* 3852 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason 3853 * @hldev: HAL device handle. 
3854 * @reason: interrupt reason 3855 */ 3856 xge_hal_status_e 3857 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason) 3858 { 3859 xge_hal_pci_bar0_t *isrbar0 = 3860 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3861 u64 val64, err, temp64; 3862 3863 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3864 &isrbar0->rxdma_int_status); 3865 if (val64 & XGE_HAL_RXDMA_RC_INT) { 3866 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3867 &isrbar0->rc_err_reg); 3868 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3869 err, &isrbar0->rc_err_reg); 3870 hldev->stats.sw_dev_info_stats.rc_err_cnt++; 3871 temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR 3872 |XGE_HAL_RC_PRCn_SM_ERR_ALARM 3873 |XGE_HAL_RC_FTC_SM_ERR_ALARM; 3874 if (val64 & temp64) 3875 goto reset; 3876 } 3877 if (val64 & XGE_HAL_RXDMA_RPA_INT) { 3878 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3879 &isrbar0->rpa_err_reg); 3880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3881 err, &isrbar0->rpa_err_reg); 3882 hldev->stats.sw_dev_info_stats.rpa_err_cnt++; 3883 temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR; 3884 if (val64 & temp64) 3885 goto reset; 3886 } 3887 if (val64 & XGE_HAL_RXDMA_RDA_INT) { 3888 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3889 &isrbar0->rda_err_reg); 3890 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3891 err, &isrbar0->rda_err_reg); 3892 hldev->stats.sw_dev_info_stats.rda_err_cnt++; 3893 temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR 3894 |XGE_HAL_RDA_FRM_ECC_DB_N_AERR 3895 |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM 3896 |XGE_HAL_RDA_RXD_ECC_DB_SERR; 3897 if (val64 & temp64) 3898 goto reset; 3899 } 3900 if (val64 & XGE_HAL_RXDMA_RTI_INT) { 3901 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3902 &isrbar0->rti_err_reg); 3903 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3904 err, &isrbar0->rti_err_reg); 3905 hldev->stats.sw_dev_info_stats.rti_err_cnt++; 3906 temp64 = XGE_HAL_RTI_SM_ERR_ALARM; 3907 if (val64 & temp64) 3908 goto reset; 3909 } 3910 3911 return XGE_HAL_OK; 3912 3913 reset : (void) xge_hal_device_reset(hldev); 3914 (void) xge_hal_device_enable(hldev); 3915 xge_hal_device_intr_enable(hldev); 3916 return XGE_HAL_OK; 3917 } 3918 3919 /* 3920 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason 3921 * @hldev: HAL device handle. 3922 * @reason: interrupt reason 3923 */ 3924 xge_hal_status_e 3925 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason) 3926 { 3927 xge_hal_pci_bar0_t *isrbar0 = 3928 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3929 u64 val64, temp64; 3930 3931 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3932 &isrbar0->mac_int_status); 3933 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT)) 3934 return XGE_HAL_OK; 3935 3936 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3937 &isrbar0->mac_rmac_err_reg); 3938 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3939 val64, &isrbar0->mac_rmac_err_reg); 3940 hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++; 3941 temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR; 3942 if (val64 & temp64) { 3943 (void) xge_hal_device_reset(hldev); 3944 (void) xge_hal_device_enable(hldev); 3945 xge_hal_device_intr_enable(hldev); 3946 } 3947 3948 return XGE_HAL_OK; 3949 } 3950 3951 /* 3952 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason 3953 * @hldev: HAL device handle. 
3954 * @reason: interrupt reason 3955 */ 3956 xge_hal_status_e 3957 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason) 3958 { 3959 xge_hal_pci_bar0_t *isrbar0 = 3960 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3961 u64 val64, temp64; 3962 3963 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3964 &isrbar0->xgxs_int_status); 3965 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS)) 3966 return XGE_HAL_OK; 3967 3968 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3969 &isrbar0->xgxs_rxgxs_err_reg); 3970 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3971 val64, &isrbar0->xgxs_rxgxs_err_reg); 3972 hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++; 3973 temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR; 3974 if (val64 & temp64) { 3975 (void) xge_hal_device_reset(hldev); 3976 (void) xge_hal_device_enable(hldev); 3977 xge_hal_device_intr_enable(hldev); 3978 } 3979 3980 return XGE_HAL_OK; 3981 } 3982 3983 /** 3984 * xge_hal_device_enable - Enable device. 3985 * @hldev: HAL device handle. 3986 * 3987 * Enable the specified device: bring up the link/interface. 3988 * Returns: XGE_HAL_OK - success. 3989 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device 3990 * to a "quiescent" state. 3991 * 3992 * See also: xge_hal_status_e{}. 3993 * 3994 * Usage: See ex_open{}. 3995 */ 3996 xge_hal_status_e 3997 xge_hal_device_enable(xge_hal_device_t *hldev) 3998 { 3999 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4000 u64 val64; 4001 u64 adp_status; 4002 int i, j; 4003 4004 if (!hldev->hw_is_initialized) { 4005 xge_hal_status_e status; 4006 4007 status = __hal_device_hw_initialize(hldev); 4008 if (status != XGE_HAL_OK) { 4009 return status; 4010 } 4011 } 4012 4013 /* 4014 * Not needed in most cases, i.e. 4015 * when device_disable() is followed by reset - 4016 * the latter copies back PCI config space, along with 4017 * the bus mastership - see __hal_device_reset(). 4018 * However, there are/may-in-future be other cases, and 4019 * does not hurt. 4020 */ 4021 __hal_device_bus_master_enable(hldev); 4022 4023 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 4024 /* 4025 * Configure the link stability period. 4026 */ 4027 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4028 &bar0->misc_control); 4029 if (hldev->config.link_stability_period != 4030 XGE_HAL_DEFAULT_USE_HARDCODE) { 4031 4032 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( 4033 hldev->config.link_stability_period); 4034 } else { 4035 /* 4036 * Use the link stability period 1 ms as default 4037 */ 4038 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( 4039 XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD); 4040 } 4041 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4042 val64, &bar0->misc_control); 4043 4044 /* 4045 * Clearing any possible Link up/down interrupts that 4046 * could have popped up just before Enabling the card. 4047 */ 4048 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4049 &bar0->misc_int_reg); 4050 if (val64) { 4051 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4052 val64, &bar0->misc_int_reg); 4053 xge_debug_device(XGE_TRACE, "%s","link state cleared"); 4054 } 4055 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 4056 /* 4057 * Clearing any possible Link state change interrupts that 4058 * could have popped up just before Enabling the card. 
4059 */ 4060 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4061 &bar0->mac_rmac_err_reg); 4062 if (val64) { 4063 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4064 val64, &bar0->mac_rmac_err_reg); 4065 xge_debug_device(XGE_TRACE, "%s", "link state cleared"); 4066 } 4067 } 4068 4069 if (__hal_device_wait_quiescent(hldev, &val64)) { 4070 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 4071 } 4072 4073 /* Enabling Laser. */ 4074 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4075 &bar0->adapter_control); 4076 val64 |= XGE_HAL_ADAPTER_EOI_TX_ON; 4077 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4078 &bar0->adapter_control); 4079 4080 /* let link establish */ 4081 xge_os_mdelay(1); 4082 4083 /* set link down untill poll() routine will set it up (maybe) */ 4084 hldev->link_state = XGE_HAL_LINK_DOWN; 4085 4086 /* If link is UP (adpter is connected) then enable the adapter */ 4087 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4088 &bar0->adapter_status); 4089 if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 4090 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) { 4091 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4092 &bar0->adapter_control); 4093 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 4094 } else { 4095 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4096 &bar0->adapter_control); 4097 val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON | 4098 XGE_HAL_ADAPTER_LED_ON ); 4099 } 4100 4101 val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */ 4102 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ 4103 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64, 4104 &bar0->adapter_control); 4105 4106 /* We spin here waiting for the Link to come up. 4107 * This is the fix for the Link being unstable after the reset. 
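 * (In the loop below, j counts consecutive 1-ms polls in which the RMAC
 * reports neither a local nor a remote fault while ADAPTER_CNTL_EN is
 * set; the link is considered stable once j reaches
 * hldev->config.link_valid_cnt. Otherwise j is cleared and
 * EOI_TX_ON/CNTL_EN are re-asserted. i caps the total number of
 * iterations at hldev->config.link_retry_cnt.)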
*/
4108 i = 0;
4109 j = 0;
4110 do
4111 {
4112 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4113 &bar0->adapter_status);
4114
4115 /* Read the adapter control register for Adapter_enable bit */
4116 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4117 &bar0->adapter_control);
4118 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4119 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
4120 (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
4121 j++;
4122 if (j >= hldev->config.link_valid_cnt) {
4123 if (xge_hal_device_status(hldev, &adp_status) ==
4124 XGE_HAL_OK) {
4125 if (__hal_verify_pcc_idle(hldev,
4126 adp_status) != XGE_HAL_OK) {
4127 return
4128 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4129 }
4130 xge_debug_device(XGE_TRACE,
4131 "adp_status: "XGE_OS_LLXFMT
4132 ", link is up on "
4133 "adapter enable!",
4134 (unsigned long long)adp_status);
4135 val64 = xge_os_pio_mem_read64(
4136 hldev->pdev,
4137 hldev->regh0,
4138 &bar0->adapter_control);
4139 val64 = val64|
4140 (XGE_HAL_ADAPTER_EOI_TX_ON |
4141 XGE_HAL_ADAPTER_LED_ON );
4142 xge_os_pio_mem_write64(hldev->pdev,
4143 hldev->regh0, val64,
4144 &bar0->adapter_control);
4145 xge_os_mdelay(1);
4146
4147 val64 = xge_os_pio_mem_read64(
4148 hldev->pdev,
4149 hldev->regh0,
4150 &bar0->adapter_control);
4151 break; /* out of the retry loop */
4152 } else {
4153 return
4154 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4155 }
4156 }
4157 } else {
4158 j = 0; /* Reset the count */
4159 /* Turn on the Laser */
4160 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4161 &bar0->adapter_control);
4162 val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
4163 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
4164 val64, &bar0->adapter_control);
4165
4166 xge_os_mdelay(1);
4167
4168 /* Now re-enable it, since due to noise the hardware
4169 * turned it off */
4170 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4171 &bar0->adapter_control);
4172 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
4173 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/
4174 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4175 &bar0->adapter_control);
4176 }
4177 xge_os_mdelay(1); /* Sleep for 1 msec */
4178 i++;
4179 } while (i < hldev->config.link_retry_cnt);
4180
4181 __hal_device_led_actifity_fix(hldev);
4182
4183 #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4184 /* Here we are performing a soft reset on the XGXS to force the link down.
4185 * Since the link is already up, we will get a link state change
4186 * poll notification after the adapter is enabled */
4187
4188 __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
4189 &bar0->dtx_control);
4190 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4191
4192 __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
4193 &bar0->dtx_control);
4194 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4195
4196 __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
4197 &bar0->dtx_control);
4198 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4199
4200 xge_os_mdelay(100); /* Sleep for 100 msec */
4201 #else
4202 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4203 #endif
4204 {
4205 /*
4206 * With some switches the link state change interrupt does not
4207 * occur even though the xgxs reset is done as per SPN-006. So,
4208 * poll the adapter status register and check if the link state
4209 * is ok.
4210 */ 4211 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4212 &bar0->adapter_status); 4213 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 4214 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT))) 4215 { 4216 xge_debug_device(XGE_TRACE, "%s", 4217 "enable device causing link state change ind.."); 4218 (void) __hal_device_handle_link_state_change(hldev); 4219 } 4220 } 4221 4222 if (hldev->config.stats_refresh_time_sec != 4223 XGE_HAL_STATS_REFRESH_DISABLE) 4224 __hal_stats_enable(&hldev->stats); 4225 4226 return XGE_HAL_OK; 4227 } 4228 4229 /** 4230 * xge_hal_device_disable - Disable Xframe adapter. 4231 * @hldev: Device handle. 4232 * 4233 * Disable this device. To gracefully reset the adapter, the host should: 4234 * 4235 * - call xge_hal_device_disable(); 4236 * 4237 * - call xge_hal_device_intr_disable(); 4238 * 4239 * - close all opened channels and clean up outstanding resources; 4240 * 4241 * - do some work (error recovery, change mtu, reset, etc); 4242 * 4243 * - call xge_hal_device_enable(); 4244 * 4245 * - open channels, replenish RxDs, etc. 4246 * 4247 * - call xge_hal_device_intr_enable(). 4248 * 4249 * Note: Disabling the device does _not_ include disabling of interrupts. 4250 * After disabling the device stops receiving new frames but those frames 4251 * that were already in the pipe will keep coming for some few milliseconds. 4252 * 4253 * Returns: XGE_HAL_OK - success. 4254 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to 4255 * a "quiescent" state. 4256 * 4257 * See also: xge_hal_status_e{}. 4258 */ 4259 xge_hal_status_e 4260 xge_hal_device_disable(xge_hal_device_t *hldev) 4261 { 4262 xge_hal_status_e status = XGE_HAL_OK; 4263 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4264 u64 val64; 4265 4266 xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware"); 4267 4268 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4269 &bar0->adapter_control); 4270 val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN); 4271 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4272 &bar0->adapter_control); 4273 4274 if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) { 4275 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 4276 } 4277 4278 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 4279 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 4280 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4281 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 4282 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 4283 } 4284 4285 if (hldev->config.stats_refresh_time_sec != 4286 XGE_HAL_STATS_REFRESH_DISABLE) 4287 __hal_stats_disable(&hldev->stats); 4288 #ifdef XGE_DEBUG_ASSERT 4289 else 4290 xge_assert(!hldev->stats.is_enabled); 4291 #endif 4292 4293 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP 4294 __hal_device_bus_master_disable(hldev); 4295 #endif 4296 4297 return status; 4298 } 4299 4300 /** 4301 * xge_hal_device_reset - Reset device. 4302 * @hldev: HAL device handle. 4303 * 4304 * Soft-reset the device, reset the device stats except reset_cnt. 4305 * 4306 * After reset is done, will try to re-initialize HW. 4307 * 4308 * Returns: XGE_HAL_OK - success. 4309 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized. 4310 * XGE_HAL_ERR_RESET_FAILED - Reset failed. 4311 * 4312 * See also: xge_hal_status_e{}. 
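 *
 * A minimal recovery sketch, following the sequence documented in
 * xge_hal_device_disable() above (my_hldev is a placeholder for the
 * ULD's device handle; error handling is omitted):
 *
 *	(void) xge_hal_device_disable(my_hldev);
 *	xge_hal_device_intr_disable(my_hldev);
 *	... close channels, clean up outstanding resources ...
 *	(void) xge_hal_device_reset(my_hldev);
 *	(void) xge_hal_device_enable(my_hldev);
 *	... re-open channels, replenish RxDs ...
 *	xge_hal_device_intr_enable(my_hldev);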
4313 */ 4314 xge_hal_status_e 4315 xge_hal_device_reset(xge_hal_device_t *hldev) 4316 { 4317 xge_hal_status_e status; 4318 4319 /* increment the soft reset counter */ 4320 u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt; 4321 4322 xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt); 4323 4324 if (!hldev->is_initialized) 4325 return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED; 4326 4327 /* actual "soft" reset of the adapter */ 4328 status = __hal_device_reset(hldev); 4329 4330 /* reset all stats including saved */ 4331 __hal_stats_soft_reset(hldev, 1); 4332 4333 /* increment reset counter */ 4334 hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1; 4335 4336 /* re-initialize rxufca_intr_thres */ 4337 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 4338 4339 hldev->reset_needed_after_close = 0; 4340 4341 return status; 4342 } 4343 4344 /** 4345 * xge_hal_device_status - Check whether Xframe hardware is ready for 4346 * operation. 4347 * @hldev: HAL device handle. 4348 * @hw_status: Xframe status register. Returned by HAL. 4349 * 4350 * Check whether Xframe hardware is ready for operation. 4351 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest 4352 * hardware functional blocks. 4353 * 4354 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise 4355 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status). 4356 * 4357 * See also: xge_hal_status_e{}. 4358 * Usage: See ex_open{}. 4359 */ 4360 xge_hal_status_e 4361 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status) 4362 { 4363 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4364 u64 tmp64; 4365 4366 tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4367 &bar0->adapter_status); 4368 4369 *hw_status = tmp64; 4370 4371 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) { 4372 xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!"); 4373 return XGE_HAL_FAIL; 4374 } 4375 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) { 4376 xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!"); 4377 return XGE_HAL_FAIL; 4378 } 4379 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) { 4380 xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!"); 4381 return XGE_HAL_FAIL; 4382 } 4383 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) { 4384 xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!"); 4385 return XGE_HAL_FAIL; 4386 } 4387 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) { 4388 xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!"); 4389 return XGE_HAL_FAIL; 4390 } 4391 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) { 4392 xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!"); 4393 return XGE_HAL_FAIL; 4394 } 4395 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) { 4396 xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!"); 4397 return XGE_HAL_FAIL; 4398 } 4399 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) { 4400 xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!"); 4401 return XGE_HAL_FAIL; 4402 } 4403 #ifndef XGE_HAL_HERC_EMULATION 4404 /* 4405 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore, 4406 * the the P_PLL_LOCK bit in the adapter_status register will 4407 * not be asserted. 
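 * Accordingly, the check below flags a missing P_PLL lock only on
 * Herc cards that are not running in PCI 33MHz mode.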
4408 */ 4409 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) && 4410 xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC && 4411 hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) { 4412 xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!"); 4413 return XGE_HAL_FAIL; 4414 } 4415 #endif 4416 4417 return XGE_HAL_OK; 4418 } 4419 4420 void 4421 __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag) 4422 { 4423 u16 msi_control_reg; 4424 4425 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 4426 xge_offsetof(xge_hal_pci_config_le_t, 4427 msi_control), &msi_control_reg); 4428 4429 if (flag) 4430 msi_control_reg |= 0x1; 4431 else 4432 msi_control_reg &= ~0x1; 4433 4434 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 4435 xge_offsetof(xge_hal_pci_config_le_t, 4436 msi_control), msi_control_reg); 4437 } 4438 4439 void 4440 __hal_device_msix_intr_endis(xge_hal_device_t *hldev, 4441 xge_hal_channel_t *channel, int flag) 4442 { 4443 u64 val64; 4444 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 4445 4446 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4447 &bar0->xmsi_mask_reg); 4448 4449 if (flag) 4450 val64 &= ~(1LL << ( 63 - channel->msix_idx )); 4451 else 4452 val64 |= (1LL << ( 63 - channel->msix_idx )); 4453 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4454 &bar0->xmsi_mask_reg); 4455 } 4456 4457 /** 4458 * xge_hal_device_intr_enable - Enable Xframe interrupts. 4459 * @hldev: HAL device handle. 4460 * @op: One of the xge_hal_device_intr_e enumerated values specifying 4461 * the type(s) of interrupts to enable. 4462 * 4463 * Enable Xframe interrupts. The function is to be executed the last in 4464 * Xframe initialization sequence. 4465 * 4466 * See also: xge_hal_device_intr_disable() 4467 */ 4468 void 4469 xge_hal_device_intr_enable(xge_hal_device_t *hldev) 4470 { 4471 xge_list_t *item; 4472 u64 val64; 4473 4474 /* PRC initialization and configuration */ 4475 xge_list_for_each(item, &hldev->ring_channels) { 4476 xge_hal_channel_h channel; 4477 channel = xge_container_of(item, xge_hal_channel_t, item); 4478 __hal_ring_prc_enable(channel); 4479 } 4480 4481 /* enable traffic only interrupts */ 4482 if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) { 4483 /* 4484 * make sure all interrupts going to be disabled if MSI 4485 * is enabled. 4486 */ 4487 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 4488 __hal_device_intr_mgmt(hldev, XGE_HAL_TX_PIC_INTR, 1); 4489 #else 4490 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 4491 #endif 4492 } else { 4493 /* 4494 * Enable the Tx traffic interrupts only if the TTI feature is 4495 * enabled. 4496 */ 4497 val64 = 0; 4498 if (hldev->tti_enabled) 4499 val64 = XGE_HAL_TX_TRAFFIC_INTR; 4500 4501 if (!hldev->config.bimodal_interrupts) 4502 val64 |= XGE_HAL_RX_TRAFFIC_INTR; 4503 4504 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 4505 val64 |= XGE_HAL_RX_TRAFFIC_INTR; 4506 4507 val64 |=XGE_HAL_TX_PIC_INTR | 4508 XGE_HAL_MC_INTR | 4509 XGE_HAL_TX_DMA_INTR | 4510 (hldev->config.sched_timer_us != 4511 XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0); 4512 __hal_device_intr_mgmt(hldev, val64, 1); 4513 } 4514 4515 /* 4516 * Enable MSI-X interrupts 4517 */ 4518 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 4519 4520 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 4521 /* 4522 * To enable MSI-X, MSI also needs to be enabled, 4523 * due to a bug in the herc NIC. 
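 * (__hal_device_msi_intr_endis() above simply toggles bit 0 of the
 * PCI MSI control word, which corresponds to the MSI Enable bit.)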
4524 */ 4525 __hal_device_msi_intr_endis(hldev, 1); 4526 } 4527 4528 4529 /* Enable the MSI-X interrupt for each configured channel */ 4530 xge_list_for_each(item, &hldev->fifo_channels) { 4531 xge_hal_channel_t *channel; 4532 4533 channel = xge_container_of(item, 4534 xge_hal_channel_t, item); 4535 4536 /* 0 vector is reserved for alarms */ 4537 if (!channel->msix_idx) 4538 continue; 4539 4540 __hal_device_msix_intr_endis(hldev, channel, 1); 4541 } 4542 4543 xge_list_for_each(item, &hldev->ring_channels) { 4544 xge_hal_channel_t *channel; 4545 4546 channel = xge_container_of(item, 4547 xge_hal_channel_t, item); 4548 4549 /* 0 vector is reserved for alarms */ 4550 if (!channel->msix_idx) 4551 continue; 4552 4553 __hal_device_msix_intr_endis(hldev, channel, 1); 4554 } 4555 } 4556 4557 xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled"); 4558 } 4559 4560 4561 /** 4562 * xge_hal_device_intr_disable - Disable Xframe interrupts. 4563 * @hldev: HAL device handle. 4564 * @op: One of the xge_hal_device_intr_e enumerated values specifying 4565 * the type(s) of interrupts to disable. 4566 * 4567 * Disable Xframe interrupts. 4568 * 4569 * See also: xge_hal_device_intr_enable() 4570 */ 4571 void 4572 xge_hal_device_intr_disable(xge_hal_device_t *hldev) 4573 { 4574 xge_list_t *item; 4575 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4576 u64 val64; 4577 4578 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 4579 4580 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 4581 /* 4582 * To disable MSI-X, MSI also needs to be disabled, 4583 * due to a bug in the herc NIC. 4584 */ 4585 __hal_device_msi_intr_endis(hldev, 0); 4586 } 4587 4588 /* Disable the MSI-X interrupt for each configured channel */ 4589 xge_list_for_each(item, &hldev->fifo_channels) { 4590 xge_hal_channel_t *channel; 4591 4592 channel = xge_container_of(item, 4593 xge_hal_channel_t, item); 4594 4595 /* 0 vector is reserved for alarms */ 4596 if (!channel->msix_idx) 4597 continue; 4598 4599 __hal_device_msix_intr_endis(hldev, channel, 0); 4600 4601 } 4602 4603 xge_os_pio_mem_write64(hldev->pdev, 4604 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, 4605 &bar0->tx_traffic_mask); 4606 4607 xge_list_for_each(item, &hldev->ring_channels) { 4608 xge_hal_channel_t *channel; 4609 4610 channel = xge_container_of(item, 4611 xge_hal_channel_t, item); 4612 4613 /* 0 vector is reserved for alarms */ 4614 if (!channel->msix_idx) 4615 continue; 4616 4617 __hal_device_msix_intr_endis(hldev, channel, 0); 4618 } 4619 4620 xge_os_pio_mem_write64(hldev->pdev, 4621 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL, 4622 &bar0->rx_traffic_mask); 4623 } 4624 4625 /* 4626 * Disable traffic only interrupts. 4627 * Tx traffic interrupts are used only if the TTI feature is 4628 * enabled. 4629 */ 4630 val64 = 0; 4631 if (hldev->tti_enabled) 4632 val64 = XGE_HAL_TX_TRAFFIC_INTR; 4633 4634 val64 |= XGE_HAL_RX_TRAFFIC_INTR | 4635 XGE_HAL_TX_PIC_INTR | 4636 XGE_HAL_MC_INTR | 4637 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ? 
4638 XGE_HAL_SCHED_INTR : 0); 4639 __hal_device_intr_mgmt(hldev, val64, 0); 4640 4641 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4642 0xFFFFFFFFFFFFFFFFULL, 4643 &bar0->general_int_mask); 4644 4645 4646 /* disable all configured PRCs */ 4647 xge_list_for_each(item, &hldev->ring_channels) { 4648 xge_hal_channel_h channel; 4649 channel = xge_container_of(item, xge_hal_channel_t, item); 4650 __hal_ring_prc_disable(channel); 4651 } 4652 4653 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled"); 4654 } 4655 4656 4657 /** 4658 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses. 4659 * @hldev: HAL device handle. 4660 * 4661 * Enable Xframe multicast addresses. 4662 * Returns: XGE_HAL_OK on success. 4663 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast 4664 * feature within the time(timeout). 4665 * 4666 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}. 4667 */ 4668 xge_hal_status_e 4669 xge_hal_device_mcast_enable(xge_hal_device_t *hldev) 4670 { 4671 u64 val64; 4672 xge_hal_pci_bar0_t *bar0; 4673 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET; 4674 4675 if (hldev == NULL) 4676 return XGE_HAL_ERR_INVALID_DEVICE; 4677 4678 if (hldev->mcast_refcnt) 4679 return XGE_HAL_OK; 4680 4681 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 4682 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; 4683 4684 hldev->mcast_refcnt = 1; 4685 4686 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4687 4688 /* Enable all Multicast addresses */ 4689 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4690 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL), 4691 &bar0->rmac_addr_data0_mem); 4692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4693 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL), 4694 &bar0->rmac_addr_data1_mem); 4695 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | 4696 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4697 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); 4698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4699 &bar0->rmac_addr_cmd_mem); 4700 4701 if (__hal_device_register_poll(hldev, 4702 &bar0->rmac_addr_cmd_mem, 0, 4703 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4704 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4705 /* upper layer may require to repeat */ 4706 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4707 } 4708 4709 return XGE_HAL_OK; 4710 } 4711 4712 /** 4713 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses. 4714 * @hldev: HAL device handle. 4715 * 4716 * Disable Xframe multicast addresses. 4717 * Returns: XGE_HAL_OK - success. 4718 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast 4719 * feature within the time(timeout). 4720 * 4721 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}. 
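 *
 * Note: the enable/disable state is tracked via hldev->mcast_refcnt, so
 * calling this routine when multicast is already disabled (or calling
 * xge_hal_device_mcast_enable() when it is already enabled) is a no-op
 * that returns XGE_HAL_OK.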
4722 */ 4723 xge_hal_status_e 4724 xge_hal_device_mcast_disable(xge_hal_device_t *hldev) 4725 { 4726 u64 val64; 4727 xge_hal_pci_bar0_t *bar0; 4728 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET; 4729 4730 if (hldev == NULL) 4731 return XGE_HAL_ERR_INVALID_DEVICE; 4732 4733 if (hldev->mcast_refcnt == 0) 4734 return XGE_HAL_OK; 4735 4736 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 4737 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC; 4738 4739 hldev->mcast_refcnt = 0; 4740 4741 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4742 4743 /* Disable all Multicast addresses */ 4744 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4745 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL), 4746 &bar0->rmac_addr_data0_mem); 4747 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4748 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0), 4749 &bar0->rmac_addr_data1_mem); 4750 4751 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE | 4752 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4753 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset); 4754 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4755 &bar0->rmac_addr_cmd_mem); 4756 4757 if (__hal_device_register_poll(hldev, 4758 &bar0->rmac_addr_cmd_mem, 0, 4759 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4760 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4761 /* upper layer may require to repeat */ 4762 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4763 } 4764 4765 return XGE_HAL_OK; 4766 } 4767 4768 /** 4769 * xge_hal_device_promisc_enable - Enable promiscuous mode. 4770 * @hldev: HAL device handle. 4771 * 4772 * Enable promiscuous mode of Xframe operation. 4773 * 4774 * See also: xge_hal_device_promisc_disable(). 4775 */ 4776 void 4777 xge_hal_device_promisc_enable(xge_hal_device_t *hldev) 4778 { 4779 u64 val64; 4780 xge_hal_pci_bar0_t *bar0; 4781 4782 xge_assert(hldev); 4783 4784 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4785 4786 if (!hldev->is_promisc) { 4787 /* Put the NIC into promiscuous mode */ 4788 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4789 &bar0->mac_cfg); 4790 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; 4791 4792 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4793 XGE_HAL_RMAC_CFG_KEY(0x4C0D), 4794 &bar0->rmac_cfg_key); 4795 4796 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 4797 (u32)(val64 >> 32), 4798 &bar0->mac_cfg); 4799 4800 hldev->is_promisc = 1; 4801 xge_debug_device(XGE_TRACE, 4802 "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled", 4803 (unsigned long long)val64); 4804 } 4805 } 4806 4807 /** 4808 * xge_hal_device_promisc_disable - Disable promiscuous mode. 4809 * @hldev: HAL device handle. 4810 * 4811 * Disable promiscuous mode of Xframe operation. 4812 * 4813 * See also: xge_hal_device_promisc_enable(). 
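 *
 * Note: the call is a no-op if the adapter is not currently in
 * promiscuous mode; hldev->is_promisc tracks the current state.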
4814 */ 4815 void 4816 xge_hal_device_promisc_disable(xge_hal_device_t *hldev) 4817 { 4818 u64 val64; 4819 xge_hal_pci_bar0_t *bar0; 4820 4821 xge_assert(hldev); 4822 4823 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4824 4825 if (hldev->is_promisc) { 4826 /* Remove the NIC from promiscuous mode */ 4827 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4828 &bar0->mac_cfg); 4829 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE; 4830 4831 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 4832 XGE_HAL_RMAC_CFG_KEY(0x4C0D), 4833 &bar0->rmac_cfg_key); 4834 4835 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 4836 (u32)(val64 >> 32), 4837 &bar0->mac_cfg); 4838 4839 hldev->is_promisc = 0; 4840 xge_debug_device(XGE_TRACE, 4841 "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled", 4842 (unsigned long long)val64); 4843 } 4844 } 4845 4846 /** 4847 * xge_hal_device_macaddr_get - Get MAC addresses. 4848 * @hldev: HAL device handle. 4849 * @index: MAC address index, in the range from 0 to 4850 * XGE_HAL_MAX_MAC_ADDRESSES. 4851 * @macaddr: MAC address. Returned by HAL. 4852 * 4853 * Retrieve one of the stored MAC addresses by reading non-volatile 4854 * memory on the chip. 4855 * 4856 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported. 4857 * 4858 * Returns: XGE_HAL_OK - success. 4859 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac 4860 * address within the time(timeout). 4861 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index. 4862 * 4863 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}. 4864 */ 4865 xge_hal_status_e 4866 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index, 4867 macaddr_t *macaddr) 4868 { 4869 xge_hal_pci_bar0_t *bar0; 4870 u64 val64; 4871 int i; 4872 4873 if (hldev == NULL) { 4874 return XGE_HAL_ERR_INVALID_DEVICE; 4875 } 4876 4877 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4878 4879 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) { 4880 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 4881 } 4882 4883 #ifdef XGE_HAL_HERC_EMULATION 4884 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000, 4885 &bar0->rmac_addr_data0_mem); 4886 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000, 4887 &bar0->rmac_addr_data1_mem); 4888 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD | 4889 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4890 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)); 4891 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4892 &bar0->rmac_addr_cmd_mem); 4893 4894 /* poll until done */ 4895 __hal_device_register_poll(hldev, 4896 &bar0->rmac_addr_cmd_mem, 0, 4897 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD, 4898 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS); 4899 4900 #endif 4901 4902 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD | 4903 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | 4904 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) ); 4905 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 4906 &bar0->rmac_addr_cmd_mem); 4907 4908 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0, 4909 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, 4910 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 4911 /* upper layer may require to repeat */ 4912 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 4913 } 4914 4915 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 4916 &bar0->rmac_addr_data0_mem); 4917 for (i=0; i < XGE_HAL_ETH_ALEN; i++) { 4918 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8))); 4919 } 4920 4921 #ifdef XGE_HAL_HERC_EMULATION 4922 for (i=0; i < XGE_HAL_ETH_ALEN; i++) { 4923 (*macaddr)[i] = (u8)0; 
4924 }
4925 (*macaddr)[1] = (u8)1;
4926
4927 #endif
4928
4929 return XGE_HAL_OK;
4930 }
4931
4932 /**
4933 * xge_hal_device_macaddr_set - Set MAC address.
4934 * @hldev: HAL device handle.
4935 * @index: MAC address index, in the range from 0 to
4936 * XGE_HAL_MAX_MAC_ADDRESSES.
4937 * @macaddr: New MAC address to configure.
4938 *
4939 * Configure one of the available MAC address "slots".
4940 *
4941 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses are supported.
4942 *
4943 * Returns: XGE_HAL_OK - success.
4944 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
4945 * address within the time(timeout).
4946 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4947 *
4948 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4949 */
4950 xge_hal_status_e
4951 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4952 macaddr_t macaddr)
4953 {
4954 xge_hal_pci_bar0_t *bar0 =
4955 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4956 u64 val64, temp64;
4957 int i;
4958
4959 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4960 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4961
4962 temp64 = 0;
4963 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4964 temp64 |= macaddr[i];
4965 temp64 <<= 8;
4966 }
4967 temp64 >>= 8;
4968
4969 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4970 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4971 &bar0->rmac_addr_data0_mem);
4972
4973 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4974 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4975 &bar0->rmac_addr_data1_mem);
4976
4977 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4978 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4979 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4980
4981 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4982 &bar0->rmac_addr_cmd_mem);
4983
4984 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4985 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4986 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4987 /* upper layer may require to repeat */
4988 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4989 }
4990
4991 return XGE_HAL_OK;
4992 }
4993
4994 /**
4995 * xge_hal_device_macaddr_clear - Clear MAC address.
4996 * @hldev: HAL device handle.
4997 * @index: MAC address index, in the range from 0 to
4998 * XGE_HAL_MAX_MAC_ADDRESSES.
4999 *
5000 * Clear one of the available MAC address "slots".
5001 *
5002 * Returns: XGE_HAL_OK - success.
5003 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to clear the mac
5004 * address within the time(timeout).
5005 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
5006 *
5007 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
5008 */
5009 xge_hal_status_e
5010 xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
5011 {
5012 xge_hal_status_e status;
5013 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
5014
5015 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
5016 if (status != XGE_HAL_OK) {
5017 xge_debug_device(XGE_ERR, "%s",
5018 "Not able to set the mac addr");
5019 return status;
5020 }
5021
5022 return XGE_HAL_OK;
5023 }
5024
5025 /**
5026 * xge_hal_device_macaddr_find - Find the index in the rmac table.
5027 * @hldev: HAL device handle.
5028 * @wanted: Wanted MAC address.
5029 *
5030 * See also: xge_hal_device_macaddr_set().
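 *
 * Returns: the index (>= 1) of the matching entry, or -1 if @wanted is
 * not configured; XGE_HAL_ERR_INVALID_DEVICE if @hldev is NULL. Note
 * that the search starts at index 1, i.e. the default (index 0)
 * address is not compared.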
5031 */
5032 int
5033 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
5034 {
5035 int i;
5036 macaddr_t macaddr;
5037
5038 if (hldev == NULL) {
5039 return XGE_HAL_ERR_INVALID_DEVICE;
5040 }
5041
5042 for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
5043 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
5044 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
5045 return i;
5046 }
5047 }
5048
5049 return -1;
5050 }
5051
5052 /**
5053 * xge_hal_device_mtu_set - Set MTU.
5054 * @hldev: HAL device handle.
5055 * @new_mtu: New MTU size to configure.
5056 *
5057 * Set new MTU value. For example, to use jumbo frames:
5058 * xge_hal_device_mtu_set(my_device, 9600);
5059 *
5060 * Returns: XGE_HAL_OK on success.
5061 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
5062 * register.
5063 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
5064 * schemes.
5065 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
5066 * a "quiescent" state.
5067 */
5068 xge_hal_status_e
5069 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
5070 {
5071 xge_hal_status_e status;
5072
5073 /*
5074 * reset needed if 1) new MTU differs, and
5075 * 2a) device was closed or
5076 * 2b) device is being upped for the first time.
5077 */
5078 if (hldev->config.mtu != new_mtu) {
5079 if (hldev->reset_needed_after_close ||
5080 !hldev->mtu_first_time_set) {
5081 status = xge_hal_device_reset(hldev);
5082 if (status != XGE_HAL_OK) {
5083 xge_debug_device(XGE_TRACE, "%s",
5084 "fatal: can not reset the device");
5085 return status;
5086 }
5087 }
5088 /* store the new MTU in device, reset will use it */
5089 hldev->config.mtu = new_mtu;
5090 xge_debug_device(XGE_TRACE, "new MTU %d applied",
5091 new_mtu);
5092 }
5093
5094 if (!hldev->mtu_first_time_set)
5095 hldev->mtu_first_time_set = 1;
5096
5097 return XGE_HAL_OK;
5098 }
5099
5100 /**
5101 * xge_hal_device_initialize - Initialize Xframe device.
5102 * @hldev: HAL device handle.
5103 * @attr: pointer to xge_hal_device_attr_t structure
5104 * @device_config: Configuration to be _applied_ to the device.
5105 * For the Xframe configuration "knobs" please
5106 * refer to xge_hal_device_config_t and Xframe
5107 * User Guide.
5108 *
5109 * Initialize Xframe device. Note that all the arguments of this public API
5110 * are 'IN', including @hldev. The upper-layer driver (ULD) cooperates with
5111 * the OS to find a new Xframe device and locate its PCI and memory spaces.
5112 *
5113 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL
5114 * to enable the latter to perform Xframe hardware initialization.
5115 *
5116 * Returns: XGE_HAL_OK - success.
5117 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
5118 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
5119 * valid.
5120 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
5121 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
5122 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address is not valid.
5123 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
5124 * address within the time(timeout) or TTI/RTI initialization failed.
5125 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
5126 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Device is not quiescent.
5127 *
5128 * See also: xge_hal_device_terminate(), xge_hal_status_e{},
5129 * xge_hal_device_attr_t{}.
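 *
 * A minimal attach sketch (names prefixed with my_ are ULD-side
 * placeholders; discovering the adapter and mapping its BARs is
 * OS specific, and error handling is omitted):
 *
 *	xge_hal_device_attr_t my_attr;     ... bar0/bar1, regh0/regh1,
 *	                                       pdev, irqh, cfgh filled in
 *	xge_hal_device_t *my_hldev;        ... ULD-allocated,
 *	                                       sizeof(xge_hal_device_t) bytes
 *
 *	if (xge_hal_device_initialize(my_hldev, &my_attr,
 *	        &my_device_config) != XGE_HAL_OK)
 *		... bail out ...
 *	... open channels, set the MTU, etc ...
 *	(void) xge_hal_device_enable(my_hldev);
 *	xge_hal_device_intr_enable(my_hldev);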
5130 */ 5131 xge_hal_status_e 5132 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, 5133 xge_hal_device_config_t *device_config) 5134 { 5135 int i; 5136 xge_hal_status_e status; 5137 xge_hal_channel_t *channel; 5138 u16 subsys_device; 5139 u16 subsys_vendor; 5140 int total_dram_size, ring_auto_dram_cfg, left_dram_size; 5141 int total_dram_size_max = 0; 5142 5143 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing", 5144 (unsigned long long)(ulong_t)hldev); 5145 5146 /* sanity check */ 5147 if (g_xge_hal_driver == NULL || 5148 !g_xge_hal_driver->is_initialized) { 5149 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; 5150 } 5151 5152 xge_os_memzero(hldev, sizeof(xge_hal_device_t)); 5153 5154 /* 5155 * validate a common part of Xframe-I/II configuration 5156 * (and run check_card() later, once PCI inited - see below) 5157 */ 5158 status = __hal_device_config_check_common(device_config); 5159 if (status != XGE_HAL_OK) 5160 return status; 5161 5162 /* apply config */ 5163 xge_os_memcpy(&hldev->config, device_config, 5164 sizeof(xge_hal_device_config_t)); 5165 5166 /* save original attr */ 5167 xge_os_memcpy(&hldev->orig_attr, attr, 5168 sizeof(xge_hal_device_attr_t)); 5169 5170 /* initialize rxufca_intr_thres */ 5171 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 5172 5173 hldev->regh0 = attr->regh0; 5174 hldev->regh1 = attr->regh1; 5175 hldev->regh2 = attr->regh2; 5176 hldev->isrbar0 = hldev->bar0 = attr->bar0; 5177 hldev->bar1 = attr->bar1; 5178 hldev->bar2 = attr->bar2; 5179 hldev->pdev = attr->pdev; 5180 hldev->irqh = attr->irqh; 5181 hldev->cfgh = attr->cfgh; 5182 5183 /* set initial bimodal timer for bimodal adaptive schema */ 5184 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us; 5185 5186 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, 5187 g_xge_hal_driver->config.queue_size_initial, 5188 g_xge_hal_driver->config.queue_size_max, 5189 __hal_device_event_queued, hldev); 5190 if (hldev->queueh == NULL) 5191 return XGE_HAL_ERR_OUT_OF_MEMORY; 5192 5193 hldev->magic = XGE_HAL_MAGIC; 5194 5195 xge_assert(hldev->regh0); 5196 xge_assert(hldev->regh1); 5197 xge_assert(hldev->bar0); 5198 xge_assert(hldev->bar1); 5199 xge_assert(hldev->pdev); 5200 xge_assert(hldev->irqh); 5201 xge_assert(hldev->cfgh); 5202 5203 /* initialize some PCI/PCI-X fields of this PCI device. 
*/ 5204 __hal_device_pci_init(hldev); 5205 5206 /* 5207 * initlialize lists to properly handling a potential 5208 * terminate request 5209 */ 5210 xge_list_init(&hldev->free_channels); 5211 xge_list_init(&hldev->fifo_channels); 5212 xge_list_init(&hldev->ring_channels); 5213 5214 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 5215 /* fixups for xena */ 5216 hldev->config.rth_en = 0; 5217 hldev->config.rth_spdm_en = 0; 5218 hldev->config.rts_mac_en = 0; 5219 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; 5220 5221 status = __hal_device_config_check_xena(device_config); 5222 if (status != XGE_HAL_OK) { 5223 xge_hal_device_terminate(hldev); 5224 return status; 5225 } 5226 if (hldev->config.bimodal_interrupts == 1) { 5227 xge_hal_device_terminate(hldev); 5228 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; 5229 } else if (hldev->config.bimodal_interrupts == 5230 XGE_HAL_DEFAULT_USE_HARDCODE) 5231 hldev->config.bimodal_interrupts = 0; 5232 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 5233 /* fixups for herc */ 5234 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; 5235 status = __hal_device_config_check_herc(device_config); 5236 if (status != XGE_HAL_OK) { 5237 xge_hal_device_terminate(hldev); 5238 return status; 5239 } 5240 if (hldev->config.bimodal_interrupts == 5241 XGE_HAL_DEFAULT_USE_HARDCODE) 5242 hldev->config.bimodal_interrupts = 1; 5243 } else { 5244 xge_debug_device(XGE_ERR, 5245 "detected unknown device_id 0x%x", hldev->device_id); 5246 xge_hal_device_terminate(hldev); 5247 return XGE_HAL_ERR_BAD_DEVICE_ID; 5248 } 5249 5250 5251 /* allocate and initialize FIFO types of channels according to 5252 * configuration */ 5253 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 5254 if (!device_config->fifo.queue[i].configured) 5255 continue; 5256 5257 channel = __hal_channel_allocate(hldev, i, 5258 XGE_HAL_CHANNEL_TYPE_FIFO); 5259 if (channel == NULL) { 5260 xge_debug_device(XGE_ERR, 5261 "fifo: __hal_channel_allocate failed"); 5262 xge_hal_device_terminate(hldev); 5263 return XGE_HAL_ERR_OUT_OF_MEMORY; 5264 } 5265 /* add new channel to the device */ 5266 xge_list_insert(&channel->item, &hldev->free_channels); 5267 } 5268 5269 /* 5270 * automatic DRAM adjustment 5271 */ 5272 total_dram_size = 0; 5273 ring_auto_dram_cfg = 0; 5274 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5275 if (!device_config->ring.queue[i].configured) 5276 continue; 5277 if (device_config->ring.queue[i].dram_size_mb == 5278 XGE_HAL_DEFAULT_USE_HARDCODE) { 5279 ring_auto_dram_cfg++; 5280 continue; 5281 } 5282 total_dram_size += device_config->ring.queue[i].dram_size_mb; 5283 } 5284 left_dram_size = total_dram_size_max - total_dram_size; 5285 if (left_dram_size < 0 || 5286 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { 5287 xge_debug_device(XGE_ERR, 5288 "ring config: exceeded DRAM size %d MB", 5289 total_dram_size_max); 5290 xge_hal_device_terminate(hldev); 5291 return XGE_HAL_BADCFG_RING_QUEUE_SIZE; 5292 } 5293 5294 /* 5295 * allocate and initialize RING types of channels according to 5296 * configuration 5297 */ 5298 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5299 if (!device_config->ring.queue[i].configured) 5300 continue; 5301 5302 if (device_config->ring.queue[i].dram_size_mb == 5303 XGE_HAL_DEFAULT_USE_HARDCODE) { 5304 hldev->config.ring.queue[i].dram_size_mb = 5305 device_config->ring.queue[i].dram_size_mb = 5306 left_dram_size / ring_auto_dram_cfg; 5307 } 5308 5309 channel = __hal_channel_allocate(hldev, i, 5310 XGE_HAL_CHANNEL_TYPE_RING); 5311 if 
(channel == NULL) {
5312 xge_debug_device(XGE_ERR,
5313 "ring: __hal_channel_allocate failed");
5314 xge_hal_device_terminate(hldev);
5315 return XGE_HAL_ERR_OUT_OF_MEMORY;
5316 }
5317 /* add new channel to the device */
5318 xge_list_insert(&channel->item, &hldev->free_channels);
5319 }
5320
5321 /* get subsystem IDs */
5322 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5323 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
5324 &subsys_device);
5325 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5326 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
5327 &subsys_vendor);
5328 xge_debug_device(XGE_TRACE,
5329 "subsystem_id %04x:%04x",
5330 subsys_vendor, subsys_device);
5331
5332 /* reset device initially */
5333 (void) __hal_device_reset(hldev);
5334
5335 /* set host endian before, to assure proper action */
5336 status = __hal_device_set_swapper(hldev);
5337 if (status != XGE_HAL_OK) {
5338 xge_debug_device(XGE_ERR,
5339 "__hal_device_set_swapper failed");
5340 xge_hal_device_terminate(hldev);
5341 (void) __hal_device_reset(hldev);
5342 return status;
5343 }
5344
5345 #ifndef XGE_HAL_HERC_EMULATION
5346 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
5347 __hal_device_xena_fix_mac(hldev);
5348 #endif
5349
5350 /* MAC address initialization.
5351 * For now only one mac address will be read and used. */
5352 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
5353 if (status != XGE_HAL_OK) {
5354 xge_debug_device(XGE_ERR,
5355 "xge_hal_device_macaddr_get failed");
5356 xge_hal_device_terminate(hldev);
5357 return status;
5358 }
5359
5360 if (hldev->macaddr[0][0] == 0xFF &&
5361 hldev->macaddr[0][1] == 0xFF &&
5362 hldev->macaddr[0][2] == 0xFF &&
5363 hldev->macaddr[0][3] == 0xFF &&
5364 hldev->macaddr[0][4] == 0xFF &&
5365 hldev->macaddr[0][5] == 0xFF) {
5366 xge_debug_device(XGE_ERR,
5367 "xge_hal_device_macaddr_get returns all FFs");
5368 xge_hal_device_terminate(hldev);
5369 return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
5370 }
5371
5372 xge_debug_device(XGE_TRACE,
5373 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
5374 hldev->macaddr[0][0], hldev->macaddr[0][1],
5375 hldev->macaddr[0][2], hldev->macaddr[0][3],
5376 hldev->macaddr[0][4], hldev->macaddr[0][5]);
5377
5378 status = __hal_stats_initialize(&hldev->stats, hldev);
5379 if (status != XGE_HAL_OK) {
5380 xge_debug_device(XGE_ERR,
5381 "__hal_stats_initialize failed");
5382 xge_hal_device_terminate(hldev);
5383 return status;
5384 }
5385
5386 status = __hal_device_hw_initialize(hldev);
5387 if (status != XGE_HAL_OK) {
5388 xge_debug_device(XGE_ERR,
5389 "__hal_device_hw_initialize failed");
5390 xge_hal_device_terminate(hldev);
5391 return status;
5392 }
5393 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
5394 if (hldev->dump_buf == NULL) {
5395 xge_debug_device(XGE_ERR,
5396 "dump_buf allocation failed");
5397 xge_hal_device_terminate(hldev);
5398 return XGE_HAL_ERR_OUT_OF_MEMORY;
5399 }
5400
5401
5402 /* Xena-only: need to serialize fifo posts across all device fifos */
5403 #if defined(XGE_HAL_TX_MULTI_POST)
5404 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
5405 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5406 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
5407 #endif
5408 /* Getting VPD data */
5409 __hal_device_get_vpd_data(hldev);
5410
5411 hldev->is_initialized = 1;
5412
5413 return XGE_HAL_OK;
5414 }
5415
5416 /**
5417 * xge_hal_device_terminating - Mark the device as 'terminating'.
5418 * @devh: HAL device handle.
5419 * 5420 * Mark the device as 'terminating', going to terminate. Can be used 5421 * to serialize termination with other running processes/contexts. 5422 * 5423 * See also: xge_hal_device_terminate(). 5424 */ 5425 void 5426 xge_hal_device_terminating(xge_hal_device_h devh) 5427 { 5428 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 5429 xge_list_t *item; 5430 xge_hal_channel_t *channel; 5431 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5432 unsigned long flags = 0; 5433 #endif 5434 5435 /* 5436 * go through each opened tx channel and aquire 5437 * lock, so it will serialize with HAL termination flag 5438 */ 5439 xge_list_for_each(item, &hldev->fifo_channels) { 5440 channel = xge_container_of(item, xge_hal_channel_t, item); 5441 #if defined(XGE_HAL_TX_MULTI_RESERVE) 5442 xge_os_spin_lock(&channel->reserve_lock); 5443 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5444 xge_os_spin_lock_irq(&channel->reserve_lock, flags); 5445 #endif 5446 5447 channel->terminating = 1; 5448 5449 #if defined(XGE_HAL_TX_MULTI_RESERVE) 5450 xge_os_spin_unlock(&channel->reserve_lock); 5451 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5452 xge_os_spin_unlock_irq(&channel->reserve_lock, flags); 5453 #endif 5454 } 5455 5456 hldev->terminating = 1; 5457 } 5458 5459 /** 5460 * xge_hal_device_terminate - Terminate Xframe device. 5461 * @hldev: HAL device handle. 5462 * 5463 * Terminate HAL device. 5464 * 5465 * See also: xge_hal_device_initialize(). 5466 */ 5467 void 5468 xge_hal_device_terminate(xge_hal_device_t *hldev) 5469 { 5470 xge_assert(g_xge_hal_driver != NULL); 5471 xge_assert(hldev != NULL); 5472 xge_assert(hldev->magic == XGE_HAL_MAGIC); 5473 5474 xge_queue_flush(hldev->queueh); 5475 5476 hldev->terminating = 1; 5477 hldev->is_initialized = 0; 5478 hldev->in_poll = 0; 5479 hldev->magic = XGE_HAL_DEAD; 5480 5481 #if defined(XGE_HAL_TX_MULTI_POST) 5482 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev); 5483 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 5484 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev); 5485 #endif 5486 5487 xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating", 5488 (unsigned long long)(ulong_t)hldev); 5489 5490 xge_assert(xge_list_is_empty(&hldev->fifo_channels)); 5491 xge_assert(xge_list_is_empty(&hldev->ring_channels)); 5492 5493 if (hldev->stats.is_initialized) { 5494 __hal_stats_terminate(&hldev->stats); 5495 } 5496 5497 /* close if open and free all channels */ 5498 while (!xge_list_is_empty(&hldev->free_channels)) { 5499 xge_hal_channel_t *channel = (xge_hal_channel_t*) 5500 hldev->free_channels.next; 5501 5502 xge_assert(!channel->is_open); 5503 xge_list_remove(&channel->item); 5504 __hal_channel_free(channel); 5505 } 5506 5507 if (hldev->queueh) { 5508 xge_queue_destroy(hldev->queueh); 5509 } 5510 5511 if (hldev->spdm_table) { 5512 xge_os_free(hldev->pdev, 5513 hldev->spdm_table[0], 5514 (sizeof(xge_hal_spdm_entry_t) * 5515 hldev->spdm_max_entries)); 5516 xge_os_free(hldev->pdev, 5517 hldev->spdm_table, 5518 (sizeof(xge_hal_spdm_entry_t *) * 5519 hldev->spdm_max_entries)); 5520 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev); 5521 hldev->spdm_table = NULL; 5522 } 5523 5524 if (hldev->dump_buf) { 5525 xge_os_free(hldev->pdev, hldev->dump_buf, 5526 XGE_HAL_DUMP_BUF_SIZE); 5527 hldev->dump_buf = NULL; 5528 } 5529 5530 if (hldev->device_id != 0) { 5531 int j, pcisize; 5532 5533 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? 
5534 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; 5535 for (j = 0; j < pcisize; j++) { 5536 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, 5537 *((u32*)&hldev->pci_config_space_bios + j)); 5538 } 5539 } 5540 } 5541 /** 5542 * __hal_device_get_vpd_data - Getting vpd_data. 5543 * 5544 * @hldev: HAL device handle. 5545 * 5546 * Getting product name and serial number from vpd capabilites structure 5547 * 5548 */ 5549 void 5550 __hal_device_get_vpd_data(xge_hal_device_t *hldev) 5551 { 5552 u8 * vpd_data; 5553 u8 data; 5554 int index = 0, count, fail = 0; 5555 u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR; 5556 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 5557 vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR; 5558 5559 xge_os_strlcpy((char *) hldev->vpd_data.product_name, 5560 "10 Gigabit Ethernet Adapter", 5561 sizeof(hldev->vpd_data.product_name)); 5562 5563 xge_os_strlcpy((char *) hldev->vpd_data.serial_num, 5564 "not available", 5565 sizeof(hldev->vpd_data.serial_num)); 5566 5567 vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16); 5568 if ( vpd_data == 0 ) 5569 return; 5570 5571 for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) { 5572 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index); 5573 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data); 5574 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0); 5575 for (count = 0; count < 5; count++ ) { 5576 xge_os_mdelay(2); 5577 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data); 5578 if (data == XGE_HAL_VPD_READ_COMPLETE) 5579 break; 5580 } 5581 5582 if (count >= 5) { 5583 xge_os_printf("ERR, Reading VPD data failed"); 5584 fail = 1; 5585 break; 5586 } 5587 5588 xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4), 5589 (u32 *)&vpd_data[index]); 5590 } 5591 5592 if(!fail) { 5593 5594 /* read serial number of adapter */ 5595 for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) { 5596 if ((vpd_data[count] == 'S') && 5597 (vpd_data[count + 1] == 'N') && 5598 (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) { 5599 (void) memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH); 5600 (void) memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3], 5601 vpd_data[count + 2]); 5602 break; 5603 } 5604 } 5605 5606 if (vpd_data[1] < XGE_HAL_VPD_LENGTH) { 5607 (void) memset(hldev->vpd_data.product_name, 0, vpd_data[1]); 5608 (void) memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]); 5609 } 5610 5611 } 5612 5613 xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16); 5614 } 5615 5616 5617 /** 5618 * xge_hal_device_handle_tcode - Handle transfer code. 5619 * @channelh: Channel handle. 5620 * @dtrh: Descriptor handle. 5621 * @t_code: One of the enumerated (and documented in the Xframe user guide) 5622 * "transfer codes". 5623 * 5624 * Handle descriptor's transfer code. The latter comes with each completed 5625 * descriptor, see xge_hal_fifo_dtr_next_completed() and 5626 * xge_hal_ring_dtr_next_completed(). 5627 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h. 5628 * 5629 * Returns: one of the xge_hal_status_e{} enumerated types. 5630 * XGE_HAL_OK - for success. 5631 * XGE_HAL_ERR_CRITICAL - when encounters critical error. 
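 * XGE_HAL_ERR_PKT_DROP - the descriptor should be dropped, but the
 * device can continue (see the fifo and ring branches below).
 *
 * Illustrative usage (a sketch only; the reset and drop hooks named here
 * are hypothetical ULD code, not part of the HAL):
 *
 *	if (t_code != 0) {
 *		switch (xge_hal_device_handle_tcode(channelh, dtrh, t_code)) {
 *		case XGE_HAL_ERR_CRITICAL:
 *			uld_schedule_reset();		hypothetical ULD hook
 *			return;
 *		case XGE_HAL_ERR_PKT_DROP:
 *			uld_drop_descriptor(dtrh);	hypothetical ULD hook
 *			break;
 *		default:
 *			break;
 *		}
 *	}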
5632 */ 5633 xge_hal_status_e 5634 xge_hal_device_handle_tcode (xge_hal_channel_h channelh, 5635 xge_hal_dtr_h dtrh, u8 t_code) 5636 { 5637 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 5638 xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh; 5639 5640 if (t_code > 15) { 5641 xge_os_printf("invalid t_code %d", t_code); 5642 return XGE_HAL_OK; 5643 } 5644 5645 if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { 5646 hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++; 5647 5648 #if defined(XGE_HAL_DEBUG_BAD_TCODE) 5649 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh; 5650 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":" 5651 XGE_OS_LLXFMT":"XGE_OS_LLXFMT, 5652 txdp->control_1, txdp->control_2, txdp->buffer_pointer, 5653 txdp->host_control); 5654 #endif 5655 5656 /* handle link "down" immediately without going through 5657 * xge_hal_device_poll() routine. */ 5658 if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) { 5659 /* link is down */ 5660 if (hldev->link_state != XGE_HAL_LINK_DOWN) { 5661 xge_hal_pci_bar0_t *bar0 = 5662 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5663 u64 val64; 5664 5665 hldev->link_state = XGE_HAL_LINK_DOWN; 5666 5667 val64 = xge_os_pio_mem_read64(hldev->pdev, 5668 hldev->regh0, &bar0->adapter_control); 5669 5670 /* turn off LED */ 5671 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 5672 xge_os_pio_mem_write64(hldev->pdev, 5673 hldev->regh0, val64, 5674 &bar0->adapter_control); 5675 5676 g_xge_hal_driver->uld_callbacks.link_down( 5677 hldev->upper_layer_info); 5678 } 5679 } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER || 5680 t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) { 5681 __hal_device_handle_targetabort(hldev); 5682 return XGE_HAL_ERR_CRITICAL; 5683 } 5684 return XGE_HAL_ERR_PKT_DROP; 5685 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 5686 hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++; 5687 5688 #if defined(XGE_HAL_DEBUG_BAD_TCODE) 5689 xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh; 5690 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT 5691 ":"XGE_OS_LLXFMT, rxdp->control_1, 5692 rxdp->control_2, rxdp->buffer0_ptr, 5693 rxdp->host_control); 5694 #endif 5695 if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) { 5696 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 5697 __hal_device_handle_eccerr(hldev, "rxd_t_code", 5698 (u64)t_code); 5699 return XGE_HAL_ERR_CRITICAL; 5700 } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY || 5701 t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) { 5702 hldev->stats.sw_dev_err_stats.parity_err_cnt++; 5703 __hal_device_handle_parityerr(hldev, "rxd_t_code", 5704 (u64)t_code); 5705 return XGE_HAL_ERR_CRITICAL; 5706 /* do not drop if detected unknown IPv6 extension */ 5707 } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) { 5708 return XGE_HAL_ERR_PKT_DROP; 5709 } 5710 } 5711 return XGE_HAL_OK; 5712 } 5713 5714 /** 5715 * xge_hal_device_link_state - Get link state. 5716 * @devh: HAL device handle. 5717 * @ls: Link state, see xge_hal_device_link_state_e{}. 5718 * 5719 * Get link state. 5720 * Returns: XGE_HAL_OK. 5721 * See also: xge_hal_device_link_state_e{}. 5722 */ 5723 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh, 5724 xge_hal_device_link_state_e *ls) 5725 { 5726 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5727 5728 xge_assert(ls != NULL); 5729 *ls = hldev->link_state; 5730 return XGE_HAL_OK; 5731 } 5732 5733 /** 5734 * xge_hal_device_sched_timer - Configure scheduled device interrupt. 5735 * @devh: HAL device handle. 
5736 * @interval_us: Time interval, in miscoseconds. 5737 * Unlike transmit and receive interrupts, 5738 * the scheduled interrupt is generated independently of 5739 * traffic, but purely based on time. 5740 * @one_shot: 1 - generate scheduled interrupt only once. 5741 * 0 - generate scheduled interrupt periodically at the specified 5742 * @interval_us interval. 5743 * 5744 * (Re-)configure scheduled interrupt. Can be called at runtime to change 5745 * the setting, generate one-shot interrupts based on the resource and/or 5746 * traffic conditions, other purposes. 5747 * See also: xge_hal_device_config_t{}. 5748 */ 5749 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, 5750 int one_shot) 5751 { 5752 u64 val64; 5753 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5754 xge_hal_pci_bar0_t *bar0 = 5755 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5756 unsigned int interval = hldev->config.pci_freq_mherz * interval_us; 5757 5758 interval = __hal_fix_time_ival_herc(hldev, interval); 5759 5760 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5761 &bar0->scheduled_int_ctrl); 5762 if (interval) { 5763 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; 5764 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); 5765 if (one_shot) { 5766 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; 5767 } 5768 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5769 } else { 5770 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5771 } 5772 5773 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 5774 val64, &bar0->scheduled_int_ctrl); 5775 5776 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s", 5777 (unsigned long long)val64, 5778 interval ? "enabled" : "disabled"); 5779 } 5780 5781 /** 5782 * xge_hal_device_check_id - Verify device ID. 5783 * @devh: HAL device handle. 5784 * 5785 * Verify device ID. 5786 * Returns: one of the xge_hal_card_e{} enumerated types. 5787 * See also: xge_hal_card_e{}. 5788 */ 5789 xge_hal_card_e 5790 xge_hal_device_check_id(xge_hal_device_h devh) 5791 { 5792 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5793 switch (hldev->device_id) { 5794 case XGE_PCI_DEVICE_ID_XENA_1: 5795 case XGE_PCI_DEVICE_ID_XENA_2: 5796 return XGE_HAL_CARD_XENA; 5797 case XGE_PCI_DEVICE_ID_HERC_1: 5798 case XGE_PCI_DEVICE_ID_HERC_2: 5799 return XGE_HAL_CARD_HERC; 5800 case XGE_PCI_DEVICE_ID_TITAN_1: 5801 case XGE_PCI_DEVICE_ID_TITAN_2: 5802 return XGE_HAL_CARD_TITAN; 5803 default: 5804 return XGE_HAL_CARD_UNKNOWN; 5805 } 5806 } 5807 5808 /** 5809 * xge_hal_device_pci_info_get - Get PCI bus informations such as width, 5810 * frequency, and mode from previously stored values. 5811 * @devh: HAL device handle. 5812 * @pci_mode: pointer to a variable of enumerated type 5813 * xge_hal_pci_mode_e{}. 5814 * @bus_frequency: pointer to a variable of enumerated type 5815 * xge_hal_pci_bus_frequency_e{}. 5816 * @bus_width: pointer to a variable of enumerated type 5817 * xge_hal_pci_bus_width_e{}. 5818 * 5819 * Get pci mode, frequency, and PCI bus width. 5820 * Returns: one of the xge_hal_status_e{} enumerated types. 5821 * XGE_HAL_OK - for success. 5822 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. 5823 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
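 *
 * Illustrative usage (a sketch only; the trace message is assumed ULD
 * code, not part of the HAL):
 *
 *	xge_hal_pci_mode_e mode;
 *	xge_hal_pci_bus_frequency_e freq;
 *	xge_hal_pci_bus_width_e width;
 *
 *	if (xge_hal_device_pci_info_get(devh, &mode, &freq, &width) ==
 *	    XGE_HAL_OK)
 *		xge_os_printf("pci mode %d, bus frequency %d, bus width %d",
 *			mode, freq, width);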
5824 */ 5825 xge_hal_status_e 5826 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 5827 xge_hal_pci_bus_frequency_e *bus_frequency, 5828 xge_hal_pci_bus_width_e *bus_width) 5829 { 5830 xge_hal_status_e rc_status; 5831 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5832 5833 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { 5834 rc_status = XGE_HAL_ERR_INVALID_DEVICE; 5835 xge_debug_device(XGE_ERR, 5836 "xge_hal_device_pci_info_get error, rc %d for device %p", 5837 rc_status, hldev); 5838 5839 return rc_status; 5840 } 5841 5842 *pci_mode = hldev->pci_mode; 5843 *bus_frequency = hldev->bus_frequency; 5844 *bus_width = hldev->bus_width; 5845 rc_status = XGE_HAL_OK; 5846 return rc_status; 5847 } 5848 5849 /** 5850 * xge_hal_reinitialize_hw 5851 * @hldev: private member of the device structure. 5852 * 5853 * This function will soft reset the NIC and re-initalize all the 5854 * I/O registers to the values they had after it's inital initialization 5855 * through the probe function. 5856 */ 5857 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) 5858 { 5859 (void) xge_hal_device_reset(hldev); 5860 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) { 5861 xge_hal_device_terminate(hldev); 5862 (void) __hal_device_reset(hldev); 5863 return 1; 5864 } 5865 return 0; 5866 } 5867 5868 5869 /* 5870 * __hal_read_spdm_entry_line 5871 * @hldev: pointer to xge_hal_device_t structure 5872 * @spdm_line: spdm line in the spdm entry to be read. 5873 * @spdm_entry: spdm entry of the spdm_line in the SPDM table. 5874 * @spdm_line_val: Contains the value stored in the spdm line. 5875 * 5876 * SPDM table contains upto a maximum of 256 spdm entries. 5877 * Each spdm entry contains 8 lines and each line stores 8 bytes. 5878 * This function reads the spdm line(addressed by @spdm_line) 5879 * of the spdm entry(addressed by @spdm_entry) in 5880 * the SPDM table. 5881 */ 5882 xge_hal_status_e 5883 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, 5884 u16 spdm_entry, u64 *spdm_line_val) 5885 { 5886 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5887 u64 val64; 5888 5889 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE | 5890 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | 5891 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); 5892 5893 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5894 &bar0->rts_rth_spdm_mem_ctrl); 5895 5896 /* poll until done */ 5897 if (__hal_device_register_poll(hldev, 5898 &bar0->rts_rth_spdm_mem_ctrl, 0, 5899 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, 5900 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 5901 5902 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 5903 } 5904 5905 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev, 5906 hldev->regh0, &bar0->rts_rth_spdm_mem_data); 5907 return XGE_HAL_OK; 5908 } 5909 5910 5911 /* 5912 * __hal_get_free_spdm_entry 5913 * @hldev: pointer to xge_hal_device_t structure 5914 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table. 5915 * 5916 * This function returns an index of unused spdm entry in the SPDM 5917 * table. 5918 */ 5919 static xge_hal_status_e 5920 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) 5921 { 5922 xge_hal_status_e status; 5923 u64 spdm_line_val=0; 5924 5925 /* 5926 * Search in the local SPDM table for a free slot. 
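	 * (The local table mirrors the on-chip one; as described above, an
	 * SPDM entry is 8 lines x 8 bytes = 64 bytes, so line L of entry N
	 * sits at byte offset N * 64 + L * 8, which is the same arithmetic
	 * used when entry_enable is cleared in xge_hal_spdm_entry_remove().)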
	 */
	*spdm_entry = 0;
	for (; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
		if (!hldev->spdm_table[*spdm_entry]->in_use) {
			break;
		}
	}

	if (*spdm_entry >= hldev->spdm_max_entries) {
		return XGE_HAL_ERR_SPDM_TABLE_FULL;
	}

	/*
	 * Make sure that the corresponding spdm entry in the SPDM
	 * table is free.
	 * Seventh line of the spdm entry contains information about
	 * whether the entry is free or not.
	 */
	if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
			&spdm_line_val)) != XGE_HAL_OK) {
		return status;
	}

	/* BIT(63) in spdm line 7 corresponds to the entry_enable bit */
	if ((spdm_line_val & BIT(63))) {
		/*
		 * Log a warning
		 */
		xge_debug_device(XGE_ERR, "Local SPDM table is not "
			"consistent with the actual one for the spdm "
			"entry %d", *spdm_entry);
		return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
	}

	return XGE_HAL_OK;
}


/*
 * __hal_calc_jhash - Calculate Jenkins hash.
 * @msg: Jenkins hash algorithm key.
 * @length: Length of the key.
 * @golden_ratio: Jenkins hash golden ratio.
 * @init_value: Jenkins hash initial value.
 *
 * This function implements the Jenkins-based algorithm used for the
 * calculation of the RTH hash.
 * Returns: Jenkins hash value.
 *
 */
static u32
__hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
{

	register u32 a, b, c, len;

	/*
	 * Set up the internal state
	 */
	len = length;
	a = b = golden_ratio;	/* the golden ratio; an arbitrary value */
	c = init_value;		/* the previous hash value */

	/* handle most of the key */
	while (len >= 12) {
		a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
			+ ((u32)msg[3]<<24));
		b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
			+ ((u32)msg[7]<<24));
		c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
			+ ((u32)msg[11]<<24));
		mix(a, b, c);
		msg += 12; len -= 12;
	}

	/* handle the last 11 bytes */
	c += length;
	switch (len) {
	case 11: c += ((u32)msg[10]<<24);
		break;
	case 10: c += ((u32)msg[9]<<16);
		break;
	case 9: c += ((u32)msg[8]<<8);
		break;
	/* the first byte of c is reserved for the length */
	case 8: b += ((u32)msg[7]<<24);
		break;
	case 7: b += ((u32)msg[6]<<16);
		break;
	case 6: b += ((u32)msg[5]<<8);
		break;
	case 5: b += msg[4];
		break;
	case 4: a += ((u32)msg[3]<<24);
		break;
	case 3: a += ((u32)msg[2]<<16);
		break;
	case 2: a += ((u32)msg[1]<<8);
		break;
	case 1: a += msg[0];
		break;
	/* case 0: nothing left to add */
	}

	mix(a, b, c);

	/* report the result */
	return c;
}


/**
 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *          0, if the protocol is UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *          0, if the protocol is IPv6.
 * @tgt_queue: Target queue to route the receive packet.
 *
 * This function adds a new entry to the SPDM table.
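 *
 * Illustrative usage (a sketch only; the addresses, ports and target ring
 * shown are made-up values, and any byte-ordering fixups a real ULD may
 * need are omitted):
 *
 *	xge_hal_ipaddr_t sip, dip;
 *
 *	sip.ipv4.addr = 0xC0A80001;		192.168.0.1
 *	dip.ipv4.addr = 0xC0A80002;		192.168.0.2
 *	status = xge_hal_spdm_entry_add(devh, &sip, &dip,
 *			1234, 80,		L4 source and destination ports
 *			1, 1,			TCP over IPv4
 *			3);			steer matching packets to ring 3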
6054 * 6055 * Returns: XGE_HAL_OK - success. 6056 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 6057 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in 6058 * the time(timeout). 6059 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. 6060 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry. 6061 * 6062 * See also: xge_hal_spdm_entry_remove{}. 6063 */ 6064 xge_hal_status_e 6065 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, 6066 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, 6067 u8 is_tcp, u8 is_ipv4, u8 tgt_queue) 6068 { 6069 6070 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6071 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6072 u32 jhash_value; 6073 u32 jhash_init_val; 6074 u32 jhash_golden_ratio; 6075 u64 val64; 6076 int off; 6077 u16 spdm_entry; 6078 u8 msg[XGE_HAL_JHASH_MSG_LEN]; 6079 int ipaddr_len; 6080 xge_hal_status_e status; 6081 6082 6083 if (!hldev->config.rth_spdm_en) { 6084 return XGE_HAL_ERR_SPDM_NOT_ENABLED; 6085 } 6086 6087 if ((tgt_queue < XGE_HAL_MIN_RING_NUM) || 6088 (tgt_queue > XGE_HAL_MAX_RING_NUM)) { 6089 return XGE_HAL_ERR_SPDM_INVALID_ENTRY; 6090 } 6091 6092 6093 /* 6094 * Calculate the jenkins hash. 6095 */ 6096 /* 6097 * Create the Jenkins hash algorithm key. 6098 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to 6099 * use L4 information. Otherwize key = {L3SA, L3DA}. 6100 */ 6101 6102 if (is_ipv4) { 6103 ipaddr_len = 4; // In bytes 6104 } else { 6105 ipaddr_len = 16; 6106 } 6107 6108 /* 6109 * Jenkins hash algorithm expects the key in the big endian 6110 * format. Since key is the byte array, memcpy won't work in the 6111 * case of little endian. So, the current code extracts each 6112 * byte starting from MSB and store it in the key. 6113 */ 6114 if (is_ipv4) { 6115 for (off = 0; off < ipaddr_len; off++) { 6116 u32 mask = vBIT32(0xff,(off*8),8); 6117 int shift = 32-(off+1)*8; 6118 msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift); 6119 msg[off+ipaddr_len] = 6120 (u8)((dst_ip->ipv4.addr & mask) >> shift); 6121 } 6122 } else { 6123 for (off = 0; off < ipaddr_len; off++) { 6124 int loc = off % 8; 6125 u64 mask = vBIT(0xff,(loc*8),8); 6126 int shift = 64-(loc+1)*8; 6127 6128 msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask) 6129 >> shift); 6130 msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8] 6131 & mask) >> shift); 6132 } 6133 } 6134 6135 off = (2*ipaddr_len); 6136 6137 if (hldev->config.rth_spdm_use_l4) { 6138 msg[off] = (u8)((l4_sp & 0xff00) >> 8); 6139 msg[off + 1] = (u8)(l4_sp & 0xff); 6140 msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8); 6141 msg[off + 3] = (u8)(l4_dp & 0xff); 6142 off += 4; 6143 } 6144 6145 /* 6146 * Calculate jenkins hash for this configuration 6147 */ 6148 val64 = xge_os_pio_mem_read64(hldev->pdev, 6149 hldev->regh0, 6150 &bar0->rts_rth_jhash_cfg); 6151 jhash_golden_ratio = (u32)(val64 >> 32); 6152 jhash_init_val = (u32)(val64 & 0xffffffff); 6153 6154 jhash_value = __hal_calc_jhash(msg, off, 6155 jhash_golden_ratio, 6156 jhash_init_val); 6157 6158 xge_os_spin_lock(&hldev->spdm_lock); 6159 6160 /* 6161 * Locate a free slot in the SPDM table. To avoid a seach in the 6162 * actual SPDM table, which is very expensive in terms of time, 6163 * we are maintaining a local copy of the table and the search for 6164 * the free entry is performed in the local table. 
6165 */ 6166 if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry)) 6167 != XGE_HAL_OK) { 6168 xge_os_spin_unlock(&hldev->spdm_lock); 6169 return status; 6170 } 6171 6172 /* 6173 * Add this entry to the SPDM table 6174 */ 6175 status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp, 6176 is_tcp, is_ipv4, tgt_queue, 6177 jhash_value, /* calculated jhash */ 6178 spdm_entry); 6179 6180 xge_os_spin_unlock(&hldev->spdm_lock); 6181 6182 return status; 6183 } 6184 6185 /** 6186 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table. 6187 * @devh: HAL device handle. 6188 * @src_ip: Source ip address(IPv4/IPv6). 6189 * @dst_ip: Destination ip address(IPv4/IPv6). 6190 * @l4_sp: L4 source port. 6191 * @l4_dp: L4 destination port. 6192 * @is_tcp: Set to 1, if the protocol is TCP. 6193 * 0, if the protocol os UDP. 6194 * @is_ipv4: Set to 1, if the protocol is IPv4. 6195 * 0, if the protocol is IPv6. 6196 * 6197 * This function remove an entry from the SPDM table. 6198 * 6199 * Returns: XGE_HAL_OK - success. 6200 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 6201 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in 6202 * the time(timeout). 6203 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM 6204 * table. 6205 * 6206 * See also: xge_hal_spdm_entry_add{}. 6207 */ 6208 xge_hal_status_e 6209 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip, 6210 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, 6211 u8 is_tcp, u8 is_ipv4) 6212 { 6213 6214 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6215 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6216 u64 val64; 6217 u16 spdm_entry; 6218 xge_hal_status_e status; 6219 u64 spdm_line_arr[8]; 6220 u8 line_no; 6221 u8 spdm_is_tcp; 6222 u8 spdm_is_ipv4; 6223 u16 spdm_l4_sp; 6224 u16 spdm_l4_dp; 6225 6226 if (!hldev->config.rth_spdm_en) { 6227 return XGE_HAL_ERR_SPDM_NOT_ENABLED; 6228 } 6229 6230 xge_os_spin_lock(&hldev->spdm_lock); 6231 6232 /* 6233 * Poll the rxpic_int_reg register until spdm ready bit is set or 6234 * timeout happens. 6235 */ 6236 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1, 6237 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 6238 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 6239 6240 /* upper layer may require to repeat */ 6241 xge_os_spin_unlock(&hldev->spdm_lock); 6242 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 6243 } 6244 6245 /* 6246 * Clear the SPDM READY bit. 6247 */ 6248 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6249 &bar0->rxpic_int_reg); 6250 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY; 6251 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6252 &bar0->rxpic_int_reg); 6253 6254 /* 6255 * Search in the local SPDM table to get the index of the 6256 * corresponding entry in the SPDM table. 
6257 */ 6258 spdm_entry = 0; 6259 for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) { 6260 if ((!hldev->spdm_table[spdm_entry]->in_use) || 6261 (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) || 6262 (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) || 6263 (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) || 6264 (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) { 6265 continue; 6266 } 6267 6268 /* 6269 * Compare the src/dst IP addresses of source and target 6270 */ 6271 if (is_ipv4) { 6272 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr 6273 != src_ip->ipv4.addr) || 6274 (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr 6275 != dst_ip->ipv4.addr)) { 6276 continue; 6277 } 6278 } else { 6279 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0] 6280 != src_ip->ipv6.addr[0]) || 6281 (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1] 6282 != src_ip->ipv6.addr[1]) || 6283 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0] 6284 != dst_ip->ipv6.addr[0]) || 6285 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1] 6286 != dst_ip->ipv6.addr[1])) { 6287 continue; 6288 } 6289 } 6290 break; 6291 } 6292 6293 if (spdm_entry >= hldev->spdm_max_entries) { 6294 xge_os_spin_unlock(&hldev->spdm_lock); 6295 return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND; 6296 } 6297 6298 /* 6299 * Retrieve the corresponding entry from the SPDM table and 6300 * make sure that the data is consistent. 6301 */ 6302 for(line_no = 0; line_no < 8; line_no++) { 6303 6304 /* 6305 * SPDM line 2,3,4 are valid only for IPv6 entry. 6306 * SPDM line 5 & 6 are reserved. We don't have to 6307 * read these entries in the above cases. 6308 */ 6309 if (((is_ipv4) && 6310 ((line_no == 2)||(line_no == 3)||(line_no == 4))) || 6311 (line_no == 5) || 6312 (line_no == 6)) { 6313 continue; 6314 } 6315 6316 if ((status = __hal_read_spdm_entry_line( 6317 hldev, 6318 line_no, 6319 spdm_entry, 6320 &spdm_line_arr[line_no])) 6321 != XGE_HAL_OK) { 6322 xge_os_spin_unlock(&hldev->spdm_lock); 6323 return status; 6324 } 6325 } 6326 6327 /* 6328 * Seventh line of the spdm entry contains the entry_enable 6329 * bit. Make sure that the entry_enable bit of this spdm entry 6330 * is set. 6331 * To remove an entry from the SPDM table, reset this 6332 * bit. 6333 */ 6334 if (!(spdm_line_arr[7] & BIT(63))) { 6335 /* 6336 * Log a warning 6337 */ 6338 xge_debug_device(XGE_ERR, "Local SPDM table is not " 6339 "consistent with the actual one for the spdm " 6340 "entry %d ", spdm_entry); 6341 goto err_exit; 6342 } 6343 6344 /* 6345 * Retreive the L4 SP/DP, src/dst ip addresses from the SPDM 6346 * table and do a comparision. 6347 */ 6348 spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4); 6349 spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63)); 6350 spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48); 6351 spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff); 6352 6353 6354 if ((spdm_is_tcp != is_tcp) || 6355 (spdm_is_ipv4 != is_ipv4) || 6356 (spdm_l4_sp != l4_sp) || 6357 (spdm_l4_dp != l4_dp)) { 6358 /* 6359 * Log a warning 6360 */ 6361 xge_debug_device(XGE_ERR, "Local SPDM table is not " 6362 "consistent with the actual one for the spdm " 6363 "entry %d ", spdm_entry); 6364 goto err_exit; 6365 } 6366 6367 if (is_ipv4) { 6368 /* Upper 32 bits of spdm_line(64 bit) contains the 6369 * src IPv4 address. Lower 32 bits of spdm_line 6370 * contains the destination IPv4 address. 
6371 */ 6372 u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32); 6373 u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff); 6374 6375 if ((temp_src_ip != src_ip->ipv4.addr) || 6376 (temp_dst_ip != dst_ip->ipv4.addr)) { 6377 xge_debug_device(XGE_ERR, "Local SPDM table is not " 6378 "consistent with the actual one for the spdm " 6379 "entry %d ", spdm_entry); 6380 goto err_exit; 6381 } 6382 6383 } else { 6384 /* 6385 * SPDM line 1 & 2 contains the src IPv6 address. 6386 * SPDM line 3 & 4 contains the dst IPv6 address. 6387 */ 6388 if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) || 6389 (spdm_line_arr[2] != src_ip->ipv6.addr[1]) || 6390 (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) || 6391 (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) { 6392 6393 /* 6394 * Log a warning 6395 */ 6396 xge_debug_device(XGE_ERR, "Local SPDM table is not " 6397 "consistent with the actual one for the spdm " 6398 "entry %d ", spdm_entry); 6399 goto err_exit; 6400 } 6401 } 6402 6403 /* 6404 * Reset the entry_enable bit to zero 6405 */ 6406 spdm_line_arr[7] &= ~BIT(63); 6407 6408 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 6409 spdm_line_arr[7], 6410 (void *)((char *)hldev->spdm_mem_base + 6411 (spdm_entry * 64) + (7 * 8))); 6412 6413 /* 6414 * Wait for the operation to be completed. 6415 */ 6416 if (__hal_device_register_poll(hldev, 6417 &bar0->rxpic_int_reg, 1, 6418 XGE_HAL_RX_PIC_INT_REG_SPDM_READY, 6419 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 6420 xge_os_spin_unlock(&hldev->spdm_lock); 6421 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 6422 } 6423 6424 /* 6425 * Make the corresponding spdm entry in the local SPDM table 6426 * available for future use. 6427 */ 6428 hldev->spdm_table[spdm_entry]->in_use = 0; 6429 xge_os_spin_unlock(&hldev->spdm_lock); 6430 6431 return XGE_HAL_OK; 6432 6433 err_exit: 6434 xge_os_spin_unlock(&hldev->spdm_lock); 6435 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; 6436 } 6437 6438 /* 6439 * __hal_device_rti_set 6440 * @ring: The post_qid of the ring. 6441 * @channel: HAL channel of the ring. 6442 * 6443 * This function stores the RTI value associated for the MSI and 6444 * also unmasks this particular RTI in the rti_mask register. 6445 */ 6446 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel) 6447 { 6448 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6449 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6450 u64 val64; 6451 6452 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || 6453 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) 6454 channel->rti = (u8)ring_qid; 6455 6456 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6457 &bar0->rx_traffic_mask); 6458 val64 &= ~BIT(ring_qid); 6459 xge_os_pio_mem_write64(hldev->pdev, 6460 hldev->regh0, val64, 6461 &bar0->rx_traffic_mask); 6462 } 6463 6464 /* 6465 * __hal_device_tti_set 6466 * @ring: The post_qid of the FIFO. 6467 * @channel: HAL channel the FIFO. 6468 * 6469 * This function stores the TTI value associated for the MSI and 6470 * also unmasks this particular TTI in the tti_mask register. 
 */
static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
		channel->tti = (u8)fifo_qid;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->tx_traffic_mask);
	val64 &= ~BIT(fifo_qid);
	xge_os_pio_mem_write64(hldev->pdev,
			hldev->regh0, val64,
			&bar0->tx_traffic_mask);
}

/**
 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
 * FIFO for a given MSI.
 * @channelh: HAL channel handle.
 * @msi: MSI Number associated with the channel.
 * @msi_msg: The MSI message associated with the MSI number above.
 *
 * This API will associate a given channel (either Ring or FIFO) with the
 * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in the
 * hardware to indicate this association to the hardware.
 */
xge_hal_status_e
xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	channel->msi_msg = msi_msg;
	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
		int ring = channel->post_qid;
		xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
			" MSI: %d", channel->msi_msg, ring, msi);
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->rx_mat);
		val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				&bar0->rx_mat);
		__hal_device_rti_set(ring, channel);
	} else {
		int fifo = channel->post_qid;
		xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
			" MSI: %d", channel->msi_msg, fifo, msi);
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				&bar0->tx_mat[0]);
		val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				&bar0->tx_mat[0]);
		__hal_device_tti_set(fifo, channel);
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_mask_msix - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @msi_id: MSI ID
 *
 * The function masks the msix interrupt for the given msi_id
 *
 * Note:
 *
 * Returns: XGE_HAL_OK, otherwise XGE_HAL_ERR_WRONG_IRQ if the msix index
 * is out of range.
 * See also:
 */
xge_hal_status_e
xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;
	u32 val32;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
	val32 |= 1;
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
	return status;
}

/**
 * xge_hal_unmask_msix - End IRQ processing.
 * @hldev: HAL device handle.
 * @msi_id: MSI ID
 *
 * The function unmasks the msix interrupt for the given msi_id
 *
 * Note:
 *
 * Returns: XGE_HAL_OK, otherwise XGE_HAL_ERR_WRONG_IRQ if the msix index
 * is out of range.
 * See also:
 */
xge_hal_status_e
xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;
	u32 val32;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
	val32 &= ~1;
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
	return status;
}

/*
 * __hal_set_msix_vals
 * @devh: HAL device handle.
 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
 *              Filled in by this function.
 * @msix_address: 32bit MSI-X DMA address.
 *                Filled in by this function.
 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
 *            entry in the table of MSI-X (value, address) pairs.
 *
 * This function will program the hardware associating the given
 * address/value combination to the specified msi number.
 */
static void __hal_set_msix_vals(xge_hal_device_h devh,
			      u32 *msix_value,
			      u64 *msix_addr,
			      int msix_idx)
{
	int cnt = 0;

	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
			(u32)(val64 >> 32), &bar0->xmsi_access);
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
			(u32)(val64), &bar0->xmsi_access);
	do {
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->xmsi_access);
		if (val64 & XGE_HAL_XMSI_STROBE)
			break;
		cnt++;
		xge_os_mdelay(20);
	} while (cnt < 5);
	*msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->xmsi_data));
	*msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			&bar0->xmsi_address);
}

/**
 * xge_hal_channel_msix_set - Associate MSI-X with a channel.
 * @channelh: HAL channel handle.
 * @msix_idx: index that corresponds to a particular (@msix_value,
 *            @msix_address) entry in the MSI-X table.
 *
 * This API associates a given channel (either Ring or FIFO) with the
 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
 * to indicate this association.
 */
xge_hal_status_e
xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
		/* Currently Ring and RTI are mapped one to one.
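		 * For a ring whose post_qid is N this programs RX_MAT so
		 * that ring N raises the given MSI-X vector, and
		 * __hal_device_rti_set() records N as the channel's RTI
		 * (in MSI/MSI-X mode) and unmasks BIT(N) in rx_traffic_mask.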
*/ 6658 int ring = channel->post_qid; 6659 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6660 &bar0->rx_mat); 6661 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); 6662 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6663 &bar0->rx_mat); 6664 __hal_device_rti_set(ring, channel); 6665 hldev->config.ring.queue[channel->post_qid].intr_vector = 6666 msix_idx; 6667 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { 6668 int fifo = channel->post_qid; 6669 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6670 &bar0->tx_mat[0]); 6671 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); 6672 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6673 &bar0->tx_mat[0]); 6674 __hal_device_tti_set(fifo, channel); 6675 hldev->config.fifo.queue[channel->post_qid].intr_vector = 6676 msix_idx; 6677 } 6678 channel->msix_idx = msix_idx; 6679 __hal_set_msix_vals(hldev, &channel->msix_data, 6680 &channel->msix_address, 6681 channel->msix_idx); 6682 6683 return XGE_HAL_OK; 6684 } 6685 6686 #if defined(XGE_HAL_CONFIG_LRO) 6687 /** 6688 * xge_hal_lro_terminate - Terminate lro resources. 6689 * @lro_scale: Amount of lro memory. 6690 * @hldev: Hal device structure. 6691 * 6692 */ 6693 void 6694 xge_hal_lro_terminate(u32 lro_scale, 6695 xge_hal_device_t *hldev) 6696 { 6697 } 6698 6699 /** 6700 * xge_hal_lro_init - Initiate lro resources. 6701 * @lro_scale: Amount of lro memory. 6702 * @hldev: Hal device structure. 6703 * Note: For time being I am using only one LRO per device. Later on size 6704 * will be increased. 6705 */ 6706 6707 xge_hal_status_e 6708 xge_hal_lro_init(u32 lro_scale, 6709 xge_hal_device_t *hldev) 6710 { 6711 int i; 6712 6713 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE) 6714 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; 6715 6716 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE) 6717 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; 6718 6719 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++) 6720 { 6721 xge_os_memzero(hldev->lro_desc[i].lro_pool, 6722 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); 6723 6724 hldev->lro_desc[i].lro_next_idx = 0; 6725 hldev->lro_desc[i].lro_recent = NULL; 6726 } 6727 6728 return XGE_HAL_OK; 6729 } 6730 #endif 6731 6732 6733 /** 6734 * xge_hal_device_poll - HAL device "polling" entry point. 6735 * @devh: HAL device. 6736 * 6737 * HAL "polling" entry point. Note that this is part of HAL public API. 6738 * Upper-Layer driver _must_ periodically poll HAL via 6739 * xge_hal_device_poll(). 6740 * 6741 * HAL uses caller's execution context to serially process accumulated 6742 * slow-path events, such as link state changes and hardware error 6743 * indications. 6744 * 6745 * The rate of polling could be somewhere between 500us to 10ms, 6746 * depending on requirements (e.g., the requirement to support fail-over 6747 * could mean that 500us or even 100us polling interval need to be used). 6748 * 6749 * The need and motivation for external polling includes 6750 * 6751 * - remove the error-checking "burden" from the HAL interrupt handler 6752 * (see xge_hal_device_handle_irq()); 6753 * 6754 * - remove the potential source of portability issues by _not_ 6755 * implementing separate polling thread within HAL itself. 6756 * 6757 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}. 6758 * Usage: See ex_slow_path{}. 
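 *
 * A minimal polling hook might look like this (a sketch only; the timer
 * facility and the 1 millisecond period are assumed ULD choices, not HAL
 * requirements):
 *
 *	static void uld_poll_timer(void *arg)
 *	{
 *		xge_hal_device_h devh = arg;
 *
 *		xge_hal_device_poll(devh);
 *		uld_rearm_timer(uld_poll_timer, arg, 1000);	hypothetical, 1000us
 *	}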
6759 */ 6760 void 6761 xge_hal_device_poll(xge_hal_device_h devh) 6762 { 6763 unsigned char item_buf[sizeof(xge_queue_item_t) + 6764 XGE_DEFAULT_EVENT_MAX_DATA_SIZE]; 6765 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf; 6766 xge_queue_status_e qstatus; 6767 xge_hal_status_e hstatus; 6768 int i = 0; 6769 int queue_has_critical_event = 0; 6770 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 6771 6772 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) + 6773 XGE_DEFAULT_EVENT_MAX_DATA_SIZE)); 6774 6775 _again: 6776 if (!hldev->is_initialized || 6777 hldev->terminating || 6778 hldev->magic != XGE_HAL_MAGIC) 6779 return; 6780 6781 if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000) 6782 { 6783 /* 6784 * Wait for an Hour 6785 */ 6786 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++; 6787 } else { 6788 /* 6789 * Logging Error messages in the excess temperature, 6790 * Bias current, laser ouput for three cycle 6791 */ 6792 __hal_updt_stats_xpak(hldev); 6793 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0; 6794 } 6795 6796 if (!queue_has_critical_event) 6797 queue_has_critical_event = 6798 __queue_get_reset_critical(hldev->queueh); 6799 6800 hldev->in_poll = 1; 6801 while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) { 6802 6803 qstatus = xge_queue_consume(hldev->queueh, 6804 XGE_DEFAULT_EVENT_MAX_DATA_SIZE, 6805 item); 6806 if (qstatus == XGE_QUEUE_IS_EMPTY) 6807 break; 6808 6809 xge_debug_queue(XGE_TRACE, 6810 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x" 6811 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type, 6812 (u64)(ulong_t)item->context); 6813 6814 if (!hldev->is_initialized || 6815 hldev->magic != XGE_HAL_MAGIC) { 6816 hldev->in_poll = 0; 6817 return; 6818 } 6819 6820 switch (item->event_type) { 6821 case XGE_HAL_EVENT_LINK_IS_UP: { 6822 if (!queue_has_critical_event && 6823 g_xge_hal_driver->uld_callbacks.link_up) { 6824 g_xge_hal_driver->uld_callbacks.link_up( 6825 hldev->upper_layer_info); 6826 hldev->link_state = XGE_HAL_LINK_UP; 6827 } 6828 } break; 6829 case XGE_HAL_EVENT_LINK_IS_DOWN: { 6830 if (!queue_has_critical_event && 6831 g_xge_hal_driver->uld_callbacks.link_down) { 6832 g_xge_hal_driver->uld_callbacks.link_down( 6833 hldev->upper_layer_info); 6834 hldev->link_state = XGE_HAL_LINK_DOWN; 6835 } 6836 } break; 6837 case XGE_HAL_EVENT_SERR: 6838 case XGE_HAL_EVENT_ECCERR: 6839 case XGE_HAL_EVENT_PARITYERR: 6840 case XGE_HAL_EVENT_TARGETABORT: 6841 case XGE_HAL_EVENT_SLOT_FREEZE: { 6842 void *item_data = xge_queue_item_data(item); 6843 xge_hal_event_e event_type = item->event_type; 6844 u64 val64 = *((u64*)item_data); 6845 6846 if (event_type != XGE_HAL_EVENT_SLOT_FREEZE) 6847 if (xge_hal_device_is_slot_freeze(hldev)) 6848 event_type = XGE_HAL_EVENT_SLOT_FREEZE; 6849 if (g_xge_hal_driver->uld_callbacks.crit_err) { 6850 g_xge_hal_driver->uld_callbacks.crit_err( 6851 hldev->upper_layer_info, 6852 event_type, 6853 val64); 6854 /* handle one critical event per poll cycle */ 6855 hldev->in_poll = 0; 6856 return; 6857 } 6858 } break; 6859 default: { 6860 xge_debug_queue(XGE_TRACE, 6861 "got non-HAL event %d", 6862 item->event_type); 6863 } break; 6864 } 6865 6866 /* broadcast this event */ 6867 if (g_xge_hal_driver->uld_callbacks.event) 6868 g_xge_hal_driver->uld_callbacks.event(item); 6869 } 6870 6871 if (g_xge_hal_driver->uld_callbacks.before_device_poll) { 6872 if (g_xge_hal_driver->uld_callbacks.before_device_poll( 6873 hldev) != 0) { 6874 hldev->in_poll = 0; 6875 return; 6876 } 6877 } 6878 
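	/*
	 * Note that a non-zero return from the optional before_device_poll()
	 * callback above ends this poll cycle before the slow-path processing
	 * below runs; in_poll has already been cleared in that case.
	 */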
6879 hstatus = __hal_device_poll(hldev); 6880 if (g_xge_hal_driver->uld_callbacks.after_device_poll) 6881 g_xge_hal_driver->uld_callbacks.after_device_poll(hldev); 6882 6883 /* 6884 * handle critical error right away: 6885 * - walk the device queue again 6886 * - drop non-critical events, if any 6887 * - look for the 1st critical 6888 */ 6889 if (hstatus == XGE_HAL_ERR_CRITICAL) { 6890 queue_has_critical_event = 1; 6891 goto _again; 6892 } 6893 6894 hldev->in_poll = 0; 6895 } 6896 6897 /** 6898 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing. 6899 * @hldev: HAL device handle. 6900 * 6901 * This function is used to set the adapter to enhanced mode. 6902 * 6903 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 6904 */ 6905 void 6906 xge_hal_rts_rth_init(xge_hal_device_t *hldev) 6907 { 6908 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6909 u64 val64; 6910 6911 /* 6912 * Set the receive traffic steering mode from default(classic) 6913 * to enhanced. 6914 */ 6915 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6916 &bar0->rts_ctrl); 6917 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 6918 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 6919 val64, &bar0->rts_ctrl); 6920 } 6921 6922 /** 6923 * xge_hal_rts_rth_clr - Clear RTS hashing. 6924 * @hldev: HAL device handle. 6925 * 6926 * This function is used to clear all RTS hashing related stuff. 6927 * It brings the adapter out from enhanced mode to classic mode. 6928 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc. 6929 * 6930 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set(). 6931 */ 6932 void 6933 xge_hal_rts_rth_clr(xge_hal_device_t *hldev) 6934 { 6935 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6936 u64 val64; 6937 6938 /* 6939 * Set the receive traffic steering mode from default(classic) 6940 * to enhanced. 6941 */ 6942 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6943 &bar0->rts_ctrl); 6944 val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE; 6945 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 6946 val64, &bar0->rts_ctrl); 6947 val64 = 0; 6948 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6949 &bar0->rts_rth_cfg); 6950 } 6951 6952 /** 6953 * xge_hal_rts_rth_set - Set/configure RTS hashing. 6954 * @hldev: HAL device handle. 6955 * @def_q: default queue 6956 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc. 6957 * @bucket_size: no of least significant bits to be used for hashing. 6958 * 6959 * Used to set/configure all RTS hashing related stuff. 6960 * - set the steering mode to enhanced. 6961 * - set hash function i.e algo selection. 6962 * - set the default queue. 6963 * 6964 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(). 6965 */ 6966 void 6967 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type, 6968 u16 bucket_size) 6969 { 6970 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6971 u64 val64; 6972 6973 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q); 6974 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6975 &bar0->rts_default_q); 6976 6977 val64 = hash_type; 6978 val64 |= XGE_HAL_RTS_RTH_EN; 6979 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size); 6980 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS; 6981 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6982 &bar0->rts_rth_cfg); 6983 } 6984 6985 /** 6986 * xge_hal_rts_rth_start - Start RTS hashing. 6987 * @hldev: HAL device handle. 6988 * 6989 * Used to Start RTS hashing . 
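 * In a typical bring-up sequence this is the final step, after
 * xge_hal_rts_rth_init(), xge_hal_rts_rth_set() and
 * xge_hal_rts_rth_itable_set() have configured the enhanced steering
 * mode, the hash function and the indirection table (see the usage
 * sketch appended after xge_hal_fix_rldram_ecc_error()).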
6990 * 6991 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 6992 */ 6993 void 6994 xge_hal_rts_rth_start(xge_hal_device_t *hldev) 6995 { 6996 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6997 u64 val64; 6998 6999 7000 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7001 &bar0->rts_rth_cfg); 7002 val64 |= XGE_HAL_RTS_RTH_EN; 7003 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7004 &bar0->rts_rth_cfg); 7005 } 7006 7007 /** 7008 * xge_hal_rts_rth_stop - Stop the RTS hashing. 7009 * @hldev: HAL device handle. 7010 * 7011 * Used to Staop RTS hashing . 7012 * 7013 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 7014 */ 7015 void 7016 xge_hal_rts_rth_stop(xge_hal_device_t *hldev) 7017 { 7018 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7019 u64 val64; 7020 7021 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7022 &bar0->rts_rth_cfg); 7023 val64 &= ~XGE_HAL_RTS_RTH_EN; 7024 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7025 &bar0->rts_rth_cfg); 7026 } 7027 7028 /** 7029 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT). 7030 * @hldev: HAL device handle. 7031 * @itable: Pointer to the indirection table 7032 * @itable_size: no of least significant bits to be used for hashing 7033 * 7034 * Used to set/configure indirection table. 7035 * It enables the required no of entries in the IT. 7036 * It adds entries to the IT. 7037 * 7038 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7039 */ 7040 xge_hal_status_e 7041 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size) 7042 { 7043 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7044 u64 val64; 7045 u32 idx; 7046 7047 for (idx = 0; idx < itable_size; idx++) { 7048 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | 7049 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]); 7050 7051 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7052 &bar0->rts_rth_map_mem_data); 7053 7054 /* execute */ 7055 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | 7056 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | 7057 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx)); 7058 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7059 &bar0->rts_rth_map_mem_ctrl); 7060 7061 /* poll until done */ 7062 if (__hal_device_register_poll(hldev, 7063 &bar0->rts_rth_map_mem_ctrl, 0, 7064 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, 7065 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 7066 /* upper layer may require to repeat */ 7067 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7068 } 7069 } 7070 7071 return XGE_HAL_OK; 7072 } 7073 7074 7075 /** 7076 * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc. 7077 * 7078 * @hldev: HAL device handle. 7079 * @KeySize: Number of 64-bit words 7080 * @Key: upto 40-byte array of 8-bit values 7081 * This function configures the 40-byte secret which is used for hash 7082 * calculation. 7083 * 7084 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7085 */ 7086 void 7087 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key) 7088 { 7089 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0; 7090 u64 val64; 7091 u32 entry, nreg, i; 7092 7093 entry = 0; 7094 nreg = 0; 7095 7096 while( KeySize ) { 7097 val64 = 0; 7098 for ( i = 0; i < 8 ; i++) { 7099 /* Prepare 64-bit word for 'nreg' containing 8 keys. 
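			 * Key[0] ends up in the most significant byte: for
			 * nreg 0 the resulting word is Key[0]<<56 | Key[1]<<48
			 * | ... | Key[7], so a full 40-byte key fills
			 * rts_rth_hash_mask[0] through rts_rth_hash_mask[4].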
*/ 7100 if (i) 7101 val64 <<= 8; 7102 val64 |= Key[entry++]; 7103 } 7104 7105 KeySize--; 7106 7107 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/ 7108 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7109 &bar0->rts_rth_hash_mask[nreg++]); 7110 } 7111 7112 while( nreg < 5 ) { 7113 /* Clear the rest if key is less than 40 bytes */ 7114 val64 = 0; 7115 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7116 &bar0->rts_rth_hash_mask[nreg++]); 7117 } 7118 } 7119 7120 7121 /** 7122 * xge_hal_device_is_closed - Device is closed 7123 * 7124 * @devh: HAL device handle. 7125 */ 7126 int 7127 xge_hal_device_is_closed(xge_hal_device_h devh) 7128 { 7129 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 7130 7131 if (xge_list_is_empty(&hldev->fifo_channels) && 7132 xge_list_is_empty(&hldev->ring_channels)) 7133 return 1; 7134 7135 return 0; 7136 } 7137 7138 xge_hal_status_e 7139 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index) 7140 { 7141 u64 val64; 7142 int section; 7143 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; 7144 7145 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 7146 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7147 7148 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 7149 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; 7150 7151 if ( index >= max_addr ) 7152 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 7153 7154 /* 7155 * Calculate the section value 7156 */ 7157 section = index / 32; 7158 7159 xge_debug_device(XGE_TRACE, "the Section value is %d ", section); 7160 7161 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7162 &bar0->rts_mac_cfg); 7163 switch(section) 7164 { 7165 case 0: 7166 val64 |= XGE_HAL_RTS_MAC_SECT0_EN; 7167 break; 7168 case 1: 7169 val64 |= XGE_HAL_RTS_MAC_SECT1_EN; 7170 break; 7171 case 2: 7172 val64 |= XGE_HAL_RTS_MAC_SECT2_EN; 7173 break; 7174 case 3: 7175 val64 |= XGE_HAL_RTS_MAC_SECT3_EN; 7176 break; 7177 case 4: 7178 val64 |= XGE_HAL_RTS_MAC_SECT4_EN; 7179 break; 7180 case 5: 7181 val64 |= XGE_HAL_RTS_MAC_SECT5_EN; 7182 break; 7183 case 6: 7184 val64 |= XGE_HAL_RTS_MAC_SECT6_EN; 7185 break; 7186 case 7: 7187 val64 |= XGE_HAL_RTS_MAC_SECT7_EN; 7188 break; 7189 default: 7190 xge_debug_device(XGE_ERR, "Invalid Section value %d " 7191 , section); 7192 } 7193 7194 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 7195 val64, &bar0->rts_mac_cfg); 7196 return XGE_HAL_OK; 7197 } 7198 7199 /* 7200 * xge_hal_fix_rldram_ecc_error 7201 * @hldev: private member of the device structure. 7202 * 7203 * SXE-02-010. This function will turn OFF the ECC error reporting for the 7204 * interface bet'n external Micron RLDRAM II device and memory controller. 7205 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U 7206 * fileds of MC_ERR_REG register. Issue reported by HP-Unix folks during the 7207 * qualification of Herc. 7208 */ 7209 xge_hal_status_e 7210 xge_hal_fix_rldram_ecc_error(xge_hal_device_t *hldev) 7211 { 7212 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 7213 u64 val64; 7214 7215 // Enter Test Mode. 7216 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; 7217 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7218 &bar0->mc_rldram_test_ctrl); 7219 7220 // Enable fg/bg tests. 7221 val64 = 0x0100000000000000ULL; 7222 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7223 &bar0->mc_driver); 7224 7225 // Enable RLDRAM configuration. 
7226 val64 = 0x0000000000017B00ULL; 7227 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7228 &bar0->mc_rldram_mrs); 7229 7230 // Enable RLDRAM queues. 7231 val64 = 0x0000000001017B00ULL; 7232 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7233 &bar0->mc_rldram_mrs); 7234 7235 // Setup test ranges. 7236 val64 = 0x00000000001E0100ULL; 7237 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7238 &bar0->mc_rldram_test_add); 7239 7240 val64 = 0x00000100001F0100ULL; 7241 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7242 &bar0->mc_rldram_test_add_bkg); 7243 7244 // Start Reads. 7245 val64 = 0x0001000000010000ULL; 7246 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7247 &bar0->mc_rldram_test_ctrl); 7248 7249 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1, 7250 XGE_HAL_MC_RLDRAM_TEST_DONE, 7251 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 7252 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7253 } 7254 7255 // Exit test mode. 7256 val64 = 0x0000000000000000ULL; 7257 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7258 &bar0->mc_rldram_test_ctrl); 7259 7260 return XGE_HAL_OK; 7261 } 7262
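
/*
 * Illustrative RTH bring-up sketch (kept inside a comment; it is not part
 * of the HAL).  It only strings together the helpers defined above; the
 * table size, ring count, hash type, bucket size and 40-byte key are
 * assumptions a real ULD would take from its own configuration:
 *
 *	u8 itable[64];			table size is an assumption
 *	u8 key[40];			40-byte secret, filled in by the ULD
 *	u64 hash_type;			e.g. a TCP/IPv4 hash type constant
 *	u32 i;
 *
 *	for (i = 0; i < sizeof(itable); i++)
 *		itable[i] = i % num_rx_rings;	round-robin over the rings
 *
 *	xge_hal_rts_rth_init(hldev);
 *	xge_hal_rts_rth_set(hldev, 0, hash_type, bucket_size);
 *	xge_hal_device_rts_rth_key_set(hldev, 5, key);	5 x 64-bit words
 *	xge_hal_rts_rth_itable_set(hldev, itable, sizeof(itable));
 *	xge_hal_rts_rth_start(hldev);
 */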