// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2018 Solarflare Communications Inc.
 * Copyright 2019-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/aer.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "io.h"
#include "ef100_nic.h"
#include "ef100_netdev.h"
#include "ef100_sriov.h"
#include "ef100_regs.h"
#include "ef100.h"

#define EFX_EF100_PCI_DEFAULT_BAR	2

/* Number of bytes at start of vendor specified extended capability that indicate
 * that the capability is vendor specified. i.e. offset from value returned by
 * pci_find_next_ext_capability() to beginning of vendor specified capability
 * header.
 */
#define PCI_EXT_CAP_HDR_LENGTH	4

/* Expected size of a Xilinx continuation address table entry. */
#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH	16

/* Location of the EF100 function control window, as discovered from the
 * Xilinx capabilities table.
 */
struct ef100_func_ctl_window {
	bool valid;
	unsigned int bar;
	u64 offset;
};

static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result);

/* Number of bytes to offset when reading bit position x with dword accessors. */
#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3)

/* Extract a field of the given width starting at bit position lbn (modulo 32)
 * from the dword x.
 */
#define EXTRACT_BITS(x, lbn, width) \
	(((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1))

static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx,
					      int structure_start,
					      int lbn, int width)
{
	efx_dword_t dword;

	efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn));

	return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width);
}

#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \
	_ef100_pci_get_bar_bits_with_width(efx, entry_location, \
					   ESF_GZ_CFGBAR_ ## bitdef ## _LBN, \
					   ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH)

static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location,
				       struct ef100_func_ctl_window *result)
{
	u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) <<
		ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
	u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR);

	netif_dbg(efx, probe, efx->net_dev,
		  "Found EF100 function control window bar=%d offset=0x%llx\n",
		  bar, offset);

	if (result->valid) {
		netif_err(efx, probe, efx->net_dev,
			  "Duplicated EF100 table entry.\n");
		return -EINVAL;
	}

	if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n",
			  bar);
		return -EINVAL;
	}

	result->bar = bar;
	result->offset = offset;
	result->valid = true;
	return 0;
}

static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar,
					u64 next_entry)
{
	return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE >
		pci_resource_len(efx->pci_dev, bar);
}

/* Parse a Xilinx capabilities table entry describing a continuation to a new
 * sub-table.
 */
static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location,
					  struct ef100_func_ctl_window *result)
{
	unsigned int previous_bar;
	efx_oword_t entry;
	u64 offset;
	int rc = 0;
	u32 bar;

	efx_reado(efx, &entry, entry_location);

	bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR);

	offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) <<
		ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT;

	previous_bar = efx->mem_bar;

	if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_VSEC_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	if (bar != previous_bar) {
		efx_fini_io(efx);

		if (ef100_pci_does_bar_overflow(efx, bar, offset)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
				  bar, offset);
			return -EINVAL;
		}

		/* Temporarily map new BAR. */
		rc = efx_init_io(efx, bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Mapping new BAR for Xilinx table failed, rc=%d\n", rc);
			return rc;
		}
	}

	rc = ef100_pci_walk_xilinx_table(efx, offset, result);
	if (rc)
		return rc;

	if (bar != previous_bar) {
		efx_fini_io(efx);

		/* Put old BAR back. */
		rc = efx_init_io(efx, previous_bar,
				 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
				 pci_resource_len(efx->pci_dev, previous_bar));
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Putting old BAR back failed, rc=%d\n", rc);
			return rc;
		}
	}

	return 0;
}

/* Iterate over the Xilinx capabilities table in the currently mapped BAR and
 * call ef100_pci_parse_ef100_entry() on any EF100 entries and
 * ef100_pci_parse_continue_entry() on any table continuations.
 */
static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset,
				       struct ef100_func_ctl_window *result)
{
	u64 current_entry = offset;
	int rc = 0;

	while (true) {
		u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT);
		u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST);
		u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV);
		u32 entry_size;

		if (id == ESE_GZ_CFGBAR_ENTRY_LAST)
			return 0;

		entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE);

		netif_dbg(efx, probe, efx->net_dev,
			  "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n",
			  id, entry_size, current_entry, efx->mem_bar);

		if (entry_size < sizeof(u32) * 2) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table entry too short len=0x%x\n", entry_size);
			return -EINVAL;
		}

		switch (id) {
		case ESE_GZ_CFGBAR_ENTRY_EF100:
			if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
			    entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_ef100_entry(efx, current_entry,
							 result);
			if (rc)
				return rc;
			break;
		case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR:
			if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) {
				netif_err(efx, probe, efx->net_dev,
					  "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n",
					  entry_size, rev);
				return -EINVAL;
			}

			rc = ef100_pci_parse_continue_entry(efx, current_entry, result);
			if (rc)
				return rc;
			break;
		default:
			/* Ignore unknown table entries. */
			break;
		}

		if (last)
			return 0;

		current_entry += entry_size;

		if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) {
			netif_err(efx, probe, efx->net_dev,
				  "Xilinx table overrun at position=0x%llx.\n",
				  current_entry);
			return -EINVAL;
		}
	}
}

static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx,
						 int structure_start, int lbn,
						 int width, u32 *result)
{
	int rc, pos = structure_start + ROUND_DOWN_TO_DWORD(lbn);
	u32 temp;

	rc = pci_read_config_dword(efx->pci_dev, pos, &temp);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read PCI config dword at %d\n",
			  pos);
		return rc;
	}

	*result = EXTRACT_BITS(temp, lbn, width);

	return 0;
}

#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \
	_ef100_pci_get_config_bits_with_width(efx, entry_location, \
					      ESF_GZ_VSEC_ ## bitdef ## _LBN, \
					      ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result)

/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed
 * to by this PCI_EXT_CAP_ID_VNDR.
 */
static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap,
				      bool has_offset_hi,
				      struct ef100_func_ctl_window *result)
{
	u32 offset_high = 0;
	u32 offset_lo = 0;
	u64 offset = 0;
	u32 bar = 0;
	int rc = 0;

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n",
			  rc);
		return rc;
	}

	if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM ||
	    bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) {
		netif_err(efx, probe, efx->net_dev,
			  "Bad BAR value of %d in Xilinx capabilities sub-table.\n",
			  bar);
		return -EINVAL;
	}

	rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n",
			  rc);
		return rc;
	}

	/* Get optional extension to 64bit offset. */
	if (has_offset_hi) {
		rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n",
				  rc);
			return rc;
		}
	}

	offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) |
		 (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT);

	if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) {
		netif_err(efx, probe, efx->net_dev,
			  "Xilinx table will overrun BAR[%d] offset=0x%llx\n",
			  bar, offset);
		return -EINVAL;
	}

	/* Temporarily map BAR. */
	rc = efx_init_io(efx, bar,
			 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
			 pci_resource_len(efx->pci_dev, bar));
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "efx_init_io failed, rc=%d\n", rc);
		return rc;
	}

	rc = ef100_pci_walk_xilinx_table(efx, offset, result);

	/* Unmap temporarily mapped BAR. */
	efx_fini_io(efx);
	return rc;
}

/* Call ef100_pci_parse_xilinx_cap() for each Xilinx PCI_EXT_CAP_ID_VNDR
 * capability.
 */
static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx,
					   struct ef100_func_ctl_window *result)
{
	int num_xilinx_caps = 0;
	int cap = 0;

	result->valid = false;

	while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) {
		int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH;
		u32 vsec_ver = 0;
		u32 vsec_len = 0;
		u32 vsec_id = 0;
		int rc = 0;

		num_xilinx_caps++;

		rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_ID, rc=%d\n",
				  rc);
			return rc;
		}

		rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_VER, rc=%d\n",
				  rc);
			return rc;
		}

		/* Get length of whole capability - i.e. starting at cap */
		rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n",
				  rc);
			return rc;
		}

		if (vsec_id == ESE_GZ_XILINX_VSEC_ID &&
		    vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR &&
		    vsec_len >= ESE_GZ_VSEC_LEN_MIN) {
			bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT);

			rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap,
							has_offset_hi, result);
			if (rc)
				return rc;
		}
	}

	if (num_xilinx_caps && !result->valid) {
		netif_err(efx, probe, efx->net_dev,
			  "Seen %d Xilinx tables, but no EF100 entry.\n",
			  num_xilinx_caps);
		return -EINVAL;
	}

	return 0;
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void ef100_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	struct efx_probe_data *probe_data;

	if (!efx)
		return;

	probe_data = container_of(efx, struct efx_probe_data, efx);
	ef100_remove_netdev(probe_data);
#ifdef CONFIG_SFC_SRIOV
	efx_fini_struct_tc(efx);
#endif

	ef100_remove(efx);
	efx_fini_io(efx);

	pci_dbg(pci_dev, "shutdown successful\n");

	pci_disable_pcie_error_reporting(pci_dev);

	pci_set_drvdata(pci_dev, NULL);
	efx_fini_struct(efx);
	kfree(probe_data);
}

static int ef100_pci_probe(struct pci_dev *pci_dev,
			   const struct pci_device_id *entry)
{
	struct ef100_func_ctl_window fcw = { 0 };
	struct efx_probe_data *probe_data;
	struct efx_nic *efx;
	int rc;

	/* Allocate probe data and struct efx_nic */
	probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
	if (!probe_data)
		return -ENOMEM;
	probe_data->pci_dev = pci_dev;
	efx = &probe_data->efx;

	efx->type = (const struct efx_nic_type *)entry->driver_data;

	efx->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, efx);
	rc = efx_init_struct(efx, pci_dev);
	if (rc)
		goto fail;

	efx->vi_stride = EF100_DEFAULT_VI_STRIDE;
	pci_info(pci_dev, "Solarflare EF100 NIC detected\n");

	rc = ef100_pci_find_func_ctrl_window(efx, &fcw);
	if (rc) {
		pci_err(pci_dev,
			"Error looking for ef100 function control window, rc=%d\n",
			rc);
		goto fail;
	}

	if (!fcw.valid) {
		/* Extended capability not found - use defaults. */
		fcw.bar = EFX_EF100_PCI_DEFAULT_BAR;
		fcw.offset = 0;
		fcw.valid = true;
	}

	if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
		pci_err(pci_dev, "Func control window overruns BAR\n");
		rc = -EIO;
		goto fail;
	}

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx, fcw.bar,
			 (dma_addr_t)DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH),
			 pci_resource_len(efx->pci_dev, fcw.bar));
	if (rc)
		goto fail;

	efx->reg_base = fcw.offset;

	rc = efx->type->probe(efx);
	if (rc)
		goto fail;

	efx->state = STATE_PROBED;
	rc = ef100_probe_netdev(probe_data);
	if (rc)
		goto fail;

	pci_dbg(pci_dev, "initialisation successful\n");

	return 0;

fail:
	ef100_pci_remove(pci_dev);
	return rc;
}

#ifdef CONFIG_SFC_SRIOV
static int ef100_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	struct efx_nic *efx = pci_get_drvdata(dev);
	int rc;

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	}
	return -ENOENT;
}
#endif

/* PCI device ID table */
static const struct pci_device_id ef100_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100),  /* Riverhead PF */
		.driver_data = (unsigned long) &ef100_pf_nic_type },
	{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100),  /* Riverhead VF */
		.driver_data = (unsigned long) &ef100_vf_nic_type },
	{0}  /* end of list */
};

struct pci_driver ef100_pci_driver = {
	.name = "sfc_ef100",
	.id_table = ef100_pci_table,
	.probe = ef100_pci_probe,
	.remove = ef100_pci_remove,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = ef100_pci_sriov_configure,
#endif
	.err_handler = &efx_err_handlers,
};

MODULE_DEVICE_TABLE(pci, ef100_pci_table);