// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
EXPORT_SYMBOL(vnic_dev_get_res);

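/* Descriptor rings are sized up to their alignment requirements: the count
 * is rounded up to VNIC_DESC_COUNT_ALIGN, the descriptor size to
 * VNIC_DESC_SIZE_ALIGN, and the allocation is padded by base_align so the
 * ring base address can be aligned after dma_alloc_coherent().
 */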
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* Descriptor ring base address alignment in bytes */
	ring->base_align = VNIC_DESC_BASE_ALIGN;

	/* A count of 0 means the maximum descriptors */
	if (desc_count == 0)
		desc_count = VNIC_DESC_MAX_COUNT;

	/* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
	ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);

	/* Descriptor size alignment in bytes */
	ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
						   ring->size_unaligned,
						   &ring->base_addr_unaligned,
						   GFP_KERNEL);
	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
				ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
				  ring->descs_unaligned,
				  ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

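/* devcmd2: unlike the register-based devcmd above, commands are posted to a
 * work queue ring and completions are polled from a separate results ring.
 * The color bit flips on every wrap of the results ring so a stale entry is
 * never mistaken for a fresh completion.
 */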
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}

static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}

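/* Proxy devcmds wrap the real command: args[0] carries the target vNIC
 * index (or BDF), args[1] the wrapped command, and the wrapped arguments
 * are shifted up by two; the reply status and results come back shifted
 * the same way.
 */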
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
				    err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

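/* Typical vnic_dev_cmd() caller pattern (sketch): load a0/a1 with the
 * command arguments, pick a timeout in 100 usec polling steps, and issue
 * the command, e.g.
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;
 *
 *	err = vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
 *
 * On success a0/a1 hold any values returned by the firmware.
 */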
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
						   sizeof(struct vnic_devcmd_fw_info),
						   &vdev->fw_info_pa, GFP_ATOMIC);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
					   &a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
					   &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
						 sizeof(struct vnic_stats),
						 &vdev->stats_pa, GFP_ATOMIC);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

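/* Hang reset falls back to a soft reset followed by re-init on firmware
 * that does not implement CMD_HANG_RESET (and likewise for the _done poll).
 */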
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

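/* The notify block is a small structure the firmware DMAs into host memory.
 * CMD_NOTIFY registers its bus address and the interrupt to raise; word 0
 * of the block is a checksum of the remaining words, which lets
 * vnic_dev_notify_ready() retry until it reads a consistent snapshot.
 */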
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr(vdev, "notify block %p still allocated\n",
			    vdev->notify);
		return -EINVAL;
	}

	notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
					 sizeof(struct vnic_devcmd_notify),
					 &notify_pa, GFP_ATOMIC);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		dma_free_coherent(&vdev->pdev->dev,
				  sizeof(struct vnic_devcmd_notify),
				  vdev->notify, vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

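/* Interrupt coalescing timers are programmed in hardware cycles; the
 * mul/div pair converts usecs to cycles (usec * mul / div). The default
 * 2/3 ratio corresponds to the 1.5 usec hardware cycle noted below; newer
 * firmware reports its own factors via CMD_INTR_COAL_CONVERT.
 */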
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_notify),
					  vdev->notify, vdev->notify_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_stats),
					  vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_fw_info),
					  vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);

struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);

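/* Prefer the devcmd2 posted-command interface when the firmware exposes a
 * DEVCMD2 resource; otherwise, or if devcmd2 setup fails, fall back to the
 * legacy register-based devcmd interface.
 */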
int vnic_devcmd_init(struct vnic_dev *vdev)
{
	void __iomem *res;
	int err;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
				  err);
		else
			return 0;
	} else {
		vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
	}
	err = vnic_dev_init_devcmd1(vdev);
	if (err)
		vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);

	return err;
}

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}

int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}

/* vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.
 *
 *	   This function stores the filter_id returned by the firmware in the
 *	   same variable before return;
 *
 *	   In case of DEL filter, the caller passes the filter_id returned by
 *	   the earlier ADD in this variable. Return value is irrelevant.
 * @data: filter data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
	struct filter *data)
{
	u64 a0, a1;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	struct filter_action *action;
	u64 tlv_size;

	if (cmd == CLSF_ADD) {
		tlv_size = sizeof(struct filter) +
			   sizeof(struct filter_action) +
			   2 * sizeof(struct filter_tlv);
		tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
					    &tlv_pa, GFP_ATOMIC);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
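		/* First TLV carries the match criteria, the second the
		 * RQ-steering action; on ADD the firmware returns the
		 * filter id in a0.
		 */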
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		*(struct filter *)&tlv->val = *data;

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    sizeof(struct filter));

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)&tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = *entry;

		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
	u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
	u64 *supported_versions, u64 *a1)
{
	u64 a0 = feature;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
	if (!ret)
		*supported_versions = a0;

	return ret;
}

int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* rss_hash_type is valid only when a0 is 1. Adapter which does not
	 * support CMD_CAPABILITY for rss_hash_type has a0 = 0
	 */
	if (err || (a0 != 1))
		return -EOPNOTSUPP;

	a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
	     NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;

	*rss_hash_type = (u8)a1;

	return 0;
}