/*
 * IBM System i and System p Virtual NIC Device Driver
 * Copyright (C) 2014 IBM Corp.
 * Santiago Leon (santi_leon@yahoo.com)
 * Thomas Falcon (tlfalcon@linux.vnet.ibm.com)
 * John Allen (jallen@linux.vnet.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 *
 * Messages are passed between the VNIC driver and the VNIC server using
 * Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to
 * issue and receive commands that initiate communication with the server
 * on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but
 * are used by the driver to notify the server that a packet is
 * ready for transmission or that a buffer has been added to receive a
 * packet. Subsequently, sCRQs are used by the server to notify the
 * driver that a packet transmission has been completed or that a packet
 * has been received and placed in a waiting buffer.
 *
 * In lieu of a more conventional "on-the-fly" DMA mapping strategy in
 * which skbs are DMA mapped and immediately unmapped when the transmit
 * or receive has been completed, the VNIC driver is required to use
 * "long term mapping". This entails that large, continuous DMA mapped
 * buffers are allocated on driver initialization and these buffers are
 * then continuously reused to pass skbs to and from the VNIC server.
 */
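/* Editorial sketch (not part of the driver): the long term mapping scheme
 * described above amounts to registering one large DMA buffer with the
 * VNIC server once, then handing out fixed-size slices of it for
 * individual skbs. Assuming the helpers defined later in this file, the
 * lifecycle looks roughly like this:
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, nbufs * buf_sz);  // map + register once
 *	dst = ltb.buff + index * buf_sz;                      // reuse a slice per skb
 *	memcpy(dst, skb->data, skb->len);
 *	...
 *	free_long_term_buff(adapter, &ltb);                   // unregister + unmap once
 *
 * so no per-packet dma_map_single()/dma_unmap_single() calls are needed
 * on the hot path.
 */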
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
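/* Editorial note: IBMVNIC_STAT_OFF() records, at compile time, the byte
 * offset of a firmware statistic inside struct ibmvnic_adapter, and
 * IBMVNIC_GET_STAT() reads a u64 back out of the adapter at that offset.
 * A minimal (hypothetical) use, mirroring what the ethtool statistics
 * handlers later in this file do, would be:
 *
 *	int off = IBMVNIC_STAT_OFF(rx_packets);
 *	u64 val = IBMVNIC_GET_STAT(adapter, off);
 *
 * so the ibmvnic_stats[] table below only needs to carry names and offsets.
 */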
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter
*adapter) 232 { 233 int i; 234 235 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 236 i++) 237 adapter->rx_pool[i].active = 0; 238 } 239 240 static void replenish_rx_pool(struct ibmvnic_adapter *adapter, 241 struct ibmvnic_rx_pool *pool) 242 { 243 int count = pool->size - atomic_read(&pool->available); 244 struct device *dev = &adapter->vdev->dev; 245 int buffers_added = 0; 246 unsigned long lpar_rc; 247 union sub_crq sub_crq; 248 struct sk_buff *skb; 249 unsigned int offset; 250 dma_addr_t dma_addr; 251 unsigned char *dst; 252 u64 *handle_array; 253 int shift = 0; 254 int index; 255 int i; 256 257 if (!pool->active) 258 return; 259 260 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 261 be32_to_cpu(adapter->login_rsp_buf-> 262 off_rxadd_subcrqs)); 263 264 for (i = 0; i < count; ++i) { 265 skb = alloc_skb(pool->buff_size, GFP_ATOMIC); 266 if (!skb) { 267 dev_err(dev, "Couldn't replenish rx buff\n"); 268 adapter->replenish_no_mem++; 269 break; 270 } 271 272 index = pool->free_map[pool->next_free]; 273 274 if (pool->rx_buff[index].skb) 275 dev_err(dev, "Inconsistent free_map!\n"); 276 277 /* Copy the skb to the long term mapped DMA buffer */ 278 offset = index * pool->buff_size; 279 dst = pool->long_term_buff.buff + offset; 280 memset(dst, 0, pool->buff_size); 281 dma_addr = pool->long_term_buff.addr + offset; 282 pool->rx_buff[index].data = dst; 283 284 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; 285 pool->rx_buff[index].dma = dma_addr; 286 pool->rx_buff[index].skb = skb; 287 pool->rx_buff[index].pool_index = pool->index; 288 pool->rx_buff[index].size = pool->buff_size; 289 290 memset(&sub_crq, 0, sizeof(sub_crq)); 291 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD; 292 sub_crq.rx_add.correlator = 293 cpu_to_be64((u64)&pool->rx_buff[index]); 294 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr); 295 sub_crq.rx_add.map_id = pool->long_term_buff.map_id; 296 297 /* The length field of the sCRQ is defined to be 24 bits so the 298 * buffer size needs to be left shifted by a byte before it is 299 * converted to big endian to prevent the last byte from being 300 * truncated. 301 */ 302 #ifdef __LITTLE_ENDIAN__ 303 shift = 8; 304 #endif 305 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift); 306 307 lpar_rc = send_subcrq(adapter, handle_array[pool->index], 308 &sub_crq); 309 if (lpar_rc != H_SUCCESS) 310 goto failure; 311 312 buffers_added++; 313 adapter->replenish_add_buff_success++; 314 pool->next_free = (pool->next_free + 1) % pool->size; 315 } 316 atomic_add(buffers_added, &pool->available); 317 return; 318 319 failure: 320 dev_info(dev, "replenish pools failure\n"); 321 pool->free_map[pool->next_free] = index; 322 pool->rx_buff[index].skb = NULL; 323 if (!dma_mapping_error(dev, dma_addr)) 324 dma_unmap_single(dev, dma_addr, pool->buff_size, 325 DMA_FROM_DEVICE); 326 327 dev_kfree_skb_any(skb); 328 adapter->replenish_add_buff_failure++; 329 atomic_add(buffers_added, &pool->available); 330 331 if (lpar_rc == H_CLOSED) { 332 /* Disable buffer pool replenishment and report carrier off if 333 * queue is closed. Firmware guarantees that a signal will 334 * be sent to the driver, triggering a reset. 
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(adapter->req_tx_queues,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(adapter->req_rx_queues,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			/* capture the return code so the check below does
			 * not test an uninitialized rc
			 */
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}
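/* Editorial sketch: each rx pool recycles buffer slots through free_map,
 * which acts as a simple ring of slot indices. replenish_rx_pool() above
 * pulls the index at next_free and marks the entry IBMVNIC_INVALID_MAP,
 * while remove_buff_from_pool() (further below) returns a completed slot
 * at next_alloc. Roughly:
 *
 *	index = pool->free_map[pool->next_free];          // take a free slot
 *	pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
 *	pool->next_free = (pool->next_free + 1) % pool->size;
 *	...
 *	pool->free_map[pool->next_alloc] = index;         // give the slot back
 *	pool->next_alloc = (pool->next_alloc + 1) % pool->size;
 *
 * which is why resetting a pool simply refills free_map with 0..size-1.
 */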
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = 0;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int tx_scrqs;
	int i, j, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i);

		tx_pool = &adapter->tx_pool[i];

		rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
		if (rc)
			return rc;

		rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
		if (rc)
			return rc;

		memset(tx_pool->tx_buff, 0,
		       adapter->req_tx_entries_per_subcrq *
		       sizeof(struct ibmvnic_tx_buff));

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
		tx_pool->tso_index = 0;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i <
adapter->num_active_tx_pools; i++) { 613 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); 614 tx_pool = &adapter->tx_pool[i]; 615 kfree(tx_pool->tx_buff); 616 free_long_term_buff(adapter, &tx_pool->long_term_buff); 617 free_long_term_buff(adapter, &tx_pool->tso_ltb); 618 kfree(tx_pool->free_map); 619 } 620 621 kfree(adapter->tx_pool); 622 adapter->tx_pool = NULL; 623 adapter->num_active_tx_pools = 0; 624 } 625 626 static int init_tx_pools(struct net_device *netdev) 627 { 628 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 629 struct device *dev = &adapter->vdev->dev; 630 struct ibmvnic_tx_pool *tx_pool; 631 int tx_subcrqs; 632 int i, j; 633 634 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 635 adapter->tx_pool = kcalloc(tx_subcrqs, 636 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); 637 if (!adapter->tx_pool) 638 return -1; 639 640 adapter->num_active_tx_pools = 0; 641 642 for (i = 0; i < tx_subcrqs; i++) { 643 tx_pool = &adapter->tx_pool[i]; 644 645 netdev_dbg(adapter->netdev, 646 "Initializing tx_pool[%d], %lld buffs\n", 647 i, adapter->req_tx_entries_per_subcrq); 648 649 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, 650 sizeof(struct ibmvnic_tx_buff), 651 GFP_KERNEL); 652 if (!tx_pool->tx_buff) { 653 dev_err(dev, "tx pool buffer allocation failed\n"); 654 release_tx_pools(adapter); 655 return -1; 656 } 657 658 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, 659 adapter->req_tx_entries_per_subcrq * 660 adapter->req_mtu)) { 661 release_tx_pools(adapter); 662 return -1; 663 } 664 665 /* alloc TSO ltb */ 666 if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb, 667 IBMVNIC_TSO_BUFS * 668 IBMVNIC_TSO_BUF_SZ)) { 669 release_tx_pools(adapter); 670 return -1; 671 } 672 673 tx_pool->tso_index = 0; 674 675 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq, 676 sizeof(int), GFP_KERNEL); 677 if (!tx_pool->free_map) { 678 release_tx_pools(adapter); 679 return -1; 680 } 681 682 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) 683 tx_pool->free_map[j] = j; 684 685 tx_pool->consumer_index = 0; 686 tx_pool->producer_index = 0; 687 } 688 689 adapter->num_active_tx_pools = tx_subcrqs; 690 691 return 0; 692 } 693 694 static void release_error_buffers(struct ibmvnic_adapter *adapter) 695 { 696 struct device *dev = &adapter->vdev->dev; 697 struct ibmvnic_error_buff *error_buff, *tmp; 698 unsigned long flags; 699 700 spin_lock_irqsave(&adapter->error_list_lock, flags); 701 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) { 702 list_del(&error_buff->list); 703 dma_unmap_single(dev, error_buff->dma, error_buff->len, 704 DMA_FROM_DEVICE); 705 kfree(error_buff->buff); 706 kfree(error_buff); 707 } 708 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 709 } 710 711 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) 712 { 713 int i; 714 715 if (adapter->napi_enabled) 716 return; 717 718 for (i = 0; i < adapter->req_rx_queues; i++) 719 napi_enable(&adapter->napi[i]); 720 721 adapter->napi_enabled = true; 722 } 723 724 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) 725 { 726 int i; 727 728 if (!adapter->napi_enabled) 729 return; 730 731 for (i = 0; i < adapter->req_rx_queues; i++) { 732 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); 733 napi_disable(&adapter->napi[i]); 734 } 735 736 adapter->napi_enabled = false; 737 } 738 739 static int ibmvnic_login(struct net_device *netdev) 740 { 741 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 742 unsigned long timeout 
= msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;
	int rc;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
			rc = init_sub_crqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQs failed\n");
				return -1;
			}
			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ irqs failed\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (&adapter->napi[i]) {
				netdev_dbg(adapter->netdev,
					   "Releasing napi[%d]\n", i);
				netif_napi_del(&adapter->napi[i]);
			}
		}
	}
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}
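/* Editorial sketch: most control-path requests in this driver follow the
 * same CRQ round trip that set_link_state() above and ibmvnic_get_vpd()
 * below use: build a union ibmvnic_crq, send it, then sleep on a
 * completion that the CRQ response handler signals. Using the VPD size
 * query from this file as the example, the pattern is roughly:
 *
 *	union ibmvnic_crq crq;
 *
 *	memset(&crq, 0, sizeof(crq));
 *	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
 *	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 *	init_completion(&adapter->fw_done);
 *	ibmvnic_send_crq(adapter, &crq);
 *	wait_for_completion(&adapter->fw_done);
 *
 * The response handlers (not shown in this excerpt) record their result,
 * for example in adapter->fw_done_rc, before completing fw_done.
 */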
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_stats_buffers(adapter);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Adding napi[%d]\n", i);
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
1010 return rc; 1011 } 1012 1013 netif_tx_start_all_queues(netdev); 1014 1015 if (prev_state == VNIC_CLOSED) { 1016 for (i = 0; i < adapter->req_rx_queues; i++) 1017 napi_schedule(&adapter->napi[i]); 1018 } 1019 1020 adapter->state = VNIC_OPEN; 1021 return rc; 1022 } 1023 1024 static int ibmvnic_open(struct net_device *netdev) 1025 { 1026 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1027 int rc; 1028 1029 mutex_lock(&adapter->reset_lock); 1030 1031 if (adapter->state != VNIC_CLOSED) { 1032 rc = ibmvnic_login(netdev); 1033 if (rc) { 1034 mutex_unlock(&adapter->reset_lock); 1035 return rc; 1036 } 1037 1038 rc = init_resources(adapter); 1039 if (rc) { 1040 netdev_err(netdev, "failed to initialize resources\n"); 1041 release_resources(adapter); 1042 mutex_unlock(&adapter->reset_lock); 1043 return rc; 1044 } 1045 } 1046 1047 rc = __ibmvnic_open(netdev); 1048 netif_carrier_on(netdev); 1049 1050 mutex_unlock(&adapter->reset_lock); 1051 1052 return rc; 1053 } 1054 1055 static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1056 { 1057 struct ibmvnic_tx_pool *tx_pool; 1058 u64 tx_entries; 1059 int tx_scrqs; 1060 int i, j; 1061 1062 if (!adapter->tx_pool) 1063 return; 1064 1065 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 1066 tx_entries = adapter->req_tx_entries_per_subcrq; 1067 1068 /* Free any remaining skbs in the tx buffer pools */ 1069 for (i = 0; i < tx_scrqs; i++) { 1070 tx_pool = &adapter->tx_pool[i]; 1071 if (!tx_pool) 1072 continue; 1073 1074 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1075 for (j = 0; j < tx_entries; j++) { 1076 if (tx_pool->tx_buff[j].skb) { 1077 dev_kfree_skb_any(tx_pool->tx_buff[j].skb); 1078 tx_pool->tx_buff[j].skb = NULL; 1079 } 1080 } 1081 } 1082 } 1083 1084 static int __ibmvnic_close(struct net_device *netdev) 1085 { 1086 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1087 int rc = 0; 1088 int i; 1089 1090 adapter->state = VNIC_CLOSING; 1091 1092 /* ensure that transmissions are stopped if called by do_reset */ 1093 if (adapter->resetting) 1094 netif_tx_disable(netdev); 1095 else 1096 netif_tx_stop_all_queues(netdev); 1097 1098 ibmvnic_napi_disable(adapter); 1099 1100 if (adapter->tx_scrq) { 1101 for (i = 0; i < adapter->req_tx_queues; i++) 1102 if (adapter->tx_scrq[i]->irq) { 1103 netdev_dbg(adapter->netdev, 1104 "Disabling tx_scrq[%d] irq\n", i); 1105 disable_irq(adapter->tx_scrq[i]->irq); 1106 } 1107 } 1108 1109 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); 1110 if (rc) 1111 return rc; 1112 1113 if (adapter->rx_scrq) { 1114 for (i = 0; i < adapter->req_rx_queues; i++) { 1115 int retries = 10; 1116 1117 while (pending_scrq(adapter, adapter->rx_scrq[i])) { 1118 retries--; 1119 mdelay(100); 1120 1121 if (retries == 0) 1122 break; 1123 } 1124 1125 if (adapter->rx_scrq[i]->irq) { 1126 netdev_dbg(adapter->netdev, 1127 "Disabling rx_scrq[%d] irq\n", i); 1128 disable_irq(adapter->rx_scrq[i]->irq); 1129 } 1130 } 1131 } 1132 1133 clean_tx_pools(adapter); 1134 adapter->state = VNIC_CLOSED; 1135 return rc; 1136 } 1137 1138 static int ibmvnic_close(struct net_device *netdev) 1139 { 1140 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1141 int rc; 1142 1143 mutex_lock(&adapter->reset_lock); 1144 rc = __ibmvnic_close(netdev); 1145 mutex_unlock(&adapter->reset_lock); 1146 1147 return rc; 1148 } 1149 1150 /** 1151 * build_hdr_data - creates L2/L3/L4 header data buffer 1152 * @hdr_field - bitfield determining needed headers 1153 * @skb - socket buffer 1154 * @hdr_len - array of header lengths 1155 * 
@tot_len - total length of data 1156 * 1157 * Reads hdr_field to determine which headers are needed by firmware. 1158 * Builds a buffer containing these headers. Saves individual header 1159 * lengths and total buffer length to be used to build descriptors. 1160 */ 1161 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, 1162 int *hdr_len, u8 *hdr_data) 1163 { 1164 int len = 0; 1165 u8 *hdr; 1166 1167 hdr_len[0] = sizeof(struct ethhdr); 1168 1169 if (skb->protocol == htons(ETH_P_IP)) { 1170 hdr_len[1] = ip_hdr(skb)->ihl * 4; 1171 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1172 hdr_len[2] = tcp_hdrlen(skb); 1173 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1174 hdr_len[2] = sizeof(struct udphdr); 1175 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1176 hdr_len[1] = sizeof(struct ipv6hdr); 1177 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1178 hdr_len[2] = tcp_hdrlen(skb); 1179 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) 1180 hdr_len[2] = sizeof(struct udphdr); 1181 } else if (skb->protocol == htons(ETH_P_ARP)) { 1182 hdr_len[1] = arp_hdr_len(skb->dev); 1183 hdr_len[2] = 0; 1184 } 1185 1186 memset(hdr_data, 0, 120); 1187 if ((hdr_field >> 6) & 1) { 1188 hdr = skb_mac_header(skb); 1189 memcpy(hdr_data, hdr, hdr_len[0]); 1190 len += hdr_len[0]; 1191 } 1192 1193 if ((hdr_field >> 5) & 1) { 1194 hdr = skb_network_header(skb); 1195 memcpy(hdr_data + len, hdr, hdr_len[1]); 1196 len += hdr_len[1]; 1197 } 1198 1199 if ((hdr_field >> 4) & 1) { 1200 hdr = skb_transport_header(skb); 1201 memcpy(hdr_data + len, hdr, hdr_len[2]); 1202 len += hdr_len[2]; 1203 } 1204 return len; 1205 } 1206 1207 /** 1208 * create_hdr_descs - create header and header extension descriptors 1209 * @hdr_field - bitfield determining needed headers 1210 * @data - buffer containing header data 1211 * @len - length of data buffer 1212 * @hdr_len - array of individual header lengths 1213 * @scrq_arr - descriptor array 1214 * 1215 * Creates header and, if needed, header extension descriptors and 1216 * places them in a descriptor array, scrq_arr 1217 */ 1218 1219 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1220 union sub_crq *scrq_arr) 1221 { 1222 union sub_crq hdr_desc; 1223 int tmp_len = len; 1224 int num_descs = 0; 1225 u8 *data, *cur; 1226 int tmp; 1227 1228 while (tmp_len > 0) { 1229 cur = hdr_data + len - tmp_len; 1230 1231 memset(&hdr_desc, 0, sizeof(hdr_desc)); 1232 if (cur != hdr_data) { 1233 data = hdr_desc.hdr_ext.data; 1234 tmp = tmp_len > 29 ? 29 : tmp_len; 1235 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; 1236 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; 1237 hdr_desc.hdr_ext.len = tmp; 1238 } else { 1239 data = hdr_desc.hdr.data; 1240 tmp = tmp_len > 24 ? 
24 : tmp_len; 1241 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; 1242 hdr_desc.hdr.type = IBMVNIC_HDR_DESC; 1243 hdr_desc.hdr.len = tmp; 1244 hdr_desc.hdr.l2_len = (u8)hdr_len[0]; 1245 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); 1246 hdr_desc.hdr.l4_len = (u8)hdr_len[2]; 1247 hdr_desc.hdr.flag = hdr_field << 1; 1248 } 1249 memcpy(data, cur, tmp); 1250 tmp_len -= tmp; 1251 *scrq_arr = hdr_desc; 1252 scrq_arr++; 1253 num_descs++; 1254 } 1255 1256 return num_descs; 1257 } 1258 1259 /** 1260 * build_hdr_descs_arr - build a header descriptor array 1261 * @skb - socket buffer 1262 * @num_entries - number of descriptors to be sent 1263 * @subcrq - first TX descriptor 1264 * @hdr_field - bit field determining which headers will be sent 1265 * 1266 * This function will build a TX descriptor array with applicable 1267 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 1268 */ 1269 1270 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, 1271 int *num_entries, u8 hdr_field) 1272 { 1273 int hdr_len[3] = {0, 0, 0}; 1274 int tot_len; 1275 u8 *hdr_data = txbuff->hdr_data; 1276 1277 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1278 txbuff->hdr_data); 1279 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, 1280 txbuff->indir_arr + 1); 1281 } 1282 1283 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) 1284 { 1285 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1286 int queue_num = skb_get_queue_mapping(skb); 1287 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; 1288 struct device *dev = &adapter->vdev->dev; 1289 struct ibmvnic_tx_buff *tx_buff = NULL; 1290 struct ibmvnic_sub_crq_queue *tx_scrq; 1291 struct ibmvnic_tx_pool *tx_pool; 1292 unsigned int tx_send_failed = 0; 1293 unsigned int tx_map_failed = 0; 1294 unsigned int tx_dropped = 0; 1295 unsigned int tx_packets = 0; 1296 unsigned int tx_bytes = 0; 1297 dma_addr_t data_dma_addr; 1298 struct netdev_queue *txq; 1299 unsigned long lpar_rc; 1300 union sub_crq tx_crq; 1301 unsigned int offset; 1302 int num_entries = 1; 1303 unsigned char *dst; 1304 u64 *handle_array; 1305 int index = 0; 1306 u8 proto = 0; 1307 int ret = 0; 1308 1309 if (adapter->resetting) { 1310 if (!netif_subqueue_stopped(netdev, skb)) 1311 netif_stop_subqueue(netdev, queue_num); 1312 dev_kfree_skb_any(skb); 1313 1314 tx_send_failed++; 1315 tx_dropped++; 1316 ret = NETDEV_TX_OK; 1317 goto out; 1318 } 1319 1320 tx_pool = &adapter->tx_pool[queue_num]; 1321 tx_scrq = adapter->tx_scrq[queue_num]; 1322 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); 1323 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + 1324 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); 1325 1326 index = tx_pool->free_map[tx_pool->consumer_index]; 1327 1328 if (skb_is_gso(skb)) { 1329 offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ; 1330 dst = tx_pool->tso_ltb.buff + offset; 1331 memset(dst, 0, IBMVNIC_TSO_BUF_SZ); 1332 data_dma_addr = tx_pool->tso_ltb.addr + offset; 1333 tx_pool->tso_index++; 1334 if (tx_pool->tso_index == IBMVNIC_TSO_BUFS) 1335 tx_pool->tso_index = 0; 1336 } else { 1337 offset = index * adapter->req_mtu; 1338 dst = tx_pool->long_term_buff.buff + offset; 1339 memset(dst, 0, adapter->req_mtu); 1340 data_dma_addr = tx_pool->long_term_buff.addr + offset; 1341 } 1342 1343 if (skb_shinfo(skb)->nr_frags) { 1344 int cur, i; 1345 1346 /* Copy the head */ 1347 skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); 1348 cur = skb_headlen(skb); 1349 1350 /* Copy the frags */ 1351 for (i = 0; i < 
skb_shinfo(skb)->nr_frags; i++) { 1352 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1353 1354 memcpy(dst + cur, 1355 page_address(skb_frag_page(frag)) + 1356 frag->page_offset, skb_frag_size(frag)); 1357 cur += skb_frag_size(frag); 1358 } 1359 } else { 1360 skb_copy_from_linear_data(skb, dst, skb->len); 1361 } 1362 1363 tx_pool->consumer_index = 1364 (tx_pool->consumer_index + 1) % 1365 adapter->req_tx_entries_per_subcrq; 1366 1367 tx_buff = &tx_pool->tx_buff[index]; 1368 tx_buff->skb = skb; 1369 tx_buff->data_dma[0] = data_dma_addr; 1370 tx_buff->data_len[0] = skb->len; 1371 tx_buff->index = index; 1372 tx_buff->pool_index = queue_num; 1373 tx_buff->last_frag = true; 1374 1375 memset(&tx_crq, 0, sizeof(tx_crq)); 1376 tx_crq.v1.first = IBMVNIC_CRQ_CMD; 1377 tx_crq.v1.type = IBMVNIC_TX_DESC; 1378 tx_crq.v1.n_crq_elem = 1; 1379 tx_crq.v1.n_sge = 1; 1380 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; 1381 tx_crq.v1.correlator = cpu_to_be32(index); 1382 if (skb_is_gso(skb)) 1383 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id); 1384 else 1385 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); 1386 tx_crq.v1.sge_len = cpu_to_be32(skb->len); 1387 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); 1388 1389 if (adapter->vlan_header_insertion) { 1390 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT; 1391 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); 1392 } 1393 1394 if (skb->protocol == htons(ETH_P_IP)) { 1395 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4; 1396 proto = ip_hdr(skb)->protocol; 1397 } else if (skb->protocol == htons(ETH_P_IPV6)) { 1398 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6; 1399 proto = ipv6_hdr(skb)->nexthdr; 1400 } 1401 1402 if (proto == IPPROTO_TCP) 1403 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP; 1404 else if (proto == IPPROTO_UDP) 1405 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; 1406 1407 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1408 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; 1409 hdrs += 2; 1410 } 1411 if (skb_is_gso(skb)) { 1412 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; 1413 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); 1414 hdrs += 2; 1415 } 1416 /* determine if l2/3/4 headers are sent to firmware */ 1417 if ((*hdrs >> 7) & 1 && 1418 (skb->protocol == htons(ETH_P_IP) || 1419 skb->protocol == htons(ETH_P_IPV6) || 1420 skb->protocol == htons(ETH_P_ARP))) { 1421 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); 1422 tx_crq.v1.n_crq_elem = num_entries; 1423 tx_buff->indir_arr[0] = tx_crq; 1424 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, 1425 sizeof(tx_buff->indir_arr), 1426 DMA_TO_DEVICE); 1427 if (dma_mapping_error(dev, tx_buff->indir_dma)) { 1428 dev_kfree_skb_any(skb); 1429 tx_buff->skb = NULL; 1430 if (!firmware_has_feature(FW_FEATURE_CMO)) 1431 dev_err(dev, "tx: unable to map descriptor array\n"); 1432 tx_map_failed++; 1433 tx_dropped++; 1434 ret = NETDEV_TX_OK; 1435 goto out; 1436 } 1437 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], 1438 (u64)tx_buff->indir_dma, 1439 (u64)num_entries); 1440 } else { 1441 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1442 &tx_crq); 1443 } 1444 if (lpar_rc != H_SUCCESS) { 1445 dev_err(dev, "tx failed with code %ld\n", lpar_rc); 1446 1447 if (tx_pool->consumer_index == 0) 1448 tx_pool->consumer_index = 1449 adapter->req_tx_entries_per_subcrq - 1; 1450 else 1451 tx_pool->consumer_index--; 1452 1453 dev_kfree_skb_any(skb); 1454 tx_buff->skb = NULL; 1455 1456 if (lpar_rc == H_CLOSED) { 1457 /* Disable TX and report carrier off if queue is closed. 
1458 * Firmware guarantees that a signal will be sent to the 1459 * driver, triggering a reset or some other action. 1460 */ 1461 netif_tx_stop_all_queues(netdev); 1462 netif_carrier_off(netdev); 1463 } 1464 1465 tx_send_failed++; 1466 tx_dropped++; 1467 ret = NETDEV_TX_OK; 1468 goto out; 1469 } 1470 1471 if (atomic_inc_return(&tx_scrq->used) 1472 >= adapter->req_tx_entries_per_subcrq) { 1473 netdev_info(netdev, "Stopping queue %d\n", queue_num); 1474 netif_stop_subqueue(netdev, queue_num); 1475 } 1476 1477 tx_packets++; 1478 tx_bytes += skb->len; 1479 txq->trans_start = jiffies; 1480 ret = NETDEV_TX_OK; 1481 1482 out: 1483 netdev->stats.tx_dropped += tx_dropped; 1484 netdev->stats.tx_bytes += tx_bytes; 1485 netdev->stats.tx_packets += tx_packets; 1486 adapter->tx_send_failed += tx_send_failed; 1487 adapter->tx_map_failed += tx_map_failed; 1488 adapter->tx_stats_buffers[queue_num].packets += tx_packets; 1489 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; 1490 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; 1491 1492 return ret; 1493 } 1494 1495 static void ibmvnic_set_multi(struct net_device *netdev) 1496 { 1497 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1498 struct netdev_hw_addr *ha; 1499 union ibmvnic_crq crq; 1500 1501 memset(&crq, 0, sizeof(crq)); 1502 crq.request_capability.first = IBMVNIC_CRQ_CMD; 1503 crq.request_capability.cmd = REQUEST_CAPABILITY; 1504 1505 if (netdev->flags & IFF_PROMISC) { 1506 if (!adapter->promisc_supported) 1507 return; 1508 } else { 1509 if (netdev->flags & IFF_ALLMULTI) { 1510 /* Accept all multicast */ 1511 memset(&crq, 0, sizeof(crq)); 1512 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1513 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1514 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL; 1515 ibmvnic_send_crq(adapter, &crq); 1516 } else if (netdev_mc_empty(netdev)) { 1517 /* Reject all multicast */ 1518 memset(&crq, 0, sizeof(crq)); 1519 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1520 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1521 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL; 1522 ibmvnic_send_crq(adapter, &crq); 1523 } else { 1524 /* Accept one or more multicast(s) */ 1525 netdev_for_each_mc_addr(ha, netdev) { 1526 memset(&crq, 0, sizeof(crq)); 1527 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD; 1528 crq.multicast_ctrl.cmd = MULTICAST_CTRL; 1529 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC; 1530 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0], 1531 ha->addr); 1532 ibmvnic_send_crq(adapter, &crq); 1533 } 1534 } 1535 } 1536 } 1537 1538 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) 1539 { 1540 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1541 struct sockaddr *addr = p; 1542 union ibmvnic_crq crq; 1543 1544 if (!is_valid_ether_addr(addr->sa_data)) 1545 return -EADDRNOTAVAIL; 1546 1547 memset(&crq, 0, sizeof(crq)); 1548 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD; 1549 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR; 1550 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); 1551 1552 init_completion(&adapter->fw_done); 1553 ibmvnic_send_crq(adapter, &crq); 1554 wait_for_completion(&adapter->fw_done); 1555 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ 1556 return adapter->fw_done_rc ? 
-EIO : 0; 1557 } 1558 1559 static int ibmvnic_set_mac(struct net_device *netdev, void *p) 1560 { 1561 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1562 struct sockaddr *addr = p; 1563 int rc; 1564 1565 if (adapter->state == VNIC_PROBED) { 1566 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); 1567 adapter->mac_change_pending = true; 1568 return 0; 1569 } 1570 1571 rc = __ibmvnic_set_mac(netdev, addr); 1572 1573 return rc; 1574 } 1575 1576 /** 1577 * do_reset returns zero if we are able to keep processing reset events, or 1578 * non-zero if we hit a fatal error and must halt. 1579 */ 1580 static int do_reset(struct ibmvnic_adapter *adapter, 1581 struct ibmvnic_rwi *rwi, u32 reset_state) 1582 { 1583 u64 old_num_rx_queues, old_num_tx_queues; 1584 struct net_device *netdev = adapter->netdev; 1585 int i, rc; 1586 1587 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", 1588 rwi->reset_reason); 1589 1590 netif_carrier_off(netdev); 1591 adapter->reset_reason = rwi->reset_reason; 1592 1593 old_num_rx_queues = adapter->req_rx_queues; 1594 old_num_tx_queues = adapter->req_tx_queues; 1595 1596 if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1597 rc = ibmvnic_reenable_crq_queue(adapter); 1598 if (rc) 1599 return 0; 1600 } 1601 1602 rc = __ibmvnic_close(netdev); 1603 if (rc) 1604 return rc; 1605 1606 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1607 adapter->wait_for_reset) { 1608 release_resources(adapter); 1609 release_sub_crqs(adapter); 1610 release_crq_queue(adapter); 1611 } 1612 1613 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { 1614 /* remove the closed state so when we call open it appears 1615 * we are coming from the probed state. 1616 */ 1617 adapter->state = VNIC_PROBED; 1618 1619 rc = ibmvnic_init(adapter); 1620 if (rc) 1621 return IBMVNIC_INIT_FAILED; 1622 1623 /* If the adapter was in PROBE state prior to the reset, 1624 * exit here. 
1625 */ 1626 if (reset_state == VNIC_PROBED) 1627 return 0; 1628 1629 rc = ibmvnic_login(netdev); 1630 if (rc) { 1631 adapter->state = VNIC_PROBED; 1632 return 0; 1633 } 1634 1635 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1636 adapter->wait_for_reset) { 1637 rc = init_resources(adapter); 1638 if (rc) 1639 return rc; 1640 } else if (adapter->req_rx_queues != old_num_rx_queues || 1641 adapter->req_tx_queues != old_num_tx_queues) { 1642 release_rx_pools(adapter); 1643 release_tx_pools(adapter); 1644 init_rx_pools(netdev); 1645 init_tx_pools(netdev); 1646 } else { 1647 rc = reset_tx_pools(adapter); 1648 if (rc) 1649 return rc; 1650 1651 rc = reset_rx_pools(adapter); 1652 if (rc) 1653 return rc; 1654 1655 if (reset_state == VNIC_CLOSED) 1656 return 0; 1657 } 1658 } 1659 1660 rc = __ibmvnic_open(netdev); 1661 if (rc) { 1662 if (list_empty(&adapter->rwi_list)) 1663 adapter->state = VNIC_CLOSED; 1664 else 1665 adapter->state = reset_state; 1666 1667 return 0; 1668 } 1669 1670 netif_carrier_on(netdev); 1671 1672 /* kick napi */ 1673 for (i = 0; i < adapter->req_rx_queues; i++) 1674 napi_schedule(&adapter->napi[i]); 1675 1676 if (adapter->reset_reason != VNIC_RESET_FAILOVER) 1677 netdev_notify_peers(netdev); 1678 1679 return 0; 1680 } 1681 1682 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) 1683 { 1684 struct ibmvnic_rwi *rwi; 1685 1686 mutex_lock(&adapter->rwi_lock); 1687 1688 if (!list_empty(&adapter->rwi_list)) { 1689 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, 1690 list); 1691 list_del(&rwi->list); 1692 } else { 1693 rwi = NULL; 1694 } 1695 1696 mutex_unlock(&adapter->rwi_lock); 1697 return rwi; 1698 } 1699 1700 static void free_all_rwi(struct ibmvnic_adapter *adapter) 1701 { 1702 struct ibmvnic_rwi *rwi; 1703 1704 rwi = get_next_rwi(adapter); 1705 while (rwi) { 1706 kfree(rwi); 1707 rwi = get_next_rwi(adapter); 1708 } 1709 } 1710 1711 static void __ibmvnic_reset(struct work_struct *work) 1712 { 1713 struct ibmvnic_rwi *rwi; 1714 struct ibmvnic_adapter *adapter; 1715 struct net_device *netdev; 1716 u32 reset_state; 1717 int rc = 0; 1718 1719 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); 1720 netdev = adapter->netdev; 1721 1722 mutex_lock(&adapter->reset_lock); 1723 adapter->resetting = true; 1724 reset_state = adapter->state; 1725 1726 rwi = get_next_rwi(adapter); 1727 while (rwi) { 1728 rc = do_reset(adapter, rwi, reset_state); 1729 kfree(rwi); 1730 if (rc && rc != IBMVNIC_INIT_FAILED) 1731 break; 1732 1733 rwi = get_next_rwi(adapter); 1734 } 1735 1736 if (adapter->wait_for_reset) { 1737 adapter->wait_for_reset = false; 1738 adapter->reset_done_rc = rc; 1739 complete(&adapter->reset_done); 1740 } 1741 1742 if (rc) { 1743 netdev_dbg(adapter->netdev, "Reset failed\n"); 1744 free_all_rwi(adapter); 1745 mutex_unlock(&adapter->reset_lock); 1746 return; 1747 } 1748 1749 adapter->resetting = false; 1750 mutex_unlock(&adapter->reset_lock); 1751 } 1752 1753 static void ibmvnic_reset(struct ibmvnic_adapter *adapter, 1754 enum ibmvnic_reset_reason reason) 1755 { 1756 struct ibmvnic_rwi *rwi, *tmp; 1757 struct net_device *netdev = adapter->netdev; 1758 struct list_head *entry; 1759 1760 if (adapter->state == VNIC_REMOVING || 1761 adapter->state == VNIC_REMOVED) { 1762 netdev_dbg(netdev, "Adapter removing, skipping reset\n"); 1763 return; 1764 } 1765 1766 if (adapter->state == VNIC_PROBING) { 1767 netdev_warn(netdev, "Adapter reset during probe\n"); 1768 adapter->init_done_rc = EAGAIN; 1769 return; 1770 } 1771 1772 
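	/* Only one reset work item (rwi) per reason is kept queued; the scan
	 * below drops duplicate requests so a burst of identical events does
	 * not schedule back-to-back resets.
	 */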
mutex_lock(&adapter->rwi_lock); 1773 1774 list_for_each(entry, &adapter->rwi_list) { 1775 tmp = list_entry(entry, struct ibmvnic_rwi, list); 1776 if (tmp->reset_reason == reason) { 1777 netdev_dbg(netdev, "Skipping matching reset\n"); 1778 mutex_unlock(&adapter->rwi_lock); 1779 return; 1780 } 1781 } 1782 1783 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); 1784 if (!rwi) { 1785 mutex_unlock(&adapter->rwi_lock); 1786 ibmvnic_close(netdev); 1787 return; 1788 } 1789 1790 rwi->reset_reason = reason; 1791 list_add_tail(&rwi->list, &adapter->rwi_list); 1792 mutex_unlock(&adapter->rwi_lock); 1793 1794 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); 1795 schedule_work(&adapter->ibmvnic_reset); 1796 } 1797 1798 static void ibmvnic_tx_timeout(struct net_device *dev) 1799 { 1800 struct ibmvnic_adapter *adapter = netdev_priv(dev); 1801 1802 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); 1803 } 1804 1805 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, 1806 struct ibmvnic_rx_buff *rx_buff) 1807 { 1808 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; 1809 1810 rx_buff->skb = NULL; 1811 1812 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); 1813 pool->next_alloc = (pool->next_alloc + 1) % pool->size; 1814 1815 atomic_dec(&pool->available); 1816 } 1817 1818 static int ibmvnic_poll(struct napi_struct *napi, int budget) 1819 { 1820 struct net_device *netdev = napi->dev; 1821 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1822 int scrq_num = (int)(napi - adapter->napi); 1823 int frames_processed = 0; 1824 1825 restart_poll: 1826 while (frames_processed < budget) { 1827 struct sk_buff *skb; 1828 struct ibmvnic_rx_buff *rx_buff; 1829 union sub_crq *next; 1830 u32 length; 1831 u16 offset; 1832 u8 flags = 0; 1833 1834 if (unlikely(adapter->resetting)) { 1835 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 1836 napi_complete_done(napi, frames_processed); 1837 return frames_processed; 1838 } 1839 1840 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) 1841 break; 1842 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); 1843 rx_buff = 1844 (struct ibmvnic_rx_buff *)be64_to_cpu(next-> 1845 rx_comp.correlator); 1846 /* do error checking */ 1847 if (next->rx_comp.rc) { 1848 netdev_dbg(netdev, "rx buffer returned with rc %x\n", 1849 be16_to_cpu(next->rx_comp.rc)); 1850 /* free the entry */ 1851 next->rx_comp.first = 0; 1852 remove_buff_from_pool(adapter, rx_buff); 1853 continue; 1854 } 1855 1856 length = be32_to_cpu(next->rx_comp.len); 1857 offset = be16_to_cpu(next->rx_comp.off_frame_data); 1858 flags = next->rx_comp.flags; 1859 skb = rx_buff->skb; 1860 skb_copy_to_linear_data(skb, rx_buff->data + offset, 1861 length); 1862 1863 /* VLAN Header has been stripped by the system firmware and 1864 * needs to be inserted by the driver 1865 */ 1866 if (adapter->rx_vlan_header_insertion && 1867 (flags & IBMVNIC_VLAN_STRIPPED)) 1868 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), 1869 ntohs(next->rx_comp.vlan_tci)); 1870 1871 /* free the entry */ 1872 next->rx_comp.first = 0; 1873 remove_buff_from_pool(adapter, rx_buff); 1874 1875 skb_put(skb, length); 1876 skb->protocol = eth_type_trans(skb, netdev); 1877 skb_record_rx_queue(skb, scrq_num); 1878 1879 if (flags & IBMVNIC_IP_CHKSUM_GOOD && 1880 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { 1881 skb->ip_summed = CHECKSUM_UNNECESSARY; 1882 } 1883 1884 length = skb->len; 1885 napi_gro_receive(napi, skb); /* send it up */ 1886 netdev->stats.rx_packets++; 1887 netdev->stats.rx_bytes += 
length; 1888 adapter->rx_stats_buffers[scrq_num].packets++; 1889 adapter->rx_stats_buffers[scrq_num].bytes += length; 1890 frames_processed++; 1891 } 1892 1893 if (adapter->state != VNIC_CLOSING) 1894 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); 1895 1896 if (frames_processed < budget) { 1897 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 1898 napi_complete_done(napi, frames_processed); 1899 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && 1900 napi_reschedule(napi)) { 1901 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); 1902 goto restart_poll; 1903 } 1904 } 1905 return frames_processed; 1906 } 1907 1908 #ifdef CONFIG_NET_POLL_CONTROLLER 1909 static void ibmvnic_netpoll_controller(struct net_device *dev) 1910 { 1911 struct ibmvnic_adapter *adapter = netdev_priv(dev); 1912 int i; 1913 1914 replenish_pools(netdev_priv(dev)); 1915 for (i = 0; i < adapter->req_rx_queues; i++) 1916 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, 1917 adapter->rx_scrq[i]); 1918 } 1919 #endif 1920 1921 static int wait_for_reset(struct ibmvnic_adapter *adapter) 1922 { 1923 adapter->fallback.mtu = adapter->req_mtu; 1924 adapter->fallback.rx_queues = adapter->req_rx_queues; 1925 adapter->fallback.tx_queues = adapter->req_tx_queues; 1926 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; 1927 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; 1928 1929 init_completion(&adapter->reset_done); 1930 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 1931 adapter->wait_for_reset = true; 1932 wait_for_completion(&adapter->reset_done); 1933 1934 if (adapter->reset_done_rc) { 1935 adapter->desired.mtu = adapter->fallback.mtu; 1936 adapter->desired.rx_queues = adapter->fallback.rx_queues; 1937 adapter->desired.tx_queues = adapter->fallback.tx_queues; 1938 adapter->desired.rx_entries = adapter->fallback.rx_entries; 1939 adapter->desired.tx_entries = adapter->fallback.tx_entries; 1940 1941 init_completion(&adapter->reset_done); 1942 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); 1943 wait_for_completion(&adapter->reset_done); 1944 } 1945 adapter->wait_for_reset = false; 1946 1947 return adapter->reset_done_rc; 1948 } 1949 1950 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 1951 { 1952 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1953 1954 adapter->desired.mtu = new_mtu + ETH_HLEN; 1955 1956 return wait_for_reset(adapter); 1957 } 1958 1959 static const struct net_device_ops ibmvnic_netdev_ops = { 1960 .ndo_open = ibmvnic_open, 1961 .ndo_stop = ibmvnic_close, 1962 .ndo_start_xmit = ibmvnic_xmit, 1963 .ndo_set_rx_mode = ibmvnic_set_multi, 1964 .ndo_set_mac_address = ibmvnic_set_mac, 1965 .ndo_validate_addr = eth_validate_addr, 1966 .ndo_tx_timeout = ibmvnic_tx_timeout, 1967 #ifdef CONFIG_NET_POLL_CONTROLLER 1968 .ndo_poll_controller = ibmvnic_netpoll_controller, 1969 #endif 1970 .ndo_change_mtu = ibmvnic_change_mtu, 1971 }; 1972 1973 /* ethtool functions */ 1974 1975 static int ibmvnic_get_link_ksettings(struct net_device *netdev, 1976 struct ethtool_link_ksettings *cmd) 1977 { 1978 u32 supported, advertising; 1979 1980 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | 1981 SUPPORTED_FIBRE); 1982 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | 1983 ADVERTISED_FIBRE); 1984 cmd->base.speed = SPEED_1000; 1985 cmd->base.duplex = DUPLEX_FULL; 1986 cmd->base.port = PORT_FIBRE; 1987 cmd->base.phy_address = 0; 1988 cmd->base.autoneg = AUTONEG_ENABLE; 1989 1990 
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, adapter->fw_version,
		sizeof(info->fw_version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
	ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
	ring->tx_pending = adapter->req_tx_entries_per_subcrq;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int ibmvnic_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
	    ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
		netdev_err(netdev, "Invalid request.\n");
		netdev_err(netdev, "Max tx buffers = %llu\n",
			   adapter->max_tx_entries_per_subcrq);
		netdev_err(netdev, "Max rx buffers = %llu\n",
			   adapter->max_rx_add_entries_per_subcrq);
		return -EINVAL;
	}

	adapter->desired.rx_entries = ring->rx_pending;
	adapter->desired.tx_entries = ring->tx_pending;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	channels->max_rx = adapter->max_rx_queues;
	channels->max_tx = adapter->max_tx_queues;
	channels->max_other = 0;
	channels->max_combined = 0;
	channels->rx_count = adapter->req_rx_queues;
	channels->tx_count = adapter->req_tx_queues;
	channels->other_count = 0;
	channels->combined_count = 0;
}

static int ibmvnic_set_channels(struct net_device *netdev,
				struct ethtool_channels *channels)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->desired.rx_queues = channels->rx_count;
	adapter->desired.tx_queues = channels->tx_count;

	return wait_for_reset(adapter);
}

static void ibmvnic_get_strings(struct net_device *dev, u32
stringset, u8 *data) 2096 { 2097 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2098 int i; 2099 2100 if (stringset != ETH_SS_STATS) 2101 return; 2102 2103 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) 2104 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); 2105 2106 for (i = 0; i < adapter->req_tx_queues; i++) { 2107 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); 2108 data += ETH_GSTRING_LEN; 2109 2110 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); 2111 data += ETH_GSTRING_LEN; 2112 2113 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); 2114 data += ETH_GSTRING_LEN; 2115 } 2116 2117 for (i = 0; i < adapter->req_rx_queues; i++) { 2118 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); 2119 data += ETH_GSTRING_LEN; 2120 2121 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); 2122 data += ETH_GSTRING_LEN; 2123 2124 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); 2125 data += ETH_GSTRING_LEN; 2126 } 2127 } 2128 2129 static int ibmvnic_get_sset_count(struct net_device *dev, int sset) 2130 { 2131 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2132 2133 switch (sset) { 2134 case ETH_SS_STATS: 2135 return ARRAY_SIZE(ibmvnic_stats) + 2136 adapter->req_tx_queues * NUM_TX_STATS + 2137 adapter->req_rx_queues * NUM_RX_STATS; 2138 default: 2139 return -EOPNOTSUPP; 2140 } 2141 } 2142 2143 static void ibmvnic_get_ethtool_stats(struct net_device *dev, 2144 struct ethtool_stats *stats, u64 *data) 2145 { 2146 struct ibmvnic_adapter *adapter = netdev_priv(dev); 2147 union ibmvnic_crq crq; 2148 int i, j; 2149 2150 memset(&crq, 0, sizeof(crq)); 2151 crq.request_statistics.first = IBMVNIC_CRQ_CMD; 2152 crq.request_statistics.cmd = REQUEST_STATISTICS; 2153 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 2154 crq.request_statistics.len = 2155 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 2156 2157 /* Wait for data to be written */ 2158 init_completion(&adapter->stats_done); 2159 ibmvnic_send_crq(adapter, &crq); 2160 wait_for_completion(&adapter->stats_done); 2161 2162 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 2163 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, 2164 ibmvnic_stats[i].offset)); 2165 2166 for (j = 0; j < adapter->req_tx_queues; j++) { 2167 data[i] = adapter->tx_stats_buffers[j].packets; 2168 i++; 2169 data[i] = adapter->tx_stats_buffers[j].bytes; 2170 i++; 2171 data[i] = adapter->tx_stats_buffers[j].dropped_packets; 2172 i++; 2173 } 2174 2175 for (j = 0; j < adapter->req_rx_queues; j++) { 2176 data[i] = adapter->rx_stats_buffers[j].packets; 2177 i++; 2178 data[i] = adapter->rx_stats_buffers[j].bytes; 2179 i++; 2180 data[i] = adapter->rx_stats_buffers[j].interrupts; 2181 i++; 2182 } 2183 } 2184 2185 static const struct ethtool_ops ibmvnic_ethtool_ops = { 2186 .get_drvinfo = ibmvnic_get_drvinfo, 2187 .get_msglevel = ibmvnic_get_msglevel, 2188 .set_msglevel = ibmvnic_set_msglevel, 2189 .get_link = ibmvnic_get_link, 2190 .get_ringparam = ibmvnic_get_ringparam, 2191 .set_ringparam = ibmvnic_set_ringparam, 2192 .get_channels = ibmvnic_get_channels, 2193 .set_channels = ibmvnic_set_channels, 2194 .get_strings = ibmvnic_get_strings, 2195 .get_sset_count = ibmvnic_get_sset_count, 2196 .get_ethtool_stats = ibmvnic_get_ethtool_stats, 2197 .get_link_ksettings = ibmvnic_get_link_ksettings, 2198 }; 2199 2200 /* Routines for managing CRQs/sCRQs */ 2201 2202 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, 2203 struct ibmvnic_sub_crq_queue *scrq) 2204 { 2205 int rc; 2206 2207 if (scrq->irq) { 2208 
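		/* Release any registered handler and IRQ mapping before the
		 * queue is re-registered with the hypervisor below.
		 */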
free_irq(scrq->irq, scrq); 2209 irq_dispose_mapping(scrq->irq); 2210 scrq->irq = 0; 2211 } 2212 2213 memset(scrq->msgs, 0, 4 * PAGE_SIZE); 2214 scrq->cur = 0; 2215 2216 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2217 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2218 return rc; 2219 } 2220 2221 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) 2222 { 2223 int i, rc; 2224 2225 for (i = 0; i < adapter->req_tx_queues; i++) { 2226 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); 2227 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2228 if (rc) 2229 return rc; 2230 } 2231 2232 for (i = 0; i < adapter->req_rx_queues; i++) { 2233 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); 2234 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2235 if (rc) 2236 return rc; 2237 } 2238 2239 return rc; 2240 } 2241 2242 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, 2243 struct ibmvnic_sub_crq_queue *scrq) 2244 { 2245 struct device *dev = &adapter->vdev->dev; 2246 long rc; 2247 2248 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); 2249 2250 /* Close the sub-crqs */ 2251 do { 2252 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, 2253 adapter->vdev->unit_address, 2254 scrq->crq_num); 2255 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 2256 2257 if (rc) { 2258 netdev_err(adapter->netdev, 2259 "Failed to release sub-CRQ %16lx, rc = %ld\n", 2260 scrq->crq_num, rc); 2261 } 2262 2263 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2264 DMA_BIDIRECTIONAL); 2265 free_pages((unsigned long)scrq->msgs, 2); 2266 kfree(scrq); 2267 } 2268 2269 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter 2270 *adapter) 2271 { 2272 struct device *dev = &adapter->vdev->dev; 2273 struct ibmvnic_sub_crq_queue *scrq; 2274 int rc; 2275 2276 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); 2277 if (!scrq) 2278 return NULL; 2279 2280 scrq->msgs = 2281 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); 2282 if (!scrq->msgs) { 2283 dev_warn(dev, "Couldn't allocate crq queue messages page\n"); 2284 goto zero_page_failed; 2285 } 2286 2287 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, 2288 DMA_BIDIRECTIONAL); 2289 if (dma_mapping_error(dev, scrq->msg_token)) { 2290 dev_warn(dev, "Couldn't map crq queue messages page\n"); 2291 goto map_failed; 2292 } 2293 2294 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, 2295 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); 2296 2297 if (rc == H_RESOURCE) 2298 rc = ibmvnic_reset_crq(adapter); 2299 2300 if (rc == H_CLOSED) { 2301 dev_warn(dev, "Partner adapter not ready, waiting.\n"); 2302 } else if (rc) { 2303 dev_warn(dev, "Error %d registering sub-crq\n", rc); 2304 goto reg_failed; 2305 } 2306 2307 scrq->adapter = adapter; 2308 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); 2309 spin_lock_init(&scrq->lock); 2310 2311 netdev_dbg(adapter->netdev, 2312 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", 2313 scrq->crq_num, scrq->hw_irq, scrq->irq); 2314 2315 return scrq; 2316 2317 reg_failed: 2318 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, 2319 DMA_BIDIRECTIONAL); 2320 map_failed: 2321 free_pages((unsigned long)scrq->msgs, 2); 2322 zero_page_failed: 2323 kfree(scrq); 2324 2325 return NULL; 2326 } 2327 2328 static void release_sub_crqs(struct ibmvnic_adapter *adapter) 2329 { 2330 int i; 2331 2332 if (adapter->tx_scrq) { 2333 for (i = 0; i < adapter->req_tx_queues; i++) { 2334 if (!adapter->tx_scrq[i]) 2335 continue; 2336 2337 
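			/* Free the queue's IRQ, if one was requested, before
			 * handing the sub-CRQ back to the hypervisor.
			 */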
netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", 2338 i); 2339 if (adapter->tx_scrq[i]->irq) { 2340 free_irq(adapter->tx_scrq[i]->irq, 2341 adapter->tx_scrq[i]); 2342 irq_dispose_mapping(adapter->tx_scrq[i]->irq); 2343 adapter->tx_scrq[i]->irq = 0; 2344 } 2345 2346 release_sub_crq_queue(adapter, adapter->tx_scrq[i]); 2347 } 2348 2349 kfree(adapter->tx_scrq); 2350 adapter->tx_scrq = NULL; 2351 } 2352 2353 if (adapter->rx_scrq) { 2354 for (i = 0; i < adapter->req_rx_queues; i++) { 2355 if (!adapter->rx_scrq[i]) 2356 continue; 2357 2358 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", 2359 i); 2360 if (adapter->rx_scrq[i]->irq) { 2361 free_irq(adapter->rx_scrq[i]->irq, 2362 adapter->rx_scrq[i]); 2363 irq_dispose_mapping(adapter->rx_scrq[i]->irq); 2364 adapter->rx_scrq[i]->irq = 0; 2365 } 2366 2367 release_sub_crq_queue(adapter, adapter->rx_scrq[i]); 2368 } 2369 2370 kfree(adapter->rx_scrq); 2371 adapter->rx_scrq = NULL; 2372 } 2373 } 2374 2375 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, 2376 struct ibmvnic_sub_crq_queue *scrq) 2377 { 2378 struct device *dev = &adapter->vdev->dev; 2379 unsigned long rc; 2380 2381 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2382 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2383 if (rc) 2384 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n", 2385 scrq->hw_irq, rc); 2386 return rc; 2387 } 2388 2389 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, 2390 struct ibmvnic_sub_crq_queue *scrq) 2391 { 2392 struct device *dev = &adapter->vdev->dev; 2393 unsigned long rc; 2394 2395 if (scrq->hw_irq > 0x100000000ULL) { 2396 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2397 return 1; 2398 } 2399 2400 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2401 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2402 if (rc) 2403 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n", 2404 scrq->hw_irq, rc); 2405 return rc; 2406 } 2407 2408 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, 2409 struct ibmvnic_sub_crq_queue *scrq) 2410 { 2411 struct device *dev = &adapter->vdev->dev; 2412 struct ibmvnic_tx_buff *txbuff; 2413 union sub_crq *next; 2414 int index; 2415 int i, j; 2416 u8 first; 2417 2418 restart_loop: 2419 while (pending_scrq(adapter, scrq)) { 2420 unsigned int pool = scrq->pool_index; 2421 2422 next = ibmvnic_next_scrq(adapter, scrq); 2423 for (i = 0; i < next->tx_comp.num_comps; i++) { 2424 if (next->tx_comp.rcs[i]) { 2425 dev_err(dev, "tx error %x\n", 2426 next->tx_comp.rcs[i]); 2427 continue; 2428 } 2429 index = be32_to_cpu(next->tx_comp.correlators[i]); 2430 txbuff = &adapter->tx_pool[pool].tx_buff[index]; 2431 2432 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { 2433 if (!txbuff->data_dma[j]) 2434 continue; 2435 2436 txbuff->data_dma[j] = 0; 2437 } 2438 /* if sub_crq was sent indirectly */ 2439 first = txbuff->indir_arr[0].generic.first; 2440 if (first == IBMVNIC_CRQ_CMD) { 2441 dma_unmap_single(dev, txbuff->indir_dma, 2442 sizeof(txbuff->indir_arr), 2443 DMA_TO_DEVICE); 2444 } 2445 2446 if (txbuff->last_frag) { 2447 dev_kfree_skb_any(txbuff->skb); 2448 txbuff->skb = NULL; 2449 } 2450 2451 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. 
producer_index] = index;
			adapter->tx_pool[pool].producer_index =
				(adapter->tx_pool[pool].producer_index + 1) %
				adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	/* When booting a kdump kernel we can hit pending interrupts
	 * prior to completing driver initialization.
	 */
	if (unlikely(adapter->state != VNIC_OPEN))
		return IRQ_NONE;

	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
			   i);
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
			   i);
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int max_entries;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		if (adapter->desired.mtu)
			adapter->req_mtu = adapter->desired.mtu;
		else
			adapter->req_mtu
= adapter->netdev->mtu + ETH_HLEN; 2681 2682 if (!adapter->desired.tx_entries) 2683 adapter->desired.tx_entries = 2684 adapter->max_tx_entries_per_subcrq; 2685 if (!adapter->desired.rx_entries) 2686 adapter->desired.rx_entries = 2687 adapter->max_rx_add_entries_per_subcrq; 2688 2689 max_entries = IBMVNIC_MAX_LTB_SIZE / 2690 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); 2691 2692 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 2693 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { 2694 adapter->desired.tx_entries = max_entries; 2695 } 2696 2697 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * 2698 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { 2699 adapter->desired.rx_entries = max_entries; 2700 } 2701 2702 if (adapter->desired.tx_entries) 2703 adapter->req_tx_entries_per_subcrq = 2704 adapter->desired.tx_entries; 2705 else 2706 adapter->req_tx_entries_per_subcrq = 2707 adapter->max_tx_entries_per_subcrq; 2708 2709 if (adapter->desired.rx_entries) 2710 adapter->req_rx_add_entries_per_subcrq = 2711 adapter->desired.rx_entries; 2712 else 2713 adapter->req_rx_add_entries_per_subcrq = 2714 adapter->max_rx_add_entries_per_subcrq; 2715 2716 if (adapter->desired.tx_queues) 2717 adapter->req_tx_queues = 2718 adapter->desired.tx_queues; 2719 else 2720 adapter->req_tx_queues = 2721 adapter->opt_tx_comp_sub_queues; 2722 2723 if (adapter->desired.rx_queues) 2724 adapter->req_rx_queues = 2725 adapter->desired.rx_queues; 2726 else 2727 adapter->req_rx_queues = 2728 adapter->opt_rx_comp_queues; 2729 2730 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 2731 } 2732 2733 memset(&crq, 0, sizeof(crq)); 2734 crq.request_capability.first = IBMVNIC_CRQ_CMD; 2735 crq.request_capability.cmd = REQUEST_CAPABILITY; 2736 2737 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 2738 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); 2739 atomic_inc(&adapter->running_cap_crqs); 2740 ibmvnic_send_crq(adapter, &crq); 2741 2742 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 2743 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); 2744 atomic_inc(&adapter->running_cap_crqs); 2745 ibmvnic_send_crq(adapter, &crq); 2746 2747 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 2748 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); 2749 atomic_inc(&adapter->running_cap_crqs); 2750 ibmvnic_send_crq(adapter, &crq); 2751 2752 crq.request_capability.capability = 2753 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 2754 crq.request_capability.number = 2755 cpu_to_be64(adapter->req_tx_entries_per_subcrq); 2756 atomic_inc(&adapter->running_cap_crqs); 2757 ibmvnic_send_crq(adapter, &crq); 2758 2759 crq.request_capability.capability = 2760 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 2761 crq.request_capability.number = 2762 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); 2763 atomic_inc(&adapter->running_cap_crqs); 2764 ibmvnic_send_crq(adapter, &crq); 2765 2766 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 2767 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); 2768 atomic_inc(&adapter->running_cap_crqs); 2769 ibmvnic_send_crq(adapter, &crq); 2770 2771 if (adapter->netdev->flags & IFF_PROMISC) { 2772 if (adapter->promisc_supported) { 2773 crq.request_capability.capability = 2774 cpu_to_be16(PROMISC_REQUESTED); 2775 crq.request_capability.number = cpu_to_be64(1); 2776 atomic_inc(&adapter->running_cap_crqs); 2777 ibmvnic_send_crq(adapter, &crq); 2778 } 2779 } else { 2780 crq.request_capability.capability 
= 2781 cpu_to_be16(PROMISC_REQUESTED); 2782 crq.request_capability.number = cpu_to_be64(0); 2783 atomic_inc(&adapter->running_cap_crqs); 2784 ibmvnic_send_crq(adapter, &crq); 2785 } 2786 } 2787 2788 static int pending_scrq(struct ibmvnic_adapter *adapter, 2789 struct ibmvnic_sub_crq_queue *scrq) 2790 { 2791 union sub_crq *entry = &scrq->msgs[scrq->cur]; 2792 2793 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) 2794 return 1; 2795 else 2796 return 0; 2797 } 2798 2799 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, 2800 struct ibmvnic_sub_crq_queue *scrq) 2801 { 2802 union sub_crq *entry; 2803 unsigned long flags; 2804 2805 spin_lock_irqsave(&scrq->lock, flags); 2806 entry = &scrq->msgs[scrq->cur]; 2807 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { 2808 if (++scrq->cur == scrq->size) 2809 scrq->cur = 0; 2810 } else { 2811 entry = NULL; 2812 } 2813 spin_unlock_irqrestore(&scrq->lock, flags); 2814 2815 return entry; 2816 } 2817 2818 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) 2819 { 2820 struct ibmvnic_crq_queue *queue = &adapter->crq; 2821 union ibmvnic_crq *crq; 2822 2823 crq = &queue->msgs[queue->cur]; 2824 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { 2825 if (++queue->cur == queue->size) 2826 queue->cur = 0; 2827 } else { 2828 crq = NULL; 2829 } 2830 2831 return crq; 2832 } 2833 2834 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 2835 union sub_crq *sub_crq) 2836 { 2837 unsigned int ua = adapter->vdev->unit_address; 2838 struct device *dev = &adapter->vdev->dev; 2839 u64 *u64_crq = (u64 *)sub_crq; 2840 int rc; 2841 2842 netdev_dbg(adapter->netdev, 2843 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", 2844 (unsigned long int)cpu_to_be64(remote_handle), 2845 (unsigned long int)cpu_to_be64(u64_crq[0]), 2846 (unsigned long int)cpu_to_be64(u64_crq[1]), 2847 (unsigned long int)cpu_to_be64(u64_crq[2]), 2848 (unsigned long int)cpu_to_be64(u64_crq[3])); 2849 2850 /* Make sure the hypervisor sees the complete request */ 2851 mb(); 2852 2853 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, 2854 cpu_to_be64(remote_handle), 2855 cpu_to_be64(u64_crq[0]), 2856 cpu_to_be64(u64_crq[1]), 2857 cpu_to_be64(u64_crq[2]), 2858 cpu_to_be64(u64_crq[3])); 2859 2860 if (rc) { 2861 if (rc == H_CLOSED) 2862 dev_warn(dev, "CRQ Queue closed\n"); 2863 dev_err(dev, "Send error (rc=%d)\n", rc); 2864 } 2865 2866 return rc; 2867 } 2868 2869 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, 2870 u64 remote_handle, u64 ioba, u64 num_entries) 2871 { 2872 unsigned int ua = adapter->vdev->unit_address; 2873 struct device *dev = &adapter->vdev->dev; 2874 int rc; 2875 2876 /* Make sure the hypervisor sees the complete request */ 2877 mb(); 2878 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, 2879 cpu_to_be64(remote_handle), 2880 ioba, num_entries); 2881 2882 if (rc) { 2883 if (rc == H_CLOSED) 2884 dev_warn(dev, "CRQ Queue closed\n"); 2885 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); 2886 } 2887 2888 return rc; 2889 } 2890 2891 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, 2892 union ibmvnic_crq *crq) 2893 { 2894 unsigned int ua = adapter->vdev->unit_address; 2895 struct device *dev = &adapter->vdev->dev; 2896 u64 *u64_crq = (u64 *)crq; 2897 int rc; 2898 2899 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", 2900 (unsigned long int)cpu_to_be64(u64_crq[0]), 2901 (unsigned long int)cpu_to_be64(u64_crq[1])); 2902 2903 /* Make sure the hypervisor sees the complete request */ 2904 mb(); 2905 
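	/* A CRQ message is 16 bytes; hand it to the hypervisor as two
	 * 8-byte words.
	 */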
2906 rc = plpar_hcall_norets(H_SEND_CRQ, ua, 2907 cpu_to_be64(u64_crq[0]), 2908 cpu_to_be64(u64_crq[1])); 2909 2910 if (rc) { 2911 if (rc == H_CLOSED) 2912 dev_warn(dev, "CRQ Queue closed\n"); 2913 dev_warn(dev, "Send error (rc=%d)\n", rc); 2914 } 2915 2916 return rc; 2917 } 2918 2919 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) 2920 { 2921 union ibmvnic_crq crq; 2922 2923 memset(&crq, 0, sizeof(crq)); 2924 crq.generic.first = IBMVNIC_CRQ_INIT_CMD; 2925 crq.generic.cmd = IBMVNIC_CRQ_INIT; 2926 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); 2927 2928 return ibmvnic_send_crq(adapter, &crq); 2929 } 2930 2931 static int send_version_xchg(struct ibmvnic_adapter *adapter) 2932 { 2933 union ibmvnic_crq crq; 2934 2935 memset(&crq, 0, sizeof(crq)); 2936 crq.version_exchange.first = IBMVNIC_CRQ_CMD; 2937 crq.version_exchange.cmd = VERSION_EXCHANGE; 2938 crq.version_exchange.version = cpu_to_be16(ibmvnic_version); 2939 2940 return ibmvnic_send_crq(adapter, &crq); 2941 } 2942 2943 struct vnic_login_client_data { 2944 u8 type; 2945 __be16 len; 2946 char name; 2947 } __packed; 2948 2949 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) 2950 { 2951 int len; 2952 2953 /* Calculate the amount of buffer space needed for the 2954 * vnic client data in the login buffer. There are four entries, 2955 * OS name, LPAR name, device name, and a null last entry. 2956 */ 2957 len = 4 * sizeof(struct vnic_login_client_data); 2958 len += 6; /* "Linux" plus NULL */ 2959 len += strlen(utsname()->nodename) + 1; 2960 len += strlen(adapter->netdev->name) + 1; 2961 2962 return len; 2963 } 2964 2965 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, 2966 struct vnic_login_client_data *vlcd) 2967 { 2968 const char *os_name = "Linux"; 2969 int len; 2970 2971 /* Type 1 - LPAR OS */ 2972 vlcd->type = 1; 2973 len = strlen(os_name) + 1; 2974 vlcd->len = cpu_to_be16(len); 2975 strncpy(&vlcd->name, os_name, len); 2976 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 2977 2978 /* Type 2 - LPAR name */ 2979 vlcd->type = 2; 2980 len = strlen(utsname()->nodename) + 1; 2981 vlcd->len = cpu_to_be16(len); 2982 strncpy(&vlcd->name, utsname()->nodename, len); 2983 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); 2984 2985 /* Type 3 - device name */ 2986 vlcd->type = 3; 2987 len = strlen(adapter->netdev->name) + 1; 2988 vlcd->len = cpu_to_be16(len); 2989 strncpy(&vlcd->name, adapter->netdev->name, len); 2990 } 2991 2992 static void send_login(struct ibmvnic_adapter *adapter) 2993 { 2994 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 2995 struct ibmvnic_login_buffer *login_buffer; 2996 struct device *dev = &adapter->vdev->dev; 2997 dma_addr_t rsp_buffer_token; 2998 dma_addr_t buffer_token; 2999 size_t rsp_buffer_size; 3000 union ibmvnic_crq crq; 3001 size_t buffer_size; 3002 __be64 *tx_list_p; 3003 __be64 *rx_list_p; 3004 int client_data_len; 3005 struct vnic_login_client_data *vlcd; 3006 int i; 3007 3008 client_data_len = vnic_client_data_len(adapter); 3009 3010 buffer_size = 3011 sizeof(struct ibmvnic_login_buffer) + 3012 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + 3013 client_data_len; 3014 3015 login_buffer = kzalloc(buffer_size, GFP_ATOMIC); 3016 if (!login_buffer) 3017 goto buf_alloc_failed; 3018 3019 buffer_token = dma_map_single(dev, login_buffer, buffer_size, 3020 DMA_TO_DEVICE); 3021 if (dma_mapping_error(dev, buffer_token)) { 3022 dev_err(dev, "Couldn't map login buffer\n"); 3023 goto buf_map_failed; 3024 } 3025 
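	/* Size the response buffer for the server's lists of tx/rx sub-CRQs
	 * plus the supported TX descriptor versions.
	 */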
3026 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + 3027 sizeof(u64) * adapter->req_tx_queues + 3028 sizeof(u64) * adapter->req_rx_queues + 3029 sizeof(u64) * adapter->req_rx_queues + 3030 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; 3031 3032 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); 3033 if (!login_rsp_buffer) 3034 goto buf_rsp_alloc_failed; 3035 3036 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer, 3037 rsp_buffer_size, DMA_FROM_DEVICE); 3038 if (dma_mapping_error(dev, rsp_buffer_token)) { 3039 dev_err(dev, "Couldn't map login rsp buffer\n"); 3040 goto buf_rsp_map_failed; 3041 } 3042 3043 adapter->login_buf = login_buffer; 3044 adapter->login_buf_token = buffer_token; 3045 adapter->login_buf_sz = buffer_size; 3046 adapter->login_rsp_buf = login_rsp_buffer; 3047 adapter->login_rsp_buf_token = rsp_buffer_token; 3048 adapter->login_rsp_buf_sz = rsp_buffer_size; 3049 3050 login_buffer->len = cpu_to_be32(buffer_size); 3051 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); 3052 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); 3053 login_buffer->off_txcomp_subcrqs = 3054 cpu_to_be32(sizeof(struct ibmvnic_login_buffer)); 3055 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); 3056 login_buffer->off_rxcomp_subcrqs = 3057 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) + 3058 sizeof(u64) * adapter->req_tx_queues); 3059 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); 3060 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); 3061 3062 tx_list_p = (__be64 *)((char *)login_buffer + 3063 sizeof(struct ibmvnic_login_buffer)); 3064 rx_list_p = (__be64 *)((char *)login_buffer + 3065 sizeof(struct ibmvnic_login_buffer) + 3066 sizeof(u64) * adapter->req_tx_queues); 3067 3068 for (i = 0; i < adapter->req_tx_queues; i++) { 3069 if (adapter->tx_scrq[i]) { 3070 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]-> 3071 crq_num); 3072 } 3073 } 3074 3075 for (i = 0; i < adapter->req_rx_queues; i++) { 3076 if (adapter->rx_scrq[i]) { 3077 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]-> 3078 crq_num); 3079 } 3080 } 3081 3082 /* Insert vNIC login client data */ 3083 vlcd = (struct vnic_login_client_data *) 3084 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); 3085 login_buffer->client_data_offset = 3086 cpu_to_be32((char *)vlcd - (char *)login_buffer); 3087 login_buffer->client_data_len = cpu_to_be32(client_data_len); 3088 3089 vnic_add_client_data(adapter, vlcd); 3090 3091 netdev_dbg(adapter->netdev, "Login Buffer:\n"); 3092 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { 3093 netdev_dbg(adapter->netdev, "%016lx\n", 3094 ((unsigned long int *)(adapter->login_buf))[i]); 3095 } 3096 3097 memset(&crq, 0, sizeof(crq)); 3098 crq.login.first = IBMVNIC_CRQ_CMD; 3099 crq.login.cmd = LOGIN; 3100 crq.login.ioba = cpu_to_be32(buffer_token); 3101 crq.login.len = cpu_to_be32(buffer_size); 3102 ibmvnic_send_crq(adapter, &crq); 3103 3104 return; 3105 3106 buf_rsp_map_failed: 3107 kfree(login_rsp_buffer); 3108 buf_rsp_alloc_failed: 3109 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE); 3110 buf_map_failed: 3111 kfree(login_buffer); 3112 buf_alloc_failed: 3113 return; 3114 } 3115 3116 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3117 u32 len, u8 map_id) 3118 { 3119 union ibmvnic_crq crq; 3120 3121 memset(&crq, 0, sizeof(crq)); 3122 crq.request_map.first = IBMVNIC_CRQ_CMD; 3123 crq.request_map.cmd = REQUEST_MAP; 3124 crq.request_map.map_id = map_id; 3125 
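	/* ioba/len identify the DMA-mapped buffer being registered with the
	 * VNIC server.
	 */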
crq.request_map.ioba = cpu_to_be32(addr); 3126 crq.request_map.len = cpu_to_be32(len); 3127 ibmvnic_send_crq(adapter, &crq); 3128 } 3129 3130 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) 3131 { 3132 union ibmvnic_crq crq; 3133 3134 memset(&crq, 0, sizeof(crq)); 3135 crq.request_unmap.first = IBMVNIC_CRQ_CMD; 3136 crq.request_unmap.cmd = REQUEST_UNMAP; 3137 crq.request_unmap.map_id = map_id; 3138 ibmvnic_send_crq(adapter, &crq); 3139 } 3140 3141 static void send_map_query(struct ibmvnic_adapter *adapter) 3142 { 3143 union ibmvnic_crq crq; 3144 3145 memset(&crq, 0, sizeof(crq)); 3146 crq.query_map.first = IBMVNIC_CRQ_CMD; 3147 crq.query_map.cmd = QUERY_MAP; 3148 ibmvnic_send_crq(adapter, &crq); 3149 } 3150 3151 /* Send a series of CRQs requesting various capabilities of the VNIC server */ 3152 static void send_cap_queries(struct ibmvnic_adapter *adapter) 3153 { 3154 union ibmvnic_crq crq; 3155 3156 atomic_set(&adapter->running_cap_crqs, 0); 3157 memset(&crq, 0, sizeof(crq)); 3158 crq.query_capability.first = IBMVNIC_CRQ_CMD; 3159 crq.query_capability.cmd = QUERY_CAPABILITY; 3160 3161 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); 3162 atomic_inc(&adapter->running_cap_crqs); 3163 ibmvnic_send_crq(adapter, &crq); 3164 3165 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); 3166 atomic_inc(&adapter->running_cap_crqs); 3167 ibmvnic_send_crq(adapter, &crq); 3168 3169 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); 3170 atomic_inc(&adapter->running_cap_crqs); 3171 ibmvnic_send_crq(adapter, &crq); 3172 3173 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); 3174 atomic_inc(&adapter->running_cap_crqs); 3175 ibmvnic_send_crq(adapter, &crq); 3176 3177 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); 3178 atomic_inc(&adapter->running_cap_crqs); 3179 ibmvnic_send_crq(adapter, &crq); 3180 3181 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); 3182 atomic_inc(&adapter->running_cap_crqs); 3183 ibmvnic_send_crq(adapter, &crq); 3184 3185 crq.query_capability.capability = 3186 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); 3187 atomic_inc(&adapter->running_cap_crqs); 3188 ibmvnic_send_crq(adapter, &crq); 3189 3190 crq.query_capability.capability = 3191 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); 3192 atomic_inc(&adapter->running_cap_crqs); 3193 ibmvnic_send_crq(adapter, &crq); 3194 3195 crq.query_capability.capability = 3196 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); 3197 atomic_inc(&adapter->running_cap_crqs); 3198 ibmvnic_send_crq(adapter, &crq); 3199 3200 crq.query_capability.capability = 3201 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); 3202 atomic_inc(&adapter->running_cap_crqs); 3203 ibmvnic_send_crq(adapter, &crq); 3204 3205 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); 3206 atomic_inc(&adapter->running_cap_crqs); 3207 ibmvnic_send_crq(adapter, &crq); 3208 3209 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); 3210 atomic_inc(&adapter->running_cap_crqs); 3211 ibmvnic_send_crq(adapter, &crq); 3212 3213 crq.query_capability.capability = cpu_to_be16(MIN_MTU); 3214 atomic_inc(&adapter->running_cap_crqs); 3215 ibmvnic_send_crq(adapter, &crq); 3216 3217 crq.query_capability.capability = cpu_to_be16(MAX_MTU); 3218 atomic_inc(&adapter->running_cap_crqs); 3219 ibmvnic_send_crq(adapter, &crq); 3220 3221 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); 3222 atomic_inc(&adapter->running_cap_crqs); 3223 ibmvnic_send_crq(adapter, &crq); 3224 3225 
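	/* VLAN insertion, scatter-gather and optional tuning capabilities */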
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); 3226 atomic_inc(&adapter->running_cap_crqs); 3227 ibmvnic_send_crq(adapter, &crq); 3228 3229 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); 3230 atomic_inc(&adapter->running_cap_crqs); 3231 ibmvnic_send_crq(adapter, &crq); 3232 3233 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); 3234 atomic_inc(&adapter->running_cap_crqs); 3235 ibmvnic_send_crq(adapter, &crq); 3236 3237 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); 3238 atomic_inc(&adapter->running_cap_crqs); 3239 ibmvnic_send_crq(adapter, &crq); 3240 3241 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); 3242 atomic_inc(&adapter->running_cap_crqs); 3243 ibmvnic_send_crq(adapter, &crq); 3244 3245 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); 3246 atomic_inc(&adapter->running_cap_crqs); 3247 ibmvnic_send_crq(adapter, &crq); 3248 3249 crq.query_capability.capability = 3250 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); 3251 atomic_inc(&adapter->running_cap_crqs); 3252 ibmvnic_send_crq(adapter, &crq); 3253 3254 crq.query_capability.capability = 3255 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); 3256 atomic_inc(&adapter->running_cap_crqs); 3257 ibmvnic_send_crq(adapter, &crq); 3258 3259 crq.query_capability.capability = 3260 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); 3261 atomic_inc(&adapter->running_cap_crqs); 3262 ibmvnic_send_crq(adapter, &crq); 3263 3264 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); 3265 atomic_inc(&adapter->running_cap_crqs); 3266 ibmvnic_send_crq(adapter, &crq); 3267 } 3268 3269 static void handle_vpd_size_rsp(union ibmvnic_crq *crq, 3270 struct ibmvnic_adapter *adapter) 3271 { 3272 struct device *dev = &adapter->vdev->dev; 3273 3274 if (crq->get_vpd_size_rsp.rc.code) { 3275 dev_err(dev, "Error retrieving VPD size, rc=%x\n", 3276 crq->get_vpd_size_rsp.rc.code); 3277 complete(&adapter->fw_done); 3278 return; 3279 } 3280 3281 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); 3282 complete(&adapter->fw_done); 3283 } 3284 3285 static void handle_vpd_rsp(union ibmvnic_crq *crq, 3286 struct ibmvnic_adapter *adapter) 3287 { 3288 struct device *dev = &adapter->vdev->dev; 3289 unsigned char *substr = NULL, *ptr = NULL; 3290 u8 fw_level_len = 0; 3291 3292 memset(adapter->fw_version, 0, 32); 3293 3294 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, 3295 DMA_FROM_DEVICE); 3296 3297 if (crq->get_vpd_rsp.rc.code) { 3298 dev_err(dev, "Error retrieving VPD from device, rc=%x\n", 3299 crq->get_vpd_rsp.rc.code); 3300 goto complete; 3301 } 3302 3303 /* get the position of the firmware version info 3304 * located after the ASCII 'RM' substring in the buffer 3305 */ 3306 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); 3307 if (!substr) { 3308 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); 3309 ptr = strncpy((char *)adapter->fw_version, "N/A", 3310 3 * sizeof(char)); 3311 if (!ptr) 3312 dev_err(dev, "Failed to inform that firmware version is unavailable to the adapter\n"); 3313 goto complete; 3314 } 3315 3316 /* get length of firmware level ASCII substring */ 3317 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { 3318 fw_level_len = *(substr + 2); 3319 } else { 3320 dev_info(dev, "Length of FW substr extrapolated VDP buff\n"); 3321 goto complete; 3322 } 3323 3324 /* copy firmware version string from vpd into adapter */ 3325 if ((substr + 3 + fw_level_len) < 
3326 (adapter->vpd->buff + adapter->vpd->len)) { 3327 ptr = strncpy((char *)adapter->fw_version, 3328 substr + 3, fw_level_len); 3329 3330 if (!ptr) 3331 dev_err(dev, "Failed to isolate FW level string\n"); 3332 } else { 3333 dev_info(dev, "FW substr extrapolated VPD buff\n"); 3334 } 3335 3336 complete: 3337 complete(&adapter->fw_done); 3338 } 3339 3340 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) 3341 { 3342 struct device *dev = &adapter->vdev->dev; 3343 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 3344 union ibmvnic_crq crq; 3345 int i; 3346 3347 dma_unmap_single(dev, adapter->ip_offload_tok, 3348 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); 3349 3350 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); 3351 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) 3352 netdev_dbg(adapter->netdev, "%016lx\n", 3353 ((unsigned long int *)(buf))[i]); 3354 3355 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); 3356 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); 3357 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", 3358 buf->tcp_ipv4_chksum); 3359 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", 3360 buf->tcp_ipv6_chksum); 3361 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", 3362 buf->udp_ipv4_chksum); 3363 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", 3364 buf->udp_ipv6_chksum); 3365 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", 3366 buf->large_tx_ipv4); 3367 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", 3368 buf->large_tx_ipv6); 3369 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", 3370 buf->large_rx_ipv4); 3371 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", 3372 buf->large_rx_ipv6); 3373 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", 3374 buf->max_ipv4_header_size); 3375 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", 3376 buf->max_ipv6_header_size); 3377 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", 3378 buf->max_tcp_header_size); 3379 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", 3380 buf->max_udp_header_size); 3381 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", 3382 buf->max_large_tx_size); 3383 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", 3384 buf->max_large_rx_size); 3385 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", 3386 buf->ipv6_extension_header); 3387 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", 3388 buf->tcp_pseudosum_req); 3389 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", 3390 buf->num_ipv6_ext_headers); 3391 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", 3392 buf->off_ipv6_ext_headers); 3393 3394 adapter->ip_offload_ctrl_tok = 3395 dma_map_single(dev, &adapter->ip_offload_ctrl, 3396 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE); 3397 3398 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { 3399 dev_err(dev, "Couldn't map ip offload control buffer\n"); 3400 return; 3401 } 3402 3403 adapter->ip_offload_ctrl.len = 3404 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3405 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB); 3406 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum; 3407 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum; 3408 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum; 3409 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum; 3410 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; 3411 adapter->ip_offload_ctrl.udp_ipv6_chksum = 
buf->udp_ipv6_chksum; 3412 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; 3413 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; 3414 3415 /* large_rx disabled for now, additional features needed */ 3416 adapter->ip_offload_ctrl.large_rx_ipv4 = 0; 3417 adapter->ip_offload_ctrl.large_rx_ipv6 = 0; 3418 3419 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; 3420 3421 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 3422 adapter->netdev->features |= NETIF_F_IP_CSUM; 3423 3424 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 3425 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 3426 3427 if ((adapter->netdev->features & 3428 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 3429 adapter->netdev->features |= NETIF_F_RXCSUM; 3430 3431 if (buf->large_tx_ipv4) 3432 adapter->netdev->features |= NETIF_F_TSO; 3433 if (buf->large_tx_ipv6) 3434 adapter->netdev->features |= NETIF_F_TSO6; 3435 3436 adapter->netdev->hw_features |= adapter->netdev->features; 3437 3438 memset(&crq, 0, sizeof(crq)); 3439 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 3440 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; 3441 crq.control_ip_offload.len = 3442 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); 3443 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); 3444 ibmvnic_send_crq(adapter, &crq); 3445 } 3446 3447 static void handle_error_info_rsp(union ibmvnic_crq *crq, 3448 struct ibmvnic_adapter *adapter) 3449 { 3450 struct device *dev = &adapter->vdev->dev; 3451 struct ibmvnic_error_buff *error_buff, *tmp; 3452 unsigned long flags; 3453 bool found = false; 3454 int i; 3455 3456 if (!crq->request_error_rsp.rc.code) { 3457 dev_info(dev, "Request Error Rsp returned with rc=%x\n", 3458 crq->request_error_rsp.rc.code); 3459 return; 3460 } 3461 3462 spin_lock_irqsave(&adapter->error_list_lock, flags); 3463 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) 3464 if (error_buff->error_id == crq->request_error_rsp.error_id) { 3465 found = true; 3466 list_del(&error_buff->list); 3467 break; 3468 } 3469 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3470 3471 if (!found) { 3472 dev_err(dev, "Couldn't find error id %x\n", 3473 be32_to_cpu(crq->request_error_rsp.error_id)); 3474 return; 3475 } 3476 3477 dev_err(dev, "Detailed info for error id %x:", 3478 be32_to_cpu(crq->request_error_rsp.error_id)); 3479 3480 for (i = 0; i < error_buff->len; i++) { 3481 pr_cont("%02x", (int)error_buff->buff[i]); 3482 if (i % 8 == 7) 3483 pr_cont(" "); 3484 } 3485 pr_cont("\n"); 3486 3487 dma_unmap_single(dev, error_buff->dma, error_buff->len, 3488 DMA_FROM_DEVICE); 3489 kfree(error_buff->buff); 3490 kfree(error_buff); 3491 } 3492 3493 static void request_error_information(struct ibmvnic_adapter *adapter, 3494 union ibmvnic_crq *err_crq) 3495 { 3496 struct device *dev = &adapter->vdev->dev; 3497 struct net_device *netdev = adapter->netdev; 3498 struct ibmvnic_error_buff *error_buff; 3499 unsigned long timeout = msecs_to_jiffies(30000); 3500 union ibmvnic_crq crq; 3501 unsigned long flags; 3502 int rc, detail_len; 3503 3504 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); 3505 if (!error_buff) 3506 return; 3507 3508 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz); 3509 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC); 3510 if (!error_buff->buff) { 3511 kfree(error_buff); 3512 return; 3513 } 3514 3515 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len, 3516 DMA_FROM_DEVICE); 3517 if (dma_mapping_error(dev, error_buff->dma)) { 3518 
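		/* Mapping failed; drop the request and free the staging buffers */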
netdev_err(netdev, "Couldn't map error buffer\n"); 3519 kfree(error_buff->buff); 3520 kfree(error_buff); 3521 return; 3522 } 3523 3524 error_buff->len = detail_len; 3525 error_buff->error_id = err_crq->error_indication.error_id; 3526 3527 spin_lock_irqsave(&adapter->error_list_lock, flags); 3528 list_add_tail(&error_buff->list, &adapter->errors); 3529 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3530 3531 memset(&crq, 0, sizeof(crq)); 3532 crq.request_error_info.first = IBMVNIC_CRQ_CMD; 3533 crq.request_error_info.cmd = REQUEST_ERROR_INFO; 3534 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma); 3535 crq.request_error_info.len = cpu_to_be32(detail_len); 3536 crq.request_error_info.error_id = err_crq->error_indication.error_id; 3537 3538 rc = ibmvnic_send_crq(adapter, &crq); 3539 if (rc) { 3540 netdev_err(netdev, "failed to request error information\n"); 3541 goto err_info_fail; 3542 } 3543 3544 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3545 netdev_err(netdev, "timeout waiting for error information\n"); 3546 goto err_info_fail; 3547 } 3548 3549 return; 3550 3551 err_info_fail: 3552 spin_lock_irqsave(&adapter->error_list_lock, flags); 3553 list_del(&error_buff->list); 3554 spin_unlock_irqrestore(&adapter->error_list_lock, flags); 3555 3556 kfree(error_buff->buff); 3557 kfree(error_buff); 3558 } 3559 3560 static void handle_error_indication(union ibmvnic_crq *crq, 3561 struct ibmvnic_adapter *adapter) 3562 { 3563 struct device *dev = &adapter->vdev->dev; 3564 3565 dev_err(dev, "Firmware reports %serror id %x, cause %d\n", 3566 crq->error_indication.flags 3567 & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 3568 be32_to_cpu(crq->error_indication.error_id), 3569 be16_to_cpu(crq->error_indication.error_cause)); 3570 3571 if (be32_to_cpu(crq->error_indication.error_id)) 3572 request_error_information(adapter, crq); 3573 3574 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) 3575 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3576 else 3577 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); 3578 } 3579 3580 static int handle_change_mac_rsp(union ibmvnic_crq *crq, 3581 struct ibmvnic_adapter *adapter) 3582 { 3583 struct net_device *netdev = adapter->netdev; 3584 struct device *dev = &adapter->vdev->dev; 3585 long rc; 3586 3587 rc = crq->change_mac_addr_rsp.rc.code; 3588 if (rc) { 3589 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc); 3590 goto out; 3591 } 3592 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0], 3593 ETH_ALEN); 3594 out: 3595 complete(&adapter->fw_done); 3596 return rc; 3597 } 3598 3599 static void handle_request_cap_rsp(union ibmvnic_crq *crq, 3600 struct ibmvnic_adapter *adapter) 3601 { 3602 struct device *dev = &adapter->vdev->dev; 3603 u64 *req_value; 3604 char *name; 3605 3606 atomic_dec(&adapter->running_cap_crqs); 3607 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { 3608 case REQ_TX_QUEUES: 3609 req_value = &adapter->req_tx_queues; 3610 name = "tx"; 3611 break; 3612 case REQ_RX_QUEUES: 3613 req_value = &adapter->req_rx_queues; 3614 name = "rx"; 3615 break; 3616 case REQ_RX_ADD_QUEUES: 3617 req_value = &adapter->req_rx_add_queues; 3618 name = "rx_add"; 3619 break; 3620 case REQ_TX_ENTRIES_PER_SUBCRQ: 3621 req_value = &adapter->req_tx_entries_per_subcrq; 3622 name = "tx_entries_per_subcrq"; 3623 break; 3624 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ: 3625 req_value = &adapter->req_rx_add_entries_per_subcrq; 3626 name = "rx_add_entries_per_subcrq"; 3627 break; 3628 case REQ_MTU: 3629 req_value = &adapter->req_mtu; 3630 name 
= "mtu"; 3631 break; 3632 case PROMISC_REQUESTED: 3633 req_value = &adapter->promisc; 3634 name = "promisc"; 3635 break; 3636 default: 3637 dev_err(dev, "Got invalid cap request rsp %d\n", 3638 crq->request_capability.capability); 3639 return; 3640 } 3641 3642 switch (crq->request_capability_rsp.rc.code) { 3643 case SUCCESS: 3644 break; 3645 case PARTIALSUCCESS: 3646 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 3647 *req_value, 3648 (long int)be64_to_cpu(crq->request_capability_rsp. 3649 number), name); 3650 3651 if (be16_to_cpu(crq->request_capability_rsp.capability) == 3652 REQ_MTU) { 3653 pr_err("mtu of %llu is not supported. Reverting.\n", 3654 *req_value); 3655 *req_value = adapter->fallback.mtu; 3656 } else { 3657 *req_value = 3658 be64_to_cpu(crq->request_capability_rsp.number); 3659 } 3660 3661 ibmvnic_send_req_caps(adapter, 1); 3662 return; 3663 default: 3664 dev_err(dev, "Error %d in request cap rsp\n", 3665 crq->request_capability_rsp.rc.code); 3666 return; 3667 } 3668 3669 /* Done receiving requested capabilities, query IP offload support */ 3670 if (atomic_read(&adapter->running_cap_crqs) == 0) { 3671 union ibmvnic_crq newcrq; 3672 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer); 3673 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf = 3674 &adapter->ip_offload_buf; 3675 3676 adapter->wait_capability = false; 3677 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf, 3678 buf_sz, 3679 DMA_FROM_DEVICE); 3680 3681 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { 3682 if (!firmware_has_feature(FW_FEATURE_CMO)) 3683 dev_err(dev, "Couldn't map offload buffer\n"); 3684 return; 3685 } 3686 3687 memset(&newcrq, 0, sizeof(newcrq)); 3688 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD; 3689 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD; 3690 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz); 3691 newcrq.query_ip_offload.ioba = 3692 cpu_to_be32(adapter->ip_offload_tok); 3693 3694 ibmvnic_send_crq(adapter, &newcrq); 3695 } 3696 } 3697 3698 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, 3699 struct ibmvnic_adapter *adapter) 3700 { 3701 struct device *dev = &adapter->vdev->dev; 3702 struct net_device *netdev = adapter->netdev; 3703 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; 3704 struct ibmvnic_login_buffer *login = adapter->login_buf; 3705 int i; 3706 3707 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, 3708 DMA_BIDIRECTIONAL); 3709 dma_unmap_single(dev, adapter->login_rsp_buf_token, 3710 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); 3711 3712 /* If the number of queues requested can't be allocated by the 3713 * server, the login response will return with code 1. We will need 3714 * to resend the login buffer with fewer queues requested. 
3715 */ 3716 if (login_rsp_crq->generic.rc.code) { 3717 adapter->renegotiate = true; 3718 complete(&adapter->init_done); 3719 return 0; 3720 } 3721 3722 netdev->mtu = adapter->req_mtu - ETH_HLEN; 3723 3724 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); 3725 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { 3726 netdev_dbg(adapter->netdev, "%016lx\n", 3727 ((unsigned long int *)(adapter->login_rsp_buf))[i]); 3728 } 3729 3730 /* Sanity checks */ 3731 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || 3732 (be32_to_cpu(login->num_rxcomp_subcrqs) * 3733 adapter->req_rx_add_queues != 3734 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { 3735 dev_err(dev, "FATAL: Inconsistent login and login rsp\n"); 3736 ibmvnic_remove(adapter->vdev); 3737 return -EIO; 3738 } 3739 complete(&adapter->init_done); 3740 3741 return 0; 3742 } 3743 3744 static void handle_request_unmap_rsp(union ibmvnic_crq *crq, 3745 struct ibmvnic_adapter *adapter) 3746 { 3747 struct device *dev = &adapter->vdev->dev; 3748 long rc; 3749 3750 rc = crq->request_unmap_rsp.rc.code; 3751 if (rc) 3752 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc); 3753 } 3754 3755 static void handle_query_map_rsp(union ibmvnic_crq *crq, 3756 struct ibmvnic_adapter *adapter) 3757 { 3758 struct net_device *netdev = adapter->netdev; 3759 struct device *dev = &adapter->vdev->dev; 3760 long rc; 3761 3762 rc = crq->query_map_rsp.rc.code; 3763 if (rc) { 3764 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); 3765 return; 3766 } 3767 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", 3768 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, 3769 crq->query_map_rsp.free_pages); 3770 } 3771 3772 static void handle_query_cap_rsp(union ibmvnic_crq *crq, 3773 struct ibmvnic_adapter *adapter) 3774 { 3775 struct net_device *netdev = adapter->netdev; 3776 struct device *dev = &adapter->vdev->dev; 3777 long rc; 3778 3779 atomic_dec(&adapter->running_cap_crqs); 3780 netdev_dbg(netdev, "Outstanding queries: %d\n", 3781 atomic_read(&adapter->running_cap_crqs)); 3782 rc = crq->query_capability.rc.code; 3783 if (rc) { 3784 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc); 3785 goto out; 3786 } 3787 3788 switch (be16_to_cpu(crq->query_capability.capability)) { 3789 case MIN_TX_QUEUES: 3790 adapter->min_tx_queues = 3791 be64_to_cpu(crq->query_capability.number); 3792 netdev_dbg(netdev, "min_tx_queues = %lld\n", 3793 adapter->min_tx_queues); 3794 break; 3795 case MIN_RX_QUEUES: 3796 adapter->min_rx_queues = 3797 be64_to_cpu(crq->query_capability.number); 3798 netdev_dbg(netdev, "min_rx_queues = %lld\n", 3799 adapter->min_rx_queues); 3800 break; 3801 case MIN_RX_ADD_QUEUES: 3802 adapter->min_rx_add_queues = 3803 be64_to_cpu(crq->query_capability.number); 3804 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 3805 adapter->min_rx_add_queues); 3806 break; 3807 case MAX_TX_QUEUES: 3808 adapter->max_tx_queues = 3809 be64_to_cpu(crq->query_capability.number); 3810 netdev_dbg(netdev, "max_tx_queues = %lld\n", 3811 adapter->max_tx_queues); 3812 break; 3813 case MAX_RX_QUEUES: 3814 adapter->max_rx_queues = 3815 be64_to_cpu(crq->query_capability.number); 3816 netdev_dbg(netdev, "max_rx_queues = %lld\n", 3817 adapter->max_rx_queues); 3818 break; 3819 case MAX_RX_ADD_QUEUES: 3820 adapter->max_rx_add_queues = 3821 be64_to_cpu(crq->query_capability.number); 3822 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 3823 adapter->max_rx_add_queues); 3824 break; 3825 case MIN_TX_ENTRIES_PER_SUBCRQ: 3826 
adapter->min_tx_entries_per_subcrq = 3827 be64_to_cpu(crq->query_capability.number); 3828 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 3829 adapter->min_tx_entries_per_subcrq); 3830 break; 3831 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 3832 adapter->min_rx_add_entries_per_subcrq = 3833 be64_to_cpu(crq->query_capability.number); 3834 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 3835 adapter->min_rx_add_entries_per_subcrq); 3836 break; 3837 case MAX_TX_ENTRIES_PER_SUBCRQ: 3838 adapter->max_tx_entries_per_subcrq = 3839 be64_to_cpu(crq->query_capability.number); 3840 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 3841 adapter->max_tx_entries_per_subcrq); 3842 break; 3843 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 3844 adapter->max_rx_add_entries_per_subcrq = 3845 be64_to_cpu(crq->query_capability.number); 3846 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 3847 adapter->max_rx_add_entries_per_subcrq); 3848 break; 3849 case TCP_IP_OFFLOAD: 3850 adapter->tcp_ip_offload = 3851 be64_to_cpu(crq->query_capability.number); 3852 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 3853 adapter->tcp_ip_offload); 3854 break; 3855 case PROMISC_SUPPORTED: 3856 adapter->promisc_supported = 3857 be64_to_cpu(crq->query_capability.number); 3858 netdev_dbg(netdev, "promisc_supported = %lld\n", 3859 adapter->promisc_supported); 3860 break; 3861 case MIN_MTU: 3862 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 3863 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 3864 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 3865 break; 3866 case MAX_MTU: 3867 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 3868 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 3869 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 3870 break; 3871 case MAX_MULTICAST_FILTERS: 3872 adapter->max_multicast_filters = 3873 be64_to_cpu(crq->query_capability.number); 3874 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 3875 adapter->max_multicast_filters); 3876 break; 3877 case VLAN_HEADER_INSERTION: 3878 adapter->vlan_header_insertion = 3879 be64_to_cpu(crq->query_capability.number); 3880 if (adapter->vlan_header_insertion) 3881 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 3882 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 3883 adapter->vlan_header_insertion); 3884 break; 3885 case RX_VLAN_HEADER_INSERTION: 3886 adapter->rx_vlan_header_insertion = 3887 be64_to_cpu(crq->query_capability.number); 3888 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n", 3889 adapter->rx_vlan_header_insertion); 3890 break; 3891 case MAX_TX_SG_ENTRIES: 3892 adapter->max_tx_sg_entries = 3893 be64_to_cpu(crq->query_capability.number); 3894 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 3895 adapter->max_tx_sg_entries); 3896 break; 3897 case RX_SG_SUPPORTED: 3898 adapter->rx_sg_supported = 3899 be64_to_cpu(crq->query_capability.number); 3900 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 3901 adapter->rx_sg_supported); 3902 break; 3903 case OPT_TX_COMP_SUB_QUEUES: 3904 adapter->opt_tx_comp_sub_queues = 3905 be64_to_cpu(crq->query_capability.number); 3906 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 3907 adapter->opt_tx_comp_sub_queues); 3908 break; 3909 case OPT_RX_COMP_QUEUES: 3910 adapter->opt_rx_comp_queues = 3911 be64_to_cpu(crq->query_capability.number); 3912 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 3913 adapter->opt_rx_comp_queues); 3914 break; 3915 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 3916 adapter->opt_rx_bufadd_q_per_rx_comp_q = 3917 
be64_to_cpu(crq->query_capability.number); 3918 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 3919 adapter->opt_rx_bufadd_q_per_rx_comp_q); 3920 break; 3921 case OPT_TX_ENTRIES_PER_SUBCRQ: 3922 adapter->opt_tx_entries_per_subcrq = 3923 be64_to_cpu(crq->query_capability.number); 3924 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 3925 adapter->opt_tx_entries_per_subcrq); 3926 break; 3927 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 3928 adapter->opt_rxba_entries_per_subcrq = 3929 be64_to_cpu(crq->query_capability.number); 3930 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 3931 adapter->opt_rxba_entries_per_subcrq); 3932 break; 3933 case TX_RX_DESC_REQ: 3934 adapter->tx_rx_desc_req = crq->query_capability.number; 3935 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n", 3936 adapter->tx_rx_desc_req); 3937 break; 3938 3939 default: 3940 netdev_err(netdev, "Got invalid cap rsp %d\n", 3941 crq->query_capability.capability); 3942 } 3943 3944 out: 3945 if (atomic_read(&adapter->running_cap_crqs) == 0) { 3946 adapter->wait_capability = false; 3947 ibmvnic_send_req_caps(adapter, 0); 3948 } 3949 } 3950 3951 static void ibmvnic_handle_crq(union ibmvnic_crq *crq, 3952 struct ibmvnic_adapter *adapter) 3953 { 3954 struct ibmvnic_generic_crq *gen_crq = &crq->generic; 3955 struct net_device *netdev = adapter->netdev; 3956 struct device *dev = &adapter->vdev->dev; 3957 u64 *u64_crq = (u64 *)crq; 3958 long rc; 3959 3960 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n", 3961 (unsigned long int)cpu_to_be64(u64_crq[0]), 3962 (unsigned long int)cpu_to_be64(u64_crq[1])); 3963 switch (gen_crq->first) { 3964 case IBMVNIC_CRQ_INIT_RSP: 3965 switch (gen_crq->cmd) { 3966 case IBMVNIC_CRQ_INIT: 3967 dev_info(dev, "Partner initialized\n"); 3968 adapter->from_passive_init = true; 3969 complete(&adapter->init_done); 3970 break; 3971 case IBMVNIC_CRQ_INIT_COMPLETE: 3972 dev_info(dev, "Partner initialization complete\n"); 3973 send_version_xchg(adapter); 3974 break; 3975 default: 3976 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); 3977 } 3978 return; 3979 case IBMVNIC_CRQ_XPORT_EVENT: 3980 netif_carrier_off(netdev); 3981 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { 3982 dev_info(dev, "Migrated, re-enabling adapter\n"); 3983 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); 3984 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { 3985 dev_info(dev, "Backing device failover detected\n"); 3986 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); 3987 } else { 3988 /* The adapter lost the connection */ 3989 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", 3990 gen_crq->cmd); 3991 ibmvnic_reset(adapter, VNIC_RESET_FATAL); 3992 } 3993 return; 3994 case IBMVNIC_CRQ_CMD_RSP: 3995 break; 3996 default: 3997 dev_err(dev, "Got an invalid msg type 0x%02x\n", 3998 gen_crq->first); 3999 return; 4000 } 4001 4002 switch (gen_crq->cmd) { 4003 case VERSION_EXCHANGE_RSP: 4004 rc = crq->version_exchange_rsp.rc.code; 4005 if (rc) { 4006 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4007 break; 4008 } 4009 dev_info(dev, "Partner protocol version is %d\n", 4010 crq->version_exchange_rsp.version); 4011 if (be16_to_cpu(crq->version_exchange_rsp.version) < 4012 ibmvnic_version) 4013 ibmvnic_version = 4014 be16_to_cpu(crq->version_exchange_rsp.version); 4015 send_cap_queries(adapter); 4016 break; 4017 case QUERY_CAPABILITY_RSP: 4018 handle_query_cap_rsp(crq, adapter); 4019 break; 4020 case QUERY_MAP_RSP: 4021 handle_query_map_rsp(crq, adapter); 4022 break; 4023 case REQUEST_MAP_RSP: 4024 adapter->fw_done_rc = 
crq->request_map_rsp.rc.code; 4025 complete(&adapter->fw_done); 4026 break; 4027 case REQUEST_UNMAP_RSP: 4028 handle_request_unmap_rsp(crq, adapter); 4029 break; 4030 case REQUEST_CAPABILITY_RSP: 4031 handle_request_cap_rsp(crq, adapter); 4032 break; 4033 case LOGIN_RSP: 4034 netdev_dbg(netdev, "Got Login Response\n"); 4035 handle_login_rsp(crq, adapter); 4036 break; 4037 case LOGICAL_LINK_STATE_RSP: 4038 netdev_dbg(netdev, 4039 "Got Logical Link State Response, state: %d rc: %d\n", 4040 crq->logical_link_state_rsp.link_state, 4041 crq->logical_link_state_rsp.rc.code); 4042 adapter->logical_link_state = 4043 crq->logical_link_state_rsp.link_state; 4044 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; 4045 complete(&adapter->init_done); 4046 break; 4047 case LINK_STATE_INDICATION: 4048 netdev_dbg(netdev, "Got Logical Link State Indication\n"); 4049 adapter->phys_link_state = 4050 crq->link_state_indication.phys_link_state; 4051 adapter->logical_link_state = 4052 crq->link_state_indication.logical_link_state; 4053 break; 4054 case CHANGE_MAC_ADDR_RSP: 4055 netdev_dbg(netdev, "Got MAC address change Response\n"); 4056 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); 4057 break; 4058 case ERROR_INDICATION: 4059 netdev_dbg(netdev, "Got Error Indication\n"); 4060 handle_error_indication(crq, adapter); 4061 break; 4062 case REQUEST_ERROR_RSP: 4063 netdev_dbg(netdev, "Got Error Detail Response\n"); 4064 handle_error_info_rsp(crq, adapter); 4065 break; 4066 case REQUEST_STATISTICS_RSP: 4067 netdev_dbg(netdev, "Got Statistics Response\n"); 4068 complete(&adapter->stats_done); 4069 break; 4070 case QUERY_IP_OFFLOAD_RSP: 4071 netdev_dbg(netdev, "Got Query IP offload Response\n"); 4072 handle_query_ip_offload_rsp(adapter); 4073 break; 4074 case MULTICAST_CTRL_RSP: 4075 netdev_dbg(netdev, "Got multicast control Response\n"); 4076 break; 4077 case CONTROL_IP_OFFLOAD_RSP: 4078 netdev_dbg(netdev, "Got Control IP offload Response\n"); 4079 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, 4080 sizeof(adapter->ip_offload_ctrl), 4081 DMA_TO_DEVICE); 4082 complete(&adapter->init_done); 4083 break; 4084 case COLLECT_FW_TRACE_RSP: 4085 netdev_dbg(netdev, "Got Collect firmware trace Response\n"); 4086 complete(&adapter->fw_done); 4087 break; 4088 case GET_VPD_SIZE_RSP: 4089 handle_vpd_size_rsp(crq, adapter); 4090 break; 4091 case GET_VPD_RSP: 4092 handle_vpd_rsp(crq, adapter); 4093 break; 4094 default: 4095 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", 4096 gen_crq->cmd); 4097 } 4098 } 4099 4100 static irqreturn_t ibmvnic_interrupt(int irq, void *instance) 4101 { 4102 struct ibmvnic_adapter *adapter = instance; 4103 4104 tasklet_schedule(&adapter->tasklet); 4105 return IRQ_HANDLED; 4106 } 4107 4108 static void ibmvnic_tasklet(void *data) 4109 { 4110 struct ibmvnic_adapter *adapter = data; 4111 struct ibmvnic_crq_queue *queue = &adapter->crq; 4112 union ibmvnic_crq *crq; 4113 unsigned long flags; 4114 bool done = false; 4115 4116 spin_lock_irqsave(&queue->lock, flags); 4117 while (!done) { 4118 /* Pull all the valid messages off the CRQ */ 4119 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { 4120 ibmvnic_handle_crq(crq, adapter); 4121 crq->generic.first = 0; 4122 } 4123 4124 /* remain in tasklet until all 4125 * capabilities responses are received 4126 */ 4127 if (!adapter->wait_capability) 4128 done = true; 4129 } 4130 /* if capabilities CRQ's were sent in this tasklet, the following 4131 * tasklet must wait until all responses are received 4132 */ 4133 if 
(atomic_read(&adapter->running_cap_crqs) != 0) 4134 adapter->wait_capability = true; 4135 spin_unlock_irqrestore(&queue->lock, flags); 4136 } 4137 4138 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) 4139 { 4140 struct vio_dev *vdev = adapter->vdev; 4141 int rc; 4142 4143 do { 4144 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); 4145 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4146 4147 if (rc) 4148 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); 4149 4150 return rc; 4151 } 4152 4153 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) 4154 { 4155 struct ibmvnic_crq_queue *crq = &adapter->crq; 4156 struct device *dev = &adapter->vdev->dev; 4157 struct vio_dev *vdev = adapter->vdev; 4158 int rc; 4159 4160 /* Close the CRQ */ 4161 do { 4162 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4163 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4164 4165 /* Clean out the queue */ 4166 memset(crq->msgs, 0, PAGE_SIZE); 4167 crq->cur = 0; 4168 4169 /* And re-open it again */ 4170 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4171 crq->msg_token, PAGE_SIZE); 4172 4173 if (rc == H_CLOSED) 4174 /* Adapter is good, but other end is not ready */ 4175 dev_warn(dev, "Partner adapter not ready\n"); 4176 else if (rc != 0) 4177 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc); 4178 4179 return rc; 4180 } 4181 4182 static void release_crq_queue(struct ibmvnic_adapter *adapter) 4183 { 4184 struct ibmvnic_crq_queue *crq = &adapter->crq; 4185 struct vio_dev *vdev = adapter->vdev; 4186 long rc; 4187 4188 if (!crq->msgs) 4189 return; 4190 4191 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); 4192 free_irq(vdev->irq, adapter); 4193 tasklet_kill(&adapter->tasklet); 4194 do { 4195 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4196 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4197 4198 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, 4199 DMA_BIDIRECTIONAL); 4200 free_page((unsigned long)crq->msgs); 4201 crq->msgs = NULL; 4202 } 4203 4204 static int init_crq_queue(struct ibmvnic_adapter *adapter) 4205 { 4206 struct ibmvnic_crq_queue *crq = &adapter->crq; 4207 struct device *dev = &adapter->vdev->dev; 4208 struct vio_dev *vdev = adapter->vdev; 4209 int rc, retrc = -ENOMEM; 4210 4211 if (crq->msgs) 4212 return 0; 4213 4214 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); 4215 /* Should we allocate more than one page? */ 4216 4217 if (!crq->msgs) 4218 return -ENOMEM; 4219 4220 crq->size = PAGE_SIZE / sizeof(*crq->msgs); 4221 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, 4222 DMA_BIDIRECTIONAL); 4223 if (dma_mapping_error(dev, crq->msg_token)) 4224 goto map_failed; 4225 4226 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, 4227 crq->msg_token, PAGE_SIZE); 4228 4229 if (rc == H_RESOURCE) 4230 /* maybe kexecing and resource is busy. try a reset */ 4231 rc = ibmvnic_reset_crq(adapter); 4232 retrc = rc; 4233 4234 if (rc == H_CLOSED) { 4235 dev_warn(dev, "Partner adapter not ready\n"); 4236 } else if (rc) { 4237 dev_warn(dev, "Error %d opening adapter\n", rc); 4238 goto reg_crq_failed; 4239 } 4240 4241 retrc = 0; 4242 4243 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet, 4244 (unsigned long)adapter); 4245 4246 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); 4247 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME, 4248 adapter); 4249 if (rc) { 4250 dev_err(dev, "Couldn't register irq 0x%x. 
rc=%d\n", 4251 vdev->irq, rc); 4252 goto req_irq_failed; 4253 } 4254 4255 rc = vio_enable_interrupts(vdev); 4256 if (rc) { 4257 dev_err(dev, "Error %d enabling interrupts\n", rc); 4258 goto req_irq_failed; 4259 } 4260 4261 crq->cur = 0; 4262 spin_lock_init(&crq->lock); 4263 4264 return retrc; 4265 4266 req_irq_failed: 4267 tasklet_kill(&adapter->tasklet); 4268 do { 4269 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); 4270 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); 4271 reg_crq_failed: 4272 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); 4273 map_failed: 4274 free_page((unsigned long)crq->msgs); 4275 crq->msgs = NULL; 4276 return retrc; 4277 } 4278 4279 static int ibmvnic_init(struct ibmvnic_adapter *adapter) 4280 { 4281 struct device *dev = &adapter->vdev->dev; 4282 unsigned long timeout = msecs_to_jiffies(30000); 4283 int rc; 4284 4285 if (adapter->resetting && !adapter->wait_for_reset) { 4286 rc = ibmvnic_reset_crq(adapter); 4287 if (!rc) 4288 rc = vio_enable_interrupts(adapter->vdev); 4289 } else { 4290 rc = init_crq_queue(adapter); 4291 } 4292 4293 if (rc) { 4294 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); 4295 return rc; 4296 } 4297 4298 adapter->from_passive_init = false; 4299 4300 init_completion(&adapter->init_done); 4301 adapter->init_done_rc = 0; 4302 ibmvnic_send_crq_init(adapter); 4303 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4304 dev_err(dev, "Initialization sequence timed out\n"); 4305 return -1; 4306 } 4307 4308 if (adapter->init_done_rc) { 4309 release_crq_queue(adapter); 4310 return adapter->init_done_rc; 4311 } 4312 4313 if (adapter->from_passive_init) { 4314 adapter->state = VNIC_OPEN; 4315 adapter->from_passive_init = false; 4316 return -1; 4317 } 4318 4319 if (adapter->resetting && !adapter->wait_for_reset) 4320 rc = reset_sub_crq_queues(adapter); 4321 else 4322 rc = init_sub_crqs(adapter); 4323 if (rc) { 4324 dev_err(dev, "Initialization of sub crqs failed\n"); 4325 release_crq_queue(adapter); 4326 return rc; 4327 } 4328 4329 rc = init_sub_crq_irqs(adapter); 4330 if (rc) { 4331 dev_err(dev, "Failed to initialize sub crq irqs\n"); 4332 release_crq_queue(adapter); 4333 } 4334 4335 return rc; 4336 } 4337 4338 static struct device_attribute dev_attr_failover; 4339 4340 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) 4341 { 4342 struct ibmvnic_adapter *adapter; 4343 struct net_device *netdev; 4344 unsigned char *mac_addr_p; 4345 int rc; 4346 4347 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 4348 dev->unit_address); 4349 4350 mac_addr_p = (unsigned char *)vio_get_attribute(dev, 4351 VETH_MAC_ADDR, NULL); 4352 if (!mac_addr_p) { 4353 dev_err(&dev->dev, 4354 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n", 4355 __FILE__, __LINE__); 4356 return 0; 4357 } 4358 4359 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter), 4360 IBMVNIC_MAX_QUEUES); 4361 if (!netdev) 4362 return -ENOMEM; 4363 4364 adapter = netdev_priv(netdev); 4365 adapter->state = VNIC_PROBING; 4366 dev_set_drvdata(&dev->dev, netdev); 4367 adapter->vdev = dev; 4368 adapter->netdev = netdev; 4369 4370 ether_addr_copy(adapter->mac_addr, mac_addr_p); 4371 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); 4372 netdev->irq = dev->irq; 4373 netdev->netdev_ops = &ibmvnic_netdev_ops; 4374 netdev->ethtool_ops = &ibmvnic_ethtool_ops; 4375 SET_NETDEV_DEV(netdev, &dev->dev); 4376 4377 spin_lock_init(&adapter->stats_lock); 4378 4379 INIT_LIST_HEAD(&adapter->errors); 4380 
spin_lock_init(&adapter->error_list_lock); 4381 4382 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4383 INIT_LIST_HEAD(&adapter->rwi_list); 4384 mutex_init(&adapter->reset_lock); 4385 mutex_init(&adapter->rwi_lock); 4386 adapter->resetting = false; 4387 4388 adapter->mac_change_pending = false; 4389 4390 do { 4391 rc = ibmvnic_init(adapter); 4392 if (rc && rc != EAGAIN) 4393 goto ibmvnic_init_fail; 4394 } while (rc == EAGAIN); 4395 4396 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4397 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4398 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4399 4400 rc = device_create_file(&dev->dev, &dev_attr_failover); 4401 if (rc) 4402 goto ibmvnic_init_fail; 4403 4404 netif_carrier_off(netdev); 4405 rc = register_netdev(netdev); 4406 if (rc) { 4407 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); 4408 goto ibmvnic_register_fail; 4409 } 4410 dev_info(&dev->dev, "ibmvnic registered\n"); 4411 4412 adapter->state = VNIC_PROBED; 4413 4414 adapter->wait_for_reset = false; 4415 4416 return 0; 4417 4418 ibmvnic_register_fail: 4419 device_remove_file(&dev->dev, &dev_attr_failover); 4420 4421 ibmvnic_init_fail: 4422 release_sub_crqs(adapter); 4423 release_crq_queue(adapter); 4424 free_netdev(netdev); 4425 4426 return rc; 4427 } 4428 4429 static int ibmvnic_remove(struct vio_dev *dev) 4430 { 4431 struct net_device *netdev = dev_get_drvdata(&dev->dev); 4432 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4433 4434 adapter->state = VNIC_REMOVING; 4435 unregister_netdev(netdev); 4436 mutex_lock(&adapter->reset_lock); 4437 4438 release_resources(adapter); 4439 release_sub_crqs(adapter); 4440 release_crq_queue(adapter); 4441 4442 adapter->state = VNIC_REMOVED; 4443 4444 mutex_unlock(&adapter->reset_lock); 4445 device_remove_file(&dev->dev, &dev_attr_failover); 4446 free_netdev(netdev); 4447 dev_set_drvdata(&dev->dev, NULL); 4448 4449 return 0; 4450 } 4451 4452 static ssize_t failover_store(struct device *dev, struct device_attribute *attr, 4453 const char *buf, size_t count) 4454 { 4455 struct net_device *netdev = dev_get_drvdata(dev); 4456 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4457 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 4458 __be64 session_token; 4459 long rc; 4460 4461 if (!sysfs_streq(buf, "1")) 4462 return -EINVAL; 4463 4464 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, 4465 H_GET_SESSION_TOKEN, 0, 0, 0); 4466 if (rc) { 4467 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", 4468 rc); 4469 return -EINVAL; 4470 } 4471 4472 session_token = (__be64)retbuf[0]; 4473 netdev_dbg(netdev, "Initiating client failover, session id %llx\n", 4474 be64_to_cpu(session_token)); 4475 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 4476 H_SESSION_ERR_DETECTED, session_token, 0, 0); 4477 if (rc) { 4478 netdev_err(netdev, "Client initiated failover failed, rc %ld\n", 4479 rc); 4480 return -EINVAL; 4481 } 4482 4483 return count; 4484 } 4485 4486 static DEVICE_ATTR_WO(failover); 4487 4488 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) 4489 { 4490 struct net_device *netdev = dev_get_drvdata(&vdev->dev); 4491 struct ibmvnic_adapter *adapter; 4492 struct iommu_table *tbl; 4493 unsigned long ret = 0; 4494 int i; 4495 4496 tbl = get_iommu_table_base(&vdev->dev); 4497 4498 /* netdev inits at probe time along with the structures we need below*/ 4499 if (!netdev) 4500 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl); 4501 4502 adapter = netdev_priv(netdev); 4503 4504 ret += 
PAGE_SIZE; /* the crq message queue */ 4505 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl); 4506 4507 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) 4508 ret += 4 * PAGE_SIZE; /* the scrq message queue */ 4509 4510 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 4511 i++) 4512 ret += adapter->rx_pool[i].size * 4513 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); 4514 4515 return ret; 4516 } 4517 4518 static int ibmvnic_resume(struct device *dev) 4519 { 4520 struct net_device *netdev = dev_get_drvdata(dev); 4521 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 4522 4523 if (adapter->state != VNIC_OPEN) 4524 return 0; 4525 4526 tasklet_schedule(&adapter->tasklet); 4527 4528 return 0; 4529 } 4530 4531 static const struct vio_device_id ibmvnic_device_table[] = { 4532 {"network", "IBM,vnic"}, 4533 {"", "" } 4534 }; 4535 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table); 4536 4537 static const struct dev_pm_ops ibmvnic_pm_ops = { 4538 .resume = ibmvnic_resume 4539 }; 4540 4541 static struct vio_driver ibmvnic_driver = { 4542 .id_table = ibmvnic_device_table, 4543 .probe = ibmvnic_probe, 4544 .remove = ibmvnic_remove, 4545 .get_desired_dma = ibmvnic_get_desired_dma, 4546 .name = ibmvnic_driver_name, 4547 .pm = &ibmvnic_pm_ops, 4548 }; 4549 4550 /* module functions */ 4551 static int __init ibmvnic_module_init(void) 4552 { 4553 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string, 4554 IBMVNIC_DRIVER_VERSION); 4555 4556 return vio_register_driver(&ibmvnic_driver); 4557 } 4558 4559 static void __exit ibmvnic_module_exit(void) 4560 { 4561 vio_unregister_driver(&ibmvnic_driver); 4562 } 4563 4564 module_init(ibmvnic_module_init); 4565 module_exit(ibmvnic_module_exit); 4566
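/*
 * Usage sketch for the "failover" sysfs attribute created in ibmvnic_probe():
 * writing "1" invokes failover_store(), which obtains the session token via
 * H_VIOCTL/H_GET_SESSION_TOKEN and then signals H_SESSION_ERR_DETECTED to
 * request a client-initiated failover. For example (path is illustrative;
 * the actual location depends on the VIO unit address):
 *
 *   echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 */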