/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <linux/inetdevice.h>	/* ip_dev_find */
#include <linux/module.h>
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libcxgbi debug level (default=0)");


/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);

int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);

static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}
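/*
 * Rough sketch of the intended lifecycle (as used by the cxgb3i/cxgb4i
 * glue; not a strict contract enforced here): the low-level driver calls
 * cxgbi_device_register() with its port count, fills in cdev->ports[],
 * cdev->nports and the csk_*()/ddp callbacks, then typically calls
 * cxgbi_hbas_add() and cxgbi_device_portmap_create().  On removal,
 * cxgbi_device_unregister() (or cxgbi_device_unregister_all()) undoes
 * all of this via cxgbi_device_destroy() above.
 */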
struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
	spin_unlock(&cdev_rcu_lock);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);

void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");

	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);

	spin_lock(&cdev_rcu_lock);
	list_del_rcu(&cdev->rcu_node);
	spin_unlock(&cdev_rcu_lock);
	synchronize_rcu();

	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			mutex_unlock(&cdev_mutex);
			cxgbi_device_unregister(cdev);
			mutex_lock(&cdev_mutex);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
						 int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
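/*
 * The devices are kept on two parallel lists: cdev_list under cdev_mutex
 * for lookups that may sleep, and cdev_rcu_list for the RCU-protected
 * lookup below, which is intended for callers that cannot take the mutex
 * (e.g. lookups performed from atomic context).
 */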
struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				rcu_read_unlock();
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	rcu_read_unlock();

	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);

#if IS_ENABLED(CONFIG_IPV6)
static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
						     int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
				    MAX_ADDR_LEN)) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match mac found.\n",
		ndev, ndev->name);
	return NULL;
}
#endif

void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
		   unsigned int max_id, struct scsi_host_template *sht,
		   struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
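/*
 * Note on the two routines above: each offloaded port gets its own iSCSI
 * Scsi_Host, with the cxgbi_hba stored as the host's private data, and a
 * reference on the PCI function is held per registered host (the
 * pci_dev_get()/pci_dev_put() pairing in cxgbi_hbas_add() and
 * cxgbi_hbas_remove()).
 */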
/*
 * iSCSI offload
 *
 * - source port management
 * To find a free source port in the port allocation map we use a very simple
 * rotor scheme to look for the next free port.
 *
 * If a source port has been specified make sure that it doesn't collide with
 * our normal source port allocation map. If it's outside the range of our
 * allocation/deallocation scheme just let them use it.
 *
 * If the source port is outside our allocation range, the caller is
 * responsible for keeping track of their port usage.
 */
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;
	__be16 *port;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
		       cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(*port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			*port = htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}

static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	__be16 *port;

	if (csk->csk_family == AF_INET)
		port = &csk->saddr.sin_port;
	else /* ipv6 */
		port = &csk->saddr6.sin6_port;

	if (*port) {
		int idx = ntohs(*port) - pmap->sport_base;

		*port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				ntohs(*port));
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}
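/*
 * A small worked example of the rotor above (the numbers are made up):
 * with sport_base = 1024 and max_connect = 4096, a csk that claims map
 * index 37 is assigned source port htons(1024 + 37) = htons(1061), and
 * sock_put_port() recovers the index as ntohs(1061) - 1024 = 37 before
 * clearing the slot.  pmap->next remembers the last index handed out, so
 * the next search starts just past it.
 */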
/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}

static struct rtable *find_route_ipv4(struct flowi4 *fl4,
				      __be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr,
				   dport, sport, IPPROTO_TCP, tos, 0);
	if (IS_ERR(rt))
		return NULL;

	return rt;
}

static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct neighbour *n;
	struct flowi4 fl4;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			be32_to_cpu(daddr->sin_addr.s_addr),
			be16_to_cpu(daddr->sin_port));
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
	if (!n) {
		err = -ENODEV;
		goto rel_rt;
	}
	ndev = n->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			n->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_neigh;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_neigh;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	csk->csk_family = AF_INET;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = fl4.saddr;
	neigh_release(n);

	return csk;

rel_neigh:
	neigh_release(n);

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
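/*
 * Both cxgbi_check_route() above and its IPv6 counterpart below hand back
 * a freshly created cxgbi_sock with the route (csk->dst), egress port
 * index and peer/source addresses filled in, or an ERR_PTR() on failure,
 * so callers are expected to test with IS_ERR() rather than for NULL.
 * Picking a local source port (sock_get_port()) and issuing the hardware
 * active open are presumably left to the connect path that calls these.
 */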
#if IS_ENABLED(CONFIG_IPV6)
static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
					const struct in6_addr *daddr)
{
	struct flowi6 fl;

	/* zero the flowi6 so stack garbage does not leak into the lookup */
	memset(&fl, 0, sizeof(fl));
	if (saddr)
		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
	return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
}

static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
{
	struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rt6_info *rt = NULL;
	struct neighbour *n;
	struct in6_addr pref_saddr;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	rt = find_route_ipv6(NULL, &daddr6->sin6_addr);

	if (!rt) {
		pr_info("no route to ipv6 %pI6 port %u\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto err_out;
	}

	dst = &rt->dst;

	n = dst_neigh_lookup(dst, &daddr6->sin6_addr);

	if (!n) {
		pr_info("%pI6, port %u, dst no neighbour.\n",
			daddr6->sin6_addr.s6_addr,
			be16_to_cpu(daddr6->sin6_port));
		err = -ENETUNREACH;
		goto rel_rt;
	}
	ndev = n->dev;

	if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
		pr_info("multi-cast route %pI6 port %u, dev %s.\n",
			daddr6->sin6_addr.s6_addr,
			ntohs(daddr6->sin6_port), ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev)
		cdev = cxgbi_device_find_by_mac(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI6 %s, NOT cxgbi device.\n",
			daddr6->sin6_addr.s6_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
		ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;

	if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
		struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);

		err = ipv6_dev_get_saddr(&init_net, idev ?
idev->dev : NULL, 738 &daddr6->sin6_addr, 0, &pref_saddr); 739 if (err) { 740 pr_info("failed to get source address to reach %pI6\n", 741 &daddr6->sin6_addr); 742 goto rel_rt; 743 } 744 } else { 745 pref_saddr = rt->rt6i_prefsrc.addr; 746 } 747 748 csk->csk_family = AF_INET6; 749 csk->daddr6.sin6_addr = daddr6->sin6_addr; 750 csk->daddr6.sin6_port = daddr6->sin6_port; 751 csk->daddr6.sin6_family = daddr6->sin6_family; 752 csk->saddr6.sin6_addr = pref_saddr; 753 754 neigh_release(n); 755 return csk; 756 757 rel_rt: 758 if (n) 759 neigh_release(n); 760 761 ip6_rt_put(rt); 762 if (csk) 763 cxgbi_sock_closed(csk); 764 err_out: 765 return ERR_PTR(err); 766 } 767 #endif /* IS_ENABLED(CONFIG_IPV6) */ 768 769 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, 770 unsigned int opt) 771 { 772 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; 773 dst_confirm(csk->dst); 774 smp_mb(); 775 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); 776 } 777 EXPORT_SYMBOL_GPL(cxgbi_sock_established); 778 779 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) 780 { 781 log_debug(1 << CXGBI_DBG_SOCK, 782 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", 783 csk, csk->state, csk->flags, csk->user_data); 784 785 if (csk->state != CTP_ESTABLISHED) { 786 read_lock_bh(&csk->callback_lock); 787 if (csk->user_data) 788 iscsi_conn_failure(csk->user_data, 789 ISCSI_ERR_CONN_FAILED); 790 read_unlock_bh(&csk->callback_lock); 791 } 792 } 793 794 void cxgbi_sock_closed(struct cxgbi_sock *csk) 795 { 796 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 797 csk, (csk)->state, (csk)->flags, (csk)->tid); 798 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 799 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) 800 return; 801 if (csk->saddr.sin_port) 802 sock_put_port(csk); 803 if (csk->dst) 804 dst_release(csk->dst); 805 csk->cdev->csk_release_offload_resources(csk); 806 cxgbi_sock_set_state(csk, CTP_CLOSED); 807 cxgbi_inform_iscsi_conn_closing(csk); 808 cxgbi_sock_put(csk); 809 } 810 EXPORT_SYMBOL_GPL(cxgbi_sock_closed); 811 812 static void need_active_close(struct cxgbi_sock *csk) 813 { 814 int data_lost; 815 int close_req = 0; 816 817 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 818 csk, (csk)->state, (csk)->flags, (csk)->tid); 819 spin_lock_bh(&csk->lock); 820 dst_confirm(csk->dst); 821 data_lost = skb_queue_len(&csk->receive_queue); 822 __skb_queue_purge(&csk->receive_queue); 823 824 if (csk->state == CTP_ACTIVE_OPEN) 825 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 826 else if (csk->state == CTP_ESTABLISHED) { 827 close_req = 1; 828 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); 829 } else if (csk->state == CTP_PASSIVE_CLOSE) { 830 close_req = 1; 831 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 832 } 833 834 if (close_req) { 835 if (data_lost) 836 csk->cdev->csk_send_abort_req(csk); 837 else 838 csk->cdev->csk_send_close_req(csk); 839 } 840 841 spin_unlock_bh(&csk->lock); 842 } 843 844 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) 845 { 846 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", 847 csk, csk->state, csk->flags, 848 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, 849 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, 850 errno); 851 852 cxgbi_sock_set_state(csk, CTP_CONNECTING); 853 csk->err = errno; 854 cxgbi_sock_closed(csk); 855 } 856 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); 857 858 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 859 { 860 struct cxgbi_sock *csk = (struct 
cxgbi_sock *)skb->sk; 861 862 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 863 csk, (csk)->state, (csk)->flags, (csk)->tid); 864 cxgbi_sock_get(csk); 865 spin_lock_bh(&csk->lock); 866 if (csk->state == CTP_ACTIVE_OPEN) 867 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); 868 spin_unlock_bh(&csk->lock); 869 cxgbi_sock_put(csk); 870 __kfree_skb(skb); 871 } 872 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 873 874 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) 875 { 876 cxgbi_sock_get(csk); 877 spin_lock_bh(&csk->lock); 878 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 879 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) 880 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 881 else { 882 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); 883 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 884 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) 885 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", 886 csk, csk->state, csk->flags, csk->tid); 887 cxgbi_sock_closed(csk); 888 } 889 } 890 spin_unlock_bh(&csk->lock); 891 cxgbi_sock_put(csk); 892 } 893 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); 894 895 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) 896 { 897 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 898 csk, (csk)->state, (csk)->flags, (csk)->tid); 899 cxgbi_sock_get(csk); 900 spin_lock_bh(&csk->lock); 901 902 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 903 goto done; 904 905 switch (csk->state) { 906 case CTP_ESTABLISHED: 907 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); 908 break; 909 case CTP_ACTIVE_CLOSE: 910 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 911 break; 912 case CTP_CLOSE_WAIT_1: 913 cxgbi_sock_closed(csk); 914 break; 915 case CTP_ABORTING: 916 break; 917 default: 918 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 919 csk, csk->state, csk->flags, csk->tid); 920 } 921 cxgbi_inform_iscsi_conn_closing(csk); 922 done: 923 spin_unlock_bh(&csk->lock); 924 cxgbi_sock_put(csk); 925 } 926 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); 927 928 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) 929 { 930 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 931 csk, (csk)->state, (csk)->flags, (csk)->tid); 932 cxgbi_sock_get(csk); 933 spin_lock_bh(&csk->lock); 934 935 csk->snd_una = snd_nxt - 1; 936 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 937 goto done; 938 939 switch (csk->state) { 940 case CTP_ACTIVE_CLOSE: 941 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); 942 break; 943 case CTP_CLOSE_WAIT_1: 944 case CTP_CLOSE_WAIT_2: 945 cxgbi_sock_closed(csk); 946 break; 947 case CTP_ABORTING: 948 break; 949 default: 950 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 951 csk, csk->state, csk->flags, csk->tid); 952 } 953 done: 954 spin_unlock_bh(&csk->lock); 955 cxgbi_sock_put(csk); 956 } 957 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); 958 959 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, 960 unsigned int snd_una, int seq_chk) 961 { 962 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 963 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", 964 csk, csk->state, csk->flags, csk->tid, credits, 965 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); 966 967 spin_lock_bh(&csk->lock); 968 969 csk->wr_cred += credits; 970 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) 971 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; 972 973 while (credits) { 974 struct sk_buff *p = cxgbi_sock_peek_wr(csk); 975 976 if (unlikely(!p)) { 977 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", 978 
csk, csk->state, csk->flags, csk->tid, credits, 979 csk->wr_cred, csk->wr_una_cred); 980 break; 981 } 982 983 if (unlikely(credits < p->csum)) { 984 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", 985 csk, csk->state, csk->flags, csk->tid, 986 credits, csk->wr_cred, csk->wr_una_cred, 987 p->csum); 988 p->csum -= credits; 989 break; 990 } else { 991 cxgbi_sock_dequeue_wr(csk); 992 credits -= p->csum; 993 kfree_skb(p); 994 } 995 } 996 997 cxgbi_sock_check_wr_invariants(csk); 998 999 if (seq_chk) { 1000 if (unlikely(before(snd_una, csk->snd_una))) { 1001 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", 1002 csk, csk->state, csk->flags, csk->tid, snd_una, 1003 csk->snd_una); 1004 goto done; 1005 } 1006 1007 if (csk->snd_una != snd_una) { 1008 csk->snd_una = snd_una; 1009 dst_confirm(csk->dst); 1010 } 1011 } 1012 1013 if (skb_queue_len(&csk->write_queue)) { 1014 if (csk->cdev->csk_push_tx_frames(csk, 0)) 1015 cxgbi_conn_tx_open(csk); 1016 } else 1017 cxgbi_conn_tx_open(csk); 1018 done: 1019 spin_unlock_bh(&csk->lock); 1020 } 1021 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); 1022 1023 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, 1024 unsigned short mtu) 1025 { 1026 int i = 0; 1027 1028 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) 1029 ++i; 1030 1031 return i; 1032 } 1033 1034 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) 1035 { 1036 unsigned int idx; 1037 struct dst_entry *dst = csk->dst; 1038 1039 csk->advmss = dst_metric_advmss(dst); 1040 1041 if (csk->advmss > pmtu - 40) 1042 csk->advmss = pmtu - 40; 1043 if (csk->advmss < csk->cdev->mtus[0] - 40) 1044 csk->advmss = csk->cdev->mtus[0] - 40; 1045 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); 1046 1047 return idx; 1048 } 1049 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); 1050 1051 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) 1052 { 1053 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; 1054 __skb_queue_tail(&csk->write_queue, skb); 1055 } 1056 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); 1057 1058 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) 1059 { 1060 struct sk_buff *skb; 1061 1062 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) 1063 kfree_skb(skb); 1064 } 1065 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); 1066 1067 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) 1068 { 1069 int pending = cxgbi_sock_count_pending_wrs(csk); 1070 1071 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) 1072 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", 1073 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); 1074 } 1075 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); 1076 1077 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) 1078 { 1079 struct cxgbi_device *cdev = csk->cdev; 1080 struct sk_buff *next; 1081 int err, copied = 0; 1082 1083 spin_lock_bh(&csk->lock); 1084 1085 if (csk->state != CTP_ESTABLISHED) { 1086 log_debug(1 << CXGBI_DBG_PDU_TX, 1087 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", 1088 csk, csk->state, csk->flags, csk->tid); 1089 err = -EAGAIN; 1090 goto out_err; 1091 } 1092 1093 if (csk->err) { 1094 log_debug(1 << CXGBI_DBG_PDU_TX, 1095 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", 1096 csk, csk->state, csk->flags, csk->tid, csk->err); 1097 err = -EPIPE; 1098 goto out_err; 1099 } 1100 1101 if (csk->write_seq - csk->snd_una >= cdev->snd_win) { 1102 log_debug(1 << CXGBI_DBG_PDU_TX, 1103 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", 1104 csk, csk->state, csk->flags, csk->tid, csk->write_seq, 
1105 csk->snd_una, cdev->snd_win); 1106 err = -ENOBUFS; 1107 goto out_err; 1108 } 1109 1110 while (skb) { 1111 int frags = skb_shinfo(skb)->nr_frags + 1112 (skb->len != skb->data_len); 1113 1114 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { 1115 pr_err("csk 0x%p, skb head %u < %u.\n", 1116 csk, skb_headroom(skb), cdev->skb_tx_rsvd); 1117 err = -EINVAL; 1118 goto out_err; 1119 } 1120 1121 if (frags >= SKB_WR_LIST_SIZE) { 1122 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", 1123 csk, skb_shinfo(skb)->nr_frags, skb->len, 1124 skb->data_len, (uint)(SKB_WR_LIST_SIZE)); 1125 err = -EINVAL; 1126 goto out_err; 1127 } 1128 1129 next = skb->next; 1130 skb->next = NULL; 1131 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); 1132 cxgbi_sock_skb_entail(csk, skb); 1133 copied += skb->len; 1134 csk->write_seq += skb->len + 1135 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1136 skb = next; 1137 } 1138 done: 1139 if (likely(skb_queue_len(&csk->write_queue))) 1140 cdev->csk_push_tx_frames(csk, 1); 1141 spin_unlock_bh(&csk->lock); 1142 return copied; 1143 1144 out_err: 1145 if (copied == 0 && err == -EPIPE) 1146 copied = csk->err ? csk->err : -EPIPE; 1147 else 1148 copied = err; 1149 goto done; 1150 } 1151 1152 /* 1153 * Direct Data Placement - 1154 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted 1155 * final destination host-memory buffers based on the Initiator Task Tag (ITT) 1156 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. 1157 * The host memory address is programmed into h/w in the format of pagepod 1158 * entries. 1159 * The location of the pagepod entry is encoded into ddp tag which is used as 1160 * the base for ITT/TTT. 1161 */ 1162 1163 static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; 1164 static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; 1165 static unsigned char page_idx = DDP_PGIDX_MAX; 1166 1167 static unsigned char sw_tag_idx_bits; 1168 static unsigned char sw_tag_age_bits; 1169 1170 /* 1171 * Direct-Data Placement page size adjustment 1172 */ 1173 static int ddp_adjust_page_table(void) 1174 { 1175 int i; 1176 unsigned int base_order, order; 1177 1178 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { 1179 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", 1180 PAGE_SIZE, 1UL << ddp_page_shift[0]); 1181 return -EINVAL; 1182 } 1183 1184 base_order = get_order(1UL << ddp_page_shift[0]); 1185 order = get_order(1UL << PAGE_SHIFT); 1186 1187 for (i = 0; i < DDP_PGIDX_MAX; i++) { 1188 /* first is the kernel page size, then just doubling */ 1189 ddp_page_order[i] = order - base_order + i; 1190 ddp_page_shift[i] = PAGE_SHIFT + i; 1191 } 1192 return 0; 1193 } 1194 1195 static int ddp_find_page_index(unsigned long pgsz) 1196 { 1197 int i; 1198 1199 for (i = 0; i < DDP_PGIDX_MAX; i++) { 1200 if (pgsz == (1UL << ddp_page_shift[i])) 1201 return i; 1202 } 1203 pr_info("ddp page size %lu not supported.\n", pgsz); 1204 return DDP_PGIDX_MAX; 1205 } 1206 1207 static void ddp_setup_host_page_size(void) 1208 { 1209 if (page_idx == DDP_PGIDX_MAX) { 1210 page_idx = ddp_find_page_index(PAGE_SIZE); 1211 1212 if (page_idx == DDP_PGIDX_MAX) { 1213 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); 1214 if (ddp_adjust_page_table() < 0) { 1215 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); 1216 return; 1217 } 1218 page_idx = ddp_find_page_index(PAGE_SIZE); 1219 } 1220 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); 1221 } 1222 } 1223 1224 void cxgbi_ddp_page_size_factor(int *pgsz_factor) 1225 { 1226 int i; 1227 1228 for (i = 0; i 
< DDP_PGIDX_MAX; i++) 1229 pgsz_factor[i] = ddp_page_order[i]; 1230 } 1231 EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); 1232 1233 /* 1234 * DDP setup & teardown 1235 */ 1236 1237 void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, 1238 struct cxgbi_pagepod_hdr *hdr, 1239 struct cxgbi_gather_list *gl, unsigned int gidx) 1240 { 1241 int i; 1242 1243 memcpy(ppod, hdr, sizeof(*hdr)); 1244 for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { 1245 ppod->addr[i] = gidx < gl->nelem ? 1246 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; 1247 } 1248 } 1249 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); 1250 1251 void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) 1252 { 1253 memset(ppod, 0, sizeof(*ppod)); 1254 } 1255 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); 1256 1257 static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, 1258 unsigned int start, unsigned int max, 1259 unsigned int count, 1260 struct cxgbi_gather_list *gl) 1261 { 1262 unsigned int i, j, k; 1263 1264 /* not enough entries */ 1265 if ((max - start) < count) { 1266 log_debug(1 << CXGBI_DBG_DDP, 1267 "NOT enough entries %u+%u < %u.\n", start, count, max); 1268 return -EBUSY; 1269 } 1270 1271 max -= count; 1272 spin_lock(&ddp->map_lock); 1273 for (i = start; i < max;) { 1274 for (j = 0, k = i; j < count; j++, k++) { 1275 if (ddp->gl_map[k]) 1276 break; 1277 } 1278 if (j == count) { 1279 for (j = 0, k = i; j < count; j++, k++) 1280 ddp->gl_map[k] = gl; 1281 spin_unlock(&ddp->map_lock); 1282 return i; 1283 } 1284 i += j + 1; 1285 } 1286 spin_unlock(&ddp->map_lock); 1287 log_debug(1 << CXGBI_DBG_DDP, 1288 "NO suitable entries %u available.\n", count); 1289 return -EBUSY; 1290 } 1291 1292 static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, 1293 int start, int count) 1294 { 1295 spin_lock(&ddp->map_lock); 1296 memset(&ddp->gl_map[start], 0, 1297 count * sizeof(struct cxgbi_gather_list *)); 1298 spin_unlock(&ddp->map_lock); 1299 } 1300 1301 static inline void ddp_gl_unmap(struct pci_dev *pdev, 1302 struct cxgbi_gather_list *gl) 1303 { 1304 int i; 1305 1306 for (i = 0; i < gl->nelem; i++) 1307 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, 1308 PCI_DMA_FROMDEVICE); 1309 } 1310 1311 static inline int ddp_gl_map(struct pci_dev *pdev, 1312 struct cxgbi_gather_list *gl) 1313 { 1314 int i; 1315 1316 for (i = 0; i < gl->nelem; i++) { 1317 gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, 1318 PAGE_SIZE, 1319 PCI_DMA_FROMDEVICE); 1320 if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { 1321 log_debug(1 << CXGBI_DBG_DDP, 1322 "page %d 0x%p, 0x%p dma mapping err.\n", 1323 i, gl->pages[i], pdev); 1324 goto unmap; 1325 } 1326 } 1327 return i; 1328 unmap: 1329 if (i) { 1330 unsigned int nelem = gl->nelem; 1331 1332 gl->nelem = i; 1333 ddp_gl_unmap(pdev, gl); 1334 gl->nelem = nelem; 1335 } 1336 return -EINVAL; 1337 } 1338 1339 static void ddp_release_gl(struct cxgbi_gather_list *gl, 1340 struct pci_dev *pdev) 1341 { 1342 ddp_gl_unmap(pdev, gl); 1343 kfree(gl); 1344 } 1345 1346 static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, 1347 struct scatterlist *sgl, 1348 unsigned int sgcnt, 1349 struct pci_dev *pdev, 1350 gfp_t gfp) 1351 { 1352 struct cxgbi_gather_list *gl; 1353 struct scatterlist *sg = sgl; 1354 struct page *sgpage = sg_page(sg); 1355 unsigned int sglen = sg->length; 1356 unsigned int sgoffset = sg->offset; 1357 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> 1358 PAGE_SHIFT; 1359 int i = 1, j = 0; 1360 1361 if (xferlen < DDP_THRESHOLD) { 1362 log_debug(1 << 
CXGBI_DBG_DDP, 1363 "xfer %u < threshold %u, no ddp.\n", 1364 xferlen, DDP_THRESHOLD); 1365 return NULL; 1366 } 1367 1368 gl = kzalloc(sizeof(struct cxgbi_gather_list) + 1369 npages * (sizeof(dma_addr_t) + 1370 sizeof(struct page *)), gfp); 1371 if (!gl) { 1372 log_debug(1 << CXGBI_DBG_DDP, 1373 "xfer %u, %u pages, OOM.\n", xferlen, npages); 1374 return NULL; 1375 } 1376 1377 log_debug(1 << CXGBI_DBG_DDP, 1378 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); 1379 1380 gl->pages = (struct page **)&gl->phys_addr[npages]; 1381 gl->nelem = npages; 1382 gl->length = xferlen; 1383 gl->offset = sgoffset; 1384 gl->pages[0] = sgpage; 1385 1386 for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; 1387 i++, sg = sg_next(sg)) { 1388 struct page *page = sg_page(sg); 1389 1390 if (sgpage == page && sg->offset == sgoffset + sglen) 1391 sglen += sg->length; 1392 else { 1393 /* make sure the sgl is fit for ddp: 1394 * each has the same page size, and 1395 * all of the middle pages are used completely 1396 */ 1397 if ((j && sgoffset) || ((i != sgcnt - 1) && 1398 ((sglen + sgoffset) & ~PAGE_MASK))) { 1399 log_debug(1 << CXGBI_DBG_DDP, 1400 "page %d/%u, %u + %u.\n", 1401 i, sgcnt, sgoffset, sglen); 1402 goto error_out; 1403 } 1404 1405 j++; 1406 if (j == gl->nelem || sg->offset) { 1407 log_debug(1 << CXGBI_DBG_DDP, 1408 "page %d/%u, offset %u.\n", 1409 j, gl->nelem, sg->offset); 1410 goto error_out; 1411 } 1412 gl->pages[j] = page; 1413 sglen = sg->length; 1414 sgoffset = sg->offset; 1415 sgpage = page; 1416 } 1417 } 1418 gl->nelem = ++j; 1419 1420 if (ddp_gl_map(pdev, gl) < 0) 1421 goto error_out; 1422 1423 return gl; 1424 1425 error_out: 1426 kfree(gl); 1427 return NULL; 1428 } 1429 1430 static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) 1431 { 1432 struct cxgbi_device *cdev = chba->cdev; 1433 struct cxgbi_ddp_info *ddp = cdev->ddp; 1434 u32 idx; 1435 1436 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; 1437 if (idx < ddp->nppods) { 1438 struct cxgbi_gather_list *gl = ddp->gl_map[idx]; 1439 unsigned int npods; 1440 1441 if (!gl || !gl->nelem) { 1442 pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", 1443 tag, idx, gl, gl ? 
gl->nelem : 0); 1444 return; 1445 } 1446 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1447 log_debug(1 << CXGBI_DBG_DDP, 1448 "tag 0x%x, release idx %u, npods %u.\n", 1449 tag, idx, npods); 1450 cdev->csk_ddp_clear(chba, tag, idx, npods); 1451 ddp_unmark_entries(ddp, idx, npods); 1452 ddp_release_gl(gl, ddp->pdev); 1453 } else 1454 pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); 1455 } 1456 1457 static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, 1458 u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, 1459 gfp_t gfp) 1460 { 1461 struct cxgbi_device *cdev = csk->cdev; 1462 struct cxgbi_ddp_info *ddp = cdev->ddp; 1463 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1464 struct cxgbi_pagepod_hdr hdr; 1465 unsigned int npods; 1466 int idx = -1; 1467 int err = -ENOMEM; 1468 u32 tag; 1469 1470 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1471 if (ddp->idx_last == ddp->nppods) 1472 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, 1473 npods, gl); 1474 else { 1475 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, 1476 ddp->nppods, npods, 1477 gl); 1478 if (idx < 0 && ddp->idx_last >= npods) { 1479 idx = ddp_find_unused_entries(ddp, 0, 1480 min(ddp->idx_last + npods, ddp->nppods), 1481 npods, gl); 1482 } 1483 } 1484 if (idx < 0) { 1485 log_debug(1 << CXGBI_DBG_DDP, 1486 "xferlen %u, gl %u, npods %u NO DDP.\n", 1487 gl->length, gl->nelem, npods); 1488 return idx; 1489 } 1490 1491 tag = cxgbi_ddp_tag_base(tformat, sw_tag); 1492 tag |= idx << PPOD_IDX_SHIFT; 1493 1494 hdr.rsvd = 0; 1495 hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); 1496 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); 1497 hdr.max_offset = htonl(gl->length); 1498 hdr.page_offset = htonl(gl->offset); 1499 1500 err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); 1501 if (err < 0) 1502 goto unmark_entries; 1503 1504 ddp->idx_last = idx; 1505 log_debug(1 << CXGBI_DBG_DDP, 1506 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", 1507 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, 1508 npods); 1509 *tagp = tag; 1510 return 0; 1511 1512 unmark_entries: 1513 ddp_unmark_entries(ddp, idx, npods); 1514 return err; 1515 } 1516 1517 int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, 1518 unsigned int sw_tag, unsigned int xferlen, 1519 struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) 1520 { 1521 struct cxgbi_device *cdev = csk->cdev; 1522 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1523 struct cxgbi_gather_list *gl; 1524 int err; 1525 1526 if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || 1527 xferlen < DDP_THRESHOLD) { 1528 log_debug(1 << CXGBI_DBG_DDP, 1529 "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); 1530 return -EINVAL; 1531 } 1532 1533 if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { 1534 log_debug(1 << CXGBI_DBG_DDP, 1535 "sw_tag 0x%x NOT usable.\n", sw_tag); 1536 return -EINVAL; 1537 } 1538 1539 gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); 1540 if (!gl) 1541 return -ENOMEM; 1542 1543 err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); 1544 if (err < 0) 1545 ddp_release_gl(gl, cdev->pdev); 1546 1547 return err; 1548 } 1549 1550 static void ddp_destroy(struct kref *kref) 1551 { 1552 struct cxgbi_ddp_info *ddp = container_of(kref, 1553 struct cxgbi_ddp_info, 1554 refcnt); 1555 struct cxgbi_device *cdev = ddp->cdev; 1556 int i = 0; 1557 1558 pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); 1559 1560 while (i < ddp->nppods) { 1561 struct cxgbi_gather_list *gl = ddp->gl_map[i]; 
1562 1563 if (gl) { 1564 int npods = (gl->nelem + PPOD_PAGES_MAX - 1) 1565 >> PPOD_PAGES_SHIFT; 1566 pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); 1567 kfree(gl); 1568 i += npods; 1569 } else 1570 i++; 1571 } 1572 cxgbi_free_big_mem(ddp); 1573 } 1574 1575 int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) 1576 { 1577 struct cxgbi_ddp_info *ddp = cdev->ddp; 1578 1579 log_debug(1 << CXGBI_DBG_DDP, 1580 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); 1581 cdev->ddp = NULL; 1582 if (ddp) 1583 return kref_put(&ddp->refcnt, ddp_destroy); 1584 return 0; 1585 } 1586 EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); 1587 1588 int cxgbi_ddp_init(struct cxgbi_device *cdev, 1589 unsigned int llimit, unsigned int ulimit, 1590 unsigned int max_txsz, unsigned int max_rxsz) 1591 { 1592 struct cxgbi_ddp_info *ddp; 1593 unsigned int ppmax, bits; 1594 1595 ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT; 1596 bits = __ilog2_u32(ppmax) + 1; 1597 if (bits > PPOD_IDX_MAX_SIZE) 1598 bits = PPOD_IDX_MAX_SIZE; 1599 ppmax = (1 << (bits - 1)) - 1; 1600 1601 ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + 1602 ppmax * (sizeof(struct cxgbi_gather_list *) + 1603 sizeof(struct sk_buff *)), 1604 GFP_KERNEL); 1605 if (!ddp) { 1606 pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); 1607 return -ENOMEM; 1608 } 1609 ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); 1610 cdev->ddp = ddp; 1611 1612 spin_lock_init(&ddp->map_lock); 1613 kref_init(&ddp->refcnt); 1614 1615 ddp->cdev = cdev; 1616 ddp->pdev = cdev->pdev; 1617 ddp->llimit = llimit; 1618 ddp->ulimit = ulimit; 1619 ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); 1620 ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); 1621 ddp->nppods = ppmax; 1622 ddp->idx_last = ppmax; 1623 ddp->idx_bits = bits; 1624 ddp->idx_mask = (1 << bits) - 1; 1625 ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; 1626 1627 cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; 1628 cdev->tag_format.rsvd_bits = ddp->idx_bits; 1629 cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; 1630 cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; 1631 1632 pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", 1633 cdev->ports[0]->name, cdev->tag_format.sw_bits, 1634 cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, 1635 cdev->tag_format.rsvd_mask); 1636 1637 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1638 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); 1639 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1640 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); 1641 1642 log_debug(1 << CXGBI_DBG_DDP, 1643 "%s max payload size: %u/%u, %u/%u.\n", 1644 cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz, 1645 cdev->rx_max_size, ddp->max_rxsz); 1646 return 0; 1647 } 1648 EXPORT_SYMBOL_GPL(cxgbi_ddp_init); 1649 1650 /* 1651 * APIs interacting with open-iscsi libraries 1652 */ 1653 1654 static unsigned char padding[4]; 1655 1656 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) 1657 { 1658 struct scsi_cmnd *sc = task->sc; 1659 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1660 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1661 struct cxgbi_hba *chba = cconn->chba; 1662 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1663 u32 tag = ntohl((__force u32)hdr_itt); 1664 1665 log_debug(1 << CXGBI_DBG_DDP, 1666 "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); 1667 if (sc && 1668 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && 1669 cxgbi_is_ddp_tag(tformat, 
tag)) 1670 ddp_tag_release(chba, tag); 1671 } 1672 1673 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 1674 { 1675 struct scsi_cmnd *sc = task->sc; 1676 struct iscsi_conn *conn = task->conn; 1677 struct iscsi_session *sess = conn->session; 1678 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1679 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1680 struct cxgbi_hba *chba = cconn->chba; 1681 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1682 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; 1683 u32 tag = 0; 1684 int err = -EINVAL; 1685 1686 if (sc && 1687 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { 1688 err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, 1689 scsi_in(sc)->length, 1690 scsi_in(sc)->table.sgl, 1691 scsi_in(sc)->table.nents, 1692 GFP_ATOMIC); 1693 if (err < 0) 1694 log_debug(1 << CXGBI_DBG_DDP, 1695 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", 1696 cconn->cep->csk, task, scsi_in(sc)->length, 1697 scsi_in(sc)->table.nents); 1698 } 1699 1700 if (err < 0) 1701 tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); 1702 /* the itt need to sent in big-endian order */ 1703 *hdr_itt = (__force itt_t)htonl(tag); 1704 1705 log_debug(1 << CXGBI_DBG_DDP, 1706 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", 1707 chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); 1708 return 0; 1709 } 1710 1711 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) 1712 { 1713 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1714 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1715 struct cxgbi_device *cdev = cconn->chba->cdev; 1716 u32 tag = ntohl((__force u32) itt); 1717 u32 sw_bits; 1718 1719 sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); 1720 if (idx) 1721 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); 1722 if (age) 1723 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; 1724 1725 log_debug(1 << CXGBI_DBG_DDP, 1726 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", 1727 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, 1728 age ? *age : 0xFF); 1729 } 1730 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); 1731 1732 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) 1733 { 1734 struct iscsi_conn *conn = csk->user_data; 1735 1736 if (conn) { 1737 log_debug(1 << CXGBI_DBG_SOCK, 1738 "csk 0x%p, cid %d.\n", csk, conn->id); 1739 iscsi_conn_queue_work(conn); 1740 } 1741 } 1742 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); 1743 1744 /* 1745 * pdu receive, interact with libiscsi_tcp 1746 */ 1747 static inline int read_pdu_skb(struct iscsi_conn *conn, 1748 struct sk_buff *skb, 1749 unsigned int offset, 1750 int offloaded) 1751 { 1752 int status = 0; 1753 int bytes_read; 1754 1755 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); 1756 switch (status) { 1757 case ISCSI_TCP_CONN_ERR: 1758 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", 1759 skb, offset, offloaded); 1760 return -EIO; 1761 case ISCSI_TCP_SUSPENDED: 1762 log_debug(1 << CXGBI_DBG_PDU_RX, 1763 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", 1764 skb, offset, offloaded, bytes_read); 1765 /* no transfer - just have caller flush queue */ 1766 return bytes_read; 1767 case ISCSI_TCP_SKB_DONE: 1768 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", 1769 skb, offset, offloaded); 1770 /* 1771 * pdus should always fit in the skb and we should get 1772 * segment done notifcation. 
1773 */ 1774 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); 1775 return -EFAULT; 1776 case ISCSI_TCP_SEGMENT_DONE: 1777 log_debug(1 << CXGBI_DBG_PDU_RX, 1778 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", 1779 skb, offset, offloaded, bytes_read); 1780 return bytes_read; 1781 default: 1782 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", 1783 skb, offset, offloaded, status); 1784 return -EINVAL; 1785 } 1786 } 1787 1788 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1789 { 1790 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1791 1792 log_debug(1 << CXGBI_DBG_PDU_RX, 1793 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1794 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1795 1796 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { 1797 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); 1798 iscsi_conn_failure(conn, ISCSI_ERR_PROTO); 1799 return -EIO; 1800 } 1801 1802 if (conn->hdrdgst_en && 1803 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { 1804 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); 1805 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); 1806 return -EIO; 1807 } 1808 1809 return read_pdu_skb(conn, skb, 0, 0); 1810 } 1811 1812 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1813 struct sk_buff *skb, unsigned int offset) 1814 { 1815 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1816 bool offloaded = 0; 1817 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; 1818 1819 log_debug(1 << CXGBI_DBG_PDU_RX, 1820 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1821 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1822 1823 if (conn->datadgst_en && 1824 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { 1825 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", 1826 conn, lskb, cxgbi_skcb_flags(lskb)); 1827 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 1828 return -EIO; 1829 } 1830 1831 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) 1832 return 0; 1833 1834 /* coalesced, add header digest length */ 1835 if (lskb == skb && conn->hdrdgst_en) 1836 offset += ISCSI_DIGEST_SIZE; 1837 1838 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) 1839 offloaded = 1; 1840 1841 if (opcode == ISCSI_OP_SCSI_DATA_IN) 1842 log_debug(1 << CXGBI_DBG_PDU_RX, 1843 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", 1844 skb, opcode, ntohl(tcp_conn->in.hdr->itt), 1845 tcp_conn->in.datalen, offloaded ? 
"is" : "not"); 1846 1847 return read_pdu_skb(conn, skb, offset, offloaded); 1848 } 1849 1850 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) 1851 { 1852 struct cxgbi_device *cdev = csk->cdev; 1853 int must_send; 1854 u32 credits; 1855 1856 log_debug(1 << CXGBI_DBG_PDU_RX, 1857 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n", 1858 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1859 csk->rcv_wup, cdev->rx_credit_thres, 1860 cdev->rcv_win); 1861 1862 if (csk->state != CTP_ESTABLISHED) 1863 return; 1864 1865 credits = csk->copied_seq - csk->rcv_wup; 1866 if (unlikely(!credits)) 1867 return; 1868 if (unlikely(cdev->rx_credit_thres == 0)) 1869 return; 1870 1871 must_send = credits + 16384 >= cdev->rcv_win; 1872 if (must_send || credits >= cdev->rx_credit_thres) 1873 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1874 } 1875 1876 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) 1877 { 1878 struct cxgbi_device *cdev = csk->cdev; 1879 struct iscsi_conn *conn = csk->user_data; 1880 struct sk_buff *skb; 1881 unsigned int read = 0; 1882 int err = 0; 1883 1884 log_debug(1 << CXGBI_DBG_PDU_RX, 1885 "csk 0x%p, conn 0x%p.\n", csk, conn); 1886 1887 if (unlikely(!conn || conn->suspend_rx)) { 1888 log_debug(1 << CXGBI_DBG_PDU_RX, 1889 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", 1890 csk, conn, conn ? conn->id : 0xFF, 1891 conn ? conn->suspend_rx : 0xFF); 1892 return; 1893 } 1894 1895 while (!err) { 1896 skb = skb_peek(&csk->receive_queue); 1897 if (!skb || 1898 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { 1899 if (skb) 1900 log_debug(1 << CXGBI_DBG_PDU_RX, 1901 "skb 0x%p, NOT ready 0x%lx.\n", 1902 skb, cxgbi_skcb_flags(skb)); 1903 break; 1904 } 1905 __skb_unlink(skb, &csk->receive_queue); 1906 1907 read += cxgbi_skcb_rx_pdulen(skb); 1908 log_debug(1 << CXGBI_DBG_PDU_RX, 1909 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", 1910 csk, skb, skb->len, cxgbi_skcb_flags(skb), 1911 cxgbi_skcb_rx_pdulen(skb)); 1912 1913 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1914 err = skb_read_pdu_bhs(conn, skb); 1915 if (err < 0) { 1916 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1917 "f 0x%lx, plen %u.\n", 1918 csk, skb, skb->len, 1919 cxgbi_skcb_flags(skb), 1920 cxgbi_skcb_rx_pdulen(skb)); 1921 goto skb_done; 1922 } 1923 err = skb_read_pdu_data(conn, skb, skb, 1924 err + cdev->skb_rx_extra); 1925 if (err < 0) 1926 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " 1927 "f 0x%lx, plen %u.\n", 1928 csk, skb, skb->len, 1929 cxgbi_skcb_flags(skb), 1930 cxgbi_skcb_rx_pdulen(skb)); 1931 } else { 1932 err = skb_read_pdu_bhs(conn, skb); 1933 if (err < 0) { 1934 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1935 "f 0x%lx, plen %u.\n", 1936 csk, skb, skb->len, 1937 cxgbi_skcb_flags(skb), 1938 cxgbi_skcb_rx_pdulen(skb)); 1939 goto skb_done; 1940 } 1941 1942 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { 1943 struct sk_buff *dskb; 1944 1945 dskb = skb_peek(&csk->receive_queue); 1946 if (!dskb) { 1947 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," 1948 " plen %u, NO data.\n", 1949 csk, skb, skb->len, 1950 cxgbi_skcb_flags(skb), 1951 cxgbi_skcb_rx_pdulen(skb)); 1952 err = -EIO; 1953 goto skb_done; 1954 } 1955 __skb_unlink(dskb, &csk->receive_queue); 1956 1957 err = skb_read_pdu_data(conn, skb, dskb, 0); 1958 if (err < 0) 1959 pr_err("data, csk 0x%p, skb 0x%p,%u, " 1960 "f 0x%lx, plen %u, dskb 0x%p," 1961 "%u.\n", 1962 csk, skb, skb->len, 1963 cxgbi_skcb_flags(skb), 1964 cxgbi_skcb_rx_pdulen(skb), 1965 dskb, dskb->len); 1966 __kfree_skb(dskb); 1967 } else 1968 err = 
skb_read_pdu_data(conn, skb, skb, 0); 1969 } 1970 skb_done: 1971 __kfree_skb(skb); 1972 1973 if (err < 0) 1974 break; 1975 } 1976 1977 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); 1978 if (read) { 1979 csk->copied_seq += read; 1980 csk_return_rx_credits(csk, read); 1981 conn->rxdata_octets += read; 1982 } 1983 1984 if (err < 0) { 1985 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", 1986 csk, conn, err, read); 1987 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1988 } 1989 } 1990 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); 1991 1992 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, 1993 unsigned int offset, unsigned int *off, 1994 struct scatterlist **sgp) 1995 { 1996 int i; 1997 struct scatterlist *sg; 1998 1999 for_each_sg(sgl, sg, sgcnt, i) { 2000 if (offset < sg->length) { 2001 *off = offset; 2002 *sgp = sg; 2003 return 0; 2004 } 2005 offset -= sg->length; 2006 } 2007 return -EFAULT; 2008 } 2009 2010 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, 2011 unsigned int dlen, struct page_frag *frags, 2012 int frag_max) 2013 { 2014 unsigned int datalen = dlen; 2015 unsigned int sglen = sg->length - sgoffset; 2016 struct page *page = sg_page(sg); 2017 int i; 2018 2019 i = 0; 2020 do { 2021 unsigned int copy; 2022 2023 if (!sglen) { 2024 sg = sg_next(sg); 2025 if (!sg) { 2026 pr_warn("sg %d NULL, len %u/%u.\n", 2027 i, datalen, dlen); 2028 return -EINVAL; 2029 } 2030 sgoffset = 0; 2031 sglen = sg->length; 2032 page = sg_page(sg); 2033 2034 } 2035 copy = min(datalen, sglen); 2036 if (i && page == frags[i - 1].page && 2037 sgoffset + sg->offset == 2038 frags[i - 1].offset + frags[i - 1].size) { 2039 frags[i - 1].size += copy; 2040 } else { 2041 if (i >= frag_max) { 2042 pr_warn("too many pages %u, dlen %u.\n", 2043 frag_max, dlen); 2044 return -EINVAL; 2045 } 2046 2047 frags[i].page = page; 2048 frags[i].offset = sg->offset + sgoffset; 2049 frags[i].size = copy; 2050 i++; 2051 } 2052 datalen -= copy; 2053 sgoffset += copy; 2054 sglen -= copy; 2055 } while (datalen); 2056 2057 return i; 2058 } 2059 2060 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 2061 { 2062 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2063 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2064 struct cxgbi_device *cdev = cconn->chba->cdev; 2065 struct iscsi_conn *conn = task->conn; 2066 struct iscsi_tcp_task *tcp_task = task->dd_data; 2067 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2068 struct scsi_cmnd *sc = task->sc; 2069 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 2070 2071 tcp_task->dd_data = tdata; 2072 task->hdr = NULL; 2073 2074 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 2075 (opcode == ISCSI_OP_SCSI_DATA_OUT || 2076 (opcode == ISCSI_OP_SCSI_CMD && 2077 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) 2078 /* data could goes into skb head */ 2079 headroom += min_t(unsigned int, 2080 SKB_MAX_HEAD(cdev->skb_tx_rsvd), 2081 conn->max_xmit_dlength); 2082 2083 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 2084 if (!tdata->skb) { 2085 struct cxgbi_sock *csk = cconn->cep->csk; 2086 struct net_device *ndev = cdev->ports[csk->port_id]; 2087 ndev->stats.tx_dropped++; 2088 return -ENOMEM; 2089 } 2090 2091 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 2092 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 2093 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 2094 2095 /* data_out uses scsi_cmd's itt */ 2096 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 2097 
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);

static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}

int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
				sdb->table.sgl, sdb->table.nents,
				tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			struct page_frag *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page);

				memcpy(dst, src + frag->offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits into the frag_list */
			for (i = 0; i < tdata->nr_frags; i++) {
				__skb_fill_page_desc(skb, i,
						tdata->frags[i].page,
						tdata->frags[i].offset,
						tdata->frags[i].size);
				skb_frag_ref(skb, i);
			}
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
					count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);

int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb NULL.\n", task);
		return 0;
	}

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);
	/* log before freeing so the skb is not dereferenced after kfree_skb() */
	kfree_skb(skb);
	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);

void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);

void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
				struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);

static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cconn->chba->cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}

static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}

int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int err;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		return iscsi_tcp_set_max_r2t(conn, buf);
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);

static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
	cxgbi_sock_put(csk);

	return len;
}

static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
{
	int len;

	cxgbi_sock_get(csk);
	if (csk->csk_family == AF_INET)
		len = sprintf(buf, "%pI4",
			      &csk->daddr.sin_addr.s_addr);
	else
		len = sprintf(buf, "%pI6",
			      &csk->daddr6.sin6_addr);

	cxgbi_sock_put(csk);

	return len;
}

int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
			char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);

struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);

int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
			struct iscsi_cls_conn *cls_conn,
			u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);
	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);

struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);

int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);

int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr;

		addr = cxgbi_get_iscsi_ipv4(chba);
		len = sprintf(buf, "%pI4", &addr);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);

struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	if (dst_addr->sa_family == AF_INET) {
		csk = cxgbi_check_route(dst_addr);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (dst_addr->sa_family == AF_INET6) {
		csk = cxgbi_check_route6(dst_addr);
#endif
	} else {
		pr_info("address family 0x%x NOT supported.\n",
			dst_addr->sa_family);
		err = -EAFNOSUPPORT;
		return (struct iscsi_endpoint *)ERR_PTR(err);
	}

	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);

void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);

int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, stt);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			"de-register transport 0x%p, %s, stt 0x%p.\n",
			itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);

umode_t cxgbi_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);

static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
	return;
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);