1 /* 2 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver. 3 * 4 * Copyright (c) 2010 Chelsio Communications, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 * 10 * Written by: Karen Xie (kxie@chelsio.com) 11 * Written by: Rakesh Ranjan (rranjan@chelsio.com) 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ 15 16 #include <linux/skbuff.h> 17 #include <linux/crypto.h> 18 #include <linux/scatterlist.h> 19 #include <linux/pci.h> 20 #include <scsi/scsi.h> 21 #include <scsi/scsi_cmnd.h> 22 #include <scsi/scsi_host.h> 23 #include <linux/if_vlan.h> 24 #include <linux/inet.h> 25 #include <net/dst.h> 26 #include <net/route.h> 27 #include <linux/inetdevice.h> /* ip_dev_find */ 28 #include <linux/module.h> 29 #include <net/tcp.h> 30 31 static unsigned int dbg_level; 32 33 #include "libcxgbi.h" 34 35 #define DRV_MODULE_NAME "libcxgbi" 36 #define DRV_MODULE_DESC "Chelsio iSCSI driver library" 37 #define DRV_MODULE_VERSION "0.9.0" 38 #define DRV_MODULE_RELDATE "Jun. 2010" 39 40 MODULE_AUTHOR("Chelsio Communications, Inc."); 41 MODULE_DESCRIPTION(DRV_MODULE_DESC); 42 MODULE_VERSION(DRV_MODULE_VERSION); 43 MODULE_LICENSE("GPL"); 44 45 module_param(dbg_level, uint, 0644); 46 MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)"); 47 48 49 /* 50 * cxgbi device management 51 * maintains a list of the cxgbi devices 52 */ 53 static LIST_HEAD(cdev_list); 54 static DEFINE_MUTEX(cdev_mutex); 55 56 int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, 57 unsigned int max_conn) 58 { 59 struct cxgbi_ports_map *pmap = &cdev->pmap; 60 61 pmap->port_csk = cxgbi_alloc_big_mem(max_conn * 62 sizeof(struct cxgbi_sock *), 63 GFP_KERNEL); 64 if (!pmap->port_csk) { 65 pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn); 66 return -ENOMEM; 67 } 68 69 pmap->max_connect = max_conn; 70 pmap->sport_base = base; 71 spin_lock_init(&pmap->lock); 72 return 0; 73 } 74 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create); 75 76 void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev) 77 { 78 struct cxgbi_ports_map *pmap = &cdev->pmap; 79 struct cxgbi_sock *csk; 80 int i; 81 82 for (i = 0; i < pmap->max_connect; i++) { 83 if (pmap->port_csk[i]) { 84 csk = pmap->port_csk[i]; 85 pmap->port_csk[i] = NULL; 86 log_debug(1 << CXGBI_DBG_SOCK, 87 "csk 0x%p, cdev 0x%p, offload down.\n", 88 csk, cdev); 89 spin_lock_bh(&csk->lock); 90 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); 91 cxgbi_sock_closed(csk); 92 spin_unlock_bh(&csk->lock); 93 cxgbi_sock_put(csk); 94 } 95 } 96 } 97 EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup); 98 99 static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) 100 { 101 log_debug(1 << CXGBI_DBG_DEV, 102 "cdev 0x%p, p# %u.\n", cdev, cdev->nports); 103 cxgbi_hbas_remove(cdev); 104 cxgbi_device_portmap_cleanup(cdev); 105 if (cdev->dev_ddp_cleanup) 106 cdev->dev_ddp_cleanup(cdev); 107 else 108 cxgbi_ddp_cleanup(cdev); 109 if (cdev->ddp) 110 cxgbi_ddp_cleanup(cdev); 111 if (cdev->pmap.max_connect) 112 cxgbi_free_big_mem(cdev->pmap.port_csk); 113 kfree(cdev); 114 } 115 116 struct cxgbi_device *cxgbi_device_register(unsigned int extra, 117 unsigned int nports) 118 { 119 struct cxgbi_device *cdev; 120 121 cdev = kzalloc(sizeof(*cdev) + extra + nports * 122 (sizeof(struct cxgbi_hba *) + 123 sizeof(struct net_device *)), 124 GFP_KERNEL); 125 if (!cdev) { 126 pr_warn("nport %d, OOM.\n", nports); 127 return NULL; 128 } 129 cdev->ports = (struct net_device **)(cdev + 1); 130 cdev->hbas = (struct cxgbi_hba **)(((char*)cdev->ports) + nports * 131 sizeof(struct net_device *)); 132 if (extra) 133 cdev->dd_data = ((char *)cdev->hbas) + 134 nports * sizeof(struct cxgbi_hba *); 135 spin_lock_init(&cdev->pmap.lock); 136 137 mutex_lock(&cdev_mutex); 138 list_add_tail(&cdev->list_head, &cdev_list); 139 mutex_unlock(&cdev_mutex); 140 141 log_debug(1 << CXGBI_DBG_DEV, 142 "cdev 0x%p, p# %u.\n", cdev, nports); 143 return cdev; 144 } 145 EXPORT_SYMBOL_GPL(cxgbi_device_register); 146 147 void cxgbi_device_unregister(struct cxgbi_device *cdev) 148 { 149 log_debug(1 << CXGBI_DBG_DEV, 150 "cdev 0x%p, p# %u,%s.\n", 151 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); 152 mutex_lock(&cdev_mutex); 153 list_del(&cdev->list_head); 154 mutex_unlock(&cdev_mutex); 155 cxgbi_device_destroy(cdev); 156 } 157 EXPORT_SYMBOL_GPL(cxgbi_device_unregister); 158 159 void cxgbi_device_unregister_all(unsigned int flag) 160 { 161 struct cxgbi_device *cdev, *tmp; 162 163 mutex_lock(&cdev_mutex); 164 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 165 if ((cdev->flags & flag) == flag) { 166 log_debug(1 << CXGBI_DBG_DEV, 167 "cdev 0x%p, p# %u,%s.\n", 168 cdev, cdev->nports, cdev->nports ? 169 cdev->ports[0]->name : ""); 170 list_del(&cdev->list_head); 171 cxgbi_device_destroy(cdev); 172 } 173 } 174 mutex_unlock(&cdev_mutex); 175 } 176 EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all); 177 178 struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) 179 { 180 struct cxgbi_device *cdev, *tmp; 181 182 mutex_lock(&cdev_mutex); 183 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 184 if (cdev->lldev == lldev) { 185 mutex_unlock(&cdev_mutex); 186 return cdev; 187 } 188 } 189 mutex_unlock(&cdev_mutex); 190 log_debug(1 << CXGBI_DBG_DEV, 191 "lldev 0x%p, NO match found.\n", lldev); 192 return NULL; 193 } 194 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); 195 196 static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, 197 int *port) 198 { 199 struct net_device *vdev = NULL; 200 struct cxgbi_device *cdev, *tmp; 201 int i; 202 203 if (ndev->priv_flags & IFF_802_1Q_VLAN) { 204 vdev = ndev; 205 ndev = vlan_dev_real_dev(ndev); 206 log_debug(1 << CXGBI_DBG_DEV, 207 "vlan dev %s -> %s.\n", vdev->name, ndev->name); 208 } 209 210 mutex_lock(&cdev_mutex); 211 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 212 for (i = 0; i < cdev->nports; i++) { 213 if (ndev == cdev->ports[i]) { 214 cdev->hbas[i]->vdev = vdev; 215 mutex_unlock(&cdev_mutex); 216 if (port) 217 *port = i; 218 return cdev; 219 } 220 } 221 } 222 mutex_unlock(&cdev_mutex); 223 log_debug(1 << CXGBI_DBG_DEV, 224 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); 225 return NULL; 226 } 227 228 void cxgbi_hbas_remove(struct cxgbi_device *cdev) 229 { 230 int i; 231 struct cxgbi_hba *chba; 232 233 log_debug(1 << CXGBI_DBG_DEV, 234 "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 235 236 for (i = 0; i < cdev->nports; i++) { 237 chba = cdev->hbas[i]; 238 if (chba) { 239 cdev->hbas[i] = NULL; 240 iscsi_host_remove(chba->shost); 241 pci_dev_put(cdev->pdev); 242 iscsi_host_free(chba->shost); 243 } 244 } 245 } 246 EXPORT_SYMBOL_GPL(cxgbi_hbas_remove); 247 248 int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun, 249 unsigned int max_id, struct scsi_host_template *sht, 250 struct scsi_transport_template *stt) 251 { 252 struct cxgbi_hba *chba; 253 struct Scsi_Host *shost; 254 int i, err; 255 256 log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports); 257 258 for (i = 0; i < cdev->nports; i++) { 259 shost = iscsi_host_alloc(sht, sizeof(*chba), 1); 260 if (!shost) { 261 pr_info("0x%p, p%d, %s, host alloc failed.\n", 262 cdev, i, cdev->ports[i]->name); 263 err = -ENOMEM; 264 goto err_out; 265 } 266 267 shost->transportt = stt; 268 shost->max_lun = max_lun; 269 shost->max_id = max_id; 270 shost->max_channel = 0; 271 shost->max_cmd_len = 16; 272 273 chba = iscsi_host_priv(shost); 274 chba->cdev = cdev; 275 chba->ndev = cdev->ports[i]; 276 chba->shost = shost; 277 278 log_debug(1 << CXGBI_DBG_DEV, 279 "cdev 0x%p, p#%d %s: chba 0x%p.\n", 280 cdev, i, cdev->ports[i]->name, chba); 281 282 pci_dev_get(cdev->pdev); 283 err = iscsi_host_add(shost, &cdev->pdev->dev); 284 if (err) { 285 pr_info("cdev 0x%p, p#%d %s, host add failed.\n", 286 cdev, i, cdev->ports[i]->name); 287 pci_dev_put(cdev->pdev); 288 scsi_host_put(shost); 289 goto err_out; 290 } 291 292 cdev->hbas[i] = chba; 293 } 294 295 return 0; 296 297 err_out: 298 cxgbi_hbas_remove(cdev); 299 return err; 300 } 301 EXPORT_SYMBOL_GPL(cxgbi_hbas_add); 302 303 /* 304 * iSCSI offload 305 * 306 * - source port management 307 * To find a free source port in the port allocation map we use a very simple 308 * rotor scheme to look for the next free port. 309 * 310 * If a source port has been specified make sure that it doesn't collide with 311 * our normal source port allocation map. If it's outside the range of our 312 * allocation/deallocation scheme just let them use it. 313 * 314 * If the source port is outside our allocation range, the caller is 315 * responsible for keeping track of their port usage. 316 */ 317 static int sock_get_port(struct cxgbi_sock *csk) 318 { 319 struct cxgbi_device *cdev = csk->cdev; 320 struct cxgbi_ports_map *pmap = &cdev->pmap; 321 unsigned int start; 322 int idx; 323 324 if (!pmap->max_connect) { 325 pr_err("cdev 0x%p, p#%u %s, NO port map.\n", 326 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 327 return -EADDRNOTAVAIL; 328 } 329 330 if (csk->saddr.sin_port) { 331 pr_err("source port NON-ZERO %u.\n", 332 ntohs(csk->saddr.sin_port)); 333 return -EADDRINUSE; 334 } 335 336 spin_lock_bh(&pmap->lock); 337 if (pmap->used >= pmap->max_connect) { 338 spin_unlock_bh(&pmap->lock); 339 pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n", 340 cdev, csk->port_id, cdev->ports[csk->port_id]->name); 341 return -EADDRNOTAVAIL; 342 } 343 344 start = idx = pmap->next; 345 do { 346 if (++idx >= pmap->max_connect) 347 idx = 0; 348 if (!pmap->port_csk[idx]) { 349 pmap->used++; 350 csk->saddr.sin_port = 351 htons(pmap->sport_base + idx); 352 pmap->next = idx; 353 pmap->port_csk[idx] = csk; 354 spin_unlock_bh(&pmap->lock); 355 cxgbi_sock_get(csk); 356 log_debug(1 << CXGBI_DBG_SOCK, 357 "cdev 0x%p, p#%u %s, p %u, %u.\n", 358 cdev, csk->port_id, 359 cdev->ports[csk->port_id]->name, 360 pmap->sport_base + idx, pmap->next); 361 return 0; 362 } 363 } while (idx != start); 364 spin_unlock_bh(&pmap->lock); 365 366 /* should not happen */ 367 pr_warn("cdev 0x%p, p#%u %s, next %u?\n", 368 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 369 pmap->next); 370 return -EADDRNOTAVAIL; 371 } 372 373 static void sock_put_port(struct cxgbi_sock *csk) 374 { 375 struct cxgbi_device *cdev = csk->cdev; 376 struct cxgbi_ports_map *pmap = &cdev->pmap; 377 378 if (csk->saddr.sin_port) { 379 int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; 380 381 csk->saddr.sin_port = 0; 382 if (idx < 0 || idx >= pmap->max_connect) { 383 pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", 384 cdev, csk->port_id, 385 cdev->ports[csk->port_id]->name, 386 ntohs(csk->saddr.sin_port)); 387 return; 388 } 389 390 spin_lock_bh(&pmap->lock); 391 pmap->port_csk[idx] = NULL; 392 pmap->used--; 393 spin_unlock_bh(&pmap->lock); 394 395 log_debug(1 << CXGBI_DBG_SOCK, 396 "cdev 0x%p, p#%u %s, release %u.\n", 397 cdev, csk->port_id, cdev->ports[csk->port_id]->name, 398 pmap->sport_base + idx); 399 400 cxgbi_sock_put(csk); 401 } 402 } 403 404 /* 405 * iscsi tcp connection 406 */ 407 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) 408 { 409 if (csk->cpl_close) { 410 kfree_skb(csk->cpl_close); 411 csk->cpl_close = NULL; 412 } 413 if (csk->cpl_abort_req) { 414 kfree_skb(csk->cpl_abort_req); 415 csk->cpl_abort_req = NULL; 416 } 417 if (csk->cpl_abort_rpl) { 418 kfree_skb(csk->cpl_abort_rpl); 419 csk->cpl_abort_rpl = NULL; 420 } 421 } 422 EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs); 423 424 static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) 425 { 426 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); 427 428 if (!csk) { 429 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); 430 return NULL; 431 } 432 433 if (cdev->csk_alloc_cpls(csk) < 0) { 434 pr_info("csk 0x%p, alloc cpls failed.\n", csk); 435 kfree(csk); 436 return NULL; 437 } 438 439 spin_lock_init(&csk->lock); 440 kref_init(&csk->refcnt); 441 skb_queue_head_init(&csk->receive_queue); 442 skb_queue_head_init(&csk->write_queue); 443 setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); 444 rwlock_init(&csk->callback_lock); 445 csk->cdev = cdev; 446 csk->flags = 0; 447 cxgbi_sock_set_state(csk, CTP_CLOSED); 448 449 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); 450 451 return csk; 452 } 453 454 static struct rtable *find_route_ipv4(struct flowi4 *fl4, 455 __be32 saddr, __be32 daddr, 456 __be16 sport, __be16 dport, u8 tos) 457 { 458 struct rtable *rt; 459 460 rt = ip_route_output_ports(&init_net, fl4, NULL, daddr, saddr, 461 dport, sport, IPPROTO_TCP, tos, 0); 462 if (IS_ERR(rt)) 463 return NULL; 464 465 return rt; 466 } 467 468 static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) 469 { 470 struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr; 471 struct dst_entry *dst; 472 struct net_device *ndev; 473 struct cxgbi_device *cdev; 474 struct rtable *rt = NULL; 475 struct flowi4 fl4; 476 struct cxgbi_sock *csk = NULL; 477 unsigned int mtu = 0; 478 int port = 0xFFFF; 479 int err = 0; 480 481 if (daddr->sin_family != AF_INET) { 482 pr_info("address family 0x%x NOT supported.\n", 483 daddr->sin_family); 484 err = -EAFNOSUPPORT; 485 goto err_out; 486 } 487 488 rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); 489 if (!rt) { 490 pr_info("no route to ipv4 0x%x, port %u.\n", 491 daddr->sin_addr.s_addr, daddr->sin_port); 492 err = -ENETUNREACH; 493 goto err_out; 494 } 495 dst = &rt->dst; 496 ndev = dst_get_neighbour(dst)->dev; 497 498 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { 499 pr_info("multi-cast route %pI4, port %u, dev %s.\n", 500 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 501 ndev->name); 502 err = -ENETUNREACH; 503 goto rel_rt; 504 } 505 506 if (ndev->flags & IFF_LOOPBACK) { 507 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr); 508 mtu = ndev->mtu; 509 pr_info("rt dev %s, loopback -> %s, mtu %u.\n", 510 dst_get_neighbour(dst)->dev->name, ndev->name, mtu); 511 } 512 513 cdev = cxgbi_device_find_by_netdev(ndev, &port); 514 if (!cdev) { 515 pr_info("dst %pI4, %s, NOT cxgbi device.\n", 516 &daddr->sin_addr.s_addr, ndev->name); 517 err = -ENETUNREACH; 518 goto rel_rt; 519 } 520 log_debug(1 << CXGBI_DBG_SOCK, 521 "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", 522 &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), 523 port, ndev->name, cdev); 524 525 csk = cxgbi_sock_create(cdev); 526 if (!csk) { 527 err = -ENOMEM; 528 goto rel_rt; 529 } 530 csk->cdev = cdev; 531 csk->port_id = port; 532 csk->mtu = mtu; 533 csk->dst = dst; 534 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; 535 csk->daddr.sin_port = daddr->sin_port; 536 csk->daddr.sin_family = daddr->sin_family; 537 csk->saddr.sin_addr.s_addr = fl4.saddr; 538 539 return csk; 540 541 rel_rt: 542 ip_rt_put(rt); 543 if (csk) 544 cxgbi_sock_closed(csk); 545 err_out: 546 return ERR_PTR(err); 547 } 548 549 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, 550 unsigned int opt) 551 { 552 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; 553 dst_confirm(csk->dst); 554 smp_mb(); 555 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); 556 } 557 EXPORT_SYMBOL_GPL(cxgbi_sock_established); 558 559 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) 560 { 561 log_debug(1 << CXGBI_DBG_SOCK, 562 "csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n", 563 csk, csk->state, csk->flags, csk->user_data); 564 565 if (csk->state != CTP_ESTABLISHED) { 566 read_lock_bh(&csk->callback_lock); 567 if (csk->user_data) 568 iscsi_conn_failure(csk->user_data, 569 ISCSI_ERR_CONN_FAILED); 570 read_unlock_bh(&csk->callback_lock); 571 } 572 } 573 574 void cxgbi_sock_closed(struct cxgbi_sock *csk) 575 { 576 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 577 csk, (csk)->state, (csk)->flags, (csk)->tid); 578 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 579 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) 580 return; 581 if (csk->saddr.sin_port) 582 sock_put_port(csk); 583 if (csk->dst) 584 dst_release(csk->dst); 585 csk->cdev->csk_release_offload_resources(csk); 586 cxgbi_sock_set_state(csk, CTP_CLOSED); 587 cxgbi_inform_iscsi_conn_closing(csk); 588 cxgbi_sock_put(csk); 589 } 590 EXPORT_SYMBOL_GPL(cxgbi_sock_closed); 591 592 static void need_active_close(struct cxgbi_sock *csk) 593 { 594 int data_lost; 595 int close_req = 0; 596 597 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 598 csk, (csk)->state, (csk)->flags, (csk)->tid); 599 spin_lock_bh(&csk->lock); 600 dst_confirm(csk->dst); 601 data_lost = skb_queue_len(&csk->receive_queue); 602 __skb_queue_purge(&csk->receive_queue); 603 604 if (csk->state == CTP_ACTIVE_OPEN) 605 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); 606 else if (csk->state == CTP_ESTABLISHED) { 607 close_req = 1; 608 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); 609 } else if (csk->state == CTP_PASSIVE_CLOSE) { 610 close_req = 1; 611 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 612 } 613 614 if (close_req) { 615 if (data_lost) 616 csk->cdev->csk_send_abort_req(csk); 617 else 618 csk->cdev->csk_send_close_req(csk); 619 } 620 621 spin_unlock_bh(&csk->lock); 622 } 623 624 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) 625 { 626 pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n", 627 csk, csk->state, csk->flags, 628 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, 629 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, 630 errno); 631 632 cxgbi_sock_set_state(csk, CTP_CONNECTING); 633 csk->err = errno; 634 cxgbi_sock_closed(csk); 635 } 636 EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open); 637 638 void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb) 639 { 640 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; 641 642 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 643 csk, (csk)->state, (csk)->flags, (csk)->tid); 644 cxgbi_sock_get(csk); 645 spin_lock_bh(&csk->lock); 646 if (csk->state == CTP_ACTIVE_OPEN) 647 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); 648 spin_unlock_bh(&csk->lock); 649 cxgbi_sock_put(csk); 650 __kfree_skb(skb); 651 } 652 EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure); 653 654 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) 655 { 656 cxgbi_sock_get(csk); 657 spin_lock_bh(&csk->lock); 658 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 659 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) 660 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 661 else { 662 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); 663 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 664 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) 665 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n", 666 csk, csk->state, csk->flags, csk->tid); 667 cxgbi_sock_closed(csk); 668 } 669 } 670 spin_unlock_bh(&csk->lock); 671 cxgbi_sock_put(csk); 672 } 673 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl); 674 675 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) 676 { 677 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 678 csk, (csk)->state, (csk)->flags, (csk)->tid); 679 cxgbi_sock_get(csk); 680 spin_lock_bh(&csk->lock); 681 682 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 683 goto done; 684 685 switch (csk->state) { 686 case CTP_ESTABLISHED: 687 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); 688 break; 689 case CTP_ACTIVE_CLOSE: 690 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); 691 break; 692 case CTP_CLOSE_WAIT_1: 693 cxgbi_sock_closed(csk); 694 break; 695 case CTP_ABORTING: 696 break; 697 default: 698 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 699 csk, csk->state, csk->flags, csk->tid); 700 } 701 cxgbi_inform_iscsi_conn_closing(csk); 702 done: 703 spin_unlock_bh(&csk->lock); 704 cxgbi_sock_put(csk); 705 } 706 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close); 707 708 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) 709 { 710 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 711 csk, (csk)->state, (csk)->flags, (csk)->tid); 712 cxgbi_sock_get(csk); 713 spin_lock_bh(&csk->lock); 714 715 csk->snd_una = snd_nxt - 1; 716 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) 717 goto done; 718 719 switch (csk->state) { 720 case CTP_ACTIVE_CLOSE: 721 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); 722 break; 723 case CTP_CLOSE_WAIT_1: 724 case CTP_CLOSE_WAIT_2: 725 cxgbi_sock_closed(csk); 726 break; 727 case CTP_ABORTING: 728 break; 729 default: 730 pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n", 731 csk, csk->state, csk->flags, csk->tid); 732 } 733 done: 734 spin_unlock_bh(&csk->lock); 735 cxgbi_sock_put(csk); 736 } 737 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl); 738 739 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, 740 unsigned int snd_una, int seq_chk) 741 { 742 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 743 "csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n", 744 csk, csk->state, csk->flags, csk->tid, credits, 745 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); 746 747 spin_lock_bh(&csk->lock); 748 749 csk->wr_cred += credits; 750 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) 751 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; 752 753 while (credits) { 754 struct sk_buff *p = cxgbi_sock_peek_wr(csk); 755 756 if (unlikely(!p)) { 757 pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n", 758 csk, csk->state, csk->flags, csk->tid, credits, 759 csk->wr_cred, csk->wr_una_cred); 760 break; 761 } 762 763 if (unlikely(credits < p->csum)) { 764 pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n", 765 csk, csk->state, csk->flags, csk->tid, 766 credits, csk->wr_cred, csk->wr_una_cred, 767 p->csum); 768 p->csum -= credits; 769 break; 770 } else { 771 cxgbi_sock_dequeue_wr(csk); 772 credits -= p->csum; 773 kfree_skb(p); 774 } 775 } 776 777 cxgbi_sock_check_wr_invariants(csk); 778 779 if (seq_chk) { 780 if (unlikely(before(snd_una, csk->snd_una))) { 781 pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.", 782 csk, csk->state, csk->flags, csk->tid, snd_una, 783 csk->snd_una); 784 goto done; 785 } 786 787 if (csk->snd_una != snd_una) { 788 csk->snd_una = snd_una; 789 dst_confirm(csk->dst); 790 } 791 } 792 793 if (skb_queue_len(&csk->write_queue)) { 794 if (csk->cdev->csk_push_tx_frames(csk, 0)) 795 cxgbi_conn_tx_open(csk); 796 } else 797 cxgbi_conn_tx_open(csk); 798 done: 799 spin_unlock_bh(&csk->lock); 800 } 801 EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack); 802 803 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, 804 unsigned short mtu) 805 { 806 int i = 0; 807 808 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) 809 ++i; 810 811 return i; 812 } 813 814 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) 815 { 816 unsigned int idx; 817 struct dst_entry *dst = csk->dst; 818 819 csk->advmss = dst_metric_advmss(dst); 820 821 if (csk->advmss > pmtu - 40) 822 csk->advmss = pmtu - 40; 823 if (csk->advmss < csk->cdev->mtus[0] - 40) 824 csk->advmss = csk->cdev->mtus[0] - 40; 825 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); 826 827 return idx; 828 } 829 EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss); 830 831 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) 832 { 833 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; 834 __skb_queue_tail(&csk->write_queue, skb); 835 } 836 EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail); 837 838 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) 839 { 840 struct sk_buff *skb; 841 842 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) 843 kfree_skb(skb); 844 } 845 EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue); 846 847 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) 848 { 849 int pending = cxgbi_sock_count_pending_wrs(csk); 850 851 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) 852 pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n", 853 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); 854 } 855 EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants); 856 857 static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb) 858 { 859 struct cxgbi_device *cdev = csk->cdev; 860 struct sk_buff *next; 861 int err, copied = 0; 862 863 spin_lock_bh(&csk->lock); 864 865 if (csk->state != CTP_ESTABLISHED) { 866 log_debug(1 << CXGBI_DBG_PDU_TX, 867 "csk 0x%p,%u,0x%lx,%u, EAGAIN.\n", 868 csk, csk->state, csk->flags, csk->tid); 869 err = -EAGAIN; 870 goto out_err; 871 } 872 873 if (csk->err) { 874 log_debug(1 << CXGBI_DBG_PDU_TX, 875 "csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n", 876 csk, csk->state, csk->flags, csk->tid, csk->err); 877 err = -EPIPE; 878 goto out_err; 879 } 880 881 if (csk->write_seq - csk->snd_una >= cdev->snd_win) { 882 log_debug(1 << CXGBI_DBG_PDU_TX, 883 "csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n", 884 csk, csk->state, csk->flags, csk->tid, csk->write_seq, 885 csk->snd_una, cdev->snd_win); 886 err = -ENOBUFS; 887 goto out_err; 888 } 889 890 while (skb) { 891 int frags = skb_shinfo(skb)->nr_frags + 892 (skb->len != skb->data_len); 893 894 if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) { 895 pr_err("csk 0x%p, skb head %u < %u.\n", 896 csk, skb_headroom(skb), cdev->skb_tx_rsvd); 897 err = -EINVAL; 898 goto out_err; 899 } 900 901 if (frags >= SKB_WR_LIST_SIZE) { 902 pr_err("csk 0x%p, frags %d, %u,%u >%u.\n", 903 csk, skb_shinfo(skb)->nr_frags, skb->len, 904 skb->data_len, (uint)(SKB_WR_LIST_SIZE)); 905 err = -EINVAL; 906 goto out_err; 907 } 908 909 next = skb->next; 910 skb->next = NULL; 911 cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR); 912 cxgbi_sock_skb_entail(csk, skb); 913 copied += skb->len; 914 csk->write_seq += skb->len + 915 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 916 skb = next; 917 } 918 done: 919 if (likely(skb_queue_len(&csk->write_queue))) 920 cdev->csk_push_tx_frames(csk, 1); 921 spin_unlock_bh(&csk->lock); 922 return copied; 923 924 out_err: 925 if (copied == 0 && err == -EPIPE) 926 copied = csk->err ? csk->err : -EPIPE; 927 else 928 copied = err; 929 goto done; 930 } 931 932 /* 933 * Direct Data Placement - 934 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted 935 * final destination host-memory buffers based on the Initiator Task Tag (ITT) 936 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs. 937 * The host memory address is programmed into h/w in the format of pagepod 938 * entries. 939 * The location of the pagepod entry is encoded into ddp tag which is used as 940 * the base for ITT/TTT. 941 */ 942 943 static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4}; 944 static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16}; 945 static unsigned char page_idx = DDP_PGIDX_MAX; 946 947 static unsigned char sw_tag_idx_bits; 948 static unsigned char sw_tag_age_bits; 949 950 /* 951 * Direct-Data Placement page size adjustment 952 */ 953 static int ddp_adjust_page_table(void) 954 { 955 int i; 956 unsigned int base_order, order; 957 958 if (PAGE_SIZE < (1UL << ddp_page_shift[0])) { 959 pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n", 960 PAGE_SIZE, 1UL << ddp_page_shift[0]); 961 return -EINVAL; 962 } 963 964 base_order = get_order(1UL << ddp_page_shift[0]); 965 order = get_order(1UL << PAGE_SHIFT); 966 967 for (i = 0; i < DDP_PGIDX_MAX; i++) { 968 /* first is the kernel page size, then just doubling */ 969 ddp_page_order[i] = order - base_order + i; 970 ddp_page_shift[i] = PAGE_SHIFT + i; 971 } 972 return 0; 973 } 974 975 static int ddp_find_page_index(unsigned long pgsz) 976 { 977 int i; 978 979 for (i = 0; i < DDP_PGIDX_MAX; i++) { 980 if (pgsz == (1UL << ddp_page_shift[i])) 981 return i; 982 } 983 pr_info("ddp page size %lu not supported.\n", pgsz); 984 return DDP_PGIDX_MAX; 985 } 986 987 static void ddp_setup_host_page_size(void) 988 { 989 if (page_idx == DDP_PGIDX_MAX) { 990 page_idx = ddp_find_page_index(PAGE_SIZE); 991 992 if (page_idx == DDP_PGIDX_MAX) { 993 pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE); 994 if (ddp_adjust_page_table() < 0) { 995 pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE); 996 return; 997 } 998 page_idx = ddp_find_page_index(PAGE_SIZE); 999 } 1000 pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx); 1001 } 1002 } 1003 1004 void cxgbi_ddp_page_size_factor(int *pgsz_factor) 1005 { 1006 int i; 1007 1008 for (i = 0; i < DDP_PGIDX_MAX; i++) 1009 pgsz_factor[i] = ddp_page_order[i]; 1010 } 1011 EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor); 1012 1013 /* 1014 * DDP setup & teardown 1015 */ 1016 1017 void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod, 1018 struct cxgbi_pagepod_hdr *hdr, 1019 struct cxgbi_gather_list *gl, unsigned int gidx) 1020 { 1021 int i; 1022 1023 memcpy(ppod, hdr, sizeof(*hdr)); 1024 for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) { 1025 ppod->addr[i] = gidx < gl->nelem ? 1026 cpu_to_be64(gl->phys_addr[gidx]) : 0ULL; 1027 } 1028 } 1029 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set); 1030 1031 void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod) 1032 { 1033 memset(ppod, 0, sizeof(*ppod)); 1034 } 1035 EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear); 1036 1037 static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp, 1038 unsigned int start, unsigned int max, 1039 unsigned int count, 1040 struct cxgbi_gather_list *gl) 1041 { 1042 unsigned int i, j, k; 1043 1044 /* not enough entries */ 1045 if ((max - start) < count) { 1046 log_debug(1 << CXGBI_DBG_DDP, 1047 "NOT enough entries %u+%u < %u.\n", start, count, max); 1048 return -EBUSY; 1049 } 1050 1051 max -= count; 1052 spin_lock(&ddp->map_lock); 1053 for (i = start; i < max;) { 1054 for (j = 0, k = i; j < count; j++, k++) { 1055 if (ddp->gl_map[k]) 1056 break; 1057 } 1058 if (j == count) { 1059 for (j = 0, k = i; j < count; j++, k++) 1060 ddp->gl_map[k] = gl; 1061 spin_unlock(&ddp->map_lock); 1062 return i; 1063 } 1064 i += j + 1; 1065 } 1066 spin_unlock(&ddp->map_lock); 1067 log_debug(1 << CXGBI_DBG_DDP, 1068 "NO suitable entries %u available.\n", count); 1069 return -EBUSY; 1070 } 1071 1072 static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp, 1073 int start, int count) 1074 { 1075 spin_lock(&ddp->map_lock); 1076 memset(&ddp->gl_map[start], 0, 1077 count * sizeof(struct cxgbi_gather_list *)); 1078 spin_unlock(&ddp->map_lock); 1079 } 1080 1081 static inline void ddp_gl_unmap(struct pci_dev *pdev, 1082 struct cxgbi_gather_list *gl) 1083 { 1084 int i; 1085 1086 for (i = 0; i < gl->nelem; i++) 1087 dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE, 1088 PCI_DMA_FROMDEVICE); 1089 } 1090 1091 static inline int ddp_gl_map(struct pci_dev *pdev, 1092 struct cxgbi_gather_list *gl) 1093 { 1094 int i; 1095 1096 for (i = 0; i < gl->nelem; i++) { 1097 gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0, 1098 PAGE_SIZE, 1099 PCI_DMA_FROMDEVICE); 1100 if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) { 1101 log_debug(1 << CXGBI_DBG_DDP, 1102 "page %d 0x%p, 0x%p dma mapping err.\n", 1103 i, gl->pages[i], pdev); 1104 goto unmap; 1105 } 1106 } 1107 return i; 1108 unmap: 1109 if (i) { 1110 unsigned int nelem = gl->nelem; 1111 1112 gl->nelem = i; 1113 ddp_gl_unmap(pdev, gl); 1114 gl->nelem = nelem; 1115 } 1116 return -EINVAL; 1117 } 1118 1119 static void ddp_release_gl(struct cxgbi_gather_list *gl, 1120 struct pci_dev *pdev) 1121 { 1122 ddp_gl_unmap(pdev, gl); 1123 kfree(gl); 1124 } 1125 1126 static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen, 1127 struct scatterlist *sgl, 1128 unsigned int sgcnt, 1129 struct pci_dev *pdev, 1130 gfp_t gfp) 1131 { 1132 struct cxgbi_gather_list *gl; 1133 struct scatterlist *sg = sgl; 1134 struct page *sgpage = sg_page(sg); 1135 unsigned int sglen = sg->length; 1136 unsigned int sgoffset = sg->offset; 1137 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> 1138 PAGE_SHIFT; 1139 int i = 1, j = 0; 1140 1141 if (xferlen < DDP_THRESHOLD) { 1142 log_debug(1 << CXGBI_DBG_DDP, 1143 "xfer %u < threshold %u, no ddp.\n", 1144 xferlen, DDP_THRESHOLD); 1145 return NULL; 1146 } 1147 1148 gl = kzalloc(sizeof(struct cxgbi_gather_list) + 1149 npages * (sizeof(dma_addr_t) + 1150 sizeof(struct page *)), gfp); 1151 if (!gl) { 1152 log_debug(1 << CXGBI_DBG_DDP, 1153 "xfer %u, %u pages, OOM.\n", xferlen, npages); 1154 return NULL; 1155 } 1156 1157 log_debug(1 << CXGBI_DBG_DDP, 1158 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); 1159 1160 gl->pages = (struct page **)&gl->phys_addr[npages]; 1161 gl->nelem = npages; 1162 gl->length = xferlen; 1163 gl->offset = sgoffset; 1164 gl->pages[0] = sgpage; 1165 1166 for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt; 1167 i++, sg = sg_next(sg)) { 1168 struct page *page = sg_page(sg); 1169 1170 if (sgpage == page && sg->offset == sgoffset + sglen) 1171 sglen += sg->length; 1172 else { 1173 /* make sure the sgl is fit for ddp: 1174 * each has the same page size, and 1175 * all of the middle pages are used completely 1176 */ 1177 if ((j && sgoffset) || ((i != sgcnt - 1) && 1178 ((sglen + sgoffset) & ~PAGE_MASK))) { 1179 log_debug(1 << CXGBI_DBG_DDP, 1180 "page %d/%u, %u + %u.\n", 1181 i, sgcnt, sgoffset, sglen); 1182 goto error_out; 1183 } 1184 1185 j++; 1186 if (j == gl->nelem || sg->offset) { 1187 log_debug(1 << CXGBI_DBG_DDP, 1188 "page %d/%u, offset %u.\n", 1189 j, gl->nelem, sg->offset); 1190 goto error_out; 1191 } 1192 gl->pages[j] = page; 1193 sglen = sg->length; 1194 sgoffset = sg->offset; 1195 sgpage = page; 1196 } 1197 } 1198 gl->nelem = ++j; 1199 1200 if (ddp_gl_map(pdev, gl) < 0) 1201 goto error_out; 1202 1203 return gl; 1204 1205 error_out: 1206 kfree(gl); 1207 return NULL; 1208 } 1209 1210 static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag) 1211 { 1212 struct cxgbi_device *cdev = chba->cdev; 1213 struct cxgbi_ddp_info *ddp = cdev->ddp; 1214 u32 idx; 1215 1216 idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask; 1217 if (idx < ddp->nppods) { 1218 struct cxgbi_gather_list *gl = ddp->gl_map[idx]; 1219 unsigned int npods; 1220 1221 if (!gl || !gl->nelem) { 1222 pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n", 1223 tag, idx, gl, gl ? gl->nelem : 0); 1224 return; 1225 } 1226 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1227 log_debug(1 << CXGBI_DBG_DDP, 1228 "tag 0x%x, release idx %u, npods %u.\n", 1229 tag, idx, npods); 1230 cdev->csk_ddp_clear(chba, tag, idx, npods); 1231 ddp_unmark_entries(ddp, idx, npods); 1232 ddp_release_gl(gl, ddp->pdev); 1233 } else 1234 pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods); 1235 } 1236 1237 static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, 1238 u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl, 1239 gfp_t gfp) 1240 { 1241 struct cxgbi_device *cdev = csk->cdev; 1242 struct cxgbi_ddp_info *ddp = cdev->ddp; 1243 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1244 struct cxgbi_pagepod_hdr hdr; 1245 unsigned int npods; 1246 int idx = -1; 1247 int err = -ENOMEM; 1248 u32 tag; 1249 1250 npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; 1251 if (ddp->idx_last == ddp->nppods) 1252 idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, 1253 npods, gl); 1254 else { 1255 idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1, 1256 ddp->nppods, npods, 1257 gl); 1258 if (idx < 0 && ddp->idx_last >= npods) { 1259 idx = ddp_find_unused_entries(ddp, 0, 1260 min(ddp->idx_last + npods, ddp->nppods), 1261 npods, gl); 1262 } 1263 } 1264 if (idx < 0) { 1265 log_debug(1 << CXGBI_DBG_DDP, 1266 "xferlen %u, gl %u, npods %u NO DDP.\n", 1267 gl->length, gl->nelem, npods); 1268 return idx; 1269 } 1270 1271 tag = cxgbi_ddp_tag_base(tformat, sw_tag); 1272 tag |= idx << PPOD_IDX_SHIFT; 1273 1274 hdr.rsvd = 0; 1275 hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); 1276 hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask); 1277 hdr.max_offset = htonl(gl->length); 1278 hdr.page_offset = htonl(gl->offset); 1279 1280 err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); 1281 if (err < 0) 1282 goto unmark_entries; 1283 1284 ddp->idx_last = idx; 1285 log_debug(1 << CXGBI_DBG_DDP, 1286 "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n", 1287 gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx, 1288 npods); 1289 *tagp = tag; 1290 return 0; 1291 1292 unmark_entries: 1293 ddp_unmark_entries(ddp, idx, npods); 1294 return err; 1295 } 1296 1297 int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp, 1298 unsigned int sw_tag, unsigned int xferlen, 1299 struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp) 1300 { 1301 struct cxgbi_device *cdev = csk->cdev; 1302 struct cxgbi_tag_format *tformat = &cdev->tag_format; 1303 struct cxgbi_gather_list *gl; 1304 int err; 1305 1306 if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp || 1307 xferlen < DDP_THRESHOLD) { 1308 log_debug(1 << CXGBI_DBG_DDP, 1309 "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen); 1310 return -EINVAL; 1311 } 1312 1313 if (!cxgbi_sw_tag_usable(tformat, sw_tag)) { 1314 log_debug(1 << CXGBI_DBG_DDP, 1315 "sw_tag 0x%x NOT usable.\n", sw_tag); 1316 return -EINVAL; 1317 } 1318 1319 gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp); 1320 if (!gl) 1321 return -ENOMEM; 1322 1323 err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp); 1324 if (err < 0) 1325 ddp_release_gl(gl, cdev->pdev); 1326 1327 return err; 1328 } 1329 1330 static void ddp_destroy(struct kref *kref) 1331 { 1332 struct cxgbi_ddp_info *ddp = container_of(kref, 1333 struct cxgbi_ddp_info, 1334 refcnt); 1335 struct cxgbi_device *cdev = ddp->cdev; 1336 int i = 0; 1337 1338 pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev); 1339 1340 while (i < ddp->nppods) { 1341 struct cxgbi_gather_list *gl = ddp->gl_map[i]; 1342 1343 if (gl) { 1344 int npods = (gl->nelem + PPOD_PAGES_MAX - 1) 1345 >> PPOD_PAGES_SHIFT; 1346 pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); 1347 kfree(gl); 1348 i += npods; 1349 } else 1350 i++; 1351 } 1352 cxgbi_free_big_mem(ddp); 1353 } 1354 1355 int cxgbi_ddp_cleanup(struct cxgbi_device *cdev) 1356 { 1357 struct cxgbi_ddp_info *ddp = cdev->ddp; 1358 1359 log_debug(1 << CXGBI_DBG_DDP, 1360 "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp); 1361 cdev->ddp = NULL; 1362 if (ddp) 1363 return kref_put(&ddp->refcnt, ddp_destroy); 1364 return 0; 1365 } 1366 EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup); 1367 1368 int cxgbi_ddp_init(struct cxgbi_device *cdev, 1369 unsigned int llimit, unsigned int ulimit, 1370 unsigned int max_txsz, unsigned int max_rxsz) 1371 { 1372 struct cxgbi_ddp_info *ddp; 1373 unsigned int ppmax, bits; 1374 1375 ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT; 1376 bits = __ilog2_u32(ppmax) + 1; 1377 if (bits > PPOD_IDX_MAX_SIZE) 1378 bits = PPOD_IDX_MAX_SIZE; 1379 ppmax = (1 << (bits - 1)) - 1; 1380 1381 ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) + 1382 ppmax * (sizeof(struct cxgbi_gather_list *) + 1383 sizeof(struct sk_buff *)), 1384 GFP_KERNEL); 1385 if (!ddp) { 1386 pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax); 1387 return -ENOMEM; 1388 } 1389 ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); 1390 cdev->ddp = ddp; 1391 1392 spin_lock_init(&ddp->map_lock); 1393 kref_init(&ddp->refcnt); 1394 1395 ddp->cdev = cdev; 1396 ddp->pdev = cdev->pdev; 1397 ddp->llimit = llimit; 1398 ddp->ulimit = ulimit; 1399 ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE); 1400 ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE); 1401 ddp->nppods = ppmax; 1402 ddp->idx_last = ppmax; 1403 ddp->idx_bits = bits; 1404 ddp->idx_mask = (1 << bits) - 1; 1405 ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1; 1406 1407 cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits; 1408 cdev->tag_format.rsvd_bits = ddp->idx_bits; 1409 cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT; 1410 cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1; 1411 1412 pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n", 1413 cdev->ports[0]->name, cdev->tag_format.sw_bits, 1414 cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift, 1415 cdev->tag_format.rsvd_mask); 1416 1417 cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1418 ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN); 1419 cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD, 1420 ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN); 1421 1422 log_debug(1 << CXGBI_DBG_DDP, 1423 "%s max payload size: %u/%u, %u/%u.\n", 1424 cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz, 1425 cdev->rx_max_size, ddp->max_rxsz); 1426 return 0; 1427 } 1428 EXPORT_SYMBOL_GPL(cxgbi_ddp_init); 1429 1430 /* 1431 * APIs interacting with open-iscsi libraries 1432 */ 1433 1434 static unsigned char padding[4]; 1435 1436 static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) 1437 { 1438 struct scsi_cmnd *sc = task->sc; 1439 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1440 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1441 struct cxgbi_hba *chba = cconn->chba; 1442 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1443 u32 tag = ntohl((__force u32)hdr_itt); 1444 1445 log_debug(1 << CXGBI_DBG_DDP, 1446 "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag); 1447 if (sc && 1448 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && 1449 cxgbi_is_ddp_tag(tformat, tag)) 1450 ddp_tag_release(chba, tag); 1451 } 1452 1453 static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) 1454 { 1455 struct scsi_cmnd *sc = task->sc; 1456 struct iscsi_conn *conn = task->conn; 1457 struct iscsi_session *sess = conn->session; 1458 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1459 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1460 struct cxgbi_hba *chba = cconn->chba; 1461 struct cxgbi_tag_format *tformat = &chba->cdev->tag_format; 1462 u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt; 1463 u32 tag = 0; 1464 int err = -EINVAL; 1465 1466 if (sc && 1467 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) { 1468 err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag, 1469 scsi_in(sc)->length, 1470 scsi_in(sc)->table.sgl, 1471 scsi_in(sc)->table.nents, 1472 GFP_ATOMIC); 1473 if (err < 0) 1474 log_debug(1 << CXGBI_DBG_DDP, 1475 "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n", 1476 cconn->cep->csk, task, scsi_in(sc)->length, 1477 scsi_in(sc)->table.nents); 1478 } 1479 1480 if (err < 0) 1481 tag = cxgbi_set_non_ddp_tag(tformat, sw_tag); 1482 /* the itt need to sent in big-endian order */ 1483 *hdr_itt = (__force itt_t)htonl(tag); 1484 1485 log_debug(1 << CXGBI_DBG_DDP, 1486 "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n", 1487 chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt); 1488 return 0; 1489 } 1490 1491 void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age) 1492 { 1493 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1494 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1495 struct cxgbi_device *cdev = cconn->chba->cdev; 1496 u32 tag = ntohl((__force u32) itt); 1497 u32 sw_bits; 1498 1499 sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag); 1500 if (idx) 1501 *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1); 1502 if (age) 1503 *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK; 1504 1505 log_debug(1 << CXGBI_DBG_DDP, 1506 "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n", 1507 cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF, 1508 age ? *age : 0xFF); 1509 } 1510 EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt); 1511 1512 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) 1513 { 1514 struct iscsi_conn *conn = csk->user_data; 1515 1516 if (conn) { 1517 log_debug(1 << CXGBI_DBG_SOCK, 1518 "csk 0x%p, cid %d.\n", csk, conn->id); 1519 iscsi_conn_queue_work(conn); 1520 } 1521 } 1522 EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open); 1523 1524 /* 1525 * pdu receive, interact with libiscsi_tcp 1526 */ 1527 static inline int read_pdu_skb(struct iscsi_conn *conn, 1528 struct sk_buff *skb, 1529 unsigned int offset, 1530 int offloaded) 1531 { 1532 int status = 0; 1533 int bytes_read; 1534 1535 bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status); 1536 switch (status) { 1537 case ISCSI_TCP_CONN_ERR: 1538 pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n", 1539 skb, offset, offloaded); 1540 return -EIO; 1541 case ISCSI_TCP_SUSPENDED: 1542 log_debug(1 << CXGBI_DBG_PDU_RX, 1543 "skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n", 1544 skb, offset, offloaded, bytes_read); 1545 /* no transfer - just have caller flush queue */ 1546 return bytes_read; 1547 case ISCSI_TCP_SKB_DONE: 1548 pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n", 1549 skb, offset, offloaded); 1550 /* 1551 * pdus should always fit in the skb and we should get 1552 * segment done notifcation. 1553 */ 1554 iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb."); 1555 return -EFAULT; 1556 case ISCSI_TCP_SEGMENT_DONE: 1557 log_debug(1 << CXGBI_DBG_PDU_RX, 1558 "skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n", 1559 skb, offset, offloaded, bytes_read); 1560 return bytes_read; 1561 default: 1562 pr_info("skb 0x%p, off %u, %d, invalid status %d.\n", 1563 skb, offset, offloaded, status); 1564 return -EINVAL; 1565 } 1566 } 1567 1568 static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1569 { 1570 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1571 1572 log_debug(1 << CXGBI_DBG_PDU_RX, 1573 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1574 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1575 1576 if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) { 1577 pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb); 1578 iscsi_conn_failure(conn, ISCSI_ERR_PROTO); 1579 return -EIO; 1580 } 1581 1582 if (conn->hdrdgst_en && 1583 cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) { 1584 pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb); 1585 iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST); 1586 return -EIO; 1587 } 1588 1589 return read_pdu_skb(conn, skb, 0, 0); 1590 } 1591 1592 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1593 struct sk_buff *skb, unsigned int offset) 1594 { 1595 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1596 bool offloaded = 0; 1597 int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK; 1598 1599 log_debug(1 << CXGBI_DBG_PDU_RX, 1600 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1601 conn, skb, skb->len, cxgbi_skcb_flags(skb)); 1602 1603 if (conn->datadgst_en && 1604 cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) { 1605 pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n", 1606 conn, lskb, cxgbi_skcb_flags(lskb)); 1607 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 1608 return -EIO; 1609 } 1610 1611 if (iscsi_tcp_recv_segment_is_hdr(tcp_conn)) 1612 return 0; 1613 1614 /* coalesced, add header digest length */ 1615 if (lskb == skb && conn->hdrdgst_en) 1616 offset += ISCSI_DIGEST_SIZE; 1617 1618 if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD)) 1619 offloaded = 1; 1620 1621 if (opcode == ISCSI_OP_SCSI_DATA_IN) 1622 log_debug(1 << CXGBI_DBG_PDU_RX, 1623 "skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n", 1624 skb, opcode, ntohl(tcp_conn->in.hdr->itt), 1625 tcp_conn->in.datalen, offloaded ? "is" : "not"); 1626 1627 return read_pdu_skb(conn, skb, offset, offloaded); 1628 } 1629 1630 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) 1631 { 1632 struct cxgbi_device *cdev = csk->cdev; 1633 int must_send; 1634 u32 credits; 1635 1636 log_debug(1 << CXGBI_DBG_PDU_RX, 1637 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", 1638 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1639 csk->rcv_wup, cdev->rx_credit_thres, 1640 cdev->rcv_win); 1641 1642 if (csk->state != CTP_ESTABLISHED) 1643 return; 1644 1645 credits = csk->copied_seq - csk->rcv_wup; 1646 if (unlikely(!credits)) 1647 return; 1648 if (unlikely(cdev->rx_credit_thres == 0)) 1649 return; 1650 1651 must_send = credits + 16384 >= cdev->rcv_win; 1652 if (must_send || credits >= cdev->rx_credit_thres) 1653 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); 1654 } 1655 1656 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) 1657 { 1658 struct cxgbi_device *cdev = csk->cdev; 1659 struct iscsi_conn *conn = csk->user_data; 1660 struct sk_buff *skb; 1661 unsigned int read = 0; 1662 int err = 0; 1663 1664 log_debug(1 << CXGBI_DBG_PDU_RX, 1665 "csk 0x%p, conn 0x%p.\n", csk, conn); 1666 1667 if (unlikely(!conn || conn->suspend_rx)) { 1668 log_debug(1 << CXGBI_DBG_PDU_RX, 1669 "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n", 1670 csk, conn, conn ? conn->id : 0xFF, 1671 conn ? conn->suspend_rx : 0xFF); 1672 return; 1673 } 1674 1675 while (!err) { 1676 skb = skb_peek(&csk->receive_queue); 1677 if (!skb || 1678 !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) { 1679 if (skb) 1680 log_debug(1 << CXGBI_DBG_PDU_RX, 1681 "skb 0x%p, NOT ready 0x%lx.\n", 1682 skb, cxgbi_skcb_flags(skb)); 1683 break; 1684 } 1685 __skb_unlink(skb, &csk->receive_queue); 1686 1687 read += cxgbi_skcb_rx_pdulen(skb); 1688 log_debug(1 << CXGBI_DBG_PDU_RX, 1689 "csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n", 1690 csk, skb, skb->len, cxgbi_skcb_flags(skb), 1691 cxgbi_skcb_rx_pdulen(skb)); 1692 1693 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1694 err = skb_read_pdu_bhs(conn, skb); 1695 if (err < 0) { 1696 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1697 "f 0x%lx, plen %u.\n", 1698 csk, skb, skb->len, 1699 cxgbi_skcb_flags(skb), 1700 cxgbi_skcb_rx_pdulen(skb)); 1701 goto skb_done; 1702 } 1703 err = skb_read_pdu_data(conn, skb, skb, 1704 err + cdev->skb_rx_extra); 1705 if (err < 0) 1706 pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, " 1707 "f 0x%lx, plen %u.\n", 1708 csk, skb, skb->len, 1709 cxgbi_skcb_flags(skb), 1710 cxgbi_skcb_rx_pdulen(skb)); 1711 } else { 1712 err = skb_read_pdu_bhs(conn, skb); 1713 if (err < 0) { 1714 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1715 "f 0x%lx, plen %u.\n", 1716 csk, skb, skb->len, 1717 cxgbi_skcb_flags(skb), 1718 cxgbi_skcb_rx_pdulen(skb)); 1719 goto skb_done; 1720 } 1721 1722 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) { 1723 struct sk_buff *dskb; 1724 1725 dskb = skb_peek(&csk->receive_queue); 1726 if (!dskb) { 1727 pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx," 1728 " plen %u, NO data.\n", 1729 csk, skb, skb->len, 1730 cxgbi_skcb_flags(skb), 1731 cxgbi_skcb_rx_pdulen(skb)); 1732 err = -EIO; 1733 goto skb_done; 1734 } 1735 __skb_unlink(dskb, &csk->receive_queue); 1736 1737 err = skb_read_pdu_data(conn, skb, dskb, 0); 1738 if (err < 0) 1739 pr_err("data, csk 0x%p, skb 0x%p,%u, " 1740 "f 0x%lx, plen %u, dskb 0x%p," 1741 "%u.\n", 1742 csk, skb, skb->len, 1743 cxgbi_skcb_flags(skb), 1744 cxgbi_skcb_rx_pdulen(skb), 1745 dskb, dskb->len); 1746 __kfree_skb(dskb); 1747 } else 1748 err = skb_read_pdu_data(conn, skb, skb, 0); 1749 } 1750 skb_done: 1751 __kfree_skb(skb); 1752 1753 if (err < 0) 1754 break; 1755 } 1756 1757 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); 1758 if (read) { 1759 csk->copied_seq += read; 1760 csk_return_rx_credits(csk, read); 1761 conn->rxdata_octets += read; 1762 } 1763 1764 if (err < 0) { 1765 pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n", 1766 csk, conn, err, read); 1767 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1768 } 1769 } 1770 EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready); 1771 1772 static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt, 1773 unsigned int offset, unsigned int *off, 1774 struct scatterlist **sgp) 1775 { 1776 int i; 1777 struct scatterlist *sg; 1778 1779 for_each_sg(sgl, sg, sgcnt, i) { 1780 if (offset < sg->length) { 1781 *off = offset; 1782 *sgp = sg; 1783 return 0; 1784 } 1785 offset -= sg->length; 1786 } 1787 return -EFAULT; 1788 } 1789 1790 static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset, 1791 unsigned int dlen, struct page_frag *frags, 1792 int frag_max) 1793 { 1794 unsigned int datalen = dlen; 1795 unsigned int sglen = sg->length - sgoffset; 1796 struct page *page = sg_page(sg); 1797 int i; 1798 1799 i = 0; 1800 do { 1801 unsigned int copy; 1802 1803 if (!sglen) { 1804 sg = sg_next(sg); 1805 if (!sg) { 1806 pr_warn("sg %d NULL, len %u/%u.\n", 1807 i, datalen, dlen); 1808 return -EINVAL; 1809 } 1810 sgoffset = 0; 1811 sglen = sg->length; 1812 page = sg_page(sg); 1813 1814 } 1815 copy = min(datalen, sglen); 1816 if (i && page == frags[i - 1].page && 1817 sgoffset + sg->offset == 1818 frags[i - 1].offset + frags[i - 1].size) { 1819 frags[i - 1].size += copy; 1820 } else { 1821 if (i >= frag_max) { 1822 pr_warn("too many pages %u, dlen %u.\n", 1823 frag_max, dlen); 1824 return -EINVAL; 1825 } 1826 1827 frags[i].page = page; 1828 frags[i].offset = sg->offset + sgoffset; 1829 frags[i].size = copy; 1830 i++; 1831 } 1832 datalen -= copy; 1833 sgoffset += copy; 1834 sglen -= copy; 1835 } while (datalen); 1836 1837 return i; 1838 } 1839 1840 int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) 1841 { 1842 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 1843 struct cxgbi_conn *cconn = tcp_conn->dd_data; 1844 struct cxgbi_device *cdev = cconn->chba->cdev; 1845 struct iscsi_conn *conn = task->conn; 1846 struct iscsi_tcp_task *tcp_task = task->dd_data; 1847 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1848 struct scsi_cmnd *sc = task->sc; 1849 int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX; 1850 1851 tcp_task->dd_data = tdata; 1852 task->hdr = NULL; 1853 1854 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1855 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1856 (opcode == ISCSI_OP_SCSI_CMD && 1857 (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) 1858 /* data could goes into skb head */ 1859 headroom += min_t(unsigned int, 1860 SKB_MAX_HEAD(cdev->skb_tx_rsvd), 1861 conn->max_xmit_dlength); 1862 1863 tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC); 1864 if (!tdata->skb) { 1865 pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n", 1866 cdev->skb_tx_rsvd, headroom, opcode); 1867 return -ENOMEM; 1868 } 1869 1870 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1871 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1872 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ 1873 1874 /* data_out uses scsi_cmd's itt */ 1875 if (opcode != ISCSI_OP_SCSI_DATA_OUT) 1876 task_reserve_itt(task, &task->hdr->itt); 1877 1878 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1879 "task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n", 1880 task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom, 1881 conn->max_xmit_dlength, ntohl(task->hdr->itt)); 1882 1883 return 0; 1884 } 1885 EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); 1886 1887 static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) 1888 { 1889 if (hcrc || dcrc) { 1890 u8 submode = 0; 1891 1892 if (hcrc) 1893 submode |= 1; 1894 if (dcrc) 1895 submode |= 2; 1896 cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; 1897 } else 1898 cxgbi_skcb_ulp_mode(skb) = 0; 1899 } 1900 1901 int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, 1902 unsigned int count) 1903 { 1904 struct iscsi_conn *conn = task->conn; 1905 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 1906 struct sk_buff *skb = tdata->skb; 1907 unsigned int datalen = count; 1908 int i, padlen = iscsi_padding(count); 1909 struct page *pg; 1910 1911 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 1912 "task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n", 1913 task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK, 1914 ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count); 1915 1916 skb_put(skb, task->hdr_len); 1917 tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0); 1918 if (!count) 1919 return 0; 1920 1921 if (task->sc) { 1922 struct scsi_data_buffer *sdb = scsi_out(task->sc); 1923 struct scatterlist *sg = NULL; 1924 int err; 1925 1926 tdata->offset = offset; 1927 tdata->count = count; 1928 err = sgl_seek_offset( 1929 sdb->table.sgl, sdb->table.nents, 1930 tdata->offset, &tdata->sgoffset, &sg); 1931 if (err < 0) { 1932 pr_warn("tpdu, sgl %u, bad offset %u/%u.\n", 1933 sdb->table.nents, tdata->offset, sdb->length); 1934 return err; 1935 } 1936 err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count, 1937 tdata->frags, MAX_PDU_FRAGS); 1938 if (err < 0) { 1939 pr_warn("tpdu, sgl %u, bad offset %u + %u.\n", 1940 sdb->table.nents, tdata->offset, tdata->count); 1941 return err; 1942 } 1943 tdata->nr_frags = err; 1944 1945 if (tdata->nr_frags > MAX_SKB_FRAGS || 1946 (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) { 1947 char *dst = skb->data + task->hdr_len; 1948 struct page_frag *frag = tdata->frags; 1949 1950 /* data fits in the skb's headroom */ 1951 for (i = 0; i < tdata->nr_frags; i++, frag++) { 1952 char *src = kmap_atomic(frag->page, 1953 KM_SOFTIRQ0); 1954 1955 memcpy(dst, src+frag->offset, frag->size); 1956 dst += frag->size; 1957 kunmap_atomic(src, KM_SOFTIRQ0); 1958 } 1959 if (padlen) { 1960 memset(dst, 0, padlen); 1961 padlen = 0; 1962 } 1963 skb_put(skb, count + padlen); 1964 } else { 1965 /* data fit into frag_list */ 1966 for (i = 0; i < tdata->nr_frags; i++) { 1967 __skb_fill_page_desc(skb, i, 1968 tdata->frags[i].page, 1969 tdata->frags[i].offset, 1970 tdata->frags[i].size); 1971 skb_frag_ref(skb, i); 1972 } 1973 skb_shinfo(skb)->nr_frags = tdata->nr_frags; 1974 skb->len += count; 1975 skb->data_len += count; 1976 skb->truesize += count; 1977 } 1978 1979 } else { 1980 pg = virt_to_page(task->data); 1981 1982 get_page(pg); 1983 skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data), 1984 count); 1985 skb->len += count; 1986 skb->data_len += count; 1987 skb->truesize += count; 1988 } 1989 1990 if (padlen) { 1991 i = skb_shinfo(skb)->nr_frags; 1992 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1993 virt_to_page(padding), offset_in_page(padding), 1994 padlen); 1995 1996 skb->data_len += padlen; 1997 skb->truesize += padlen; 1998 skb->len += padlen; 1999 } 2000 2001 return 0; 2002 } 2003 EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu); 2004 2005 int cxgbi_conn_xmit_pdu(struct iscsi_task *task) 2006 { 2007 struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data; 2008 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2009 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2010 struct sk_buff *skb = tdata->skb; 2011 unsigned int datalen; 2012 int err; 2013 2014 if (!skb) { 2015 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2016 "task 0x%p, skb NULL.\n", task); 2017 return 0; 2018 } 2019 2020 datalen = skb->data_len; 2021 tdata->skb = NULL; 2022 err = cxgbi_sock_send_pdus(cconn->cep->csk, skb); 2023 if (err > 0) { 2024 int pdulen = err; 2025 2026 log_debug(1 << CXGBI_DBG_PDU_TX, 2027 "task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n", 2028 task, task->sc, skb, skb->len, skb->data_len, err); 2029 2030 if (task->conn->hdrdgst_en) 2031 pdulen += ISCSI_DIGEST_SIZE; 2032 2033 if (datalen && task->conn->datadgst_en) 2034 pdulen += ISCSI_DIGEST_SIZE; 2035 2036 task->conn->txdata_octets += pdulen; 2037 return 0; 2038 } 2039 2040 if (err == -EAGAIN || err == -ENOBUFS) { 2041 log_debug(1 << CXGBI_DBG_PDU_TX, 2042 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2043 task, skb, skb->len, skb->data_len, err); 2044 /* reset skb to send when we are called again */ 2045 tdata->skb = skb; 2046 return err; 2047 } 2048 2049 kfree_skb(skb); 2050 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2051 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2052 task->itt, skb, skb->len, skb->data_len, err); 2053 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2054 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); 2055 return err; 2056 } 2057 EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu); 2058 2059 void cxgbi_cleanup_task(struct iscsi_task *task) 2060 { 2061 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2062 2063 log_debug(1 << CXGBI_DBG_ISCSI, 2064 "task 0x%p, skb 0x%p, itt 0x%x.\n", 2065 task, tdata->skb, task->hdr_itt); 2066 2067 /* never reached the xmit task callout */ 2068 if (tdata->skb) 2069 __kfree_skb(tdata->skb); 2070 memset(tdata, 0, sizeof(*tdata)); 2071 2072 task_release_itt(task, task->hdr_itt); 2073 iscsi_tcp_cleanup_task(task); 2074 } 2075 EXPORT_SYMBOL_GPL(cxgbi_cleanup_task); 2076 2077 void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn, 2078 struct iscsi_stats *stats) 2079 { 2080 struct iscsi_conn *conn = cls_conn->dd_data; 2081 2082 stats->txdata_octets = conn->txdata_octets; 2083 stats->rxdata_octets = conn->rxdata_octets; 2084 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 2085 stats->dataout_pdus = conn->dataout_pdus_cnt; 2086 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 2087 stats->datain_pdus = conn->datain_pdus_cnt; 2088 stats->r2t_pdus = conn->r2t_pdus_cnt; 2089 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 2090 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 2091 stats->digest_err = 0; 2092 stats->timeout_err = 0; 2093 stats->custom_length = 1; 2094 strcpy(stats->custom[0].desc, "eh_abort_cnt"); 2095 stats->custom[0].value = conn->eh_abort_cnt; 2096 } 2097 EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats); 2098 2099 static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn) 2100 { 2101 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2102 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2103 struct cxgbi_device *cdev = cconn->chba->cdev; 2104 unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd); 2105 unsigned int max_def = 512 * MAX_SKB_FRAGS; 2106 unsigned int max = max(max_def, headroom); 2107 2108 max = min(cconn->chba->cdev->tx_max_size, max); 2109 if (conn->max_xmit_dlength) 2110 conn->max_xmit_dlength = min(conn->max_xmit_dlength, max); 2111 else 2112 conn->max_xmit_dlength = max; 2113 cxgbi_align_pdu_size(conn->max_xmit_dlength); 2114 2115 return 0; 2116 } 2117 2118 static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn) 2119 { 2120 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2121 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2122 unsigned int max = cconn->chba->cdev->rx_max_size; 2123 2124 cxgbi_align_pdu_size(max); 2125 2126 if (conn->max_recv_dlength) { 2127 if (conn->max_recv_dlength > max) { 2128 pr_err("MaxRecvDataSegmentLength %u > %u.\n", 2129 conn->max_recv_dlength, max); 2130 return -EINVAL; 2131 } 2132 conn->max_recv_dlength = min(conn->max_recv_dlength, max); 2133 cxgbi_align_pdu_size(conn->max_recv_dlength); 2134 } else 2135 conn->max_recv_dlength = max; 2136 2137 return 0; 2138 } 2139 2140 int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, 2141 enum iscsi_param param, char *buf, int buflen) 2142 { 2143 struct iscsi_conn *conn = cls_conn->dd_data; 2144 struct iscsi_session *session = conn->session; 2145 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2146 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2147 struct cxgbi_sock *csk = cconn->cep->csk; 2148 int value, err = 0; 2149 2150 log_debug(1 << CXGBI_DBG_ISCSI, 2151 "cls_conn 0x%p, param %d, buf(%d) %s.\n", 2152 cls_conn, param, buflen, buf); 2153 2154 switch (param) { 2155 case ISCSI_PARAM_HDRDGST_EN: 2156 err = iscsi_set_param(cls_conn, param, buf, buflen); 2157 if (!err && conn->hdrdgst_en) 2158 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2159 conn->hdrdgst_en, 2160 conn->datadgst_en, 0); 2161 break; 2162 case ISCSI_PARAM_DATADGST_EN: 2163 err = iscsi_set_param(cls_conn, param, buf, buflen); 2164 if (!err && conn->datadgst_en) 2165 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2166 conn->hdrdgst_en, 2167 conn->datadgst_en, 0); 2168 break; 2169 case ISCSI_PARAM_MAX_R2T: 2170 sscanf(buf, "%d", &value); 2171 if (value <= 0 || !is_power_of_2(value)) 2172 return -EINVAL; 2173 if (session->max_r2t == value) 2174 break; 2175 iscsi_tcp_r2tpool_free(session); 2176 err = iscsi_set_param(cls_conn, param, buf, buflen); 2177 if (!err && iscsi_tcp_r2tpool_alloc(session)) 2178 return -ENOMEM; 2179 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2180 err = iscsi_set_param(cls_conn, param, buf, buflen); 2181 if (!err) 2182 err = cxgbi_conn_max_recv_dlength(conn); 2183 break; 2184 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2185 err = iscsi_set_param(cls_conn, param, buf, buflen); 2186 if (!err) 2187 err = cxgbi_conn_max_xmit_dlength(conn); 2188 break; 2189 default: 2190 return iscsi_set_param(cls_conn, param, buf, buflen); 2191 } 2192 return err; 2193 } 2194 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); 2195 2196 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, 2197 char *buf) 2198 { 2199 struct cxgbi_endpoint *cep = ep->dd_data; 2200 struct cxgbi_sock *csk; 2201 int len; 2202 2203 log_debug(1 << CXGBI_DBG_ISCSI, 2204 "cls_conn 0x%p, param %d.\n", ep, param); 2205 2206 switch (param) { 2207 case ISCSI_PARAM_CONN_PORT: 2208 case ISCSI_PARAM_CONN_ADDRESS: 2209 if (!cep) 2210 return -ENOTCONN; 2211 2212 csk = cep->csk; 2213 if (!csk) 2214 return -ENOTCONN; 2215 2216 return iscsi_conn_get_addr_param((struct sockaddr_storage *) 2217 &csk->daddr, param, buf); 2218 default: 2219 return -ENOSYS; 2220 } 2221 return len; 2222 } 2223 EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); 2224 2225 struct iscsi_cls_conn * 2226 cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) 2227 { 2228 struct iscsi_cls_conn *cls_conn; 2229 struct iscsi_conn *conn; 2230 struct iscsi_tcp_conn *tcp_conn; 2231 struct cxgbi_conn *cconn; 2232 2233 cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid); 2234 if (!cls_conn) 2235 return NULL; 2236 2237 conn = cls_conn->dd_data; 2238 tcp_conn = conn->dd_data; 2239 cconn = tcp_conn->dd_data; 2240 cconn->iconn = conn; 2241 2242 log_debug(1 << CXGBI_DBG_ISCSI, 2243 "cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n", 2244 cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn); 2245 2246 return cls_conn; 2247 } 2248 EXPORT_SYMBOL_GPL(cxgbi_create_conn); 2249 2250 int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, 2251 struct iscsi_cls_conn *cls_conn, 2252 u64 transport_eph, int is_leading) 2253 { 2254 struct iscsi_conn *conn = cls_conn->dd_data; 2255 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2256 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2257 struct iscsi_endpoint *ep; 2258 struct cxgbi_endpoint *cep; 2259 struct cxgbi_sock *csk; 2260 int err; 2261 2262 ep = iscsi_lookup_endpoint(transport_eph); 2263 if (!ep) 2264 return -EINVAL; 2265 2266 /* setup ddp pagesize */ 2267 cep = ep->dd_data; 2268 csk = cep->csk; 2269 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0); 2270 if (err < 0) 2271 return err; 2272 2273 err = iscsi_conn_bind(cls_session, cls_conn, is_leading); 2274 if (err) 2275 return -EINVAL; 2276 2277 /* calculate the tag idx bits needed for this conn based on cmds_max */ 2278 cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; 2279 2280 write_lock_bh(&csk->callback_lock); 2281 csk->user_data = conn; 2282 cconn->chba = cep->chba; 2283 cconn->cep = cep; 2284 cep->cconn = cconn; 2285 write_unlock_bh(&csk->callback_lock); 2286 2287 cxgbi_conn_max_xmit_dlength(conn); 2288 cxgbi_conn_max_recv_dlength(conn); 2289 2290 log_debug(1 << CXGBI_DBG_ISCSI, 2291 "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", 2292 cls_session, cls_conn, ep, cconn, csk); 2293 /* init recv engine */ 2294 iscsi_tcp_hdr_recv_prep(tcp_conn); 2295 2296 return 0; 2297 } 2298 EXPORT_SYMBOL_GPL(cxgbi_bind_conn); 2299 2300 struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep, 2301 u16 cmds_max, u16 qdepth, 2302 u32 initial_cmdsn) 2303 { 2304 struct cxgbi_endpoint *cep; 2305 struct cxgbi_hba *chba; 2306 struct Scsi_Host *shost; 2307 struct iscsi_cls_session *cls_session; 2308 struct iscsi_session *session; 2309 2310 if (!ep) { 2311 pr_err("missing endpoint.\n"); 2312 return NULL; 2313 } 2314 2315 cep = ep->dd_data; 2316 chba = cep->chba; 2317 shost = chba->shost; 2318 2319 BUG_ON(chba != iscsi_host_priv(shost)); 2320 2321 cls_session = iscsi_session_setup(chba->cdev->itp, shost, 2322 cmds_max, 0, 2323 sizeof(struct iscsi_tcp_task) + 2324 sizeof(struct cxgbi_task_data), 2325 initial_cmdsn, ISCSI_MAX_TARGET); 2326 if (!cls_session) 2327 return NULL; 2328 2329 session = cls_session->dd_data; 2330 if (iscsi_tcp_r2tpool_alloc(session)) 2331 goto remove_session; 2332 2333 log_debug(1 << CXGBI_DBG_ISCSI, 2334 "ep 0x%p, cls sess 0x%p.\n", ep, cls_session); 2335 return cls_session; 2336 2337 remove_session: 2338 iscsi_session_teardown(cls_session); 2339 return NULL; 2340 } 2341 EXPORT_SYMBOL_GPL(cxgbi_create_session); 2342 2343 void cxgbi_destroy_session(struct iscsi_cls_session *cls_session) 2344 { 2345 log_debug(1 << CXGBI_DBG_ISCSI, 2346 "cls sess 0x%p.\n", cls_session); 2347 2348 iscsi_tcp_r2tpool_free(cls_session->dd_data); 2349 iscsi_session_teardown(cls_session); 2350 } 2351 EXPORT_SYMBOL_GPL(cxgbi_destroy_session); 2352 2353 int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2354 char *buf, int buflen) 2355 { 2356 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2357 2358 if (!chba->ndev) { 2359 shost_printk(KERN_ERR, shost, "Could not get host param. " 2360 "netdev for host not set.\n"); 2361 return -ENODEV; 2362 } 2363 2364 log_debug(1 << CXGBI_DBG_ISCSI, 2365 "shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n", 2366 shost, chba, chba->ndev->name, param, buflen, buf); 2367 2368 switch (param) { 2369 case ISCSI_HOST_PARAM_IPADDRESS: 2370 { 2371 __be32 addr = in_aton(buf); 2372 log_debug(1 << CXGBI_DBG_ISCSI, 2373 "hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr); 2374 cxgbi_set_iscsi_ipv4(chba, addr); 2375 return 0; 2376 } 2377 case ISCSI_HOST_PARAM_HWADDRESS: 2378 case ISCSI_HOST_PARAM_NETDEV_NAME: 2379 return 0; 2380 default: 2381 return iscsi_host_set_param(shost, param, buf, buflen); 2382 } 2383 } 2384 EXPORT_SYMBOL_GPL(cxgbi_set_host_param); 2385 2386 int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, 2387 char *buf) 2388 { 2389 struct cxgbi_hba *chba = iscsi_host_priv(shost); 2390 int len = 0; 2391 2392 if (!chba->ndev) { 2393 shost_printk(KERN_ERR, shost, "Could not get host param. " 2394 "netdev for host not set.\n"); 2395 return -ENODEV; 2396 } 2397 2398 log_debug(1 << CXGBI_DBG_ISCSI, 2399 "shost 0x%p, hba 0x%p,%s, param %d.\n", 2400 shost, chba, chba->ndev->name, param); 2401 2402 switch (param) { 2403 case ISCSI_HOST_PARAM_HWADDRESS: 2404 len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6); 2405 break; 2406 case ISCSI_HOST_PARAM_NETDEV_NAME: 2407 len = sprintf(buf, "%s\n", chba->ndev->name); 2408 break; 2409 case ISCSI_HOST_PARAM_IPADDRESS: 2410 { 2411 __be32 addr; 2412 2413 addr = cxgbi_get_iscsi_ipv4(chba); 2414 len = sprintf(buf, "%pI4", &addr); 2415 log_debug(1 << CXGBI_DBG_ISCSI, 2416 "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); 2417 break; 2418 } 2419 default: 2420 return iscsi_host_get_param(shost, param, buf); 2421 } 2422 2423 return len; 2424 } 2425 EXPORT_SYMBOL_GPL(cxgbi_get_host_param); 2426 2427 struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, 2428 struct sockaddr *dst_addr, 2429 int non_blocking) 2430 { 2431 struct iscsi_endpoint *ep; 2432 struct cxgbi_endpoint *cep; 2433 struct cxgbi_hba *hba = NULL; 2434 struct cxgbi_sock *csk; 2435 int err = -EINVAL; 2436 2437 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2438 "shost 0x%p, non_blocking %d, dst_addr 0x%p.\n", 2439 shost, non_blocking, dst_addr); 2440 2441 if (shost) { 2442 hba = iscsi_host_priv(shost); 2443 if (!hba) { 2444 pr_info("shost 0x%p, priv NULL.\n", shost); 2445 goto err_out; 2446 } 2447 } 2448 2449 csk = cxgbi_check_route(dst_addr); 2450 if (IS_ERR(csk)) 2451 return (struct iscsi_endpoint *)csk; 2452 cxgbi_sock_get(csk); 2453 2454 if (!hba) 2455 hba = csk->cdev->hbas[csk->port_id]; 2456 else if (hba != csk->cdev->hbas[csk->port_id]) { 2457 pr_info("Could not connect through requested host %u" 2458 "hba 0x%p != 0x%p (%u).\n", 2459 shost->host_no, hba, 2460 csk->cdev->hbas[csk->port_id], csk->port_id); 2461 err = -ENOSPC; 2462 goto release_conn; 2463 } 2464 2465 err = sock_get_port(csk); 2466 if (err) 2467 goto release_conn; 2468 2469 cxgbi_sock_set_state(csk, CTP_CONNECTING); 2470 err = csk->cdev->csk_init_act_open(csk); 2471 if (err) 2472 goto release_conn; 2473 2474 if (cxgbi_sock_is_closing(csk)) { 2475 err = -ENOSPC; 2476 pr_info("csk 0x%p is closing.\n", csk); 2477 goto release_conn; 2478 } 2479 2480 ep = iscsi_create_endpoint(sizeof(*cep)); 2481 if (!ep) { 2482 err = -ENOMEM; 2483 pr_info("iscsi alloc ep, OOM.\n"); 2484 goto release_conn; 2485 } 2486 2487 cep = ep->dd_data; 2488 cep->csk = csk; 2489 cep->chba = hba; 2490 2491 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2492 "ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n", 2493 ep, cep, csk, hba, hba->ndev->name); 2494 return ep; 2495 2496 release_conn: 2497 cxgbi_sock_put(csk); 2498 cxgbi_sock_closed(csk); 2499 err_out: 2500 return ERR_PTR(err); 2501 } 2502 EXPORT_SYMBOL_GPL(cxgbi_ep_connect); 2503 2504 int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) 2505 { 2506 struct cxgbi_endpoint *cep = ep->dd_data; 2507 struct cxgbi_sock *csk = cep->csk; 2508 2509 if (!cxgbi_sock_is_established(csk)) 2510 return 0; 2511 return 1; 2512 } 2513 EXPORT_SYMBOL_GPL(cxgbi_ep_poll); 2514 2515 void cxgbi_ep_disconnect(struct iscsi_endpoint *ep) 2516 { 2517 struct cxgbi_endpoint *cep = ep->dd_data; 2518 struct cxgbi_conn *cconn = cep->cconn; 2519 struct cxgbi_sock *csk = cep->csk; 2520 2521 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK, 2522 "ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n", 2523 ep, cep, cconn, csk, csk->state, csk->flags); 2524 2525 if (cconn && cconn->iconn) { 2526 iscsi_suspend_tx(cconn->iconn); 2527 write_lock_bh(&csk->callback_lock); 2528 cep->csk->user_data = NULL; 2529 cconn->cep = NULL; 2530 write_unlock_bh(&csk->callback_lock); 2531 } 2532 iscsi_destroy_endpoint(ep); 2533 2534 if (likely(csk->state >= CTP_ESTABLISHED)) 2535 need_active_close(csk); 2536 else 2537 cxgbi_sock_closed(csk); 2538 2539 cxgbi_sock_put(csk); 2540 } 2541 EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect); 2542 2543 int cxgbi_iscsi_init(struct iscsi_transport *itp, 2544 struct scsi_transport_template **stt) 2545 { 2546 *stt = iscsi_register_transport(itp); 2547 if (*stt == NULL) { 2548 pr_err("unable to register %s transport 0x%p.\n", 2549 itp->name, itp); 2550 return -ENODEV; 2551 } 2552 log_debug(1 << CXGBI_DBG_ISCSI, 2553 "%s, registered iscsi transport 0x%p.\n", 2554 itp->name, stt); 2555 return 0; 2556 } 2557 EXPORT_SYMBOL_GPL(cxgbi_iscsi_init); 2558 2559 void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, 2560 struct scsi_transport_template **stt) 2561 { 2562 if (*stt) { 2563 log_debug(1 << CXGBI_DBG_ISCSI, 2564 "de-register transport 0x%p, %s, stt 0x%p.\n", 2565 itp, itp->name, *stt); 2566 *stt = NULL; 2567 iscsi_unregister_transport(itp); 2568 } 2569 } 2570 EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); 2571 2572 mode_t cxgbi_attr_is_visible(int param_type, int param) 2573 { 2574 switch (param_type) { 2575 case ISCSI_HOST_PARAM: 2576 switch (param) { 2577 case ISCSI_HOST_PARAM_NETDEV_NAME: 2578 case ISCSI_HOST_PARAM_HWADDRESS: 2579 case ISCSI_HOST_PARAM_IPADDRESS: 2580 case ISCSI_HOST_PARAM_INITIATOR_NAME: 2581 return S_IRUGO; 2582 default: 2583 return 0; 2584 } 2585 case ISCSI_PARAM: 2586 switch (param) { 2587 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2588 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 2589 case ISCSI_PARAM_HDRDGST_EN: 2590 case ISCSI_PARAM_DATADGST_EN: 2591 case ISCSI_PARAM_CONN_ADDRESS: 2592 case ISCSI_PARAM_CONN_PORT: 2593 case ISCSI_PARAM_EXP_STATSN: 2594 case ISCSI_PARAM_PERSISTENT_ADDRESS: 2595 case ISCSI_PARAM_PERSISTENT_PORT: 2596 case ISCSI_PARAM_PING_TMO: 2597 case ISCSI_PARAM_RECV_TMO: 2598 case ISCSI_PARAM_INITIAL_R2T_EN: 2599 case ISCSI_PARAM_MAX_R2T: 2600 case ISCSI_PARAM_IMM_DATA_EN: 2601 case ISCSI_PARAM_FIRST_BURST: 2602 case ISCSI_PARAM_MAX_BURST: 2603 case ISCSI_PARAM_PDU_INORDER_EN: 2604 case ISCSI_PARAM_DATASEQ_INORDER_EN: 2605 case ISCSI_PARAM_ERL: 2606 case ISCSI_PARAM_TARGET_NAME: 2607 case ISCSI_PARAM_TPGT: 2608 case ISCSI_PARAM_USERNAME: 2609 case ISCSI_PARAM_PASSWORD: 2610 case ISCSI_PARAM_USERNAME_IN: 2611 case ISCSI_PARAM_PASSWORD_IN: 2612 case ISCSI_PARAM_FAST_ABORT: 2613 case ISCSI_PARAM_ABORT_TMO: 2614 case ISCSI_PARAM_LU_RESET_TMO: 2615 case ISCSI_PARAM_TGT_RESET_TMO: 2616 case ISCSI_PARAM_IFACE_NAME: 2617 case ISCSI_PARAM_INITIATOR_NAME: 2618 return S_IRUGO; 2619 default: 2620 return 0; 2621 } 2622 } 2623 2624 return 0; 2625 } 2626 EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); 2627 2628 static int __init libcxgbi_init_module(void) 2629 { 2630 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; 2631 sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1; 2632 2633 pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n", 2634 ISCSI_ITT_MASK, sw_tag_idx_bits, 2635 ISCSI_AGE_MASK, sw_tag_age_bits); 2636 2637 ddp_setup_host_page_size(); 2638 return 0; 2639 } 2640 2641 static void __exit libcxgbi_exit_module(void) 2642 { 2643 cxgbi_device_unregister_all(0xFF); 2644 return; 2645 } 2646 2647 module_init(libcxgbi_init_module); 2648 module_exit(libcxgbi_exit_module); 2649