1 /* 2 * iSCSI Initiator over TCP/IP Data-Path 3 * 4 * Copyright (C) 2004 Dmitry Yusupov 5 * Copyright (C) 2004 Alex Aizman 6 * Copyright (C) 2005 - 2006 Mike Christie 7 * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 8 * maintained by open-iscsi@googlegroups.com 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License as published 12 * by the Free Software Foundation; either version 2 of the License, or 13 * (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, but 16 * WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * General Public License for more details. 19 * 20 * See the file COPYING included with this distribution for more details. 21 * 22 * Credits: 23 * Christoph Hellwig 24 * FUJITA Tomonori 25 * Arne Redlich 26 * Zhenyu Wang 27 */ 28 29 #include <linux/err.h> 30 #include <linux/types.h> 31 #include <linux/list.h> 32 #include <linux/inet.h> 33 #include <linux/blkdev.h> 34 #include <linux/crypto.h> 35 #include <linux/delay.h> 36 #include <linux/kfifo.h> 37 #include <linux/scatterlist.h> 38 #include <linux/mutex.h> 39 #include <net/tcp.h> 40 #include <scsi/scsi_cmnd.h> 41 #include <scsi/scsi_host.h> 42 #include <scsi/scsi.h> 43 #include <scsi/scsi_transport_iscsi.h> 44 45 #include "iscsi_tcp.h" 46 47 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 48 "Alex Aizman <itn780@yahoo.com>"); 49 MODULE_DESCRIPTION("iSCSI/TCP data-path"); 50 MODULE_LICENSE("GPL"); 51 /* #define DEBUG_TCP */ 52 #define DEBUG_ASSERT 53 54 #ifdef DEBUG_TCP 55 #define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt) 56 #else 57 #define debug_tcp(fmt...) 
#endif

#ifndef DEBUG_ASSERT
#ifdef BUG_ON
#undef BUG_ON
#endif
/* with DEBUG_ASSERT off, compile all BUG_ON checks out of this file */
#define BUG_ON(expr)
#endif

static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

/*
 * Point ibuf->sg at the page backing a kernel-virtual buffer.  Always
 * sent with sendmsg (no zero-copy sendpage) since the buffer may live
 * in slab memory.
 */
static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
	ibuf->sg.page = virt_to_page(vbuf);
	ibuf->sg.offset = offset_in_page(vbuf);
	ibuf->sg.length = size;
	ibuf->sent = 0;
	ibuf->use_sendmsg = 1;
}

/*
 * Initialize ibuf from a scatterlist element, choosing sendpage when
 * the element is a single non-slab page (safe for zero-copy).
 */
static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
	ibuf->sg.page = sg->page;
	ibuf->sg.offset = sg->offset;
	ibuf->sg.length = sg->length;
	/*
	 * Fastpath: sg element fits into single page
	 */
	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
		ibuf->use_sendmsg = 0;
	else
		ibuf->use_sendmsg = 1;
	ibuf->sent = 0;
}

/* Bytes of this buffer not yet transmitted. */
static inline int
iscsi_buf_left(struct iscsi_buf *ibuf)
{
	int rc;

	rc = ibuf->sg.length - ibuf->sent;
	BUG_ON(rc < 0);
	return rc;
}

/*
 * Compute the header digest (CRC32C per RFC 3720) over the header in
 * @buf and store it at @crc.  The buffer length is then extended by
 * sizeof(uint32_t) so the digest appended after the header is sent too --
 * @crc is expected to sit immediately after the header in memory.
 */
static inline void
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
		 u8* crc)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct hash_desc desc;

	desc.tfm = tcp_conn->tx_tfm;
	desc.flags = 0;
	crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc);
	buf->sg.length += sizeof(uint32_t);
}

/*
 * Locate (or reassemble) the next PDU header in the current skb.
 * Returns 0 with tcp_conn->in.hdr valid, or -EAGAIN if the header is
 * split across skbs and more data is needed (state moves to
 * IN_PROGRESS_HEADER_GATHER until the rest arrives).
 */
static inline int
iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
{
	struct sk_buff *skb = tcp_conn->in.skb;

	tcp_conn->in.zero_copy_hdr = 0;

	if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
	    tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
		/*
		 * Zero-copy PDU Header: using connection context
		 * to store header pointer.
		 */
		if (skb_shinfo(skb)->frag_list == NULL &&
		    !skb_shinfo(skb)->nr_frags) {
			/* linear skb: point straight into skb->data */
			tcp_conn->in.hdr = (struct iscsi_hdr *)
				((char*)skb->data + tcp_conn->in.offset);
			tcp_conn->in.zero_copy_hdr = 1;
		} else {
			/* ignoring return code since we checked
			 * in.copy before */
			skb_copy_bits(skb, tcp_conn->in.offset,
				&tcp_conn->hdr, tcp_conn->hdr_size);
			tcp_conn->in.hdr = &tcp_conn->hdr;
		}
		tcp_conn->in.offset += tcp_conn->hdr_size;
		tcp_conn->in.copy -= tcp_conn->hdr_size;
	} else {
		int hdr_remains;
		int copylen;

		/*
		 * PDU header scattered across SKB's,
		 * copying it... This'll happen quite rarely.
		 */

		if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
			tcp_conn->in.hdr_offset = 0;

		hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
		BUG_ON(hdr_remains <= 0);

		copylen = min(tcp_conn->in.copy, hdr_remains);
		skb_copy_bits(skb, tcp_conn->in.offset,
			(char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
			copylen);

		debug_tcp("PDU gather offset %d bytes %d in.offset %d "
		       "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
		       tcp_conn->in.offset, tcp_conn->in.copy);

		tcp_conn->in.offset += copylen;
		tcp_conn->in.copy -= copylen;
		if (copylen < hdr_remains) {
			/* still short: wait for the next skb */
			tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
			tcp_conn->in.hdr_offset += copylen;
			return -EAGAIN;
		}
		tcp_conn->in.hdr = &tcp_conn->hdr;
		tcp_conn->discontiguous_hdr_cnt++;
		tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	return 0;
}

/*
 * must be called with session lock
 */
static void
iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_r2t_info *r2t;
	struct scsi_cmnd *sc;

	/* flush ctask's r2t queues */
	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
	}

	sc = ctask->sc;
	if (unlikely(!sc))
		return;

	tcp_ctask->xmstate = XMSTATE_IDLE;
	tcp_ctask->r2t = NULL;
}

/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 *
 * Validates the Data-In header (DataSN ordering, buffer offset) and,
 * when the S-bit piggybacks status, fills in the scsi_cmnd result and
 * residual.  Returns 0 or an ISCSI_ERR_* code.
 **/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	int rc;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
	struct iscsi_session *session = conn->session;
	int datasn = be32_to_cpu(rhdr->datasn);

	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
	if (rc)
		return rc;
	/*
	 * setup Data-In byte counter (gets decremented..)
	 */
	ctask->data_count = tcp_conn->in.datalen;

	if (tcp_conn->in.datalen == 0)
		return 0;

	/* Data-In PDUs must arrive with consecutive DataSNs */
	if (ctask->datasn != datasn)
		return ISCSI_ERR_DATASN;

	ctask->datasn++;

	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
	if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
		return ISCSI_ERR_DATA_OFFSET;

	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
		struct scsi_cmnd *sc = ctask->sc;

		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
		if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
			int res_count = be32_to_cpu(rhdr->residual_count);

			if (res_count > 0 &&
			    res_count <= sc->request_bufflen) {
				sc->resid = res_count;
				sc->result = (DID_OK << 16) | rhdr->cmd_status;
			} else
				/* bogus residual: fail the command */
				sc->result = (DID_BAD_TARGET << 16) |
					rhdr->cmd_status;
		} else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
			sc->resid = be32_to_cpu(rhdr->residual_count);
			sc->result = (DID_OK << 16) | rhdr->cmd_status;
		} else
			sc->result
= (DID_OK << 16) | rhdr->cmd_status; 264 } 265 266 conn->datain_pdus_cnt++; 267 return 0; 268 } 269 270 /** 271 * iscsi_solicit_data_init - initialize first Data-Out 272 * @conn: iscsi connection 273 * @ctask: scsi command task 274 * @r2t: R2T info 275 * 276 * Notes: 277 * Initialize first Data-Out within this R2T sequence and finds 278 * proper data_offset within this SCSI command. 279 * 280 * This function is called with connection lock taken. 281 **/ 282 static void 283 iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 284 struct iscsi_r2t_info *r2t) 285 { 286 struct iscsi_data *hdr; 287 struct scsi_cmnd *sc = ctask->sc; 288 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 289 290 hdr = &r2t->dtask.hdr; 291 memset(hdr, 0, sizeof(struct iscsi_data)); 292 hdr->ttt = r2t->ttt; 293 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 294 r2t->solicit_datasn++; 295 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 296 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 297 hdr->itt = ctask->hdr->itt; 298 hdr->exp_statsn = r2t->exp_statsn; 299 hdr->offset = cpu_to_be32(r2t->data_offset); 300 if (r2t->data_length > conn->max_xmit_dlength) { 301 hton24(hdr->dlength, conn->max_xmit_dlength); 302 r2t->data_count = conn->max_xmit_dlength; 303 hdr->flags = 0; 304 } else { 305 hton24(hdr->dlength, r2t->data_length); 306 r2t->data_count = r2t->data_length; 307 hdr->flags = ISCSI_FLAG_CMD_FINAL; 308 } 309 conn->dataout_pdus_cnt++; 310 311 r2t->sent = 0; 312 313 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 314 sizeof(struct iscsi_hdr)); 315 316 if (sc->use_sg) { 317 int i, sg_count = 0; 318 struct scatterlist *sg = sc->request_buffer; 319 320 r2t->sg = NULL; 321 for (i = 0; i < sc->use_sg; i++, sg += 1) { 322 /* FIXME: prefetch ? */ 323 if (sg_count + sg->length > r2t->data_offset) { 324 int page_offset; 325 326 /* sg page found! 
*/ 327 328 /* offset within this page */ 329 page_offset = r2t->data_offset - sg_count; 330 331 /* fill in this buffer */ 332 iscsi_buf_init_sg(&r2t->sendbuf, sg); 333 r2t->sendbuf.sg.offset += page_offset; 334 r2t->sendbuf.sg.length -= page_offset; 335 336 /* xmit logic will continue with next one */ 337 r2t->sg = sg + 1; 338 break; 339 } 340 sg_count += sg->length; 341 } 342 BUG_ON(r2t->sg == NULL); 343 } else 344 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 345 (char*)sc->request_buffer + r2t->data_offset, 346 r2t->data_count); 347 } 348 349 /** 350 * iscsi_r2t_rsp - iSCSI R2T Response processing 351 * @conn: iscsi connection 352 * @ctask: scsi command task 353 **/ 354 static int 355 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 356 { 357 struct iscsi_r2t_info *r2t; 358 struct iscsi_session *session = conn->session; 359 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 360 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 361 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; 362 int r2tsn = be32_to_cpu(rhdr->r2tsn); 363 int rc; 364 365 if (tcp_conn->in.datalen) 366 return ISCSI_ERR_DATALEN; 367 368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) 369 return ISCSI_ERR_R2TSN; 370 371 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); 372 if (rc) 373 return rc; 374 375 /* FIXME: use R2TSN to detect missing R2T */ 376 377 /* fill-in new R2T associated with the task */ 378 spin_lock(&session->lock); 379 if (!ctask->sc || ctask->mtask || 380 session->state != ISCSI_STATE_LOGGED_IN) { 381 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " 382 "recovery...\n", ctask->itt); 383 spin_unlock(&session->lock); 384 return 0; 385 } 386 387 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 388 BUG_ON(!rc); 389 390 r2t->exp_statsn = rhdr->statsn; 391 r2t->data_length = be32_to_cpu(rhdr->data_length); 392 if (r2t->data_length == 0 || 393 r2t->data_length > session->max_burst) { 394 
		spin_unlock(&session->lock);
		return ISCSI_ERR_DATALEN;
	}

	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
	if (r2t->data_offset + r2t->data_length > ctask->total_length) {
		spin_unlock(&session->lock);
		return ISCSI_ERR_DATALEN;
	}

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->solicit_datasn = 0;

	iscsi_solicit_data_init(conn, ctask, r2t);

	tcp_ctask->exp_r2tsn = r2tsn + 1;
	tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
	list_move_tail(&ctask->running, &conn->xmitqueue);

	scsi_queue_work(session->host, &conn->xmitwork);
	conn->r2t_pdus_cnt++;
	spin_unlock(&session->lock);

	return 0;
}

/*
 * Validate the extracted PDU header (length, AHS, header digest, itt)
 * and dispatch it by opcode.  Returns 0 or an ISCSI_ERR_* code.
 */
static int
iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_hdr *hdr;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	uint32_t cdgst, rdgst = 0, itt;

	hdr = tcp_conn->in.hdr;

	/* verify PDU length */
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
		printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
		       tcp_conn->in.datalen, conn->max_recv_dlength);
		return ISCSI_ERR_DATALEN;
	}
	tcp_conn->data_copied = 0;

	/* read AHS (hlength is in units of 4-byte words) */
	ahslen = hdr->hlength << 2;
	tcp_conn->in.offset += ahslen;
	tcp_conn->in.copy -= ahslen;
	if (tcp_conn->in.copy < 0) {
		printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
		       "%d bytes\n", ahslen);
		return ISCSI_ERR_AHSLEN;
	}

	/* calculate read padding */
	tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
	if (tcp_conn->in.padding) {
		tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
		debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
	}

	if (conn->hdrdgst_en) {
		struct hash_desc desc;
		struct scatterlist sg;

		/* recompute digest over header+AHS and compare with the
		 * received digest that follows them */
		sg_init_one(&sg, (u8 *)hdr,
			    sizeof(struct iscsi_hdr) + ahslen);
		desc.tfm = tcp_conn->rx_tfm;
		desc.flags = 0;
		crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst);
		rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
				     ahslen);
		if (cdgst != rdgst) {
			printk(KERN_ERR "iscsi_tcp: hdrdgst error "
			       "recv 0x%x calc 0x%x\n", rdgst, cdgst);
			return ISCSI_ERR_HDR_DGST;
		}
	}

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	/* verify itt (itt encoding: age+cid+itt) */
	rc = iscsi_verify_itt(conn, hdr, &itt);
	if (rc == ISCSI_ERR_NO_SCSI_CMD) {
		tcp_conn->in.datalen = 0; /* force drop */
		return 0;
	} else if (rc)
		return rc;

	debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
		  opcode, tcp_conn->in.offset, tcp_conn->in.copy,
		  ahslen, tcp_conn->in.datalen);

	switch(opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		tcp_conn->in.ctask = session->cmds[itt];
		rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
		if (rc)
			return rc;
		/* fall through */
	case ISCSI_OP_SCSI_CMD_RSP:
		tcp_conn->in.ctask = session->cmds[itt];
		if (tcp_conn->in.datalen)
			goto copy_hdr;

		spin_lock(&session->lock);
		iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
		spin_unlock(&session->lock);
		break;
	case ISCSI_OP_R2T:
		tcp_conn->in.ctask = session->cmds[itt];
		if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
		else if (tcp_conn->in.ctask->sc->sc_data_direction ==
								DMA_TO_DEVICE)
			rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
		else
			/* R2T for a command with no write data */
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
		    tcp_conn->in.datalen) {
			printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
			      "but conn buffer is only %u (opcode %0x)\n",
			      tcp_conn->in.datalen,
			      DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		if (tcp_conn->in.datalen)
			goto copy_hdr;
	/* fall through */
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	return rc;

copy_hdr:
	/*
	 * if we did zero copy for the header but we will need multiple
	 * skbs to complete the command then we have to copy the header
	 * for later use
	 */
	if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
	   (tcp_conn->in.datalen + tcp_conn->in.padding +
	    (conn->datadgst_en ? 4 : 0))) {
		debug_tcp("Copying header for later use. in.copy %d in.datalen"
			  " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
		memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
		       sizeof(struct iscsi_hdr));
		tcp_conn->in.hdr = &tcp_conn->hdr;
		tcp_conn->in.zero_copy_hdr = 0;
	}
	return 0;
}

/**
 * iscsi_ctask_copy - copy skb bits to the destination cmd task
 * @conn: iscsi tcp connection
 * @ctask: scsi command task
 * @buf: buffer to copy to
 * @buf_size: size of buffer
 * @offset: offset within the buffer
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection and
 *	per-cmd byte counters.
 *
 *	Read counters (in bytes):
 *
 *	conn->in.offset		offset within in progress SKB
 *	conn->in.copy		left to copy from in progress SKB
 *				including padding
 *	conn->in.copied		copied already from in progress SKB
 *	conn->data_copied	copied already from in progress buffer
 *	ctask->sent		total bytes sent up to the MidLayer
 *	ctask->data_count	left to copy from in progress Data-In
 *	buf_left		left to copy from in progress buffer
 **/
static inline int
iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
		void *buf, int buf_size, int offset)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int buf_left = buf_size - (tcp_conn->data_copied + offset);
	int size = min(tcp_conn->in.copy, buf_left);
	int rc;

	/* never copy more than the PDU's remaining Data-In payload */
	size = min(size, ctask->data_count);

	debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
	       size, tcp_conn->in.offset, tcp_conn->in.copied);

	BUG_ON(size <= 0);
	BUG_ON(tcp_ctask->sent + size > ctask->total_length);

	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			   (char*)buf + (offset + tcp_conn->data_copied), size);
	/* must fit into skb->len */
	BUG_ON(rc);

	tcp_conn->in.offset += size;
	tcp_conn->in.copy -= size;
	tcp_conn->in.copied += size;
	tcp_conn->data_copied += size;
	tcp_ctask->sent += size;
	ctask->data_count -= size;

	BUG_ON(tcp_conn->in.copy < 0);
	BUG_ON(ctask->data_count < 0);

	if (buf_size != (tcp_conn->data_copied + offset)) {
		if (!ctask->data_count) {
			BUG_ON(buf_size - tcp_conn->data_copied < 0);
			/* done with this PDU */
			return buf_size - tcp_conn->data_copied;
		}
		/* buffer not full yet; need more skb data */
		return -EAGAIN;
	}

	/* done with this buffer or with both - PDU and buffer */
	tcp_conn->data_copied = 0;
	return 0;
}

/**
 * iscsi_tcp_copy - copy skb bits to the destination buffer
 * @conn: iscsi tcp connection
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection
 *	byte counters.  The destination is the connection's generic data
 *	placeholder (conn->data); returns -EAGAIN until the full data
 *	segment has been gathered.
 **/
static inline int
iscsi_tcp_copy(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int buf_size = tcp_conn->in.datalen;
	int buf_left = buf_size - tcp_conn->data_copied;
	int size = min(tcp_conn->in.copy, buf_left);
	int rc;

	debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
	       size, tcp_conn->in.offset, tcp_conn->data_copied);
	BUG_ON(size <= 0);

	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			   (char*)conn->data + tcp_conn->data_copied, size);
	BUG_ON(rc);

	tcp_conn->in.offset += size;
	tcp_conn->in.copy -= size;
	tcp_conn->in.copied += size;
	tcp_conn->data_copied += size;

	if (buf_size != tcp_conn->data_copied)
		return -EAGAIN;

	return 0;
}

/*
 * Feed a partial scatterlist element into the rx data digest; a local
 * copy of the sg entry is used so the caller's entry is not modified.
 */
static inline void
partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn,
			 struct scatterlist *sg, int offset, int length)
{
	struct scatterlist temp;

	memcpy(&temp, sg, sizeof(struct scatterlist));
	temp.offset = offset;
	temp.length = length;
	crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length);
}

/* Feed a flat kernel buffer into the rx data digest. */
static void
iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
{
	struct scatterlist tmp;

	sg_init_one(&tmp, buf, len);
	crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len);
}

/*
 * Copy the current Data-In payload into the scsi_cmnd's buffer (flat or
 * scatter-gather), updating the rx digest as segments complete.
 * Returns 0 when the PDU's data has been consumed, or -EAGAIN when more
 * skb data is needed.
 */
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct scsi_cmnd *sc = ctask->sc;
	struct scatterlist *sg;
	int i, offset, rc = 0;

	BUG_ON((void*)ctask != sc->SCp.ptr);

	/*
	 * copying Data-In into the Scsi_Cmnd
	 */
	if (!sc->use_sg) {
		i = ctask->data_count;
		rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
				      sc->request_bufflen,
				      tcp_ctask->data_offset);
		if (rc == -EAGAIN)
			return rc;
		if (conn->datadgst_en)
			iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
						 i);
		rc = 0;
		goto done;
	}

	/* translate the PDU's data offset into (sg index, page offset) */
	offset = tcp_ctask->data_offset;
	sg = sc->request_buffer;

	if (tcp_ctask->data_offset)
		for (i = 0; i < tcp_ctask->sg_count; i++)
			offset -= sg[i].length;
	/* we've passed through partial sg*/
	if (offset < 0)
		offset = 0;

	for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
		char *dest;

		dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
		rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
				      sg[i].length, offset);
		kunmap_atomic(dest, KM_SOFTIRQ0);
		if (rc == -EAGAIN)
			/* continue with the next SKB/PDU */
			return rc;
		if (!rc) {
			/* sg element completely filled */
			if (conn->datadgst_en) {
				if (!offset)
					crypto_hash_update(
							&tcp_conn->data_rx_hash,
							&sg[i], sg[i].length);
				else
					partial_sg_digest_update(tcp_conn,
							&sg[i],
							sg[i].offset + offset,
							sg[i].length - offset);
			}
			offset = 0;
			tcp_ctask->sg_count++;
		}

		if (!ctask->data_count) {
			if (rc && conn->datadgst_en)
				/*
				 * data-in is complete, but buffer not...
				 */
				partial_sg_digest_update(tcp_conn, &sg[i],
						sg[i].offset, sg[i].length-rc);
			rc = 0;
			break;
		}

		if (!tcp_conn->in.copy)
			return -EAGAIN;
	}
	BUG_ON(ctask->data_count);

done:
	/* check for non-exceptional status */
	if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
			   (long)sc, sc->result, ctask->itt,
			   tcp_conn->in.hdr->flags);
		spin_lock(&conn->session->lock);
		iscsi_tcp_cleanup_ctask(conn, ctask);
		__iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		spin_unlock(&conn->session->lock);
	}

	return rc;
}

/*
 * Dispatch the data segment of the PDU currently being received by
 * opcode.  Returns 0, -EAGAIN (need more skb data), or an error.
 */
static int
iscsi_data_recv(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc = 0, opcode;

	opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
	switch (opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		rc = iscsi_scsi_data_in(conn);
		break;
	case ISCSI_OP_SCSI_CMD_RSP:
		spin_lock(&conn->session->lock);
		iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
		spin_unlock(&conn->session->lock);
		/* fall through */
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_ASYNC_EVENT:
	case ISCSI_OP_REJECT:
		/*
		 * Collect data segment to the connection's data
		 * placeholder
		 */
		if (iscsi_tcp_copy(conn)) {
			rc = -EAGAIN;
			goto exit;
		}

		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
					tcp_conn->in.datalen);
		if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
			iscsi_recv_digest_update(tcp_conn, conn->data,
						 tcp_conn->in.datalen);
		break;
	default:
		BUG_ON(1);
	}
exit:
	return rc;
}

/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff
*skb, unsigned int offset, size_t len)
{
	int rc;
	struct iscsi_conn *conn = rd_desc->arg.data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int processed;
	char pad[ISCSI_PAD_LEN];
	struct scatterlist sg;

	/*
	 * Save current SKB and its offset in the corresponding
	 * connection context.
	 */
	tcp_conn->in.copy = skb->len - offset;
	tcp_conn->in.offset = offset;
	tcp_conn->in.skb = skb;
	tcp_conn->in.len = tcp_conn->in.copy;
	BUG_ON(tcp_conn->in.copy <= 0);
	debug_tcp("in %d bytes\n", tcp_conn->in.copy);

more:
	tcp_conn->in.copied = 0;
	rc = 0;

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
	    tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
		rc = iscsi_hdr_extract(tcp_conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto nomore;
			else {
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
				return 0;
			}
		}

		/*
		 * Verify and process incoming PDU header.
		 */
		rc = iscsi_tcp_hdr_recv(conn);
		if (!rc && tcp_conn->in.datalen) {
			if (conn->datadgst_en) {
				/* start digesting this PDU's data segment */
				crypto_hash_init(&tcp_conn->data_rx_hash);
			}
			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
		} else if (rc) {
			iscsi_conn_failure(conn, rc);
			return 0;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
		uint32_t recv_digest;

		/* NOTE(review): assumes all 4 digest bytes are present in
		 * this skb -- confirm in.copy >= 4 cannot be violated here */
		debug_tcp("extra data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
				&recv_digest, 4);
		tcp_conn->in.offset += 4;
		tcp_conn->in.copy -= 4;
		if (recv_digest != tcp_conn->in.datadgst) {
			debug_tcp("iscsi_tcp: data digest error!"
				  "0x%x != 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
			return 0;
		} else {
			debug_tcp("iscsi_tcp: data digest match!"
				  "0x%x == 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
	    tcp_conn->in.copy) {

		debug_tcp("data_recv offset %d copy %d\n",
		       tcp_conn->in.offset, tcp_conn->in.copy);

		rc = iscsi_data_recv(conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto again;
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
		/* skip the pad bytes that align the data segment */
		tcp_conn->in.copy -= tcp_conn->in.padding;
		tcp_conn->in.offset += tcp_conn->in.padding;
		if (conn->datadgst_en) {
			if (tcp_conn->in.padding) {
				/* pad bytes are covered by the data digest */
				debug_tcp("padding -> %d\n",
					  tcp_conn->in.padding);
				memset(pad, 0, tcp_conn->in.padding);
				sg_init_one(&sg, pad, tcp_conn->in.padding);
				crypto_hash_update(&tcp_conn->data_rx_hash,
						   &sg, sg.length);
			}
			crypto_hash_final(&tcp_conn->data_rx_hash,
					  (u8 *)&tcp_conn->in.datadgst);
			debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
		} else
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	debug_tcp("f, processed %d from out of %d padding %d\n",
		  tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
	BUG_ON(tcp_conn->in.offset - offset > len);

	if (tcp_conn->in.offset - offset != len) {
		debug_tcp("continue to process %d bytes\n",
			  (int)len - (tcp_conn->in.offset - offset));
		goto more;
	}

nomore:
	processed = tcp_conn->in.offset - offset;
	BUG_ON(processed == 0);
	return processed;

again:
	processed = tcp_conn->in.offset - offset;
	debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
		  processed, (int)len, (int)rd_desc->count);
	BUG_ON(processed == 0);
	BUG_ON(processed > len);

	conn->rxdata_octets += processed;
	return processed;
}

/* sk_data_ready callback: drain the socket through iscsi_tcp_data_recv */
static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn = sk->sk_user_data;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_data_recv
	 * handled pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;
	tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

	read_unlock(&sk->sk_callback_lock);
}

/*
 * sk_state_change callback: fail the connection when the TCP socket
 * closes with no receive data pending, then chain to the saved
 * original callback.
 */
static void
iscsi_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;
	struct iscsi_session *session;
	void (*old_state_change)(struct sock *);

	read_lock(&sk->sk_callback_lock);

	conn = (struct iscsi_conn*)sk->sk_user_data;
	session = conn->session;

	if ((sk->sk_state == TCP_CLOSE_WAIT ||
	     sk->sk_state == TCP_CLOSE) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
		debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}

	tcp_conn = conn->dd_data;
	old_state_change = tcp_conn->old_state_change;

	read_unlock(&sk->sk_callback_lock);

	/* chain to the callback saved in iscsi_conn_set_callbacks() */
	old_state_change(sk);
}

/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	tcp_conn->old_write_space(sk);
	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
	/* kick the xmit worker now that the socket can take more data */
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}

static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk = tcp_conn->sock->sk;

	/* assign new callbacks, saving the originals for restore */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_conn->old_data_ready = sk->sk_data_ready;
	tcp_conn->old_state_change = sk->sk_state_change;
	tcp_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_tcp_data_ready;
	sk->sk_state_change = iscsi_tcp_state_change;
	sk->sk_write_space = iscsi_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void
iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
	struct sock *sk = tcp_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = tcp_conn->old_data_ready;
	sk->sk_state_change = tcp_conn->old_state_change;
	sk->sk_write_space = tcp_conn->old_write_space;
	sk->sk_no_check = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}

/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 *
 * Transmits up to @size bytes of @buf; on success updates the byte
 * counters and returns bytes sent.  -EAGAIN from the socket is mapped
 * to -ENOBUFS; any other error fails the connection.
 */
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sk = tcp_conn->sock;
	int offset = buf->sg.offset + buf->sent, res;

	/*
	 * if we got use_sg=0 or are sending something we kmallocd
	 * then we did not have to do kmap (kmap returns page_address)
	 *
	 * if we got use_sg > 0, but had to drop down, we do not
	 * set clustering so this should only happen for that
	 * slab case.
	 */
	if (buf->use_sendmsg)
		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
	else
		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);

	if (res >= 0) {
		conn->txdata_octets += res;
		buf->sent += res;
		return res;
	}

	tcp_conn->sendpage_failures_cnt++;
	if (res == -EAGAIN)
		res = -ENOBUFS;
	else
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return res;
}

/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int res, size;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
	/* more header or payload to follow: hint the stack to coalesce */
	if (buf->sent + size != buf->sg.length || datalen)
		flags |= MSG_MORE;

	res = iscsi_send(conn, buf, size, flags);
	debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
	if (res >= 0) {
		if (size != res)
			return -EAGAIN;
		return 0;
	}

	return res;
}

/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
1148 * @conn: iscsi connection 1149 * @buf: buffer to write from 1150 * @count: remaining data 1151 * @sent: number of bytes sent 1152 * 1153 * Notes: 1154 * (Tx, Fast Path) 1155 **/ 1156 static inline int 1157 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf, 1158 int *count, int *sent) 1159 { 1160 int flags = 0; /* MSG_DONTWAIT; */ 1161 int res, size; 1162 1163 size = buf->sg.length - buf->sent; 1164 BUG_ON(buf->sent + size > buf->sg.length); 1165 if (size > *count) 1166 size = *count; 1167 if (buf->sent + size != buf->sg.length || *count != size) 1168 flags |= MSG_MORE; 1169 1170 res = iscsi_send(conn, buf, size, flags); 1171 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n", 1172 size, buf->sent, *count, *sent, res); 1173 if (res >= 0) { 1174 *count -= res; 1175 *sent += res; 1176 if (size != res) 1177 return -EAGAIN; 1178 return 0; 1179 } 1180 1181 return res; 1182 } 1183 1184 static inline void 1185 iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, 1186 struct iscsi_cmd_task *ctask) 1187 { 1188 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1189 1190 crypto_hash_init(&tcp_conn->data_tx_hash); 1191 tcp_ctask->digest_count = 4; 1192 } 1193 1194 static int 1195 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1196 struct iscsi_buf *buf, uint32_t *digest, int final) 1197 { 1198 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1199 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1200 int rc = 0; 1201 int sent = 0; 1202 1203 if (final) 1204 crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest); 1205 1206 iscsi_buf_init_iov(buf, (char*)digest, 4); 1207 rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); 1208 if (rc) { 1209 tcp_ctask->datadigest = *digest; 1210 tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST; 1211 } else 1212 tcp_ctask->digest_count = 4; 1213 return rc; 1214 } 1215 1216 /** 1217 * iscsi_solicit_data_cont - initialize next Data-Out 1218 * @conn: iscsi connection 
1219 * @ctask: scsi command task 1220 * @r2t: R2T info 1221 * @left: bytes left to transfer 1222 * 1223 * Notes: 1224 * Initialize next Data-Out within this R2T sequence and continue 1225 * to process next Scatter-Gather element(if any) of this SCSI command. 1226 * 1227 * Called under connection lock. 1228 **/ 1229 static void 1230 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1231 struct iscsi_r2t_info *r2t, int left) 1232 { 1233 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1234 struct iscsi_data *hdr; 1235 struct scsi_cmnd *sc = ctask->sc; 1236 int new_offset; 1237 1238 hdr = &r2t->dtask.hdr; 1239 memset(hdr, 0, sizeof(struct iscsi_data)); 1240 hdr->ttt = r2t->ttt; 1241 hdr->datasn = cpu_to_be32(r2t->solicit_datasn); 1242 r2t->solicit_datasn++; 1243 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; 1244 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun)); 1245 hdr->itt = ctask->hdr->itt; 1246 hdr->exp_statsn = r2t->exp_statsn; 1247 new_offset = r2t->data_offset + r2t->sent; 1248 hdr->offset = cpu_to_be32(new_offset); 1249 if (left > conn->max_xmit_dlength) { 1250 hton24(hdr->dlength, conn->max_xmit_dlength); 1251 r2t->data_count = conn->max_xmit_dlength; 1252 } else { 1253 hton24(hdr->dlength, left); 1254 r2t->data_count = left; 1255 hdr->flags = ISCSI_FLAG_CMD_FINAL; 1256 } 1257 conn->dataout_pdus_cnt++; 1258 1259 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 1260 sizeof(struct iscsi_hdr)); 1261 1262 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) { 1263 BUG_ON(tcp_ctask->bad_sg == r2t->sg); 1264 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); 1265 r2t->sg += 1; 1266 } else 1267 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1268 (char*)sc->request_buffer + new_offset, 1269 r2t->data_count); 1270 } 1271 1272 static void 1273 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1274 { 1275 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1276 struct iscsi_data_task *dtask; 1277 1278 dtask = tcp_ctask->dtask = 
&tcp_ctask->unsol_dtask; 1279 iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr, 1280 tcp_ctask->r2t_data_count); 1281 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr, 1282 sizeof(struct iscsi_hdr)); 1283 } 1284 1285 /** 1286 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands 1287 * @conn: iscsi connection 1288 * @ctask: scsi command task 1289 * @sc: scsi command 1290 **/ 1291 static void 1292 iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) 1293 { 1294 struct scsi_cmnd *sc = ctask->sc; 1295 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1296 1297 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); 1298 1299 tcp_ctask->sent = 0; 1300 tcp_ctask->sg_count = 0; 1301 1302 if (sc->sc_data_direction == DMA_TO_DEVICE) { 1303 tcp_ctask->xmstate = XMSTATE_W_HDR; 1304 tcp_ctask->exp_r2tsn = 0; 1305 BUG_ON(ctask->total_length == 0); 1306 1307 if (sc->use_sg) { 1308 struct scatterlist *sg = sc->request_buffer; 1309 1310 iscsi_buf_init_sg(&tcp_ctask->sendbuf, 1311 &sg[tcp_ctask->sg_count++]); 1312 tcp_ctask->sg = sg; 1313 tcp_ctask->bad_sg = sg + sc->use_sg; 1314 } else 1315 iscsi_buf_init_iov(&tcp_ctask->sendbuf, 1316 sc->request_buffer, 1317 sc->request_bufflen); 1318 1319 if (ctask->imm_count) 1320 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1321 1322 tcp_ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1); 1323 if (tcp_ctask->pad_count) { 1324 tcp_ctask->pad_count = ISCSI_PAD_LEN - 1325 tcp_ctask->pad_count; 1326 debug_scsi("write padding %d bytes\n", 1327 tcp_ctask->pad_count); 1328 tcp_ctask->xmstate |= XMSTATE_W_PAD; 1329 } 1330 1331 if (ctask->unsol_count) 1332 tcp_ctask->xmstate |= XMSTATE_UNS_HDR | 1333 XMSTATE_UNS_INIT; 1334 tcp_ctask->r2t_data_count = ctask->total_length - 1335 ctask->imm_count - 1336 ctask->unsol_count; 1337 1338 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d " 1339 "r2t_data %d]\n", 1340 ctask->itt, ctask->total_length, ctask->imm_count, 1341 ctask->unsol_count, tcp_ctask->r2t_data_count); 1342 } else 1343 
tcp_ctask->xmstate = XMSTATE_R_HDR; 1344 1345 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr, 1346 sizeof(struct iscsi_hdr)); 1347 } 1348 1349 /** 1350 * iscsi_tcp_mtask_xmit - xmit management(immediate) task 1351 * @conn: iscsi connection 1352 * @mtask: task management task 1353 * 1354 * Notes: 1355 * The function can return -EAGAIN in which case caller must 1356 * call it again later, or recover. '0' return code means successful 1357 * xmit. 1358 * 1359 * Management xmit state machine consists of two states: 1360 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress 1361 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress 1362 **/ 1363 static int 1364 iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) 1365 { 1366 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1367 int rc; 1368 1369 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", 1370 conn->id, tcp_mtask->xmstate, mtask->itt); 1371 1372 if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { 1373 tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; 1374 if (mtask->data_count) 1375 tcp_mtask->xmstate |= XMSTATE_IMM_DATA; 1376 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && 1377 conn->stop_stage != STOP_CONN_RECOVER && 1378 conn->hdrdgst_en) 1379 iscsi_hdr_digest(conn, &tcp_mtask->headbuf, 1380 (u8*)tcp_mtask->hdrext); 1381 rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, 1382 mtask->data_count); 1383 if (rc) { 1384 tcp_mtask->xmstate |= XMSTATE_IMM_HDR; 1385 if (mtask->data_count) 1386 tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA; 1387 return rc; 1388 } 1389 } 1390 1391 if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { 1392 BUG_ON(!mtask->data_count); 1393 tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA; 1394 /* FIXME: implement. 1395 * Virtual buffer could be spreaded across multiple pages... 
		 */
		do {
			/* NOTE(review): this rc shadows the outer rc;
			 * harmless but easy to misread */
			int rc;

			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
					&mtask->data_count, &tcp_mtask->sent);
			if (rc) {
				tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
				return rc;
			}
		} while (mtask->data_count);
	}

	BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
	/* a RESERVED_TAG mtask gets no response PDU, so recycle it now */
	if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
		struct iscsi_session *session = conn->session;

		spin_lock_bh(&session->lock);
		list_del(&conn->mtask->running);
		__kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
			    sizeof(void*));
		spin_unlock_bh(&session->lock);
	}
	return 0;
}

/*
 * Transmit the header of a SCSI READ.  On success the task goes idle
 * and waits for Data-In; on failure XMSTATE_R_HDR is restored for retry.
 */
static inline int
handle_xmstate_r_hdr(struct iscsi_conn *conn,
		     struct iscsi_tcp_cmd_task *tcp_ctask)
{
	int rc;

	tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
	if (conn->hdrdgst_en)
		iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
				 (u8*)tcp_ctask->hdrext);
	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
	if (!rc) {
		BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
		return 0; /* wait for Data-In */
	}
	tcp_ctask->xmstate |= XMSTATE_R_HDR;
	return rc;
}

/*
 * Transmit the header of a SCSI WRITE (immediate data may follow).
 * On failure XMSTATE_W_HDR is restored for retry.
 */
static inline int
handle_xmstate_w_hdr(struct iscsi_conn *conn,
		     struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
	if (conn->hdrdgst_en)
		iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
				 (u8*)tcp_ctask->hdrext);
	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
	if (rc)
		tcp_ctask->xmstate |= XMSTATE_W_HDR;
	return rc;
}

/*
 * Resend a data digest whose earlier transmission came up short
 * (stashed in tcp_ctask->datadigest by iscsi_digest_final_send()).
 */
static inline int
handle_xmstate_data_digest(struct iscsi_conn *conn,
			   struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	tcp_ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
	debug_tcp("resent data digest 0x%x\n", tcp_ctask->datadigest);
	rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf,
				    &tcp_ctask->datadigest, 0);
	if (rc) {
		tcp_ctask->xmstate |= XMSTATE_DATA_DIGEST;
		debug_tcp("resent data digest 0x%x fail!\n",
			  tcp_ctask->datadigest);
	}

	return rc;
}

/*
 * Push the command's immediate data, updating the running data digest
 * per sg element, then send the digest unless padding still follows.
 */
static inline int
handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc;

	BUG_ON(!ctask->imm_count);
	tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;

	if (conn->datadgst_en) {
		iscsi_data_digest_init(tcp_conn, ctask);
		tcp_ctask->immdigest = 0;
	}

	for (;;) {
		rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf,
				   &ctask->imm_count, &tcp_ctask->sent);
		if (rc) {
			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
			if (conn->datadgst_en) {
				crypto_hash_final(&tcp_conn->data_tx_hash,
						(u8 *)&tcp_ctask->immdigest);
				/* NOTE(review): prints datadigest although
				 * immdigest was just finalized — debug
				 * output only, but looks like a typo */
				debug_tcp("tx imm sendpage fail 0x%x\n",
					  tcp_ctask->datadigest);
			}
			return rc;
		}
		if (conn->datadgst_en)
			crypto_hash_update(&tcp_conn->data_tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);

		if (!ctask->imm_count)
			break;
		iscsi_buf_init_sg(&tcp_ctask->sendbuf,
				  &tcp_ctask->sg[tcp_ctask->sg_count++]);
	}

	/* if write padding is pending, the digest goes out after the pad */
	if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) {
		rc = iscsi_digest_final_send(conn, ctask, &tcp_ctask->immbuf,
					    &tcp_ctask->immdigest, 1);
		if (rc) {
			debug_tcp("sending imm digest 0x%x fail!\n",
				  tcp_ctask->immdigest);
			return rc;
		}
		debug_tcp("sending imm digest 0x%x\n", tcp_ctask->immdigest);
	}

	return 0;
}

/*
 * Send the header of an unsolicited Data-Out PDU, preparing it first
 * when XMSTATE_UNS_INIT is set.  Success hands off to the UNS_DATA
 * phase (bit set on entry, cleared again on header send failure).
 */
static inline int
handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_task *dtask;
	int rc;

	tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
	if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
		iscsi_unsolicit_data_init(conn, ctask);
		dtask = tcp_ctask->dtask;
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					(u8*)dtask->hdrext);
		tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
	}

	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
	if (rc) {
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
		return rc;
	}

	debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
		   ctask->itt, ctask->unsol_count, tcp_ctask->sent);
	return 0;
}

/*
 * Push the payload of the current unsolicited Data-Out.  Returns 1 when
 * another unsolicited Data-Out must follow, 0 when done, or an error
 * (state saved for retry).
 */
static inline int
handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_task *dtask = tcp_ctask->dtask;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc;

	BUG_ON(!ctask->data_count);
	tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;

	if (conn->datadgst_en) {
		iscsi_data_digest_init(tcp_conn, ctask);
		dtask->digest = 0;
	}

	for (;;) {
		int start = tcp_ctask->sent;

		rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf,
				   &ctask->data_count, &tcp_ctask->sent);
		if (rc) {
			ctask->unsol_count -= tcp_ctask->sent - start;
			tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
			/* will continue with this ctask later.. */
			if (conn->datadgst_en) {
				crypto_hash_final(&tcp_conn->data_tx_hash,
						(u8 *)&dtask->digest);
				debug_tcp("tx uns data fail 0x%x\n",
					  dtask->digest);
			}
			return rc;
		}

		BUG_ON(tcp_ctask->sent > ctask->total_length);
		ctask->unsol_count -= tcp_ctask->sent - start;

		/*
		 * XXX:we may run here with un-initial sendbuf.
		 * so pass it
		 */
		if (conn->datadgst_en && tcp_ctask->sent - start > 0)
			crypto_hash_update(&tcp_conn->data_tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);

		if (!ctask->data_count)
			break;
		iscsi_buf_init_sg(&tcp_ctask->sendbuf,
				  &tcp_ctask->sg[tcp_ctask->sg_count++]);
	}
	BUG_ON(ctask->unsol_count < 0);

	/*
	 * Done with the Data-Out. Next, check if we need
	 * to send another unsolicited Data-Out.
	 */
	if (ctask->unsol_count) {
		if (conn->datadgst_en) {
			rc = iscsi_digest_final_send(conn, ctask,
						&dtask->digestbuf,
						&dtask->digest, 1);
			if (rc) {
				debug_tcp("send uns digest 0x%x fail\n",
					  dtask->digest);
				return rc;
			}
			debug_tcp("sending uns digest 0x%x, more uns\n",
				  dtask->digest);
		}
		tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
		return 1;	/* caller loops back to send the next header */
	}

	/* digest deferred when padding still has to go out */
	if (conn->datadgst_en && !(tcp_ctask->xmstate & XMSTATE_W_PAD)) {
		rc = iscsi_digest_final_send(conn, ctask,
					    &dtask->digestbuf,
					    &dtask->digest, 1);
		if (rc) {
			debug_tcp("send last uns digest 0x%x fail\n",
				  dtask->digest);
			return rc;
		}
		debug_tcp("sending uns digest 0x%x\n",dtask->digest);
	}

	return 0;
}

/*
 * Push the payload of the current solicited (R2T) Data-Out sequence.
 * Returns 1 when another Data-Out header must be sent (same or next
 * R2T), 0 when all queued R2Ts are drained, or an error with state
 * saved for retry.
 */
static inline int
handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_r2t_info *r2t = tcp_ctask->r2t;
	struct iscsi_data_task *dtask = &r2t->dtask;
	int left, rc;

	tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
	tcp_ctask->dtask = dtask;

	if (conn->datadgst_en) {
		iscsi_data_digest_init(tcp_conn, ctask);
		dtask->digest = 0;
	}
solicit_again:
	/*
	 * send Data-Out within this R2T sequence.
	 */
	if (!r2t->data_count)
		goto data_out_done;

	rc = iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent);
	if (rc) {
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
		/* will continue with this ctask later.. */
		if (conn->datadgst_en) {
			crypto_hash_final(&tcp_conn->data_tx_hash,
					  (u8 *)&dtask->digest);
			debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
		}
		return rc;
	}

	BUG_ON(r2t->data_count < 0);
	if (conn->datadgst_en)
		crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg,
				   r2t->sendbuf.sg.length);

	if (r2t->data_count) {
		/* more payload in this Data-Out: move to the next sg entry */
		BUG_ON(ctask->sc->use_sg == 0);
		if (!iscsi_buf_left(&r2t->sendbuf)) {
			BUG_ON(tcp_ctask->bad_sg == r2t->sg);
			iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
			r2t->sg += 1;
		}
		goto solicit_again;
	}

data_out_done:
	/*
	 * Done with this Data-Out. Next, check if we have
	 * to send another Data-Out for this R2T.
	 */
	BUG_ON(r2t->data_length - r2t->sent < 0);
	left = r2t->data_length - r2t->sent;
	if (left) {
		if (conn->datadgst_en) {
			rc = iscsi_digest_final_send(conn, ctask,
						&dtask->digestbuf,
						&dtask->digest, 1);
			if (rc) {
				debug_tcp("send r2t data digest 0x%x"
					  "fail\n", dtask->digest);
				return rc;
			}
			debug_tcp("r2t data send digest 0x%x\n",
				  dtask->digest);
		}
		iscsi_solicit_data_cont(conn, ctask, r2t, left);
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		return 1;
	}

	/*
	 * Done with this R2T. Check if there are more
	 * outstanding R2Ts ready to be processed.
	 */
	BUG_ON(tcp_ctask->r2t_data_count - r2t->data_length < 0);
	if (conn->datadgst_en) {
		rc = iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
					    &dtask->digest, 1);
		if (rc) {
			debug_tcp("send last r2t data digest 0x%x"
				  "fail\n", dtask->digest);
			return rc;
		}
		debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
	}

	/* recycle this R2T and pick up the next one, if queued */
	tcp_ctask->r2t_data_count -= r2t->data_length;
	tcp_ctask->r2t = NULL;
	spin_lock_bh(&session->lock);
	__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
	spin_unlock_bh(&session->lock);
	if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
		tcp_ctask->r2t = r2t;
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		return 1;
	}

	return 0;
}

/*
 * Send the write-pad bytes (and then the pending data digest): the pad
 * is folded into whichever digest stream — immediate or Data-Out —
 * produced the payload.
 */
static inline int
handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_data_task *dtask = tcp_ctask->dtask;
	int sent = 0, rc;

	tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
	iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
			   tcp_ctask->pad_count);
	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
			    &sent);
	if (rc) {
		tcp_ctask->xmstate |= XMSTATE_W_PAD;
		return rc;
	}

	if (conn->datadgst_en) {
		crypto_hash_update(&tcp_conn->data_tx_hash,
				   &tcp_ctask->sendbuf.sg,
				   tcp_ctask->sendbuf.sg.length);
		/* imm data?
		 */
		if (!dtask) {
			/* no dtask: the pad belongs to immediate data */
			rc = iscsi_digest_final_send(conn, ctask,
						     &tcp_ctask->immbuf,
						     &tcp_ctask->immdigest, 1);
			if (rc) {
				debug_tcp("send padding digest 0x%x"
					  "fail!\n", tcp_ctask->immdigest);
				return rc;
			}
			/* NOTE(review): prints datadigest though immdigest
			 * was sent — debug output only */
			debug_tcp("done with padding, digest 0x%x\n",
				  tcp_ctask->datadigest);
		} else {
			rc = iscsi_digest_final_send(conn, ctask,
						     &dtask->digestbuf,
						     &dtask->digest, 1);
			if (rc) {
				debug_tcp("send padding digest 0x%x"
					  "fail\n", dtask->digest);
				return rc;
			}
			debug_tcp("done with padding, digest 0x%x\n",
				  dtask->digest);
		}
	}

	return 0;
}

/*
 * Top-level xmit state machine for a SCSI command task.  Walks the
 * XMSTATE_* phases in order (read/write header, digest recovery,
 * immediate data, unsolicited data, solicited R2T data, pad); each
 * handler clears its bit on success and restores it on -EAGAIN so
 * the caller can re-enter here later.
 */
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
		   conn->id, tcp_ctask->xmstate, ctask->itt);

	/*
	 * serialize with TMF AbortTask
	 */
	if (ctask->mtask)
		return rc;

	if (tcp_ctask->xmstate & XMSTATE_R_HDR)
		return handle_xmstate_r_hdr(conn, tcp_ctask);

	if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
		rc = handle_xmstate_w_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	/* XXX: for data digest xmit recover */
	if (tcp_ctask->xmstate & XMSTATE_DATA_DIGEST) {
		rc = handle_xmstate_data_digest(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
		rc = handle_xmstate_imm_data(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
		BUG_ON(!ctask->unsol_count);
		tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
unsolicit_head_again:
		rc = handle_xmstate_uns_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
		rc = handle_xmstate_uns_data(conn, ctask);
		if (rc == 1)
			/* rc == 1: another unsolicited Data-Out follows */
			goto unsolicit_head_again;
		else if (rc)
			return rc;
		goto done;
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
		struct iscsi_r2t_info *r2t;

		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
		if (!tcp_ctask->r2t)
			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
				    sizeof(void*));
solicit_head_again:
		r2t = tcp_ctask->r2t;
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &r2t->headbuf,
					(u8*)r2t->dtask.hdrext);
		rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
		if (rc) {
			tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
			tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
			return rc;
		}

		debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
			   r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
			   r2t->sent);
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
		rc = handle_xmstate_sol_data(conn, ctask);
		if (rc == 1)
			/* rc == 1: another solicited Data-Out header needed */
			goto solicit_head_again;
		if (rc)
			return rc;
	}

done:
	/*
	 * Last thing to check is whether we need to send write
	 * padding. Note that we check for xmstate equality, not just the bit.
	 */
	if (tcp_ctask->xmstate == XMSTATE_W_PAD)
		rc = handle_xmstate_w_pad(conn, ctask);

	return rc;
}

/*
 * Allocate the class connection plus our private iscsi_tcp_conn and
 * put the receive state machine into its initial state.
 */
static struct iscsi_cls_conn *
iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;

	cls_conn = iscsi_conn_setup(cls_session, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	/*
	 * due to strange issues with iser these are not set
	 * in iscsi_conn_setup
	 */
	conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;

	tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
	if (!tcp_conn)
		goto tcp_conn_alloc_fail;

	conn->dd_data = tcp_conn;
	tcp_conn->iscsi_conn = conn;
	tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	/* initial operational parameters */
	tcp_conn->hdr_size = sizeof(struct iscsi_hdr);

	return cls_conn;

tcp_conn_alloc_fail:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

/*
 * Detach and release the TCP socket: restore the original socket
 * callbacks (holding an sk reference across the restore) and drop
 * the socket reference taken at bind time.
 */
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	if (!tcp_conn->sock)
		return;

	sock_hold(tcp_conn->sock->sk);
	iscsi_conn_restore_callbacks(tcp_conn);
	sock_put(tcp_conn->sock->sk);

	sock_release(tcp_conn->sock);
	tcp_conn->sock = NULL;
	conn->recv_lock = NULL;
}

/* Tear down the connection and free any digest transforms we allocated. */
static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int digest = 0;

	/* sample the digest flags before teardown clobbers conn state */
	if (conn->hdrdgst_en || conn->datadgst_en)
		digest = 1;

	iscsi_tcp_release_conn(conn);
	iscsi_conn_teardown(cls_conn);

	/* now free tcp_conn */
	if (digest) {
		if (tcp_conn->tx_tfm)
			crypto_free_hash(tcp_conn->tx_tfm);
		if (tcp_conn->rx_tfm)
			crypto_free_hash(tcp_conn->rx_tfm);
		if (tcp_conn->data_tx_hash.tfm)
			crypto_free_hash(tcp_conn->data_tx_hash.tfm);
		if (tcp_conn->data_rx_hash.tfm)
			crypto_free_hash(tcp_conn->data_rx_hash.tfm);
	}

	kfree(tcp_conn);
}

/* Stop the connection and release its socket (conn object survives). */
static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	iscsi_conn_stop(cls_conn, flag);
	iscsi_tcp_release_conn(conn);
}

/*
 * Bind a userspace-established TCP socket (passed as an fd in
 * @transport_eph) to this iSCSI connection and intercept its callbacks.
 */
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		/* NOTE(review): -EEXIST for a failed lookup is odd;
		 * err from sockfd_lookup might be more appropriate */
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		/* NOTE(review): the reference obtained via sockfd_lookup
		 * is not dropped on this path — possible socket leak;
		 * confirm against sockfd_put() semantics */
		return err;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	/* FIXME: disable Nagle's algorithm */

	/*
	 * Intercept TCP callbacks for sendfile like receive
	 * processing.
	 */
	conn->recv_lock = &sk->sk_callback_lock;
	iscsi_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;

	return 0;
}

/* called with host lock */
static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
		    char *data, uint32_t data_size)
{
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

	/* queue header (and optional data) for iscsi_tcp_mtask_xmit() */
	iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
			   sizeof(struct iscsi_hdr));
	tcp_mtask->xmstate = XMSTATE_IMM_HDR;
	tcp_mtask->sent = 0;

	if (mtask->data_count)
		iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
				   mtask->data_count);
}

/*
 * Allocate the per-command R2T pools and xmit queues for a session;
 * on failure everything allocated so far is unwound.
 */
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		/*
		 * pre-allocated x4 as much r2ts to handle race when
		 * target acks DataOut faster than we data_xmit() queues
		 * could replenish r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
				    (void***)&tcp_ctask->r2ts,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		tcp_ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		/* NOTE(review): only -ENOMEM is checked; confirm
		 * kfifo_alloc cannot return other ERR_PTR values here */
		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_ctask->r2tpool,
					(void**)tcp_ctask->r2ts);
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	/* unwind the tasks initialized before the failing one */
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool,
				(void**)tcp_ctask->r2ts);
	}
	return -ENOMEM;
}

/* Free all per-command R2T pools and queues (inverse of _alloc). */
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool,
				(void**)tcp_ctask->r2ts);
	}
}

/*
 * Set a connection/session parameter.  The digest cases additionally
 * allocate or free the crc32c transforms; MAX_R2T reallocates the R2T
 * pools (value is rounded up to a power of two).
 */
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
		if (conn->hdrdgst_en) {
			/* header grows by the 4-byte HeaderDigest */
			tcp_conn->hdr_size += sizeof(__u32);
			if (!tcp_conn->tx_tfm)
				tcp_conn->tx_tfm =
					crypto_alloc_hash("crc32c", 0,
							  CRYPTO_ALG_ASYNC);
			/* NOTE(review): on failure tx_tfm/rx_tfm are left
			 * holding an ERR_PTR, which later NULL checks
			 * (e.g. in conn_destroy) won't catch — confirm */
			if (IS_ERR(tcp_conn->tx_tfm))
				return PTR_ERR(tcp_conn->tx_tfm);
			if (!tcp_conn->rx_tfm)
				tcp_conn->rx_tfm =
					crypto_alloc_hash("crc32c", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(tcp_conn->rx_tfm)) {
				crypto_free_hash(tcp_conn->tx_tfm);
				return PTR_ERR(tcp_conn->rx_tfm);
			}
		} else {
			if (tcp_conn->tx_tfm)
				crypto_free_hash(tcp_conn->tx_tfm);
			if (tcp_conn->rx_tfm)
				crypto_free_hash(tcp_conn->rx_tfm);
		}
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (conn->datadgst_en) {
			if (!tcp_conn->data_tx_hash.tfm)
				tcp_conn->data_tx_hash.tfm =
					crypto_alloc_hash("crc32c", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(tcp_conn->data_tx_hash.tfm))
				return PTR_ERR(tcp_conn->data_tx_hash.tfm);
			if (!tcp_conn->data_rx_hash.tfm)
				tcp_conn->data_rx_hash.tfm =
					crypto_alloc_hash("crc32c", 0,
							  CRYPTO_ALG_ASYNC);
			if (IS_ERR(tcp_conn->data_rx_hash.tfm)) {
				crypto_free_hash(tcp_conn->data_tx_hash.tfm);
				return PTR_ERR(tcp_conn->data_rx_hash.tfm);
			}
		} else {
			if (tcp_conn->data_tx_hash.tfm)
				crypto_free_hash(tcp_conn->data_tx_hash.tfm);
			if (tcp_conn->data_rx_hash.tfm)
				crypto_free_hash(tcp_conn->data_rx_hash.tfm);
		}
		/* with data digests we must not send pages zero-copy:
		 * the page could change after the digest is computed */
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

/*
 * Report connection parameters; the socket-derived ones (port/address)
 * are read under xmitmutex so the socket cannot be released under us.
 */
static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct inet_sock *inet;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		mutex_lock(&conn->xmitmutex);
		if (!tcp_conn->sock) {
			mutex_unlock(&conn->xmitmutex);
			return -EINVAL;
		}

		inet = inet_sk(tcp_conn->sock->sk);
		len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
		mutex_unlock(&conn->xmitmutex);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		mutex_lock(&conn->xmitmutex);
		if (!tcp_conn->sock) {
			mutex_unlock(&conn->xmitmutex);
			return -EINVAL;
		}

		sk = tcp_conn->sock->sk;
		if (sk->sk_family == PF_INET) {
			inet = inet_sk(sk);
			len = sprintf(buf, "%u.%u.%u.%u\n",
				      NIPQUAD(inet->daddr));
		} else {
			np = inet6_sk(sk);
			len = sprintf(buf,
				"%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
				NIP6(np->daddr));
		}
		mutex_unlock(&conn->xmitmutex);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}

/* Fill in the transport-class statistics for this connection. */
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data; 2259 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2260 2261 stats->txdata_octets = conn->txdata_octets; 2262 stats->rxdata_octets = conn->rxdata_octets; 2263 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; 2264 stats->dataout_pdus = conn->dataout_pdus_cnt; 2265 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; 2266 stats->datain_pdus = conn->datain_pdus_cnt; 2267 stats->r2t_pdus = conn->r2t_pdus_cnt; 2268 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; 2269 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; 2270 stats->custom_length = 3; 2271 strcpy(stats->custom[0].desc, "tx_sendpage_failures"); 2272 stats->custom[0].value = tcp_conn->sendpage_failures_cnt; 2273 strcpy(stats->custom[1].desc, "rx_discontiguous_hdr"); 2274 stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt; 2275 strcpy(stats->custom[2].desc, "eh_abort_cnt"); 2276 stats->custom[2].value = conn->eh_abort_cnt; 2277 } 2278 2279 static struct iscsi_cls_session * 2280 iscsi_tcp_session_create(struct iscsi_transport *iscsit, 2281 struct scsi_transport_template *scsit, 2282 uint32_t initial_cmdsn, uint32_t *hostno) 2283 { 2284 struct iscsi_cls_session *cls_session; 2285 struct iscsi_session *session; 2286 uint32_t hn; 2287 int cmd_i; 2288 2289 cls_session = iscsi_session_setup(iscsit, scsit, 2290 sizeof(struct iscsi_tcp_cmd_task), 2291 sizeof(struct iscsi_tcp_mgmt_task), 2292 initial_cmdsn, &hn); 2293 if (!cls_session) 2294 return NULL; 2295 *hostno = hn; 2296 2297 session = class_to_transport_session(cls_session); 2298 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { 2299 struct iscsi_cmd_task *ctask = session->cmds[cmd_i]; 2300 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 2301 2302 ctask->hdr = &tcp_ctask->hdr; 2303 } 2304 2305 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) { 2306 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i]; 2307 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 2308 2309 mtask->hdr = &tcp_mtask->hdr; 2310 } 
2311 2312 if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session))) 2313 goto r2tpool_alloc_fail; 2314 2315 return cls_session; 2316 2317 r2tpool_alloc_fail: 2318 iscsi_session_teardown(cls_session); 2319 return NULL; 2320 } 2321 2322 static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session) 2323 { 2324 iscsi_r2tpool_free(class_to_transport_session(cls_session)); 2325 iscsi_session_teardown(cls_session); 2326 } 2327 2328 static struct scsi_host_template iscsi_sht = { 2329 .name = "iSCSI Initiator over TCP/IP", 2330 .queuecommand = iscsi_queuecommand, 2331 .change_queue_depth = iscsi_change_queue_depth, 2332 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2333 .sg_tablesize = ISCSI_SG_TABLESIZE, 2334 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 2335 .eh_abort_handler = iscsi_eh_abort, 2336 .eh_host_reset_handler = iscsi_eh_host_reset, 2337 .use_clustering = DISABLE_CLUSTERING, 2338 .proc_name = "iscsi_tcp", 2339 .this_id = -1, 2340 }; 2341 2342 static struct iscsi_transport iscsi_tcp_transport = { 2343 .owner = THIS_MODULE, 2344 .name = "tcp", 2345 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST 2346 | CAP_DATADGST, 2347 .param_mask = ISCSI_MAX_RECV_DLENGTH | 2348 ISCSI_MAX_XMIT_DLENGTH | 2349 ISCSI_HDRDGST_EN | 2350 ISCSI_DATADGST_EN | 2351 ISCSI_INITIAL_R2T_EN | 2352 ISCSI_MAX_R2T | 2353 ISCSI_IMM_DATA_EN | 2354 ISCSI_FIRST_BURST | 2355 ISCSI_MAX_BURST | 2356 ISCSI_PDU_INORDER_EN | 2357 ISCSI_DATASEQ_INORDER_EN | 2358 ISCSI_ERL | 2359 ISCSI_CONN_PORT | 2360 ISCSI_CONN_ADDRESS | 2361 ISCSI_EXP_STATSN | 2362 ISCSI_PERSISTENT_PORT | 2363 ISCSI_PERSISTENT_ADDRESS | 2364 ISCSI_TARGET_NAME | 2365 ISCSI_TPGT, 2366 .host_template = &iscsi_sht, 2367 .conndata_size = sizeof(struct iscsi_conn), 2368 .max_conn = 1, 2369 .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN, 2370 /* session management */ 2371 .create_session = iscsi_tcp_session_create, 2372 .destroy_session = iscsi_tcp_session_destroy, 2373 /* connection management */ 2374 .create_conn = iscsi_tcp_conn_create, 
2375 .bind_conn = iscsi_tcp_conn_bind, 2376 .destroy_conn = iscsi_tcp_conn_destroy, 2377 .set_param = iscsi_conn_set_param, 2378 .get_conn_param = iscsi_tcp_conn_get_param, 2379 .get_session_param = iscsi_session_get_param, 2380 .start_conn = iscsi_conn_start, 2381 .stop_conn = iscsi_tcp_conn_stop, 2382 /* IO */ 2383 .send_pdu = iscsi_conn_send_pdu, 2384 .get_stats = iscsi_conn_get_stats, 2385 .init_cmd_task = iscsi_tcp_cmd_init, 2386 .init_mgmt_task = iscsi_tcp_mgmt_init, 2387 .xmit_cmd_task = iscsi_tcp_ctask_xmit, 2388 .xmit_mgmt_task = iscsi_tcp_mtask_xmit, 2389 .cleanup_cmd_task = iscsi_tcp_cleanup_ctask, 2390 /* recovery */ 2391 .session_recovery_timedout = iscsi_session_recovery_timedout, 2392 }; 2393 2394 static int __init 2395 iscsi_tcp_init(void) 2396 { 2397 if (iscsi_max_lun < 1) { 2398 printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n", 2399 iscsi_max_lun); 2400 return -EINVAL; 2401 } 2402 iscsi_tcp_transport.max_lun = iscsi_max_lun; 2403 2404 if (!iscsi_register_transport(&iscsi_tcp_transport)) 2405 return -ENODEV; 2406 2407 return 0; 2408 } 2409 2410 static void __exit 2411 iscsi_tcp_exit(void) 2412 { 2413 iscsi_unregister_transport(&iscsi_tcp_transport); 2414 } 2415 2416 module_init(iscsi_tcp_init); 2417 module_exit(iscsi_tcp_exit); 2418