/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Dump cmd state for debugging.
 */
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->sess, cmd->seq, se_cmd);

	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		caller, cmd, se_cmd->t_data_nents,
		se_cmd->data_length, se_cmd->se_cmd_flags);

	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
			caller, cmd, sg,
			sg_page(sg), sg->length, sg->offset);

	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
}

void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	if (unlikely(ft_debug_logging))
		_ft_dump_cmd(cmd, caller);
}

static void ft_free_cmd(struct ft_cmd *cmd)
{
	struct fc_frame *fp;
	struct fc_lport *lport;
	struct ft_sess *sess;

	if (!cmd)
		return;
	sess = cmd->sess;
	fp = cmd->req_frame;
	lport = fr_dev(fp);
	if (fr_seq(fp))
		lport->tt.seq_release(fr_seq(fp));
	fc_frame_free(fp);
	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
	ft_sess_put(sess);	/* undo get from lookup at recv */
}

void ft_release_cmd(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	ft_free_cmd(cmd);
}

int ft_check_stop_free(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}
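
/*
 * Note on the response path below (a reading aid added in editing, not
 * from the original sources): ft_queue_status() lays out one
 * struct fcp_resp_with_ext and appends any autosense bytes immediately
 * after it, which is why the sense data is copied to (fcp + 1) and
 * advertised via ext.fr_sns_len with FCP_SNS_LEN_VAL set.  On a send
 * failure it returns -ENOMEM so target-core re-queues the response,
 * with TASK_SET_FULL asking the initiator to back off.
 */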
/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;
	int rc;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}

	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	rc = lport->tt.seq_send(lport, cmd->seq, fp);
	if (rc) {
		pr_info_ratelimited("%s: Failed to send response frame %p, "
				    "xid <0x%x>\n", __func__, fp, ep->xid);
		/*
		 * Generate a TASK_SET_FULL status to notify the initiator
		 * to reduce its queue_depth after the se_cmd response has
		 * been re-queued by target-core.
		 */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	lport->tt.exch_done(cmd->seq);
	return 0;
}

int ft_write_pending_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	return cmd->write_data_len != se_cmd->data_length;
}
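
/*
 * Note on the write path (summary added in editing): target-core calls
 * ft_write_pending() below to solicit write data; the XFER_RDY asks for
 * the full se_cmd->data_length in one burst, and ft_write_pending_status()
 * above reports whether all of that data has arrived yet.  When the lport
 * supports it, the exchange may also be set up for direct data placement
 * (DDP) so write data is DMAed straight into the command's scatterlist.
 */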
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM;	/* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is the 'exchange responder' and is sending
		 * XFER_READY to the 'exchange initiator'.
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
			    lport->tt.ddp_target(lport, ep->xid,
						 se_cmd->t_data_sg,
						 se_cmd->t_data_nents))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}

u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	if (cmd->aborted)
		return ~0;
	return fc_seq_exch(cmd->seq)->rxid;
}

int ft_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

/*
 * FC sequence response handler for follow-on sequences (data) and aborts.
 */
static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct ft_cmd *cmd = arg;
	struct fc_frame_header *fh;

	if (unlikely(IS_ERR(fp))) {
		/* XXX need to find cmd if queued */
		cmd->seq = NULL;
		cmd->aborted = true;
		return;
	}

	fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_SOL_DATA:	/* write data */
		ft_recv_write_data(cmd, fp);
		break;
	case FC_RCTL_DD_UNSOL_CTL:	/* command */
	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		ft_invl_hw_context(cmd);
		fc_frame_free(fp);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	}
}
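
/*
 * The error-response helpers below are used in two ways (illustrative
 * summary added in editing): with a real SCSI status and no rsp_code, e.g.
 *
 *	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
 *
 * as done from ft_recv_cmd() on allocation failure, or with SAM_STAT_GOOD
 * plus an FCP rsp_code such as FCP_CMND_FIELDS_INVALID, in which case a
 * struct fcp_resp_rsp_info trailer carries the code and FCP_RSP_LEN_VAL
 * is set.
 */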
/*
 * Send an FCP response including SCSI status and optional FCP rsp_code.
 * status is SAM_STAT_GOOD (zero) iff code is valid.
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp) {
		lport->tt.seq_send(lport, sp, fp);
		lport->tt.exch_done(sp);
	} else {
		lport->tt.frame_send(lport, fp);
	}
}

/*
 * Send error or task management response.
 */
static void ft_send_resp_code(struct ft_cmd *cmd,
			      enum fcp_resp_rsp_codes code)
{
	ft_send_resp_status(cmd->sess->tport->lport,
			    cmd->req_frame, SAM_STAT_GOOD, code);
}

/*
 * Send error or task management response.
 * Always frees the cmd and associated state.
 */
static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
				       enum fcp_resp_rsp_codes code)
{
	ft_send_resp_code(cmd, code);
	ft_free_cmd(cmd);
}

/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct fcp_cmnd *fcp;
	int rc;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP-4 r01 indicates that having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	/* FIXME: Add referenced task tag for ABORT_TASK */
	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
			       &cmd->ft_sense_buffer[0],
			       scsilun_to_int(&fcp->fc_lun),
			       cmd, tm_func, GFP_KERNEL, 0, 0);
	if (rc < 0)
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
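
/*
 * The TM request submitted in ft_send_tm() completes asynchronously
 * (summary added in editing): target-core invokes ft_queue_tm_resp()
 * below once the TMR has run, and the TMR_* result is translated to an
 * FCP_TMF_* code for the wire.  ABORT_TASK is not wired up yet; no
 * referenced task tag is passed to target_submit_tmr() (see the FIXME
 * above).
 */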
/*
 * Send status from completed task management request.
 */
void ft_queue_tm_resp(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_tmr_req *tmr = se_cmd->se_tmr_req;
	enum fcp_resp_rsp_codes code;

	if (cmd->aborted)
		return;
	switch (tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		code = FCP_TMF_CMPL;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		code = FCP_TMF_INVALID_LUN;
		break;
	case TMR_FUNCTION_REJECTED:
		code = FCP_TMF_REJECTED;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
	case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
	default:
		code = FCP_TMF_FAILED;
		break;
	}
	pr_debug("tmr fn %d resp %d fcp code %d\n",
		 tmr->function, tmr->response, code);
	ft_send_resp_code(cmd, code);
}

void ft_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void ft_send_work(struct work_struct *work);

/*
 * Handle incoming FCP command.
 */
static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
	struct ft_cmd *cmd;
	struct fc_lport *lport = sess->tport->lport;
	struct se_session *se_sess = sess->se_sess;
	int tag;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		goto busy;

	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct ft_cmd));

	cmd->se_cmd.map_tag = tag;
	cmd->sess = sess;
	cmd->seq = lport->tt.seq_assign(lport, fp);
	if (!cmd->seq) {
		percpu_ida_free(&se_sess->sess_tag_pool, tag);
		goto busy;
	}
	cmd->req_frame = fp;		/* hold frame during cmd */

	INIT_WORK(&cmd->work, ft_send_work);
	queue_work(sess->tport->tpg->workqueue, &cmd->work);
	return;

busy:
	pr_debug("cmd or seq allocation failure - sending BUSY\n");
	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
	fc_frame_free(fp);
	ft_sess_put(sess);		/* undo get from lookup */
}

/*
 * Handle incoming FCP frame.
 * Caller has verified that the frame is type FCP.
 */
void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (fh->fh_r_ctl) {
	case FC_RCTL_DD_UNSOL_CMD:	/* command */
		ft_recv_cmd(sess, fp);
		break;
	case FC_RCTL_DD_SOL_DATA:	/* write data */
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
	default:
		pr_debug("%s: unhandled frame r_ctl %x\n",
			 __func__, fh->fh_r_ctl);
		fc_frame_free(fp);
		ft_sess_put(sess);	/* undo get from lookup */
		break;
	}
}
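
/*
 * Note (added summary): ft_recv_cmd() above only reserves a session tag
 * and assigns the sequence; parsing the FCP_CMND and submitting it to
 * target-core is deferred to the tpg workqueue via ft_send_work() below,
 * keeping the frame-receive path short.  The request frame is held in
 * cmd->req_frame until ft_free_cmd() so the payload remains available
 * from work context.
 */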
/*
 * Send new command to target.
 */
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	/*
	 * Check for FCP task management flags.
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;	/* TBD not supported by tcm_fc yet */
	}

	/*
	 * Locate the SAM Task Attr from fc_pri_ta.
	 */
	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE:	/* Fallthrough */
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
	/*
	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
	 * directly from the ft_check_stop_free callback in the response path.
	 */
	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			      &cmd->ft_sense_buffer[0],
			      scsilun_to_int(&fcp->fc_lun),
			      ntohl(fcp->fc_dl), task_attr, data_dir, 0))
		goto err;

	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
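
/*
 * Command lifecycle summary (a reading aid added in editing, not
 * normative): ft_recv_req() -> ft_recv_cmd() -> ft_send_work() ->
 * target_submit_cmd().  Write data then arrives via ft_write_pending()
 * and the ft_recv_seq() handler; the final response goes out through
 * ft_queue_status() or ft_queue_tm_resp().  When target-core finishes,
 * ft_check_stop_free() drops the command and ft_release_cmd() ->
 * ft_free_cmd() returns the tag, frees the request frame, and puts the
 * session reference taken at lookup.
 */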