/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * Copyright (c) 2009,2010 Rising Tide Systems
 * Copyright (c) 2009,2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"

struct se_tmr_req *core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
	u8 function,
	gfp_t gfp_flags)
{
	struct se_tmr_req *tmr;

	tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);
	if (!tmr) {
		pr_err("Unable to allocate struct se_tmr_req\n");
		return ERR_PTR(-ENOMEM);
	}
	tmr->task_cmd = se_cmd;
	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
	tmr->function = function;
	INIT_LIST_HEAD(&tmr->tmr_list);

	return tmr;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

void core_tmr_release_req(
	struct se_tmr_req *tmr)
{
	struct se_device *dev = tmr->tmr_dev;
	unsigned long flags;

	if (!dev) {
		kmem_cache_free(se_tmr_req_cache, tmr);
		return;
	}

	spin_lock_irqsave(&dev->se_tmr_lock, flags);
	list_del(&tmr->tmr_list);
	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

	kmem_cache_free(se_tmr_req_cache, tmr);
}
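
/*
 * Example (illustrative only, not part of this file): a fabric module
 * would typically allocate and queue a TMR request along these lines.
 * fabric_submit_tmr() is a hypothetical helper; core_tmr_alloc_req(),
 * se_cmd->se_tmr_req and TMR_LUN_RESET are real:
 *
 *	struct se_tmr_req *tmr;
 *
 *	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, TMR_LUN_RESET,
 *				GFP_KERNEL);
 *	if (IS_ERR(tmr))
 *		return PTR_ERR(tmr);
 *	se_cmd->se_tmr_req = tmr;
 *	fabric_submit_tmr(se_cmd);
 */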

static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	if (!fe_count) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support
	 *
	 * Send TASK ABORTED status only for commands aborted by the action
	 * of an I_T nexus other than the one on which they were received,
	 * and only when TAS is enabled.
	 */
	if (tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl) && tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}

static int target_check_cdb_and_preempt(struct list_head *list,
	struct se_cmd *cmd)
{
	struct t10_pr_registration *reg;

	if (!list)
		return 0;
	list_for_each_entry(reg, list, pr_reg_abort_list) {
		if (reg->pr_res_key == cmd->pr_res_key)
			return 0;
	}

	return 1;
}

static void core_tmr_drain_tmr_list(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_tmr_list);
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_cmd *cmd;
	unsigned long flags;
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET TMR.
	 */
	spin_lock_irqsave(&dev->se_tmr_lock, flags);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr_p == tmr)
			continue;

		cmd = tmr_p->task_cmd;
		if (!cmd) {
			pr_err("Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action),
		 * skip non registration key matching TMRs.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;

		spin_lock(&cmd->t_state_lock);
		if (!atomic_read(&cmd->t_transport_active)) {
			spin_unlock(&cmd->t_state_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock(&cmd->t_state_lock);
			continue;
		}
		spin_unlock(&cmd->t_state_lock);

		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
	}
	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);

	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
		list_del_init(&tmr_p->tmr_list);
		cmd = tmr_p->task_cmd;

		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);

		transport_cmd_finish_abort(cmd, 1);
	}
}
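
/*
 * The helper above follows the two-phase drain pattern used throughout
 * this file: splice matching entries onto a local list while holding the
 * lock, then process them with the lock dropped, since the per-entry
 * completion paths may themselves sleep or take other locks.  A minimal
 * sketch of the pattern (illustrative only; my_entry, my_lock, my_list,
 * should_drain() and do_slow_work() are hypothetical names):
 *
 *	LIST_HEAD(drain_list);
 *	struct my_entry *p, *tmp;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	list_for_each_entry_safe(p, tmp, &my_list, node)
 *		if (should_drain(p))
 *			list_move_tail(&p->node, &drain_list);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 *	while (!list_empty(&drain_list)) {
 *		p = list_first_entry(&drain_list, struct my_entry, node);
 *		list_del_init(&p->node);
 *		do_slow_work(p);
 *	}
 */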

static void core_tmr_drain_task_list(
	struct se_device *dev,
	struct se_cmd *prout_cmd,
	struct se_node_acl *tmr_nacl,
	int tas,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_task_list);
	struct se_cmd *cmd;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count;
	/*
	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * core_tmr_handle_tas_abort() implements this: TASK ABORTED status
	 * is sent only to I_T nexuses other than the one that caused the
	 * reset, and only when TAS is set in the Control mode page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!task->task_se_cmd) {
			pr_err("task->task_se_cmd is NULL!\n");
			continue;
		}
		cmd = task->task_se_cmd;

		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		list_move_tail(&task->t_state_list, &drain_task_list);
		task->t_state_active = false;
		/*
		 * Remove from task execute list before processing drain_task_list
		 */
		if (!list_empty(&task->t_execute_list))
			__transport_remove_task_from_execute_queue(task, dev);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);

	while (!list_empty(&drain_task_list)) {
		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
		list_del(&task->t_state_list);
		cmd = task->task_se_cmd;

		pr_debug("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
			" cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			cmd->se_tfo->get_task_tag(cmd), 0,
			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
			cmd->t_task_cdb[0]);
		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
			cmd->t_task_list_num,
			atomic_read(&cmd->t_task_cdbs_left),
			atomic_read(&cmd->t_task_cdbs_sent),
			atomic_read(&cmd->t_transport_active),
			atomic_read(&cmd->t_transport_stop),
			atomic_read(&cmd->t_transport_sent));

		/*
		 * If the command may be queued onto a workqueue cancel it now.
		 *
		 * This is equivalent to removal from the execute queue in the
		 * loop above, but we do it down here given that
		 * cancel_work_sync may block.
		 */
		if (cmd->t_state == TRANSPORT_COMPLETE)
			cancel_work_sync(&cmd->work);

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		target_stop_task(task, &flags);

		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&cmd->t_task_cdbs_ex_left));
			continue;
		}
		fe_count = atomic_read(&cmd->t_fe_count);

		if (atomic_read(&cmd->t_transport_active)) {
			pr_debug("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			atomic_set(&cmd->t_transport_aborted, 1);
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);

			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
			continue;
		}
		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		atomic_set(&cmd->t_transport_aborted, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
	}
}
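
/*
 * Illustrative note: atomic_dec_and_test() returns true only for the
 * caller that drops the counter to zero, which is how the loop above
 * ensures that only the final se_task of a multi-task command performs
 * the TAS/abort completion.  Sketch:
 *
 *	if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))
 *		continue;	// other tasks are still draining
 *	// last task: safe to finish the abort for *cmd
 */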

static void core_tmr_drain_cmd_list(
	struct se_device *dev,
	struct se_cmd *prout_cmd,
	struct se_node_acl *tmr_nacl,
	int tas,
	struct list_head *preempt_and_abort_list)
{
	LIST_HEAD(drain_cmd_list);
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	struct se_cmd *cmd, *tcmd;
	unsigned long flags;
	/*
	 * Release all commands remaining in the struct se_device cmd queue.
	 *
	 * This follows the same logic as above for the struct se_device's
	 * struct se_task state list, where commands are returned with
	 * TASK_ABORTED status if there is an outstanding $FABRIC_MOD
	 * reference; otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
			continue;
		/*
		 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
		 */
		if (prout_cmd == cmd)
			continue;

		atomic_set(&cmd->t_transport_queue_active, 0);
		atomic_dec(&qobj->queue_cnt);
		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	while (!list_empty(&drain_cmd_list)) {
		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
		list_del_init(&cmd->se_queue_node);

		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, cmd->t_state,
			atomic_read(&cmd->t_fe_count));

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
					atomic_read(&cmd->t_fe_count));
	}
}

int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	int tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client.  A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough..
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			pr_debug("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				tmr_tpg->se_tpg_tfo->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		dev->transport->name, tas);

	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
				preempt_and_abort_list);
	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
				preempt_and_abort_list);
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET
	 */
	if (!preempt_and_abort_list &&
	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock_irq(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock_irq(&dev->stats_lock);

	pr_debug("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		dev->transport->name);
	return 0;
}
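
/*
 * Example (illustrative only): the TMR processing path calls
 * core_tmr_lun_reset() roughly as follows.  For a plain LUN_RESET (no
 * PREEMPT_AND_ABORT) both the registration list and prout_cmd are NULL,
 * and the caller maps the return value to a TMR response code:
 *
 *	ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
 *	tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
 *				TMR_FUNCTION_REJECTED;
 */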