/*******************************************************************************
 * Filename:  target_core_tmr.c
 *
 * This file contains SPC-3 task management infrastructure
 *
 * Copyright (c) 2009,2010 Rising Tide Systems
 * Copyright (c) 2009,2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>

#include "target_core_alua.h"
#include "target_core_pr.h"

struct se_tmr_req *core_tmr_alloc_req(
	struct se_cmd *se_cmd,
	void *fabric_tmr_ptr,
	u8 function)
{
	struct se_tmr_req *tmr;

	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
					GFP_ATOMIC : GFP_KERNEL);
	if (!tmr) {
		pr_err("Unable to allocate struct se_tmr_req\n");
		return ERR_PTR(-ENOMEM);
	}
	tmr->task_cmd = se_cmd;
	tmr->fabric_tmr_ptr = fabric_tmr_ptr;
	tmr->function = function;
	INIT_LIST_HEAD(&tmr->tmr_list);

	return tmr;
}
EXPORT_SYMBOL(core_tmr_alloc_req);

void core_tmr_release_req(
	struct se_tmr_req *tmr)
{
	struct se_device *dev = tmr->tmr_dev;

	if (!dev) {
		kmem_cache_free(se_tmr_req_cache, tmr);
		return;
	}

	spin_lock_irq(&dev->se_tmr_lock);
	list_del(&tmr->tmr_list);
	spin_unlock_irq(&dev->se_tmr_lock);

	kmem_cache_free(se_tmr_req_cache, tmr);
}

static void core_tmr_handle_tas_abort(
	struct se_node_acl *tmr_nacl,
	struct se_cmd *cmd,
	int tas,
	int fe_count)
{
	/*
	 * With no outstanding fabric references, complete the abort and
	 * release the command immediately.
	 */
	if (!fe_count) {
		transport_cmd_finish_abort(cmd, 1);
		return;
	}
	/*
	 * TASK ABORTED status (TAS) bit support: send TASK ABORTED status
	 * back to the initiator when the abort originated from this
	 * command's own I_T nexus, or when TAS emulation is enabled.
	 */
	if ((tmr_nacl &&
	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
		transport_send_task_abort(cmd);

	transport_cmd_finish_abort(cmd, 0);
}

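/*
 * LUN_RESET handling below proceeds in three passes, each described in
 * more detail inline: first any other TMRs queued against the device are
 * released, then in-flight struct se_task entries on the device state
 * list are stopped and aborted, and finally commands still waiting on
 * the device queue are drained.  A legacy SPC-2 reservation, if held,
 * is cleared at the end.
 */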
int core_tmr_lun_reset(
	struct se_device *dev,
	struct se_tmr_req *tmr,
	struct list_head *preempt_and_abort_list,
	struct se_cmd *prout_cmd)
{
	struct se_cmd *cmd, *tcmd;
	struct se_node_acl *tmr_nacl = NULL;
	struct se_portal_group *tmr_tpg = NULL;
	struct se_queue_obj *qobj = &dev->dev_queue_obj;
	struct se_tmr_req *tmr_p, *tmr_pp;
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int fe_count, tas;
	/*
	 * TASK_ABORTED status bit, this is configurable via ConfigFS
	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
	/*
	 * Determine if this se_tmr is coming from a $FABRIC_MOD
	 * or struct se_device passthrough.
	 */
	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
		if (tmr_nacl && tmr_tpg) {
			pr_debug("LUN_RESET: TMR caller fabric: %s"
				" initiator port %s\n",
				tmr_tpg->se_tpg_tfo->get_fabric_name(),
				tmr_nacl->initiatorname);
		}
	}
	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		dev->transport->name, tas);
	/*
	 * Release all pending and outgoing TMRs aside from the received
	 * LUN_RESET tmr.
	 */
	spin_lock_irq(&dev->se_tmr_lock);
	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
		/*
		 * Allow the received TMR to return with FUNCTION_COMPLETE.
		 */
		if (tmr && (tmr_p == tmr))
			continue;

		cmd = tmr_p->task_cmd;
		if (!cmd) {
			pr_err("Unable to locate struct se_cmd for TMR\n");
			continue;
		}
		/*
		 * If this function was called with a valid pr_res_key
		 * parameter (e.g. for the PROUT PREEMPT_AND_ABORT service
		 * action), skip TMRs whose commands do not match a
		 * registration key.
		 */
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		spin_unlock_irq(&dev->se_tmr_lock);

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		if (!atomic_read(&cmd->t_transport_active)) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			spin_lock_irq(&dev->se_tmr_lock);
			continue;
		}
		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			spin_lock_irq(&dev->se_tmr_lock);
			continue;
		}
		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
			" Response: 0x%02x, t_state: %d\n",
			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
			tmr_p->function, tmr_p->response, cmd->t_state);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		transport_cmd_finish_abort_tmr(cmd);
		spin_lock_irq(&dev->se_tmr_lock);
	}
	spin_unlock_irq(&dev->se_tmr_lock);
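	/*
	 * First pass complete: every other TMR queued against the device
	 * has now been aborted, except the received LUN_RESET itself and
	 * any TMR whose command was inactive or still in
	 * TRANSPORT_ISTATE_PROCESSING.
	 */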
	/*
	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
	 * for TMR LUN_RESET:
	 *
	 * a) "Yes" indicates that each command that is aborted on an I_T nexus
	 * other than the one that caused the SCSI device condition is
	 * completed with TASK ABORTED status, if the TAS bit is set to one in
	 * the Control mode page (see SPC-4). "No" indicates that no status is
	 * returned for aborted commands.
	 *
	 * d) If the logical unit reset is caused by a particular I_T nexus
	 * (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
	 * (TASK_ABORTED status) applies.
	 *
	 * Otherwise (e.g., if triggered by a hard reset), "no"
	 * (no TASK_ABORTED SAM status) applies.
	 *
	 * Note that this seems to be independent of TAS (Task Aborted Status)
	 * in the Control Mode Page.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
				t_state_list) {
		if (!task->task_se_cmd) {
			pr_err("task->task_se_cmd is NULL!\n");
			continue;
		}
		cmd = task->task_se_cmd;

		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Do not abort the PROUT PREEMPT_AND_ABORT CDB itself.
		 */
		if (prout_cmd == cmd)
			continue;

		list_del(&task->t_state_list);
		atomic_set(&task->task_state_active, 0);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		pr_debug("LUN_RESET: %s cmd: %p task: %p"
			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
			"def_t_state: %d/%d cdb: 0x%02x\n",
			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
			cmd->se_tfo->get_task_tag(cmd), 0,
			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
			cmd->deferred_t_state, cmd->t_task_cdb[0]);
		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
			" t_task_cdbs: %d t_task_cdbs_left: %d"
			" t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
			cmd->t_task_list_num,
			atomic_read(&cmd->t_task_cdbs_left),
			atomic_read(&cmd->t_task_cdbs_sent),
			atomic_read(&cmd->t_transport_active),
			atomic_read(&cmd->t_transport_stop),
			atomic_read(&cmd->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&cmd->t_state_lock, flags);

			pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
				" for dev: %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			pr_debug("LUN_RESET Completed task: %p shutdown for"
				" dev: %p\n", task, dev);
			spin_lock_irqsave(&cmd->t_state_lock, flags);
			atomic_dec(&cmd->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			if (atomic_read(&task->task_execute_queue) != 0)
				transport_remove_task_from_execute_queue(task, dev);
		}
		__transport_stop_task_timer(task, &flags);

		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
			spin_unlock_irqrestore(
				&cmd->t_state_lock, flags);
			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&cmd->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		fe_count = atomic_read(&cmd->t_fe_count);

		if (atomic_read(&cmd->t_transport_active)) {
			pr_debug("LUN_RESET: got t_transport_active = 1 for"
				" task: %p, t_fe_count: %d dev: %p\n", task,
				fe_count, dev);
			atomic_set(&cmd->t_transport_aborted, 1);
			spin_unlock_irqrestore(&cmd->t_state_lock,
						flags);
			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
		atomic_set(&cmd->t_transport_aborted, 1);
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
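	/*
	 * The pass above handled commands whose tasks had already reached
	 * the device state list.  The final pass below catches commands
	 * still sitting on the device queue, which never started executing
	 * and therefore have no struct se_task entries left to stop.
	 */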
	/*
	 * Release all commands remaining in the struct se_device cmd queue.
	 *
	 * This follows the same logic as above for the struct se_device
	 * struct se_task state list, where commands are returned with
	 * TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
	 * reference, otherwise the struct se_cmd is released.
	 */
	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
		/*
		 * For PREEMPT_AND_ABORT usage, only process commands
		 * with a matching reservation key.
		 */
		if (preempt_and_abort_list &&
		    (core_scsi3_check_cdb_abort_and_preempt(
					preempt_and_abort_list, cmd) != 0))
			continue;
		/*
		 * Do not abort the PROUT PREEMPT_AND_ABORT CDB itself.
		 */
		if (prout_cmd == cmd)
			continue;

		atomic_dec(&cmd->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&cmd->se_queue_node);
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
			"Preempt" : "", cmd, cmd->t_state,
			atomic_read(&cmd->t_fe_count));
		/*
		 * Signal that the command has failed via cmd->se_cmd_flags.
		 */
		transport_new_cmd_failure(cmd);

		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
				atomic_read(&cmd->t_fe_count));
		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
	/*
	 * Clear any legacy SPC-2 reservation when called during
	 * LOGICAL UNIT RESET.
	 */
	if (!preempt_and_abort_list &&
	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
		spin_lock(&dev->dev_reservation_lock);
		dev->dev_reserved_node_acl = NULL;
		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
		spin_unlock(&dev->dev_reservation_lock);
		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
	}

	spin_lock_irq(&dev->stats_lock);
	dev->num_resets++;
	spin_unlock_irq(&dev->stats_lock);

	pr_debug("LUN_RESET: %s for [%s] Complete\n",
		(preempt_and_abort_list) ? "Preempt" : "TMR",
		dev->transport->name);
	return 0;
}
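/*
 * Usage sketch (illustrative only, not part of this file's build): a
 * fabric module that received a LUN RESET would typically allocate and
 * dispatch the TMR along these lines.  The fabric_tmr_ptr argument and
 * the dispatch via transport_generic_handle_tmr() are assumptions about
 * the caller, not something this file mandates:
 *
 *	struct se_tmr_req *tmr;
 *
 *	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, TMR_LUN_RESET);
 *	if (IS_ERR(tmr))
 *		return PTR_ERR(tmr);
 *	se_cmd->se_tmr_req = tmr;
 *	transport_generic_handle_tmr(se_cmd);
 */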