/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"

#include "../scsi_sas_internal.h"

/* Slab cache backing every struct sas_task allocation in libsas. */
static struct kmem_cache *sas_task_cache;

/**
 * sas_alloc_task - allocate a zeroed sas_task
 * @flags: gfp allocation flags
 *
 * Returns a task with its state lock initialized and
 * task_state_flags set to SAS_TASK_STATE_PENDING, or NULL if the
 * slab allocation fails.
 */
struct sas_task *sas_alloc_task(gfp_t flags)
{
	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

	if (task) {
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
	}

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);

/**
 * sas_alloc_slow_task - allocate a sas_task with slow-path extras
 * @flags: gfp allocation flags
 *
 * Allocates a sas_task plus a companion sas_task_slow carrying a
 * timer and a completion, linking the two together.  Returns NULL
 * (with nothing leaked) if either allocation fails.
 *
 * The timer is set up with a NULL callback; presumably the caller
 * installs its own function before arming it -- TODO confirm against
 * callers.
 */
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);	/* kfree(NULL) is a no-op */
		return NULL;
	}

	task->slow_task = slow;
	slow->task = task;
	timer_setup(&slow->timer, NULL, 0);
	init_completion(&slow->completion);

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);

/**
 * sas_free_task - free a sas_task and its optional slow-path data
 * @task: task to free; NULL is silently ignored
 */
void sas_free_task(struct sas_task *task)
{
	if (task) {
		kfree(task->slow_task);
		kmem_cache_free(sas_task_cache, task);
	}
}
EXPORT_SYMBOL_GPL(sas_free_task);

/*------------ SAS addr hash -----------*/
/**
 * sas_hash_addr - hash an 8-byte SAS address down to 3 bytes
 * @hashed:   output buffer, receives 3 bytes (big-endian)
 * @sas_addr: 8-byte SAS address to hash
 *
 * Bitwise polynomial-division hash over all 64 address bits,
 * using the generator polynomial 0x00DB2777 on a 24-bit remainder.
 */
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
	const u32 poly = 0x00DB2777;
	u32 r = 0;
	int i;

	/* Feed each address bit, MSB first, through the divider. */
	for (i = 0; i < 8; i++) {
		int b;
		for (b = 7; b >= 0; b--) {
			r <<= 1;
			if ((1 << b) & sas_addr[i]) {
				if (!(r & 0x01000000))
					r ^= poly;
			} else if (r & 0x01000000)
				r ^= poly;
		}
	}

	/* Emit the 24-bit remainder, most significant byte first. */
	hashed[0] = (r >> 16) & 0xFF;
	hashed[1] = (r >> 8) & 0xFF;
	hashed[2] = r & 0xFF;
}


/* ---------- HA events ---------- */

/* Work handler for the HAE_RESET HA event: just acknowledges it by
 * clearing the pending bit; no other action is taken here.
 */
void sas_hae_reset(struct work_struct *work)
{
	struct sas_ha_event *ev = to_sas_ha_event(work);
	struct sas_ha_struct *ha = ev->ha;

	clear_bit(HAE_RESET, &ha->pending);
}

/**
 * sas_register_ha - initialize and register a SAS host adapter
 * @sas_ha: HA structure filled in by the LLDD
 *
 * Initializes the HA's locks, queues and state, hashes its SAS
 * address, then registers phys, ports and the event machinery in
 * that order, unwinding on failure.  Returns 0 or a negative errno.
 */
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	error = sas_register_phys(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = sas_init_events(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
		goto Undo_ports;
	}

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	/* NOTE(review): nothing visibly undoes sas_register_phys() here;
	 * presumably phy teardown happens at host removal -- confirm.
	 */
	return error;
}

/* Stop new unchained events from being queued on this HA and flush
 * any drainers already in flight.  Takes drain_mutex for the whole
 * sequence; the state bit is cleared under ha->lock.
 */
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Set the state to unregistered to avoid further unchained
	 * events to be queued, and flush any in-progress drainers
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}

/**
 * sas_unregister_ha - tear down a previously registered SAS HA
 * @sas_ha: HA to unregister
 *
 * Disables event processing, unregisters the ports, then drains
 * once more to flush the unregistration work.  Always returns 0.
 */
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	return 0;
}

/* Fetch link error counters for a phy: a host-local phy is queried
 * through the LLDD (PHY_FUNC_GET_EVENTS); an expander phy is queried
 * over SMP.
 */
static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
	}

	return sas_smp_get_phy_events(phy);
}

/**
 * sas_try_ata_reset - route a phy reset through libata EH if possible
 * @asd_phy: phy whose attached device may be SATA
 *
 * If the phy's port has a probed domain device that is SATA, schedule
 * a libata reset for it, wait for EH to finish, and return 0.
 * Returns -ENODEV when there is no probed SATA device to hand off to.
 */
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
	struct domain_device *dev = NULL;

	/* try to route user requested link resets through libata */
	if (asd_phy->port)
		dev = asd_phy->port->port_dev;

	/* validate that dev has been probed */
	if (dev)
		dev = sas_find_dev_by_rphy(dev->rphy);

	if (dev && dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);
		return 0;
	}

	return -ENODEV;
}

/**
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * phy reset request via sysfs in host workqueue context so we know we
 * can block on eh and safely traverse the domain_device topology
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		/* soft resets of SATA links go through libata EH first */
		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
}

/* Enable or disable a phy.  Enabling is done with a (soft) link
 * reset; disabling issues PHY_FUNC_DISABLE to the LLDD or, for
 * expander phys, via SMP phy control.
 */
static int sas_phy_enable(struct sas_phy *phy, int enable)
{
	int ret;
	enum phy_func cmd;

	if (enable)
		cmd = PHY_FUNC_LINK_RESET;
	else
		cmd = PHY_FUNC_DISABLE;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
	}
	return ret;
}

/**
 * sas_phy_reset - reset a phy directly (no libata coordination)
 * @phy:        phy to reset
 * @hard_reset: non-zero for a hard reset, else a link reset
 *
 * Unlike transport_sas_phy_reset(), this does not route SATA links
 * through libata EH.  Returns -ENODEV if the phy is disabled.
 */
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	int ret;
	enum phy_func reset_type;

	if (!phy->enabled)
		return -ENODEV;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
	return ret;
}

/**
 * sas_set_phy_speed - program a phy's min/max link rates
 * @phy:   phy to reprogram
 * @rates: requested rates; zero fields mean "leave unchanged"
 *
 * Rejects requests outside the phy's current window, clamps to the
 * hardware limits, then applies the rates through the LLDD or, for
 * expander phys, via SMP phy control (rates are carried alongside a
 * link reset request).
 */
int sas_set_phy_speed(struct sas_phy *phy,
		      struct sas_phy_linkrates *rates)
{
	int ret;

	/* min above current max, or max below current min, is invalid */
	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	/* clamp to what the hardware can actually do */
	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);

	}

	return ret;
}

/**
 * sas_prep_resume_ha - prepare an HA for resume from suspend
 * @ha: host adapter about to resume
 *
 * Re-marks the HA registered and clears per-phy state left over from
 * the suspension path so stale link data cannot be consumed.
 */
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->port_events_pending = 0;
		phy->phy_events_pending = 0;
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);

/* Count how many of the HA's phys are still marked suspended. */
static int phys_suspended(struct sas_ha_struct *ha)
{
	int i, rc = 0;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended)
			rc++;
	}

	return rc;
}

/**
 * sas_resume_ha - resume an HA after sas_prep_resume_ha()
 * @ha: host adapter
 *
 * Waits up to 25 seconds for all phys to come back; any phy still
 * suspended gets a PHYE_RESUME_TIMEOUT event so its port can be
 * deformed.  Then unblocks I/O and drains pending libsas work.
 */
void sas_resume_ha(struct sas_ha_struct *ha)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* deform ports on phys that did not resume
	 * at this point we may be racing the phy coming back (as posted
	 * by the lldd). So we post the event and once we are in the
	 * libsas context check that the phy remains suspended before
	 * tearing it down.
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
		}
	}

	/* all phys are back up or timed out, turn on i/o so we can
	 * flush out disks that did not return
	 */
	scsi_unblock_requests(ha->core.shost);
	sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);

/**
 * sas_suspend_ha - quiesce an HA for suspend
 * @ha: host adapter
 *
 * Disables event processing, blocks new I/O, posts a DISCE_SUSPEND
 * discovery event per port, and drains that work to completion.
 */
void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);

/* Transport-class release hook: free the per-phy private data
 * allocated by sas_phy_setup().
 */
static void sas_phy_release(struct sas_phy *phy)
{
	kfree(phy->hostdata);
	phy->hostdata = NULL;
}

/* Workqueue body for a queued phy reset; stores the result for the
 * queue_phy_reset() caller waiting on the drain.
 */
static void phy_reset_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

/* Workqueue body for a queued phy enable/disable; stores the result
 * for the queue_phy_enable() caller waiting on the drain.
 */
static void phy_enable_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

	d->enable_result = sas_phy_enable(d->phy, d->enable);
}

/* Transport-class setup hook: allocate and attach the per-phy data
 * used to funnel sysfs reset/enable requests through the libsas
 * workqueue.  Freed by sas_phy_release().
 */
static int sas_phy_setup(struct sas_phy *phy)
{
	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	mutex_init(&d->event_lock);
	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
	d->phy = phy;
	phy->hostdata = d;

	return 0;
}

/* sysfs phy_reset entry point: queue the reset on the libsas
 * workqueue (so it can coordinate with ata-eh and discovery), drain,
 * and return the work item's result.
 */
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);

	return rc;
}

/* sysfs phy enable/disable entry point; same queue-and-drain pattern
 * as queue_phy_reset().
 */
static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);

	return rc;
}

/* Function template handed to the SAS transport class; wires the
 * sysfs phy operations to the libsas implementations above.
 */
static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};

/**
 * sas_domain_attach_transport - attach the SAS transport class for an LLDD
 * @dft: the LLDD's domain function template
 *
 * Registers the transport template, records @dft for later dispatch,
 * and installs the libsas SCSI EH strategy handler.  Returns the
 * transport template or NULL on failure.
 */
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
	struct scsi_transport_template *stt = sas_attach_transport(&sft);
	struct sas_internal *i;

	if (!stt)
		return stt;

	i = to_sas_internal(stt);
	i->dft = dft;
	stt->create_work_queue = 1;
	stt->eh_strategy_handler = sas_scsi_recover_host;

	return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);

/* ---------- SAS Class register/unregister ---------- */

/* Module init: create the sas_task slab cache. */
static int __init sas_class_init(void)
{
	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
	if (!sas_task_cache)
		return -ENOMEM;

	return 0;
}

/* Module exit: destroy the sas_task slab cache. */
static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);

EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);