/*
 * zfcp device driver
 *
 * Module interface and handling of zfcp data structures.
 *
 * Copyright IBM Corporation 2002, 2010
 */

/*
 * Driver authors:
 *            Martin Peschke (originator of the driver)
 *            Raimund Schroeder
 *            Aron Zeh
 *            Wolfgang Taphorn
 *            Stefan Bader
 *            Heiko Carstens (kernel 2.6 port of the driver)
 *            Andreas Herrmann
 *            Maxim Shchetynin
 *            Volker Sameske
 *            Ralph Wuerthner
 *            Michael Loehr
 *            Swen Schillig
 *            Christof Schmitt
 *            Martin Petermann
 *            Sven Schuetz
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_reqlist.h"

#define ZFCP_BUS_ID_SIZE 20

MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com");
MODULE_DESCRIPTION("FCP HBA driver");
MODULE_LICENSE("GPL");

static char *init_device;
module_param_named(device, init_device, charp, 0400);
MODULE_PARM_DESC(device, "specify initial device");

static struct kmem_cache *zfcp_cache_hw_align(const char *name,
					      unsigned long size)
{
	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}

static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
{
	struct ccw_device *cdev;
	struct zfcp_adapter *adapter;
	struct zfcp_port *port;

	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
	if (!cdev)
		return;

	if (ccw_device_set_online(cdev))
		goto out_ccw_device;

	adapter = zfcp_ccw_adapter_by_cdev(cdev);
	if (!adapter)
		goto out_ccw_device;

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (!port)
		goto out_port;
	flush_work(&port->rport_work);

	zfcp_unit_add(port, lun);
	put_device(&port->dev);

out_port:
	zfcp_ccw_adapter_put(adapter);
out_ccw_device:
	put_device(&cdev->dev);
	return;
}

static void __init zfcp_init_device_setup(char *devstr)
{
	char *token;
	char *str, *str_saved;
	char busid[ZFCP_BUS_ID_SIZE];
	u64 wwpn, lun;

	/* duplicate devstr and keep the original for sysfs presentation */
	str_saved = kstrdup(devstr, GFP_KERNEL);
	str = str_saved;
	if (!str)
		return;

	token = strsep(&str, ",");
	if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
		goto err_out;
	strncpy(busid, token, ZFCP_BUS_ID_SIZE);

	token = strsep(&str, ",");
	if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
		goto err_out;

	token = strsep(&str, ",");
	if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
		goto err_out;

	kfree(str_saved);
	zfcp_init_device_configure(busid, wwpn, lun);
	return;

err_out:
	kfree(str_saved);
	pr_err("%s is not a valid SCSI device\n", devstr);
}
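/*
 * Illustrative note (added for documentation, not part of the original
 * source): zfcp_init_device_setup() above expects the "device" module
 * parameter to carry three comma-separated fields - ccw bus id, WWPN and
 * LUN - i.e. a value of the form
 *
 *	0.0.4711,0x5005076300c40000,0x0000000000000000
 *
 * where the bus id and the hexadecimal numbers here are made-up placeholders.
 */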
static int __init zfcp_module_init(void)
{
	int retval = -ENOMEM;

	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
					sizeof(struct zfcp_fc_gpn_ft_req));
	if (!zfcp_data.gpn_ft_cache)
		goto out;

	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
					sizeof(struct fsf_qtcb));
	if (!zfcp_data.qtcb_cache)
		goto out_qtcb_cache;

	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
					sizeof(struct fsf_status_read_buffer));
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
					sizeof(struct zfcp_fc_gid_pn));
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
					sizeof(struct zfcp_fc_els_adisc));
	if (!zfcp_data.adisc_cache)
		goto out_adisc_cache;

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;
	scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
				      sizeof(struct zfcp_scsi_dev));

	retval = misc_register(&zfcp_cfdc_misc);
	if (retval) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	retval = ccw_driver_register(&zfcp_ccw_driver);
	if (retval) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_data.adisc_cache);
out_adisc_cache:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.qtcb_cache);
out_qtcb_cache:
	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
out:
	return retval;
}

module_init(zfcp_module_init);

static void __exit zfcp_module_exit(void)
{
	ccw_driver_unregister(&zfcp_ccw_driver);
	misc_deregister(&zfcp_cfdc_misc);
	fc_release_transport(zfcp_data.scsi_transport_template);
	kmem_cache_destroy(zfcp_data.adisc_cache);
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
	kmem_cache_destroy(zfcp_data.qtcb_cache);
	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
}

module_exit(zfcp_module_exit);

/**
 * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
 * @adapter: pointer to adapter to search for port
 * @wwpn: wwpn to search for
 *
 * Returns: pointer to zfcp_port or NULL
 */
struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
					u64 wwpn)
{
	unsigned long flags;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			if (!get_device(&port->dev))
				port = NULL;
			read_unlock_irqrestore(&adapter->port_list_lock, flags);
			return port;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
	return NULL;
}
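/*
 * Illustrative usage sketch (added for documentation, not part of the
 * original source): zfcp_get_port_by_wwpn() returns the matching port with
 * a device reference held, so callers drop that reference with put_device()
 * once they are done with the port, as zfcp_init_device_configure() above
 * and zfcp_port_enqueue() below do:
 *
 *	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 *	if (port) {
 *		(use the port)
 *		put_device(&port->dev);
 *	}
 */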
static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
{
	adapter->pool.erp_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.erp_req)
		return -ENOMEM;

	adapter->pool.gid_pn_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.gid_pn_req)
		return -ENOMEM;

	adapter->pool.scsi_req =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.scsi_req)
		return -ENOMEM;

	adapter->pool.scsi_abort =
		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.scsi_abort)
		return -ENOMEM;

	adapter->pool.status_read_req =
		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
					    sizeof(struct zfcp_fsf_req));
	if (!adapter->pool.status_read_req)
		return -ENOMEM;

	adapter->pool.qtcb_pool =
		mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
	if (!adapter->pool.qtcb_pool)
		return -ENOMEM;

	adapter->pool.status_read_data =
		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
					 zfcp_data.sr_buffer_cache);
	if (!adapter->pool.status_read_data)
		return -ENOMEM;

	adapter->pool.gid_pn =
		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
	if (!adapter->pool.gid_pn)
		return -ENOMEM;

	return 0;
}

static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
{
	if (adapter->pool.erp_req)
		mempool_destroy(adapter->pool.erp_req);
	if (adapter->pool.scsi_req)
		mempool_destroy(adapter->pool.scsi_req);
	if (adapter->pool.scsi_abort)
		mempool_destroy(adapter->pool.scsi_abort);
	if (adapter->pool.qtcb_pool)
		mempool_destroy(adapter->pool.qtcb_pool);
	if (adapter->pool.status_read_req)
		mempool_destroy(adapter->pool.status_read_req);
	if (adapter->pool.status_read_data)
		mempool_destroy(adapter->pool.status_read_data);
	if (adapter->pool.gid_pn)
		mempool_destroy(adapter->pool.gid_pn);
}

/**
 * zfcp_status_read_refill - refill the long running status_read_requests
 * @adapter: ptr to struct zfcp_adapter for which the buffers should be refilled
 *
 * Returns: 0 on success, 1 otherwise
 *
 * If refilling fails while the number of missing status_read requests has
 * reached the number of status read buffers in use (stat_read_buf_num),
 * an adapter reopen is triggered.
 */
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
	while (atomic_read(&adapter->stat_miss) > 0)
		if (zfcp_fsf_status_read(adapter->qdio)) {
			if (atomic_read(&adapter->stat_miss) >=
			    adapter->stat_read_buf_num) {
				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
							NULL);
				return 1;
			}
			break;
		} else
			atomic_dec(&adapter->stat_miss);
	return 0;
}

static void _zfcp_status_read_scheduler(struct work_struct *work)
{
	zfcp_status_read_refill(container_of(work, struct zfcp_adapter,
					     stat_work));
}

static void zfcp_print_sl(struct seq_file *m, struct service_level *sl)
{
	struct zfcp_adapter *adapter =
		container_of(sl, struct zfcp_adapter, service_level);

	seq_printf(m, "zfcp: %s microcode level %x\n",
		   dev_name(&adapter->ccw_device->dev),
		   adapter->fsf_lic_version);
}

static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
{
	char name[TASK_COMM_LEN];

	snprintf(name, sizeof(name), "zfcp_q_%s",
		 dev_name(&adapter->ccw_device->dev));
	adapter->work_queue = create_singlethread_workqueue(name);

	if (adapter->work_queue)
		return 0;
	return -ENOMEM;
}

static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
{
	if (adapter->work_queue)
		destroy_workqueue(adapter->work_queue);
	adapter->work_queue = NULL;
}

/**
 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 * @ccw_device: pointer to the struct ccw_device
 *
 * Returns: struct zfcp_adapter*
 * Enqueues an adapter at the end of the adapter list in the driver data.
 * All adapter internal structures are set up.
 * Sysfs entries are also created.
 */
struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
{
	struct zfcp_adapter *adapter;

	if (!get_device(&ccw_device->dev))
		return ERR_PTR(-ENODEV);

	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
	if (!adapter) {
		put_device(&ccw_device->dev);
		return ERR_PTR(-ENOMEM);
	}

	kref_init(&adapter->ref);

	ccw_device->handler = NULL;
	adapter->ccw_device = ccw_device;

	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
	INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);

	if (zfcp_qdio_setup(adapter))
		goto failed;

	if (zfcp_allocate_low_mem_buffers(adapter))
		goto failed;

	adapter->req_list = zfcp_reqlist_alloc();
	if (!adapter->req_list)
		goto failed;

	if (zfcp_dbf_adapter_register(adapter))
		goto failed;

	if (zfcp_setup_adapter_work_queue(adapter))
		goto failed;

	if (zfcp_fc_gs_setup(adapter))
		goto failed;

	rwlock_init(&adapter->port_list_lock);
	INIT_LIST_HEAD(&adapter->port_list);

	INIT_LIST_HEAD(&adapter->events.list);
	INIT_WORK(&adapter->events.work, zfcp_fc_post_event);
	spin_lock_init(&adapter->events.list_lock);

	init_waitqueue_head(&adapter->erp_ready_wq);
	init_waitqueue_head(&adapter->erp_done_wqh);

	INIT_LIST_HEAD(&adapter->erp_ready_head);
	INIT_LIST_HEAD(&adapter->erp_running_head);

	rwlock_init(&adapter->erp_lock);
	rwlock_init(&adapter->abort_lock);

	if (zfcp_erp_thread_setup(adapter))
		goto failed;

	adapter->service_level.seq_print = zfcp_print_sl;

	dev_set_drvdata(&ccw_device->dev, adapter);

	if (sysfs_create_group(&ccw_device->dev.kobj,
			       &zfcp_sysfs_adapter_attrs))
		goto failed;

	/* report size limit per scatter-gather segment */
	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;

	if (!zfcp_adapter_scsi_register(adapter))
		return adapter;

failed:
	zfcp_adapter_unregister(adapter);
	return ERR_PTR(-ENOMEM);
}

void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct ccw_device *cdev = adapter->ccw_device;

	cancel_work_sync(&adapter->scan_work);
	cancel_work_sync(&adapter->stat_work);
	zfcp_destroy_adapter_work_queue(adapter);

	zfcp_fc_wka_ports_force_offline(adapter->gs);
	zfcp_adapter_scsi_unregister(adapter);
	sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);

	zfcp_erp_thread_kill(adapter);
	zfcp_dbf_adapter_unregister(adapter->dbf);
	zfcp_qdio_destroy(adapter->qdio);

	zfcp_ccw_adapter_put(adapter); /* final put to release */
}

/**
 * zfcp_adapter_release - remove the adapter from the resource list
 * @ref: pointer to struct kref
 * locks: adapter list write lock is assumed to be held by caller
 */
void zfcp_adapter_release(struct kref *ref)
{
	struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
						    ref);
	struct ccw_device *cdev = adapter->ccw_device;

	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
	zfcp_fc_gs_destroy(adapter);
	zfcp_free_low_mem_buffers(adapter);
	kfree(adapter->req_list);
	kfree(adapter->fc_stats);
	kfree(adapter->stats_reset_data);
	kfree(adapter);
	put_device(&cdev->dev);
}
/**
 * zfcp_device_unregister - remove port, unit from system
 * @dev: reference to the device which is to be removed
 * @grp: related reference to attribute group
 *
 * Helper function to unregister port, unit from system
 */
void zfcp_device_unregister(struct device *dev,
			    const struct attribute_group *grp)
{
	sysfs_remove_group(&dev->kobj, grp);
	device_unregister(dev);
}

static void zfcp_port_release(struct device *dev)
{
	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);

	zfcp_ccw_adapter_put(port->adapter);
	kfree(port);
}

/**
 * zfcp_port_enqueue - enqueue port to port list of adapter
 * @adapter: adapter where remote port is added
 * @wwpn: WWPN of the remote port to be enqueued
 * @status: initial status for the port
 * @d_id: destination id of the remote port to be enqueued
 * Returns: pointer to enqueued port on success, ERR_PTR on error
 *
 * All port internal structures are set up and the sysfs entry is generated.
 * d_id is used to enqueue ports with a well known address like the Directory
 * Service for nameserver lookup.
 */
struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
				    u32 status, u32 d_id)
{
	struct zfcp_port *port;
	int retval = -ENOMEM;

	kref_get(&adapter->ref);

	port = zfcp_get_port_by_wwpn(adapter, wwpn);
	if (port) {
		put_device(&port->dev);
		retval = -EEXIST;
		goto err_out;
	}

	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
	if (!port)
		goto err_out;

	rwlock_init(&port->unit_list_lock);
	INIT_LIST_HEAD(&port->unit_list);

	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);

	port->adapter = adapter;
	port->d_id = d_id;
	port->wwpn = wwpn;
	port->rport_task = RPORT_NONE;
	port->dev.parent = &adapter->ccw_device->dev;
	port->dev.release = zfcp_port_release;

	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
		kfree(port);
		goto err_out;
	}
	retval = -EINVAL;

	if (device_register(&port->dev)) {
		put_device(&port->dev);
		goto err_out;
	}

	if (sysfs_create_group(&port->dev.kobj,
			       &zfcp_sysfs_port_attrs))
		goto err_out_put;

	write_lock_irq(&adapter->port_list_lock);
	list_add_tail(&port->list, &adapter->port_list);
	write_unlock_irq(&adapter->port_list_lock);

	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

	return port;

err_out_put:
	device_unregister(&port->dev);
err_out:
	zfcp_ccw_adapter_put(adapter);
	return ERR_PTR(retval);
}

/**
 * zfcp_sg_free_table - free memory used by scatterlists
 * @sg: pointer to scatterlist
 * @count: number of scatterlist entries which are to be freed
 *
 * The scatterlist entries are expected to always reference pages.
 */
void zfcp_sg_free_table(struct scatterlist *sg, int count)
{
	int i;

	for (i = 0; i < count; i++, sg++)
		if (sg)
			free_page((unsigned long) sg_virt(sg));
		else
			break;
}

/**
 * zfcp_sg_setup_table - init scatterlist and allocate, assign buffers
 * @sg: pointer to struct scatterlist
 * @count: number of scatterlist entries which should be assigned buffers
 * of page size
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int zfcp_sg_setup_table(struct scatterlist *sg, int count)
{
	void *addr;
	int i;

	sg_init_table(sg, count);
	for (i = 0; i < count; i++, sg++) {
		addr = (void *) get_zeroed_page(GFP_KERNEL);
		if (!addr) {
			zfcp_sg_free_table(sg, i);
			return -ENOMEM;
		}
		sg_set_buf(sg, addr, PAGE_SIZE);
	}
	return 0;
}
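/*
 * Illustrative usage sketch (added for documentation, not part of the
 * original source): zfcp_sg_setup_table() and zfcp_sg_free_table() are used
 * as a pair to back a scatterlist with single zeroed pages, e.g. for a
 * hypothetical table of NUM_BUFS entries:
 *
 *	struct scatterlist sg[NUM_BUFS];
 *
 *	if (zfcp_sg_setup_table(sg, NUM_BUFS))
 *		return -ENOMEM;
 *	(fill and use the page-sized buffers)
 *	zfcp_sg_free_table(sg, NUM_BUFS);
 */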