// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/string.h>

#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>

/*
 * OK to include local libfcoe.h for debug_logging, but cannot include
 * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
 * have to include more than fcoe_sysfs.h.
 */
#include "libfcoe.h"

static atomic_t ctlr_num;
static atomic_t fcf_num;

/*
 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
 * should insulate the loss of a fcf.
 */
static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */

module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
		   uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fcf_dev_loss_tmo,
		 "Maximum number of seconds that libfcoe should"
		 " insulate the loss of a fcf. Once this value is"
		 " exceeded, the fcf is removed.");

/*
 * These are used by the fcoe_*_show_function routines, they
 * are intentionally placed in the .c file as they're not intended
 * for use throughout the code.
45 */ 46 #define fcoe_ctlr_id(x) \ 47 ((x)->id) 48 #define fcoe_ctlr_work_q_name(x) \ 49 ((x)->work_q_name) 50 #define fcoe_ctlr_work_q(x) \ 51 ((x)->work_q) 52 #define fcoe_ctlr_devloss_work_q_name(x) \ 53 ((x)->devloss_work_q_name) 54 #define fcoe_ctlr_devloss_work_q(x) \ 55 ((x)->devloss_work_q) 56 #define fcoe_ctlr_mode(x) \ 57 ((x)->mode) 58 #define fcoe_ctlr_fcf_dev_loss_tmo(x) \ 59 ((x)->fcf_dev_loss_tmo) 60 #define fcoe_ctlr_link_fail(x) \ 61 ((x)->lesb.lesb_link_fail) 62 #define fcoe_ctlr_vlink_fail(x) \ 63 ((x)->lesb.lesb_vlink_fail) 64 #define fcoe_ctlr_miss_fka(x) \ 65 ((x)->lesb.lesb_miss_fka) 66 #define fcoe_ctlr_symb_err(x) \ 67 ((x)->lesb.lesb_symb_err) 68 #define fcoe_ctlr_err_block(x) \ 69 ((x)->lesb.lesb_err_block) 70 #define fcoe_ctlr_fcs_error(x) \ 71 ((x)->lesb.lesb_fcs_error) 72 #define fcoe_ctlr_enabled(x) \ 73 ((x)->enabled) 74 #define fcoe_fcf_state(x) \ 75 ((x)->state) 76 #define fcoe_fcf_fabric_name(x) \ 77 ((x)->fabric_name) 78 #define fcoe_fcf_switch_name(x) \ 79 ((x)->switch_name) 80 #define fcoe_fcf_fc_map(x) \ 81 ((x)->fc_map) 82 #define fcoe_fcf_vfid(x) \ 83 ((x)->vfid) 84 #define fcoe_fcf_mac(x) \ 85 ((x)->mac) 86 #define fcoe_fcf_priority(x) \ 87 ((x)->priority) 88 #define fcoe_fcf_fka_period(x) \ 89 ((x)->fka_period) 90 #define fcoe_fcf_dev_loss_tmo(x) \ 91 ((x)->dev_loss_tmo) 92 #define fcoe_fcf_selected(x) \ 93 ((x)->selected) 94 #define fcoe_fcf_vlan_id(x) \ 95 ((x)->vlan_id) 96 97 /* 98 * dev_loss_tmo attribute 99 */ 100 static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val) 101 { 102 int ret; 103 104 ret = kstrtoul(buf, 0, val); 105 if (ret) 106 return -EINVAL; 107 /* 108 * Check for overflow; dev_loss_tmo is u32 109 */ 110 if (*val > UINT_MAX) 111 return -EINVAL; 112 113 return 0; 114 } 115 116 static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf, 117 unsigned long val) 118 { 119 if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) || 120 (fcf->state == FCOE_FCF_STATE_DISCONNECTED) || 121 (fcf->state == 
FCOE_FCF_STATE_DELETED)) 122 return -EBUSY; 123 /* 124 * Check for overflow; dev_loss_tmo is u32 125 */ 126 if (val > UINT_MAX) 127 return -EINVAL; 128 129 fcoe_fcf_dev_loss_tmo(fcf) = val; 130 return 0; 131 } 132 133 #define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \ 134 struct device_attribute device_attr_fcoe_##_prefix##_##_name = \ 135 __ATTR(_name, _mode, _show, _store) 136 137 #define fcoe_ctlr_show_function(field, format_string, sz, cast) \ 138 static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ 139 struct device_attribute *attr, \ 140 char *buf) \ 141 { \ 142 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ 143 if (ctlr->f->get_fcoe_ctlr_##field) \ 144 ctlr->f->get_fcoe_ctlr_##field(ctlr); \ 145 return snprintf(buf, sz, format_string, \ 146 cast fcoe_ctlr_##field(ctlr)); \ 147 } 148 149 #define fcoe_fcf_show_function(field, format_string, sz, cast) \ 150 static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ 151 struct device_attribute *attr, \ 152 char *buf) \ 153 { \ 154 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ 155 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \ 156 if (ctlr->f->get_fcoe_fcf_##field) \ 157 ctlr->f->get_fcoe_fcf_##field(fcf); \ 158 return snprintf(buf, sz, format_string, \ 159 cast fcoe_fcf_##field(fcf)); \ 160 } 161 162 #define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \ 163 static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \ 164 struct device_attribute *attr, \ 165 char *buf) \ 166 { \ 167 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \ 168 return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \ 169 } 170 171 #define fcoe_fcf_private_show_function(field, format_string, sz, cast) \ 172 static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \ 173 struct device_attribute *attr, \ 174 char *buf) \ 175 { \ 176 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \ 177 return snprintf(buf, sz, format_string, cast 
fcoe_fcf_##field(fcf)); \ 178 } 179 180 #define fcoe_ctlr_private_rd_attr(field, format_string, sz) \ 181 fcoe_ctlr_private_show_function(field, format_string, sz, ) \ 182 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ 183 show_fcoe_ctlr_device_##field, NULL) 184 185 #define fcoe_ctlr_rd_attr(field, format_string, sz) \ 186 fcoe_ctlr_show_function(field, format_string, sz, ) \ 187 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ 188 show_fcoe_ctlr_device_##field, NULL) 189 190 #define fcoe_fcf_rd_attr(field, format_string, sz) \ 191 fcoe_fcf_show_function(field, format_string, sz, ) \ 192 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ 193 show_fcoe_fcf_device_##field, NULL) 194 195 #define fcoe_fcf_private_rd_attr(field, format_string, sz) \ 196 fcoe_fcf_private_show_function(field, format_string, sz, ) \ 197 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ 198 show_fcoe_fcf_device_##field, NULL) 199 200 #define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \ 201 fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \ 202 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \ 203 show_fcoe_ctlr_device_##field, NULL) 204 205 #define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \ 206 fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \ 207 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \ 208 show_fcoe_fcf_device_##field, NULL) 209 210 #define fcoe_enum_name_search(title, table_type, table) \ 211 static const char *get_fcoe_##title##_name(enum table_type table_key) \ 212 { \ 213 if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \ 214 return NULL; \ 215 return table[table_key]; \ 216 } 217 218 static const char * const fip_conn_type_names[] = { 219 [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown", 220 [ FIP_CONN_TYPE_FABRIC ] = "Fabric", 221 [ FIP_CONN_TYPE_VN2VN ] = "VN2VN", 222 }; 223 fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names) 224 225 static char *fcf_state_names[] = { 226 [ FCOE_FCF_STATE_UNKNOWN ] = 
"Unknown", 227 [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected", 228 [ FCOE_FCF_STATE_CONNECTED ] = "Connected", 229 }; 230 fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names) 231 #define FCOE_FCF_STATE_MAX_NAMELEN 50 232 233 static ssize_t show_fcf_state(struct device *dev, 234 struct device_attribute *attr, 235 char *buf) 236 { 237 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); 238 const char *name; 239 name = get_fcoe_fcf_state_name(fcf->state); 240 if (!name) 241 return -EINVAL; 242 return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name); 243 } 244 static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL); 245 246 #define FCOE_MAX_MODENAME_LEN 20 247 static ssize_t show_ctlr_mode(struct device *dev, 248 struct device_attribute *attr, 249 char *buf) 250 { 251 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 252 const char *name; 253 254 name = get_fcoe_ctlr_mode_name(ctlr->mode); 255 if (!name) 256 return -EINVAL; 257 return snprintf(buf, FCOE_MAX_MODENAME_LEN, 258 "%s\n", name); 259 } 260 261 static ssize_t store_ctlr_mode(struct device *dev, 262 struct device_attribute *attr, 263 const char *buf, size_t count) 264 { 265 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 266 267 if (count > FCOE_MAX_MODENAME_LEN) 268 return -EINVAL; 269 270 271 switch (ctlr->enabled) { 272 case FCOE_CTLR_ENABLED: 273 LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); 274 return -EBUSY; 275 case FCOE_CTLR_DISABLED: 276 if (!ctlr->f->set_fcoe_ctlr_mode) { 277 LIBFCOE_SYSFS_DBG(ctlr, 278 "Mode change not supported by LLD.\n"); 279 return -ENOTSUPP; 280 } 281 282 ctlr->mode = sysfs_match_string(fip_conn_type_names, buf); 283 if (ctlr->mode < 0 || ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { 284 LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", 285 buf); 286 return -EINVAL; 287 } 288 289 ctlr->f->set_fcoe_ctlr_mode(ctlr); 290 LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); 291 292 return count; 293 case FCOE_CTLR_UNUSED: 294 default: 295 
LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); 296 return -ENOTSUPP; 297 } 298 } 299 300 static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR, 301 show_ctlr_mode, store_ctlr_mode); 302 303 static ssize_t store_ctlr_enabled(struct device *dev, 304 struct device_attribute *attr, 305 const char *buf, size_t count) 306 { 307 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 308 bool enabled; 309 int rc; 310 311 if (*buf == '1') 312 enabled = true; 313 else if (*buf == '0') 314 enabled = false; 315 else 316 return -EINVAL; 317 318 switch (ctlr->enabled) { 319 case FCOE_CTLR_ENABLED: 320 if (enabled) 321 return count; 322 ctlr->enabled = FCOE_CTLR_DISABLED; 323 break; 324 case FCOE_CTLR_DISABLED: 325 if (!enabled) 326 return count; 327 ctlr->enabled = FCOE_CTLR_ENABLED; 328 break; 329 case FCOE_CTLR_UNUSED: 330 return -ENOTSUPP; 331 } 332 333 rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr); 334 if (rc) 335 return rc; 336 337 return count; 338 } 339 340 static char *ctlr_enabled_state_names[] = { 341 [ FCOE_CTLR_ENABLED ] = "1", 342 [ FCOE_CTLR_DISABLED ] = "0", 343 }; 344 fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state, 345 ctlr_enabled_state_names) 346 #define FCOE_CTLR_ENABLED_MAX_NAMELEN 50 347 348 static ssize_t show_ctlr_enabled_state(struct device *dev, 349 struct device_attribute *attr, 350 char *buf) 351 { 352 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 353 const char *name; 354 355 name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled); 356 if (!name) 357 return -EINVAL; 358 return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN, 359 "%s\n", name); 360 } 361 362 static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR, 363 show_ctlr_enabled_state, 364 store_ctlr_enabled); 365 366 static ssize_t store_ctlr_fip_resp(struct device *dev, 367 struct device_attribute *attr, 368 const char *buf, size_t count) 369 { 370 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 371 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); 372 373 
mutex_lock(&fip->ctlr_mutex); 374 if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { 375 if (buf[0] == '1') { 376 fip->fip_resp = 1; 377 mutex_unlock(&fip->ctlr_mutex); 378 return count; 379 } 380 if (buf[0] == '0') { 381 fip->fip_resp = 0; 382 mutex_unlock(&fip->ctlr_mutex); 383 return count; 384 } 385 } 386 mutex_unlock(&fip->ctlr_mutex); 387 return -EINVAL; 388 } 389 390 static ssize_t show_ctlr_fip_resp(struct device *dev, 391 struct device_attribute *attr, 392 char *buf) 393 { 394 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 395 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr); 396 397 return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0); 398 } 399 400 static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR, 401 show_ctlr_fip_resp, 402 store_ctlr_fip_resp); 403 404 static ssize_t 405 fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count) 406 { 407 int err; 408 unsigned long v; 409 410 err = kstrtoul(buf, 10, &v); 411 if (err || v > UINT_MAX) 412 return -EINVAL; 413 414 *var = v; 415 416 return count; 417 } 418 419 static ssize_t store_ctlr_r_a_tov(struct device *dev, 420 struct device_attribute *attr, 421 const char *buf, size_t count) 422 { 423 struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); 424 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 425 426 if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) 427 return -EBUSY; 428 if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) 429 return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count); 430 return -ENOTSUPP; 431 } 432 433 static ssize_t show_ctlr_r_a_tov(struct device *dev, 434 struct device_attribute *attr, 435 char *buf) 436 { 437 struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); 438 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 439 440 return sprintf(buf, "%d\n", ctlr->lp->r_a_tov); 441 } 442 443 static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR, 444 show_ctlr_r_a_tov, store_ctlr_r_a_tov); 445 446 static ssize_t store_ctlr_e_d_tov(struct 
device *dev, 447 struct device_attribute *attr, 448 const char *buf, size_t count) 449 { 450 struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); 451 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 452 453 if (ctlr_dev->enabled == FCOE_CTLR_ENABLED) 454 return -EBUSY; 455 if (ctlr_dev->enabled == FCOE_CTLR_DISABLED) 456 return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count); 457 return -ENOTSUPP; 458 } 459 460 static ssize_t show_ctlr_e_d_tov(struct device *dev, 461 struct device_attribute *attr, 462 char *buf) 463 { 464 struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev); 465 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); 466 467 return sprintf(buf, "%d\n", ctlr->lp->e_d_tov); 468 } 469 470 static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR, 471 show_ctlr_e_d_tov, store_ctlr_e_d_tov); 472 473 static ssize_t 474 store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev, 475 struct device_attribute *attr, 476 const char *buf, size_t count) 477 { 478 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 479 struct fcoe_fcf_device *fcf; 480 unsigned long val; 481 int rc; 482 483 rc = fcoe_str_to_dev_loss(buf, &val); 484 if (rc) 485 return rc; 486 487 fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val; 488 mutex_lock(&ctlr->lock); 489 list_for_each_entry(fcf, &ctlr->fcfs, peers) 490 fcoe_fcf_set_dev_loss_tmo(fcf, val); 491 mutex_unlock(&ctlr->lock); 492 return count; 493 } 494 fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, ); 495 static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR, 496 show_fcoe_ctlr_device_fcf_dev_loss_tmo, 497 store_private_fcoe_ctlr_fcf_dev_loss_tmo); 498 499 /* Link Error Status Block (LESB) */ 500 fcoe_ctlr_rd_attr(link_fail, "%u\n", 20); 501 fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20); 502 fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20); 503 fcoe_ctlr_rd_attr(symb_err, "%u\n", 20); 504 fcoe_ctlr_rd_attr(err_block, "%u\n", 20); 505 fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20); 506 507 
fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long); 508 fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long); 509 fcoe_fcf_private_rd_attr(priority, "%u\n", 20); 510 fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20); 511 fcoe_fcf_private_rd_attr(vfid, "%u\n", 20); 512 fcoe_fcf_private_rd_attr(mac, "%pM\n", 20); 513 fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20); 514 fcoe_fcf_rd_attr(selected, "%u\n", 20); 515 fcoe_fcf_rd_attr(vlan_id, "%u\n", 20); 516 517 fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, ) 518 static ssize_t 519 store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr, 520 const char *buf, size_t count) 521 { 522 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); 523 unsigned long val; 524 int rc; 525 526 rc = fcoe_str_to_dev_loss(buf, &val); 527 if (rc) 528 return rc; 529 530 rc = fcoe_fcf_set_dev_loss_tmo(fcf, val); 531 if (rc) 532 return rc; 533 return count; 534 } 535 static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR, 536 show_fcoe_fcf_device_dev_loss_tmo, 537 store_fcoe_fcf_dev_loss_tmo); 538 539 static struct attribute *fcoe_ctlr_lesb_attrs[] = { 540 &device_attr_fcoe_ctlr_link_fail.attr, 541 &device_attr_fcoe_ctlr_vlink_fail.attr, 542 &device_attr_fcoe_ctlr_miss_fka.attr, 543 &device_attr_fcoe_ctlr_symb_err.attr, 544 &device_attr_fcoe_ctlr_err_block.attr, 545 &device_attr_fcoe_ctlr_fcs_error.attr, 546 NULL, 547 }; 548 549 static struct attribute_group fcoe_ctlr_lesb_attr_group = { 550 .name = "lesb", 551 .attrs = fcoe_ctlr_lesb_attrs, 552 }; 553 554 static struct attribute *fcoe_ctlr_attrs[] = { 555 &device_attr_fcoe_ctlr_fip_vlan_responder.attr, 556 &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr, 557 &device_attr_fcoe_ctlr_r_a_tov.attr, 558 &device_attr_fcoe_ctlr_e_d_tov.attr, 559 &device_attr_fcoe_ctlr_enabled.attr, 560 &device_attr_fcoe_ctlr_mode.attr, 561 NULL, 562 }; 563 564 static struct attribute_group fcoe_ctlr_attr_group = { 565 .attrs = 
fcoe_ctlr_attrs, 566 }; 567 568 static const struct attribute_group *fcoe_ctlr_attr_groups[] = { 569 &fcoe_ctlr_attr_group, 570 &fcoe_ctlr_lesb_attr_group, 571 NULL, 572 }; 573 574 static struct attribute *fcoe_fcf_attrs[] = { 575 &device_attr_fcoe_fcf_fabric_name.attr, 576 &device_attr_fcoe_fcf_switch_name.attr, 577 &device_attr_fcoe_fcf_dev_loss_tmo.attr, 578 &device_attr_fcoe_fcf_fc_map.attr, 579 &device_attr_fcoe_fcf_vfid.attr, 580 &device_attr_fcoe_fcf_mac.attr, 581 &device_attr_fcoe_fcf_priority.attr, 582 &device_attr_fcoe_fcf_fka_period.attr, 583 &device_attr_fcoe_fcf_state.attr, 584 &device_attr_fcoe_fcf_selected.attr, 585 &device_attr_fcoe_fcf_vlan_id.attr, 586 NULL 587 }; 588 589 static struct attribute_group fcoe_fcf_attr_group = { 590 .attrs = fcoe_fcf_attrs, 591 }; 592 593 static const struct attribute_group *fcoe_fcf_attr_groups[] = { 594 &fcoe_fcf_attr_group, 595 NULL, 596 }; 597 598 static struct bus_type fcoe_bus_type; 599 600 static int fcoe_bus_match(struct device *dev, 601 struct device_driver *drv) 602 { 603 if (dev->bus == &fcoe_bus_type) 604 return 1; 605 return 0; 606 } 607 608 /** 609 * fcoe_ctlr_device_release() - Release the FIP ctlr memory 610 * @dev: Pointer to the FIP ctlr's embedded device 611 * 612 * Called when the last FIP ctlr reference is released. 613 */ 614 static void fcoe_ctlr_device_release(struct device *dev) 615 { 616 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); 617 kfree(ctlr); 618 } 619 620 /** 621 * fcoe_fcf_device_release() - Release the FIP fcf memory 622 * @dev: Pointer to the fcf's embedded device 623 * 624 * Called when the last FIP fcf reference is released. 
625 */ 626 static void fcoe_fcf_device_release(struct device *dev) 627 { 628 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); 629 kfree(fcf); 630 } 631 632 static const struct device_type fcoe_ctlr_device_type = { 633 .name = "fcoe_ctlr", 634 .groups = fcoe_ctlr_attr_groups, 635 .release = fcoe_ctlr_device_release, 636 }; 637 638 static const struct device_type fcoe_fcf_device_type = { 639 .name = "fcoe_fcf", 640 .groups = fcoe_fcf_attr_groups, 641 .release = fcoe_fcf_device_release, 642 }; 643 644 static ssize_t ctlr_create_store(const struct bus_type *bus, const char *buf, 645 size_t count) 646 { 647 return fcoe_ctlr_create_store(buf, count); 648 } 649 static BUS_ATTR_WO(ctlr_create); 650 651 static ssize_t ctlr_destroy_store(const struct bus_type *bus, const char *buf, 652 size_t count) 653 { 654 return fcoe_ctlr_destroy_store(buf, count); 655 } 656 static BUS_ATTR_WO(ctlr_destroy); 657 658 static struct attribute *fcoe_bus_attrs[] = { 659 &bus_attr_ctlr_create.attr, 660 &bus_attr_ctlr_destroy.attr, 661 NULL, 662 }; 663 ATTRIBUTE_GROUPS(fcoe_bus); 664 665 static struct bus_type fcoe_bus_type = { 666 .name = "fcoe", 667 .match = &fcoe_bus_match, 668 .bus_groups = fcoe_bus_groups, 669 }; 670 671 /** 672 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue 673 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed 674 */ 675 static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) 676 { 677 if (!fcoe_ctlr_work_q(ctlr)) { 678 printk(KERN_ERR 679 "ERROR: FIP Ctlr '%d' attempted to flush work, " 680 "when no workqueue created.\n", ctlr->id); 681 dump_stack(); 682 return; 683 } 684 685 flush_workqueue(fcoe_ctlr_work_q(ctlr)); 686 } 687 688 /** 689 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue 690 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue 691 * @work: Work to queue for execution 692 * 693 * Return value: 694 * 1 on success / 0 already queued / < 0 for error 695 */ 696 static int 
fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, 697 struct work_struct *work) 698 { 699 if (unlikely(!fcoe_ctlr_work_q(ctlr))) { 700 printk(KERN_ERR 701 "ERROR: FIP Ctlr '%d' attempted to queue work, " 702 "when no workqueue created.\n", ctlr->id); 703 dump_stack(); 704 705 return -EINVAL; 706 } 707 708 return queue_work(fcoe_ctlr_work_q(ctlr), work); 709 } 710 711 /** 712 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue 713 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed 714 */ 715 static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) 716 { 717 if (!fcoe_ctlr_devloss_work_q(ctlr)) { 718 printk(KERN_ERR 719 "ERROR: FIP Ctlr '%d' attempted to flush work, " 720 "when no workqueue created.\n", ctlr->id); 721 dump_stack(); 722 return; 723 } 724 725 flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr)); 726 } 727 728 /** 729 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue 730 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue 731 * @work: Work to queue for execution 732 * @delay: jiffies to delay the work queuing 733 * 734 * Return value: 735 * 1 on success / 0 already queued / < 0 for error 736 */ 737 static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, 738 struct delayed_work *work, 739 unsigned long delay) 740 { 741 if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { 742 printk(KERN_ERR 743 "ERROR: FIP Ctlr '%d' attempted to queue work, " 744 "when no workqueue created.\n", ctlr->id); 745 dump_stack(); 746 747 return -EINVAL; 748 } 749 750 return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); 751 } 752 753 static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, 754 struct fcoe_fcf_device *old) 755 { 756 if (new->switch_name == old->switch_name && 757 new->fabric_name == old->fabric_name && 758 new->fc_map == old->fc_map && 759 ether_addr_equal(new->mac, old->mac)) 760 return 1; 761 return 0; 762 } 763 
764 /** 765 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs 766 * @parent: The parent device to which the fcoe_ctlr instance 767 * should be attached 768 * @f: The LLD's FCoE sysfs function template pointer 769 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD 770 * 771 * This routine allocates a FIP ctlr object with some additional memory 772 * for the LLD. The FIP ctlr is initialized, added to sysfs and then 773 * attributes are added to it. 774 */ 775 struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, 776 struct fcoe_sysfs_function_template *f, 777 int priv_size) 778 { 779 struct fcoe_ctlr_device *ctlr; 780 int error = 0; 781 782 ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size, 783 GFP_KERNEL); 784 if (!ctlr) 785 goto out; 786 787 ctlr->id = atomic_inc_return(&ctlr_num) - 1; 788 ctlr->f = f; 789 ctlr->mode = FIP_CONN_TYPE_FABRIC; 790 INIT_LIST_HEAD(&ctlr->fcfs); 791 mutex_init(&ctlr->lock); 792 ctlr->dev.parent = parent; 793 ctlr->dev.bus = &fcoe_bus_type; 794 ctlr->dev.type = &fcoe_ctlr_device_type; 795 796 ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo; 797 798 snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name), 799 "ctlr_wq_%d", ctlr->id); 800 ctlr->work_q = create_singlethread_workqueue( 801 ctlr->work_q_name); 802 if (!ctlr->work_q) 803 goto out_del; 804 805 snprintf(ctlr->devloss_work_q_name, 806 sizeof(ctlr->devloss_work_q_name), 807 "ctlr_dl_wq_%d", ctlr->id); 808 ctlr->devloss_work_q = create_singlethread_workqueue( 809 ctlr->devloss_work_q_name); 810 if (!ctlr->devloss_work_q) 811 goto out_del_q; 812 813 dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); 814 error = device_register(&ctlr->dev); 815 if (error) { 816 destroy_workqueue(ctlr->devloss_work_q); 817 destroy_workqueue(ctlr->work_q); 818 put_device(&ctlr->dev); 819 return NULL; 820 } 821 822 return ctlr; 823 824 out_del_q: 825 destroy_workqueue(ctlr->work_q); 826 ctlr->work_q = NULL; 827 out_del: 828 kfree(ctlr); 829 out: 830 return NULL; 
831 } 832 EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add); 833 834 /** 835 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs 836 * @ctlr: A pointer to the ctlr to be deleted 837 * 838 * Deletes a FIP ctlr and any fcfs attached 839 * to it. Deleting fcfs will cause their childen 840 * to be deleted as well. 841 * 842 * The ctlr is detached from sysfs and it's resources 843 * are freed (work q), but the memory is not freed 844 * until its last reference is released. 845 * 846 * This routine expects no locks to be held before 847 * calling. 848 * 849 * TODO: Currently there are no callbacks to clean up LLD data 850 * for a fcoe_fcf_device. LLDs must keep this in mind as they need 851 * to clean up each of their LLD data for all fcoe_fcf_device before 852 * calling fcoe_ctlr_device_delete. 853 */ 854 void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr) 855 { 856 struct fcoe_fcf_device *fcf, *next; 857 /* Remove any attached fcfs */ 858 mutex_lock(&ctlr->lock); 859 list_for_each_entry_safe(fcf, next, 860 &ctlr->fcfs, peers) { 861 list_del(&fcf->peers); 862 fcf->state = FCOE_FCF_STATE_DELETED; 863 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); 864 } 865 mutex_unlock(&ctlr->lock); 866 867 fcoe_ctlr_device_flush_work(ctlr); 868 869 destroy_workqueue(ctlr->devloss_work_q); 870 ctlr->devloss_work_q = NULL; 871 destroy_workqueue(ctlr->work_q); 872 ctlr->work_q = NULL; 873 874 device_unregister(&ctlr->dev); 875 } 876 EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete); 877 878 /** 879 * fcoe_fcf_device_final_delete() - Final delete routine 880 * @work: The FIP fcf's embedded work struct 881 * 882 * It is expected that the fcf has been removed from 883 * the FIP ctlr's list before calling this routine. 
884 */ 885 static void fcoe_fcf_device_final_delete(struct work_struct *work) 886 { 887 struct fcoe_fcf_device *fcf = 888 container_of(work, struct fcoe_fcf_device, delete_work); 889 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); 890 891 /* 892 * Cancel any outstanding timers. These should really exist 893 * only when rmmod'ing the LLDD and we're asking for 894 * immediate termination of the rports 895 */ 896 if (!cancel_delayed_work(&fcf->dev_loss_work)) 897 fcoe_ctlr_device_flush_devloss(ctlr); 898 899 device_unregister(&fcf->dev); 900 } 901 902 /** 903 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires 904 * @work: The FIP fcf's embedded work struct 905 * 906 * Removes the fcf from the FIP ctlr's list of fcfs and 907 * queues the final deletion. 908 */ 909 static void fip_timeout_deleted_fcf(struct work_struct *work) 910 { 911 struct fcoe_fcf_device *fcf = 912 container_of(work, struct fcoe_fcf_device, dev_loss_work.work); 913 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); 914 915 mutex_lock(&ctlr->lock); 916 917 /* 918 * If the fcf is deleted or reconnected before the timer 919 * fires the devloss queue will be flushed, but the state will 920 * either be CONNECTED or DELETED. If that is the case we 921 * cancel deleting the fcf. 922 */ 923 if (fcf->state != FCOE_FCF_STATE_DISCONNECTED) 924 goto out; 925 926 dev_printk(KERN_ERR, &fcf->dev, 927 "FIP fcf connection time out: removing fcf\n"); 928 929 list_del(&fcf->peers); 930 fcf->state = FCOE_FCF_STATE_DELETED; 931 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work); 932 933 out: 934 mutex_unlock(&ctlr->lock); 935 } 936 937 /** 938 * fcoe_fcf_device_delete() - Delete a FIP fcf 939 * @fcf: Pointer to the fcf which is to be deleted 940 * 941 * Queues the FIP fcf on the devloss workqueue 942 * 943 * Expects the ctlr_attrs mutex to be held for fcf 944 * state change. 
945 */ 946 void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf) 947 { 948 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); 949 int timeout = fcf->dev_loss_tmo; 950 951 if (fcf->state != FCOE_FCF_STATE_CONNECTED) 952 return; 953 954 fcf->state = FCOE_FCF_STATE_DISCONNECTED; 955 956 /* 957 * FCF will only be re-connected by the LLD calling 958 * fcoe_fcf_device_add, and it should be setting up 959 * priv then. 960 */ 961 fcf->priv = NULL; 962 963 fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work, 964 timeout * HZ); 965 } 966 EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete); 967 968 /** 969 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system 970 * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent 971 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs 972 * 973 * Expects to be called with the ctlr->lock held 974 */ 975 struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, 976 struct fcoe_fcf_device *new_fcf) 977 { 978 struct fcoe_fcf_device *fcf; 979 int error = 0; 980 981 list_for_each_entry(fcf, &ctlr->fcfs, peers) { 982 if (fcoe_fcf_device_match(new_fcf, fcf)) { 983 if (fcf->state == FCOE_FCF_STATE_CONNECTED) 984 return fcf; 985 986 fcf->state = FCOE_FCF_STATE_CONNECTED; 987 988 if (!cancel_delayed_work(&fcf->dev_loss_work)) 989 fcoe_ctlr_device_flush_devloss(ctlr); 990 991 return fcf; 992 } 993 } 994 995 fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC); 996 if (unlikely(!fcf)) 997 goto out; 998 999 INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete); 1000 INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf); 1001 1002 fcf->dev.parent = &ctlr->dev; 1003 fcf->dev.bus = &fcoe_bus_type; 1004 fcf->dev.type = &fcoe_fcf_device_type; 1005 fcf->id = atomic_inc_return(&fcf_num) - 1; 1006 fcf->state = FCOE_FCF_STATE_UNKNOWN; 1007 1008 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo; 1009 1010 dev_set_name(&fcf->dev, "fcf_%d", fcf->id); 1011 1012 
fcf->fabric_name = new_fcf->fabric_name; 1013 fcf->switch_name = new_fcf->switch_name; 1014 fcf->fc_map = new_fcf->fc_map; 1015 fcf->vfid = new_fcf->vfid; 1016 memcpy(fcf->mac, new_fcf->mac, ETH_ALEN); 1017 fcf->priority = new_fcf->priority; 1018 fcf->fka_period = new_fcf->fka_period; 1019 fcf->selected = new_fcf->selected; 1020 1021 error = device_register(&fcf->dev); 1022 if (error) { 1023 put_device(&fcf->dev); 1024 goto out; 1025 } 1026 1027 fcf->state = FCOE_FCF_STATE_CONNECTED; 1028 list_add_tail(&fcf->peers, &ctlr->fcfs); 1029 1030 return fcf; 1031 1032 out: 1033 return NULL; 1034 } 1035 EXPORT_SYMBOL_GPL(fcoe_fcf_device_add); 1036 1037 int __init fcoe_sysfs_setup(void) 1038 { 1039 atomic_set(&ctlr_num, 0); 1040 atomic_set(&fcf_num, 0); 1041 1042 return bus_register(&fcoe_bus_type); 1043 } 1044 1045 void __exit fcoe_sysfs_teardown(void) 1046 { 1047 bus_unregister(&fcoe_bus_type); 1048 } 1049