// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/string.h>

#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>

/*
 * OK to include local libfcoe.h for debug_logging, but cannot include
 * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would have
 * to include more than fcoe_sysfs.h.
 */
#include "libfcoe.h"

static atomic_t ctlr_num;
static atomic_t fcf_num;

/*
 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
 * should insulate the loss of a fcf.
 */
static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */

module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
		   uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fcf_dev_loss_tmo,
		 "Maximum number of seconds that libfcoe should"
		 " insulate the loss of a fcf. Once this value is"
		 " exceeded, the fcf is removed.");

/*
 * These are used by the fcoe_*_show_function routines; they are
 * intentionally placed in the .c file as they're not intended
 * for use throughout the code.
 */
#define fcoe_ctlr_id(x)				\
	((x)->id)
#define fcoe_ctlr_work_q_name(x)		\
	((x)->work_q_name)
#define fcoe_ctlr_work_q(x)			\
	((x)->work_q)
#define fcoe_ctlr_devloss_work_q_name(x)	\
	((x)->devloss_work_q_name)
#define fcoe_ctlr_devloss_work_q(x)		\
	((x)->devloss_work_q)
#define fcoe_ctlr_mode(x)			\
	((x)->mode)
#define fcoe_ctlr_fcf_dev_loss_tmo(x)		\
	((x)->fcf_dev_loss_tmo)
#define fcoe_ctlr_link_fail(x)			\
	((x)->lesb.lesb_link_fail)
#define fcoe_ctlr_vlink_fail(x)			\
	((x)->lesb.lesb_vlink_fail)
#define fcoe_ctlr_miss_fka(x)			\
	((x)->lesb.lesb_miss_fka)
#define fcoe_ctlr_symb_err(x)			\
	((x)->lesb.lesb_symb_err)
#define fcoe_ctlr_err_block(x)			\
	((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x)			\
	((x)->lesb.lesb_fcs_error)
#define fcoe_ctlr_enabled(x)			\
	((x)->enabled)
#define fcoe_fcf_state(x)			\
	((x)->state)
#define fcoe_fcf_fabric_name(x)			\
	((x)->fabric_name)
#define fcoe_fcf_switch_name(x)			\
	((x)->switch_name)
#define fcoe_fcf_fc_map(x)			\
	((x)->fc_map)
#define fcoe_fcf_vfid(x)			\
	((x)->vfid)
#define fcoe_fcf_mac(x)				\
	((x)->mac)
#define fcoe_fcf_priority(x)			\
	((x)->priority)
#define fcoe_fcf_fka_period(x)			\
	((x)->fka_period)
#define fcoe_fcf_dev_loss_tmo(x)		\
	((x)->dev_loss_tmo)
#define fcoe_fcf_selected(x)			\
	((x)->selected)
#define fcoe_fcf_vlan_id(x)			\
	((x)->vlan_id)

/*
 * dev_loss_tmo attribute
 */
static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
{
	int ret;

	ret = kstrtoul(buf, 0, val);
	if (ret)
		return -EINVAL;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (*val > UINT_MAX)
		return -EINVAL;

	return 0;
}

static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
				     unsigned long val)
{
	if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
	    (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
	    (fcf->state == FCOE_FCF_STATE_DELETED))
		return -EBUSY;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (val > UINT_MAX)
		return -EINVAL;

	fcoe_fcf_dev_loss_tmo(fcf) = val;
	return 0;
}

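/*
 * The macros below generate the boilerplate sysfs show()/store() handlers
 * and device_attribute definitions. As an illustrative example (an expansion
 * sketch, not literal preprocessor output): fcoe_ctlr_rd_attr(link_fail,
 * "%u\n", 20) produces show_fcoe_ctlr_device_link_fail(), which calls the
 * LLD's get_fcoe_ctlr_link_fail() hook if present, plus a read-only
 * device_attr_fcoe_ctlr_link_fail attribute.
 */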
#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store)		\
struct device_attribute device_attr_fcoe_##_prefix##_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define fcoe_ctlr_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
	if (ctlr->f->get_fcoe_ctlr_##field)				\
		ctlr->f->get_fcoe_ctlr_##field(ctlr);			\
	return snprintf(buf, sz, format_string,				\
			cast fcoe_ctlr_##field(ctlr));			\
}

#define fcoe_fcf_show_function(field, format_string, sz, cast)		\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
					    struct device_attribute *attr, \
					    char *buf)			\
{									\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);	\
	if (ctlr->f->get_fcoe_fcf_##field)				\
		ctlr->f->get_fcoe_fcf_##field(fcf);			\
	return snprintf(buf, sz, format_string,				\
			cast fcoe_fcf_##field(fcf));			\
}

#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
	return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
}

#define fcoe_fcf_private_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
					    struct device_attribute *attr, \
					    char *buf)			\
{									\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
	return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
}

#define fcoe_ctlr_private_rd_attr(field, format_string, sz)		\
	fcoe_ctlr_private_show_function(field, format_string, sz, )	\
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,				\
			show_fcoe_ctlr_device_##field, NULL)

#define fcoe_ctlr_rd_attr(field, format_string, sz)			\
	fcoe_ctlr_show_function(field, format_string, sz, )		\
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,				\
			show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_rd_attr(field, format_string, sz)			\
	fcoe_fcf_show_function(field, format_string, sz, )		\
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,				\
			show_fcoe_fcf_device_##field, NULL)

#define fcoe_fcf_private_rd_attr(field, format_string, sz)		\
	fcoe_fcf_private_show_function(field, format_string, sz, )	\
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,				\
			show_fcoe_fcf_device_##field, NULL)

#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast)	\
	fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,				\
			show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast)	\
	fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,				\
			show_fcoe_fcf_device_##field, NULL)

#define fcoe_enum_name_search(title, table_type, table)			\
static const char *get_fcoe_##title##_name(enum table_type table_key)	\
{									\
	if (table_key < 0 || table_key >= ARRAY_SIZE(table))		\
		return NULL;						\
	return table[table_key];					\
}

static const char * const fip_conn_type_names[] = {
	[ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
	[ FIP_CONN_TYPE_FABRIC ]  = "Fabric",
	[ FIP_CONN_TYPE_VN2VN ]   = "VN2VN",
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)

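/*
 * The get_fcoe_*_name() helpers generated above return NULL for
 * out-of-range enum values; the show routines below turn that into
 * -EINVAL rather than printing garbage.
 */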
static char *fcf_state_names[] = {
	[ FCOE_FCF_STATE_UNKNOWN ]      = "Unknown",
	[ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
	[ FCOE_FCF_STATE_CONNECTED ]    = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50

static ssize_t show_fcf_state(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	const char *name;
	name = get_fcoe_fcf_state_name(fcf->state);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);

#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_mode_name(ctlr->mode);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_MAX_MODENAME_LEN,
			"%s\n", name);
}

static ssize_t store_ctlr_mode(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	int res;

	if (count > FCOE_MAX_MODENAME_LEN)
		return -EINVAL;

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
		return -EBUSY;
	case FCOE_CTLR_DISABLED:
		if (!ctlr->f->set_fcoe_ctlr_mode) {
			LIBFCOE_SYSFS_DBG(ctlr,
					  "Mode change not supported by LLD.\n");
			return -ENOTSUPP;
		}

		res = sysfs_match_string(fip_conn_type_names, buf);
		if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
			LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
					  buf);
			return -EINVAL;
		}
		ctlr->mode = res;

		ctlr->f->set_fcoe_ctlr_mode(ctlr);
		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);

		return count;
	case FCOE_CTLR_UNUSED:
	default:
		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
		return -ENOTSUPP;
	}
}

static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
			show_ctlr_mode, store_ctlr_mode);

static ssize_t store_ctlr_enabled(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	bool enabled;
	int rc;

	if (*buf == '1')
		enabled = true;
	else if (*buf == '0')
		enabled = false;
	else
		return -EINVAL;

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		if (enabled)
			return count;
		ctlr->enabled = FCOE_CTLR_DISABLED;
		break;
	case FCOE_CTLR_DISABLED:
		if (!enabled)
			return count;
		ctlr->enabled = FCOE_CTLR_ENABLED;
		break;
	case FCOE_CTLR_UNUSED:
		return -ENOTSUPP;
	}

	rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
	if (rc)
		return rc;

	return count;
}

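/*
 * Illustrative usage of the enabled attribute (assuming a controller
 * instance named ctlr_0; the actual name depends on creation order):
 *
 *	echo 0 > /sys/bus/fcoe/devices/ctlr_0/enabled
 *
 * disables the controller through the LLD's set_fcoe_ctlr_enabled()
 * callback, and echoing 1 re-enables it.
 */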
static char *ctlr_enabled_state_names[] = {
	[ FCOE_CTLR_ENABLED ]  = "1",
	[ FCOE_CTLR_DISABLED ] = "0",
};
fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
		      ctlr_enabled_state_names)
#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50

static ssize_t show_ctlr_enabled_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
			"%s\n", name);
}

static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
			show_ctlr_enabled_state,
			store_ctlr_enabled);

static ssize_t store_ctlr_fip_resp(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);

	mutex_lock(&fip->ctlr_mutex);
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1') {
			fip->fip_resp = 1;
			mutex_unlock(&fip->ctlr_mutex);
			return count;
		}
		if (buf[0] == '0') {
			fip->fip_resp = 0;
			mutex_unlock(&fip->ctlr_mutex);
			return count;
		}
	}
	mutex_unlock(&fip->ctlr_mutex);
	return -EINVAL;
}

static ssize_t show_ctlr_fip_resp(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);

	return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0);
}

static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
			show_ctlr_fip_resp,
			store_ctlr_fip_resp);

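/*
 * The r_a_tov and e_d_tov attributes below can only be changed while the
 * controller is disabled; their store handlers return -EBUSY when the
 * controller is enabled. A typical (illustrative) sequence is to write 0
 * to the enabled attribute, update the timeout, then re-enable.
 */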
static ssize_t
fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(buf, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t store_ctlr_r_a_tov(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
		return -EBUSY;
	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
		return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
	return -ENOTSUPP;
}

static ssize_t show_ctlr_r_a_tov(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
}

static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
			show_ctlr_r_a_tov, store_ctlr_r_a_tov);

static ssize_t store_ctlr_e_d_tov(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
		return -EBUSY;
	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
		return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
	return -ENOTSUPP;
}

static ssize_t show_ctlr_e_d_tov(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
}

static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
			show_ctlr_e_d_tov, store_ctlr_e_d_tov);

static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_fcf_device *fcf;
	unsigned long val;
	int rc;

	rc = fcoe_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
	mutex_lock(&ctlr->lock);
	list_for_each_entry(fcf, &ctlr->fcfs, peers)
		fcoe_fcf_set_dev_loss_tmo(fcf, val);
	mutex_unlock(&ctlr->lock);
	return count;
}
fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
			show_fcoe_ctlr_device_fcf_dev_loss_tmo,
			store_private_fcoe_ctlr_fcf_dev_loss_tmo);

/* Link Error Status Block (LESB) */
fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);

fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
fcoe_fcf_rd_attr(selected, "%u\n", 20);
fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);

fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
static ssize_t
store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	unsigned long val;
	int rc;

	rc = fcoe_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
	if (rc)
		return rc;
	return count;
}
static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
			show_fcoe_fcf_device_dev_loss_tmo,
			store_fcoe_fcf_dev_loss_tmo);

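/*
 * The attribute groups below are wired up through the .groups pointers of
 * fcoe_ctlr_device_type and fcoe_fcf_device_type further down, so sysfs
 * creates them automatically at device_register() time; no explicit
 * sysfs_create_group() calls are needed.
 */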
static struct attribute *fcoe_ctlr_lesb_attrs[] = {
	&device_attr_fcoe_ctlr_link_fail.attr,
	&device_attr_fcoe_ctlr_vlink_fail.attr,
	&device_attr_fcoe_ctlr_miss_fka.attr,
	&device_attr_fcoe_ctlr_symb_err.attr,
	&device_attr_fcoe_ctlr_err_block.attr,
	&device_attr_fcoe_ctlr_fcs_error.attr,
	NULL,
};

static struct attribute_group fcoe_ctlr_lesb_attr_group = {
	.name = "lesb",
	.attrs = fcoe_ctlr_lesb_attrs,
};

static struct attribute *fcoe_ctlr_attrs[] = {
	&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
	&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
	&device_attr_fcoe_ctlr_r_a_tov.attr,
	&device_attr_fcoe_ctlr_e_d_tov.attr,
	&device_attr_fcoe_ctlr_enabled.attr,
	&device_attr_fcoe_ctlr_mode.attr,
	NULL,
};

static struct attribute_group fcoe_ctlr_attr_group = {
	.attrs = fcoe_ctlr_attrs,
};

static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
	&fcoe_ctlr_attr_group,
	&fcoe_ctlr_lesb_attr_group,
	NULL,
};

static struct attribute *fcoe_fcf_attrs[] = {
	&device_attr_fcoe_fcf_fabric_name.attr,
	&device_attr_fcoe_fcf_switch_name.attr,
	&device_attr_fcoe_fcf_dev_loss_tmo.attr,
	&device_attr_fcoe_fcf_fc_map.attr,
	&device_attr_fcoe_fcf_vfid.attr,
	&device_attr_fcoe_fcf_mac.attr,
	&device_attr_fcoe_fcf_priority.attr,
	&device_attr_fcoe_fcf_fka_period.attr,
	&device_attr_fcoe_fcf_state.attr,
	&device_attr_fcoe_fcf_selected.attr,
	&device_attr_fcoe_fcf_vlan_id.attr,
	NULL
};

static struct attribute_group fcoe_fcf_attr_group = {
	.attrs = fcoe_fcf_attrs,
};

static const struct attribute_group *fcoe_fcf_attr_groups[] = {
	&fcoe_fcf_attr_group,
	NULL,
};

static const struct bus_type fcoe_bus_type;

static int fcoe_bus_match(struct device *dev,
			  struct device_driver *drv)
{
	if (dev->bus == &fcoe_bus_type)
		return 1;
	return 0;
}

/**
 * fcoe_ctlr_device_release() - Release the FIP ctlr memory
 * @dev: Pointer to the FIP ctlr's embedded device
 *
 * Called when the last FIP ctlr reference is released.
 */
static void fcoe_ctlr_device_release(struct device *dev)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	kfree(ctlr);
}

/**
 * fcoe_fcf_device_release() - Release the FIP fcf memory
 * @dev: Pointer to the fcf's embedded device
 *
 * Called when the last FIP fcf reference is released.
 */
static void fcoe_fcf_device_release(struct device *dev)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	kfree(fcf);
}

static const struct device_type fcoe_ctlr_device_type = {
	.name = "fcoe_ctlr",
	.groups = fcoe_ctlr_attr_groups,
	.release = fcoe_ctlr_device_release,
};

static const struct device_type fcoe_fcf_device_type = {
	.name = "fcoe_fcf",
	.groups = fcoe_fcf_attr_groups,
	.release = fcoe_fcf_device_release,
};

static ssize_t ctlr_create_store(const struct bus_type *bus, const char *buf,
				 size_t count)
{
	return fcoe_ctlr_create_store(buf, count);
}
static BUS_ATTR_WO(ctlr_create);

static ssize_t ctlr_destroy_store(const struct bus_type *bus, const char *buf,
				  size_t count)
{
	return fcoe_ctlr_destroy_store(buf, count);
}
static BUS_ATTR_WO(ctlr_destroy);

static struct attribute *fcoe_bus_attrs[] = {
	&bus_attr_ctlr_create.attr,
	&bus_attr_ctlr_destroy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(fcoe_bus);

static const struct bus_type fcoe_bus_type = {
	.name = "fcoe",
	.match = &fcoe_bus_match,
	.bus_groups = fcoe_bus_groups,
};

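/*
 * Illustrative usage of the bus attributes above (the accepted syntax is
 * ultimately decided by fcoe_ctlr_create_store()/fcoe_ctlr_destroy_store()
 * in libfcoe):
 *
 *	echo eth0 > /sys/bus/fcoe/ctlr_create
 *	echo eth0 > /sys/bus/fcoe/ctlr_destroy
 */
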
/**
 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
 */
static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
{
	if (!fcoe_ctlr_work_q(ctlr)) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();
		return;
	}

	flush_workqueue(fcoe_ctlr_work_q(ctlr));
}

/**
 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
 * @ctlr: Pointer to the FIP ctlr who owns the workqueue
 * @work: Work to queue for execution
 *
 * Return value:
 *	1 on success / 0 already queued / < 0 for error
 */
static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
				       struct work_struct *work)
{
	if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(fcoe_ctlr_work_q(ctlr), work);
}

/**
 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
 */
static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
{
	if (!fcoe_ctlr_devloss_work_q(ctlr)) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();
		return;
	}

	flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
}

/**
 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
 * @work: Work to queue for execution
 * @delay: jiffies to delay the work queuing
 *
 * Return value:
 *	1 on success / 0 already queued / < 0 for error
 */
static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
					       struct delayed_work *work,
					       unsigned long delay)
{
	if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();

		return -EINVAL;
	}

	return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
}

static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
				 struct fcoe_fcf_device *old)
{
	if (new->switch_name == old->switch_name &&
	    new->fabric_name == old->fabric_name &&
	    new->fc_map == old->fc_map &&
	    ether_addr_equal(new->mac, old->mac))
		return 1;
	return 0;
}

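/*
 * Illustrative LLD usage of fcoe_ctlr_device_add() (a sketch only; the
 * parent device, template name and private size are driver specific):
 *
 *	ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &my_fcoe_sysfs_templ,
 *					sizeof(struct fcoe_ctlr));
 *	fip = fcoe_ctlr_device_priv(ctlr_dev);
 *
 * and the matching teardown is fcoe_ctlr_device_delete(ctlr_dev).
 */
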
/**
 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
 * @parent:    The parent device to which the fcoe_ctlr instance
 *             should be attached
 * @f:         The LLD's FCoE sysfs function template pointer
 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
 *
 * This routine allocates a FIP ctlr object with some additional memory
 * for the LLD. The FIP ctlr is initialized, added to sysfs and then
 * attributes are added to it.
 */
struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
					      struct fcoe_sysfs_function_template *f,
					      int priv_size)
{
	struct fcoe_ctlr_device *ctlr;
	int error = 0;

	ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
		       GFP_KERNEL);
	if (!ctlr)
		goto out;

	ctlr->id = atomic_inc_return(&ctlr_num) - 1;
	ctlr->f = f;
	ctlr->mode = FIP_CONN_TYPE_FABRIC;
	INIT_LIST_HEAD(&ctlr->fcfs);
	mutex_init(&ctlr->lock);
	ctlr->dev.parent = parent;
	ctlr->dev.bus = &fcoe_bus_type;
	ctlr->dev.type = &fcoe_ctlr_device_type;

	ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;

	snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
		 "ctlr_wq_%d", ctlr->id);
	ctlr->work_q = create_singlethread_workqueue(
		ctlr->work_q_name);
	if (!ctlr->work_q)
		goto out_del;

	snprintf(ctlr->devloss_work_q_name,
		 sizeof(ctlr->devloss_work_q_name),
		 "ctlr_dl_wq_%d", ctlr->id);
	ctlr->devloss_work_q = create_singlethread_workqueue(
		ctlr->devloss_work_q_name);
	if (!ctlr->devloss_work_q)
		goto out_del_q;

	dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
	error = device_register(&ctlr->dev);
	if (error) {
		destroy_workqueue(ctlr->devloss_work_q);
		destroy_workqueue(ctlr->work_q);
		put_device(&ctlr->dev);
		return NULL;
	}

	return ctlr;

out_del_q:
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;
out_del:
	kfree(ctlr);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);

/**
 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
 * @ctlr: A pointer to the ctlr to be deleted
 *
 * Deletes a FIP ctlr and any fcfs attached
 * to it. Deleting fcfs will cause their children
 * to be deleted as well.
 *
 * The ctlr is detached from sysfs and its resources
 * are freed (work q), but the memory is not freed
 * until its last reference is released.
 *
 * This routine expects no locks to be held before
 * calling.
 *
 * TODO: Currently there are no callbacks to clean up LLD data
 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
 * to clean up each of their LLD data for all fcoe_fcf_device before
 * calling fcoe_ctlr_device_delete.
 */
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
{
	struct fcoe_fcf_device *fcf, *next;
	/* Remove any attached fcfs */
	mutex_lock(&ctlr->lock);
	list_for_each_entry_safe(fcf, next,
				 &ctlr->fcfs, peers) {
		list_del(&fcf->peers);
		fcf->state = FCOE_FCF_STATE_DELETED;
		fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
	}
	mutex_unlock(&ctlr->lock);

	fcoe_ctlr_device_flush_work(ctlr);

	destroy_workqueue(ctlr->devloss_work_q);
	ctlr->devloss_work_q = NULL;
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;

	device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);

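/*
 * FCF teardown flow implemented by the helpers below: fcoe_fcf_device_delete()
 * marks a CONNECTED fcf DISCONNECTED and schedules dev_loss_work; when the
 * devloss timer fires, fip_timeout_deleted_fcf() unlinks the fcf and queues
 * delete_work, and fcoe_fcf_device_final_delete() then unregisters the
 * device. An fcf that reconnects before the timer fires has the delayed
 * work cancelled in fcoe_fcf_device_add().
 */
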
/**
 * fcoe_fcf_device_final_delete() - Final delete routine
 * @work: The FIP fcf's embedded work struct
 *
 * It is expected that the fcf has been removed from
 * the FIP ctlr's list before calling this routine.
 */
static void fcoe_fcf_device_final_delete(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, delete_work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	/*
	 * Cancel any outstanding timers. These should really exist
	 * only when rmmod'ing the LLDD and we're asking for
	 * immediate termination of the rports
	 */
	if (!cancel_delayed_work(&fcf->dev_loss_work))
		fcoe_ctlr_device_flush_devloss(ctlr);

	device_unregister(&fcf->dev);
}

/**
 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
 * @work: The FIP fcf's embedded work struct
 *
 * Removes the fcf from the FIP ctlr's list of fcfs and
 * queues the final deletion.
 */
static void fip_timeout_deleted_fcf(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	mutex_lock(&ctlr->lock);

	/*
	 * If the fcf is deleted or reconnected before the timer
	 * fires the devloss queue will be flushed, but the state will
	 * either be CONNECTED or DELETED. If that is the case we
	 * cancel deleting the fcf.
	 */
	if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
		goto out;

	dev_printk(KERN_ERR, &fcf->dev,
		   "FIP fcf connection time out: removing fcf\n");

	list_del(&fcf->peers);
	fcf->state = FCOE_FCF_STATE_DELETED;
	fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);

out:
	mutex_unlock(&ctlr->lock);
}

/**
 * fcoe_fcf_device_delete() - Delete a FIP fcf
 * @fcf: Pointer to the fcf which is to be deleted
 *
 * Queues the FIP fcf on the devloss workqueue
 *
 * Expects the ctlr_attrs mutex to be held for fcf
 * state change.
 */
void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
{
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
	int timeout = fcf->dev_loss_tmo;

	if (fcf->state != FCOE_FCF_STATE_CONNECTED)
		return;

	fcf->state = FCOE_FCF_STATE_DISCONNECTED;

	/*
	 * FCF will only be re-connected by the LLD calling
	 * fcoe_fcf_device_add, and it should be setting up
	 * priv then.
	 */
	fcf->priv = NULL;

	fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
					    timeout * HZ);
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);

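/*
 * Illustrative caller-side sketch for fcoe_fcf_device_add() (names and
 * field values are placeholders; real callers fill the temporary FCF
 * from the discovered fabric data and must hold ctlr_dev->lock):
 *
 *	struct fcoe_fcf_device temp_fcf = { .fabric_name = fab_wwn, ... };
 *
 *	mutex_lock(&ctlr_dev->lock);
 *	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp_fcf);
 *	mutex_unlock(&ctlr_dev->lock);
 */
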
/**
 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
 * @ctlr:    The fcoe_ctlr_device that will be the fcoe_fcf_device parent
 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
 *
 * Expects to be called with the ctlr->lock held
 */
struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
					    struct fcoe_fcf_device *new_fcf)
{
	struct fcoe_fcf_device *fcf;
	int error = 0;

	list_for_each_entry(fcf, &ctlr->fcfs, peers) {
		if (fcoe_fcf_device_match(new_fcf, fcf)) {
			if (fcf->state == FCOE_FCF_STATE_CONNECTED)
				return fcf;

			fcf->state = FCOE_FCF_STATE_CONNECTED;

			if (!cancel_delayed_work(&fcf->dev_loss_work))
				fcoe_ctlr_device_flush_devloss(ctlr);

			return fcf;
		}
	}

	fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
	if (unlikely(!fcf))
		goto out;

	INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
	INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);

	fcf->dev.parent = &ctlr->dev;
	fcf->dev.bus = &fcoe_bus_type;
	fcf->dev.type = &fcoe_fcf_device_type;
	fcf->id = atomic_inc_return(&fcf_num) - 1;
	fcf->state = FCOE_FCF_STATE_UNKNOWN;

	fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;

	dev_set_name(&fcf->dev, "fcf_%d", fcf->id);

	fcf->fabric_name = new_fcf->fabric_name;
	fcf->switch_name = new_fcf->switch_name;
	fcf->fc_map = new_fcf->fc_map;
	fcf->vfid = new_fcf->vfid;
	memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
	fcf->priority = new_fcf->priority;
	fcf->fka_period = new_fcf->fka_period;
	fcf->selected = new_fcf->selected;

	error = device_register(&fcf->dev);
	if (error) {
		put_device(&fcf->dev);
		goto out;
	}

	fcf->state = FCOE_FCF_STATE_CONNECTED;
	list_add_tail(&fcf->peers, &ctlr->fcfs);

	return fcf;

out:
	return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);

int __init fcoe_sysfs_setup(void)
{
	atomic_set(&ctlr_num, 0);
	atomic_set(&fcf_num, 0);

	return bus_register(&fcoe_bus_type);
}

void __exit fcoe_sysfs_teardown(void)
{
	bus_unregister(&fcoe_bus_type);
}