// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/string.h>

#include <scsi/fcoe_sysfs.h>
#include <scsi/libfcoe.h>

/*
 * OK to include local libfcoe.h for debug_logging, but cannot include
 * <scsi/libfcoe.h> otherwise non-netdev based fcoe solutions would
 * have to include more than fcoe_sysfs.h.
 */
#include "libfcoe.h"

static atomic_t ctlr_num;
static atomic_t fcf_num;

/*
 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
 * should insulate the loss of an FCF.
 */
static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */

module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
		   uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fcf_dev_loss_tmo,
		 "Maximum number of seconds that libfcoe should"
		 " insulate the loss of an FCF. Once this value is"
		 " exceeded, the FCF is removed.");

/*
 * These are used by the fcoe_*_show_function routines, they
 * are intentionally placed in the .c file as they're not intended
 * for use throughout the code.
 */
#define fcoe_ctlr_id(x)				\
	((x)->id)
#define fcoe_ctlr_work_q(x)			\
	((x)->work_q)
#define fcoe_ctlr_devloss_work_q(x)		\
	((x)->devloss_work_q)
#define fcoe_ctlr_mode(x)			\
	((x)->mode)
#define fcoe_ctlr_fcf_dev_loss_tmo(x)		\
	((x)->fcf_dev_loss_tmo)
#define fcoe_ctlr_link_fail(x)			\
	((x)->lesb.lesb_link_fail)
#define fcoe_ctlr_vlink_fail(x)			\
	((x)->lesb.lesb_vlink_fail)
#define fcoe_ctlr_miss_fka(x)			\
	((x)->lesb.lesb_miss_fka)
#define fcoe_ctlr_symb_err(x)			\
	((x)->lesb.lesb_symb_err)
#define fcoe_ctlr_err_block(x)			\
	((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x)			\
	((x)->lesb.lesb_fcs_error)
#define fcoe_ctlr_enabled(x)			\
	((x)->enabled)
#define fcoe_fcf_state(x)			\
	((x)->state)
#define fcoe_fcf_fabric_name(x)			\
	((x)->fabric_name)
#define fcoe_fcf_switch_name(x)			\
	((x)->switch_name)
#define fcoe_fcf_fc_map(x)			\
	((x)->fc_map)
#define fcoe_fcf_vfid(x)			\
	((x)->vfid)
#define fcoe_fcf_mac(x)				\
	((x)->mac)
#define fcoe_fcf_priority(x)			\
	((x)->priority)
#define fcoe_fcf_fka_period(x)			\
	((x)->fka_period)
#define fcoe_fcf_dev_loss_tmo(x)		\
	((x)->dev_loss_tmo)
#define fcoe_fcf_selected(x)			\
	((x)->selected)
#define fcoe_fcf_vlan_id(x)			\
	((x)->vlan_id)

/*
 * dev_loss_tmo attribute
 */
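/*
 * Parse a user-supplied dev_loss_tmo string and reject values that
 * would not fit in the u32 dev_loss_tmo fields.
 */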
static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
{
	int ret;

	ret = kstrtoul(buf, 0, val);
	if (ret)
		return -EINVAL;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (*val > UINT_MAX)
		return -EINVAL;

	return 0;
}

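/*
 * Apply a new dev_loss_tmo to an FCF.  Returns -EBUSY if the FCF is
 * unknown, disconnected or deleted, and -EINVAL if the value does not
 * fit in the u32 dev_loss_tmo field.
 */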
static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
				     unsigned long val)
{
	if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
	    (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
	    (fcf->state == FCOE_FCF_STATE_DELETED))
		return -EBUSY;
	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (val > UINT_MAX)
		return -EINVAL;

	fcoe_fcf_dev_loss_tmo(fcf) = val;
	return 0;
}

#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store)		\
struct device_attribute device_attr_fcoe_##_prefix##_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define fcoe_ctlr_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
	if (ctlr->f->get_fcoe_ctlr_##field)				\
		ctlr->f->get_fcoe_ctlr_##field(ctlr);			\
	return snprintf(buf, sz, format_string,				\
			cast fcoe_ctlr_##field(ctlr));			\
}

#define fcoe_fcf_show_function(field, format_string, sz, cast)		\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
					    struct device_attribute *attr, \
					    char *buf)			\
{									\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);	\
	if (ctlr->f->get_fcoe_fcf_##field)				\
		ctlr->f->get_fcoe_fcf_##field(fcf);			\
	return snprintf(buf, sz, format_string,				\
			cast fcoe_fcf_##field(fcf));			\
}

#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
static ssize_t show_fcoe_ctlr_device_##field(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);		\
	return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
}

#define fcoe_fcf_private_show_function(field, format_string, sz, cast)	\
static ssize_t show_fcoe_fcf_device_##field(struct device *dev,	\
					    struct device_attribute *attr, \
					    char *buf)			\
{									\
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);			\
	return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
}

#define fcoe_ctlr_private_rd_attr(field, format_string, sz)		\
	fcoe_ctlr_private_show_function(field, format_string, sz, )	\
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_ctlr_rd_attr(field, format_string, sz)			\
	fcoe_ctlr_show_function(field, format_string, sz, )		\
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_rd_attr(field, format_string, sz)			\
	fcoe_fcf_show_function(field, format_string, sz, )		\
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
				show_fcoe_fcf_device_##field, NULL)

#define fcoe_fcf_private_rd_attr(field, format_string, sz)		\
	fcoe_fcf_private_show_function(field, format_string, sz, )	\
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
				show_fcoe_fcf_device_##field, NULL)

#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast)	\
	fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
	static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO,			\
				show_fcoe_ctlr_device_##field, NULL)

#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast)	\
	fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
	static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO,			\
				show_fcoe_fcf_device_##field, NULL)

#define fcoe_enum_name_search(title, table_type, table)			\
static const char *get_fcoe_##title##_name(enum table_type table_key)	\
{									\
	if (table_key < 0 || table_key >= ARRAY_SIZE(table))		\
		return NULL;						\
	return table[table_key];					\
}

static const char * const fip_conn_type_names[] = {
	[ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
	[ FIP_CONN_TYPE_FABRIC ]  = "Fabric",
	[ FIP_CONN_TYPE_VN2VN ]   = "VN2VN",
};
fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)

static char *fcf_state_names[] = {
	[ FCOE_FCF_STATE_UNKNOWN ]      = "Unknown",
	[ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
	[ FCOE_FCF_STATE_CONNECTED ]    = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50

static ssize_t show_fcf_state(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	const char *name;
	name = get_fcoe_fcf_state_name(fcf->state);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);

#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_mode_name(ctlr->mode);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_MAX_MODENAME_LEN,
			"%s\n", name);
}

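/*
 * Change the FIP connection mode (Fabric/VN2VN) through sysfs.  Only
 * permitted while the controller is disabled and the LLD provides a
 * set_fcoe_ctlr_mode callback.
 */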
static ssize_t store_ctlr_mode(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	int res;

	if (count > FCOE_MAX_MODENAME_LEN)
		return -EINVAL;

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
		return -EBUSY;
	case FCOE_CTLR_DISABLED:
		if (!ctlr->f->set_fcoe_ctlr_mode) {
			LIBFCOE_SYSFS_DBG(ctlr,
					  "Mode change not supported by LLD.\n");
			return -ENOTSUPP;
		}

		res = sysfs_match_string(fip_conn_type_names, buf);
		if (res < 0 || res == FIP_CONN_TYPE_UNKNOWN) {
			LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
					  buf);
			return -EINVAL;
		}
		ctlr->mode = res;

		ctlr->f->set_fcoe_ctlr_mode(ctlr);
		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);

		return count;
	case FCOE_CTLR_UNUSED:
	default:
		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
		return -ENOTSUPP;
	}
}

static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
			show_ctlr_mode, store_ctlr_mode);

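/*
 * Enable ("1") or disable ("0") the controller from sysfs and notify
 * the LLD of the new state through set_fcoe_ctlr_enabled().
 */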
static ssize_t store_ctlr_enabled(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	bool enabled;
	int rc;

	if (*buf == '1')
		enabled = true;
	else if (*buf == '0')
		enabled = false;
	else
		return -EINVAL;

	switch (ctlr->enabled) {
	case FCOE_CTLR_ENABLED:
		if (enabled)
			return count;
		ctlr->enabled = FCOE_CTLR_DISABLED;
		break;
	case FCOE_CTLR_DISABLED:
		if (!enabled)
			return count;
		ctlr->enabled = FCOE_CTLR_ENABLED;
		break;
	case FCOE_CTLR_UNUSED:
		return -ENOTSUPP;
	}

	rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
	if (rc)
		return rc;

	return count;
}

static char *ctlr_enabled_state_names[] = {
	[ FCOE_CTLR_ENABLED ]  = "1",
	[ FCOE_CTLR_DISABLED ] = "0",
};
fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
		      ctlr_enabled_state_names)
#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50

static ssize_t show_ctlr_enabled_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	const char *name;

	name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
	if (!name)
		return -EINVAL;
	return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
			"%s\n", name);
}

static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
			show_ctlr_enabled_state,
			store_ctlr_enabled);

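/*
 * fip_vlan_responder attribute: toggle the FIP VLAN responder flag
 * (fip->fip_resp) with "1"/"0", under fip->ctlr_mutex.
 */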
static ssize_t store_ctlr_fip_resp(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);

	mutex_lock(&fip->ctlr_mutex);
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1') {
			fip->fip_resp = 1;
			mutex_unlock(&fip->ctlr_mutex);
			return count;
		}
		if (buf[0] == '0') {
			fip->fip_resp = 0;
			mutex_unlock(&fip->ctlr_mutex);
			return count;
		}
	}
	mutex_unlock(&fip->ctlr_mutex);
	return -EINVAL;
}

static ssize_t show_ctlr_fip_resp(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);

	return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0);
}

static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
			show_ctlr_fip_resp,
			store_ctlr_fip_resp);

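/*
 * Helper for the R_A_TOV/E_D_TOV store handlers: parse a decimal string
 * into a u32, rejecting values larger than UINT_MAX.
 */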
static ssize_t
fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(buf, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t store_ctlr_r_a_tov(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
		return -EBUSY;
	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
		return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
	return -ENOTSUPP;
}

static ssize_t show_ctlr_r_a_tov(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
}

static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
			show_ctlr_r_a_tov, store_ctlr_r_a_tov);

static ssize_t store_ctlr_e_d_tov(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
		return -EBUSY;
	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
		return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
	return -ENOTSUPP;
}

static ssize_t show_ctlr_e_d_tov(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);

	return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
}

static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
			show_ctlr_e_d_tov, store_ctlr_e_d_tov);

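/*
 * Set the controller-wide default FCF dev_loss_tmo and propagate the
 * new value to every FCF currently attached, under ctlr->lock.
 */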
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	struct fcoe_fcf_device *fcf;
	unsigned long val;
	int rc;

	rc = fcoe_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
	mutex_lock(&ctlr->lock);
	list_for_each_entry(fcf, &ctlr->fcfs, peers)
		fcoe_fcf_set_dev_loss_tmo(fcf, val);
	mutex_unlock(&ctlr->lock);
	return count;
}
fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
			show_fcoe_ctlr_device_fcf_dev_loss_tmo,
			store_private_fcoe_ctlr_fcf_dev_loss_tmo);

/* Link Error Status Block (LESB) */
fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);

fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
fcoe_fcf_rd_attr(selected, "%u\n", 20);
fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);

fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
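/*
 * Per-FCF dev_loss_tmo attribute: validate the new value and apply it
 * to this FCF only.
 */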
static ssize_t
store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	unsigned long val;
	int rc;

	rc = fcoe_str_to_dev_loss(buf, &val);
	if (rc)
		return rc;

	rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
	if (rc)
		return rc;
	return count;
}
static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
			show_fcoe_fcf_device_dev_loss_tmo,
			store_fcoe_fcf_dev_loss_tmo);

static struct attribute *fcoe_ctlr_lesb_attrs[] = {
	&device_attr_fcoe_ctlr_link_fail.attr,
	&device_attr_fcoe_ctlr_vlink_fail.attr,
	&device_attr_fcoe_ctlr_miss_fka.attr,
	&device_attr_fcoe_ctlr_symb_err.attr,
	&device_attr_fcoe_ctlr_err_block.attr,
	&device_attr_fcoe_ctlr_fcs_error.attr,
	NULL,
};

static struct attribute_group fcoe_ctlr_lesb_attr_group = {
	.name = "lesb",
	.attrs = fcoe_ctlr_lesb_attrs,
};

static struct attribute *fcoe_ctlr_attrs[] = {
	&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
	&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
	&device_attr_fcoe_ctlr_r_a_tov.attr,
	&device_attr_fcoe_ctlr_e_d_tov.attr,
	&device_attr_fcoe_ctlr_enabled.attr,
	&device_attr_fcoe_ctlr_mode.attr,
	NULL,
};

static struct attribute_group fcoe_ctlr_attr_group = {
	.attrs = fcoe_ctlr_attrs,
};

static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
	&fcoe_ctlr_attr_group,
	&fcoe_ctlr_lesb_attr_group,
	NULL,
};

static struct attribute *fcoe_fcf_attrs[] = {
	&device_attr_fcoe_fcf_fabric_name.attr,
	&device_attr_fcoe_fcf_switch_name.attr,
	&device_attr_fcoe_fcf_dev_loss_tmo.attr,
	&device_attr_fcoe_fcf_fc_map.attr,
	&device_attr_fcoe_fcf_vfid.attr,
	&device_attr_fcoe_fcf_mac.attr,
	&device_attr_fcoe_fcf_priority.attr,
	&device_attr_fcoe_fcf_fka_period.attr,
	&device_attr_fcoe_fcf_state.attr,
	&device_attr_fcoe_fcf_selected.attr,
	&device_attr_fcoe_fcf_vlan_id.attr,
	NULL
};

static struct attribute_group fcoe_fcf_attr_group = {
	.attrs = fcoe_fcf_attrs,
};

static const struct attribute_group *fcoe_fcf_attr_groups[] = {
	&fcoe_fcf_attr_group,
	NULL,
};

static const struct bus_type fcoe_bus_type;

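/*
 * Bus match callback: any device registered on the fcoe bus matches.
 */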
static int fcoe_bus_match(struct device *dev,
			  const struct device_driver *drv)
{
	if (dev->bus == &fcoe_bus_type)
		return 1;
	return 0;
}

/**
 * fcoe_ctlr_device_release() - Release the FIP ctlr memory
 * @dev: Pointer to the FIP ctlr's embedded device
 *
 * Called when the last FIP ctlr reference is released.
 */
static void fcoe_ctlr_device_release(struct device *dev)
{
	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
	kfree(ctlr);
}

/**
 * fcoe_fcf_device_release() - Release the FIP fcf memory
 * @dev: Pointer to the fcf's embedded device
 *
 * Called when the last FIP fcf reference is released.
 */
static void fcoe_fcf_device_release(struct device *dev)
{
	struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
	kfree(fcf);
}

static const struct device_type fcoe_ctlr_device_type = {
	.name = "fcoe_ctlr",
	.groups = fcoe_ctlr_attr_groups,
	.release = fcoe_ctlr_device_release,
};

static const struct device_type fcoe_fcf_device_type = {
	.name = "fcoe_fcf",
	.groups = fcoe_fcf_attr_groups,
	.release = fcoe_fcf_device_release,
};

static ssize_t ctlr_create_store(const struct bus_type *bus, const char *buf,
				 size_t count)
{
	return fcoe_ctlr_create_store(buf, count);
}
static BUS_ATTR_WO(ctlr_create);

static ssize_t ctlr_destroy_store(const struct bus_type *bus, const char *buf,
				  size_t count)
{
	return fcoe_ctlr_destroy_store(buf, count);
}
static BUS_ATTR_WO(ctlr_destroy);

static struct attribute *fcoe_bus_attrs[] = {
	&bus_attr_ctlr_create.attr,
	&bus_attr_ctlr_destroy.attr,
	NULL,
};
ATTRIBUTE_GROUPS(fcoe_bus);

static const struct bus_type fcoe_bus_type = {
	.name = "fcoe",
	.match = &fcoe_bus_match,
	.bus_groups = fcoe_bus_groups,
};

/**
 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
 */
static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
{
	if (!fcoe_ctlr_work_q(ctlr)) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();
		return;
	}

	flush_workqueue(fcoe_ctlr_work_q(ctlr));
}

/**
 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
 * @ctlr: Pointer to the FIP ctlr who owns the workqueue
 * @work: Work to queue for execution
 *
 * Return value:
 *	1 on success / 0 already queued / < 0 for error
 */
static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
				       struct work_struct *work)
{
	if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(fcoe_ctlr_work_q(ctlr), work);
}

/**
 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
 */
static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
{
	if (!fcoe_ctlr_devloss_work_q(ctlr)) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to flush work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();
		return;
	}

	flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
}

/**
 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
 * @work: Work to queue for execution
 * @delay: jiffies to delay the work queuing
 *
 * Return value:
 *	1 on success / 0 already queued / < 0 for error
 */
static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
					       struct delayed_work *work,
					       unsigned long delay)
{
	if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
		printk(KERN_ERR
		       "ERROR: FIP Ctlr '%d' attempted to queue work, "
		       "when no workqueue created.\n", ctlr->id);
		dump_stack();

		return -EINVAL;
	}

	return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
}

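/*
 * Two fcf devices refer to the same FCF when their switch name, fabric
 * name, FC-MAP and MAC address all match.
 */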
static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
				 struct fcoe_fcf_device *old)
{
	if (new->switch_name == old->switch_name &&
	    new->fabric_name == old->fabric_name &&
	    new->fc_map == old->fc_map &&
	    ether_addr_equal(new->mac, old->mac))
		return 1;
	return 0;
}

/**
 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
 * @parent:    The parent device to which the fcoe_ctlr instance
 *             should be attached
 * @f:         The LLD's FCoE sysfs function template pointer
 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
 *
 * This routine allocates a FIP ctlr object with some additional memory
 * for the LLD. The FIP ctlr is initialized, added to sysfs and then
 * attributes are added to it.
 */
struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
			    struct fcoe_sysfs_function_template *f,
			    int priv_size)
{
	struct fcoe_ctlr_device *ctlr;
	int error = 0;

	ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
		       GFP_KERNEL);
	if (!ctlr)
		goto out;

	ctlr->id = atomic_inc_return(&ctlr_num) - 1;
	ctlr->f = f;
	ctlr->mode = FIP_CONN_TYPE_FABRIC;
	INIT_LIST_HEAD(&ctlr->fcfs);
	mutex_init(&ctlr->lock);
	ctlr->dev.parent = parent;
	ctlr->dev.bus = &fcoe_bus_type;
	ctlr->dev.type = &fcoe_ctlr_device_type;

	ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;

	ctlr->work_q = alloc_ordered_workqueue("ctlr_wq_%d", WQ_MEM_RECLAIM,
					       ctlr->id);
	if (!ctlr->work_q)
		goto out_del;

	ctlr->devloss_work_q = alloc_ordered_workqueue("ctlr_dl_wq_%d",
						       WQ_MEM_RECLAIM,
						       ctlr->id);
	if (!ctlr->devloss_work_q)
		goto out_del_q;

	dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
	error = device_register(&ctlr->dev);
	if (error) {
		destroy_workqueue(ctlr->devloss_work_q);
		destroy_workqueue(ctlr->work_q);
		put_device(&ctlr->dev);
		return NULL;
	}

	return ctlr;

out_del_q:
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;
out_del:
	kfree(ctlr);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);

/**
 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
 * @ctlr: A pointer to the ctlr to be deleted
 *
 * Deletes a FIP ctlr and any fcfs attached
 * to it. Deleting fcfs will cause their children
 * to be deleted as well.
 *
 * The ctlr is detached from sysfs and its resources
 * are freed (work q), but the memory is not freed
 * until its last reference is released.
 *
 * This routine expects no locks to be held before
 * calling.
 *
 * TODO: Currently there are no callbacks to clean up LLD data
 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
 * to clean up each of their LLD data for all fcoe_fcf_device before
 * calling fcoe_ctlr_device_delete.
 */
void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
{
	struct fcoe_fcf_device *fcf, *next;
	/* Remove any attached fcfs */
	mutex_lock(&ctlr->lock);
	list_for_each_entry_safe(fcf, next,
				 &ctlr->fcfs, peers) {
		list_del(&fcf->peers);
		fcf->state = FCOE_FCF_STATE_DELETED;
		fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
	}
	mutex_unlock(&ctlr->lock);

	fcoe_ctlr_device_flush_work(ctlr);

	destroy_workqueue(ctlr->devloss_work_q);
	ctlr->devloss_work_q = NULL;
	destroy_workqueue(ctlr->work_q);
	ctlr->work_q = NULL;

	device_unregister(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);

/**
 * fcoe_fcf_device_final_delete() - Final delete routine
 * @work: The FIP fcf's embedded work struct
 *
 * It is expected that the fcf has been removed from
 * the FIP ctlr's list before calling this routine.
 */
static void fcoe_fcf_device_final_delete(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, delete_work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	/*
	 * Cancel any outstanding timers. These should really exist
	 * only when rmmod'ing the LLDD and we're asking for
	 * immediate termination of the rports
	 */
	if (!cancel_delayed_work(&fcf->dev_loss_work))
		fcoe_ctlr_device_flush_devloss(ctlr);

	device_unregister(&fcf->dev);
}

/**
 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
 * @work: The FIP fcf's embedded work struct
 *
 * Removes the fcf from the FIP ctlr's list of fcfs and
 * queues the final deletion.
 */
static void fip_timeout_deleted_fcf(struct work_struct *work)
{
	struct fcoe_fcf_device *fcf =
		container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);

	mutex_lock(&ctlr->lock);

	/*
	 * If the fcf is deleted or reconnected before the timer
	 * fires the devloss queue will be flushed, but the state will
	 * either be CONNECTED or DELETED. If that is the case we
	 * cancel deleting the fcf.
	 */
	if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
		goto out;

	dev_printk(KERN_ERR, &fcf->dev,
		   "FIP fcf connection time out: removing fcf\n");

	list_del(&fcf->peers);
	fcf->state = FCOE_FCF_STATE_DELETED;
	fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);

out:
	mutex_unlock(&ctlr->lock);
}

/**
 * fcoe_fcf_device_delete() - Delete a FIP fcf
 * @fcf: Pointer to the fcf which is to be deleted
 *
 * Queues the FIP fcf on the devloss workqueue
 *
 * Expects the ctlr_attrs mutex to be held for fcf
 * state change.
 */
void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
{
	struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
	int timeout = fcf->dev_loss_tmo;

	if (fcf->state != FCOE_FCF_STATE_CONNECTED)
		return;

	fcf->state = FCOE_FCF_STATE_DISCONNECTED;

	/*
	 * FCF will only be re-connected by the LLD calling
	 * fcoe_fcf_device_add, and it should be setting up
	 * priv then.
	 */
	fcf->priv = NULL;

	fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
					    timeout * HZ);
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);

/**
 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
 * @ctlr:    The fcoe_ctlr_device that will be the fcoe_fcf_device parent
 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
 *
 * Expects to be called with the ctlr->lock held
 */
struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
					    struct fcoe_fcf_device *new_fcf)
{
	struct fcoe_fcf_device *fcf;
	int error = 0;

	list_for_each_entry(fcf, &ctlr->fcfs, peers) {
		if (fcoe_fcf_device_match(new_fcf, fcf)) {
			if (fcf->state == FCOE_FCF_STATE_CONNECTED)
				return fcf;

			fcf->state = FCOE_FCF_STATE_CONNECTED;

			if (!cancel_delayed_work(&fcf->dev_loss_work))
				fcoe_ctlr_device_flush_devloss(ctlr);

			return fcf;
		}
	}

	fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
	if (unlikely(!fcf))
		goto out;

	INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
	INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);

	fcf->dev.parent = &ctlr->dev;
	fcf->dev.bus = &fcoe_bus_type;
	fcf->dev.type = &fcoe_fcf_device_type;
	fcf->id = atomic_inc_return(&fcf_num) - 1;
	fcf->state = FCOE_FCF_STATE_UNKNOWN;

	fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;

	dev_set_name(&fcf->dev, "fcf_%d", fcf->id);

	fcf->fabric_name = new_fcf->fabric_name;
	fcf->switch_name = new_fcf->switch_name;
	fcf->fc_map = new_fcf->fc_map;
	fcf->vfid = new_fcf->vfid;
	memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
	fcf->priority = new_fcf->priority;
	fcf->fka_period = new_fcf->fka_period;
	fcf->selected = new_fcf->selected;

	error = device_register(&fcf->dev);
	if (error) {
		put_device(&fcf->dev);
		goto out;
	}

	fcf->state = FCOE_FCF_STATE_CONNECTED;
	list_add_tail(&fcf->peers, &ctlr->fcfs);

	return fcf;

out:
	return NULL;
}
EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);

int __init fcoe_sysfs_setup(void)
{
	atomic_set(&ctlr_num, 0);
	atomic_set(&fcf_num, 0);

	return bus_register(&fcoe_bus_type);
}

void __exit fcoe_sysfs_teardown(void)
{
	bus_unregister(&fcoe_bus_type);
}