xref: /linux/drivers/misc/cxl/sysfs.c (revision b6c84ba22ff3a198eb8d5552cf9b8fda1d792e54)
1 /*
2  * Copyright 2014 IBM Corp.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/device.h>
12 #include <linux/sysfs.h>
13 #include <linux/pci_regs.h>
14 
15 #include "cxl.h"
16 
17 #define to_afu_chardev_m(d) dev_get_drvdata(d)
18 
19 /*********  Adapter attributes  **********************************************/
20 
21 static ssize_t caia_version_show(struct device *device,
22 				 struct device_attribute *attr,
23 				 char *buf)
24 {
25 	struct cxl *adapter = to_cxl_adapter(device);
26 
27 	return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
28 			 adapter->caia_minor);
29 }
30 
31 static ssize_t psl_revision_show(struct device *device,
32 				 struct device_attribute *attr,
33 				 char *buf)
34 {
35 	struct cxl *adapter = to_cxl_adapter(device);
36 
37 	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
38 }
39 
40 static ssize_t base_image_show(struct device *device,
41 			       struct device_attribute *attr,
42 			       char *buf)
43 {
44 	struct cxl *adapter = to_cxl_adapter(device);
45 
46 	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
47 }
48 
49 static ssize_t image_loaded_show(struct device *device,
50 				 struct device_attribute *attr,
51 				 char *buf)
52 {
53 	struct cxl *adapter = to_cxl_adapter(device);
54 
55 	if (adapter->user_image_loaded)
56 		return scnprintf(buf, PAGE_SIZE, "user\n");
57 	return scnprintf(buf, PAGE_SIZE, "factory\n");
58 }
59 
60 static ssize_t psl_timebase_synced_show(struct device *device,
61 					struct device_attribute *attr,
62 					char *buf)
63 {
64 	struct cxl *adapter = to_cxl_adapter(device);
65 	u64 psl_tb, delta;
66 
67 	/* Recompute the status only in native mode */
68 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
69 		psl_tb = adapter->native->sl_ops->timebase_read(adapter);
70 		delta = abs(mftb() - psl_tb);
71 
72 		/* CORE TB and PSL TB difference <= 16usecs ? */
73 		adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
74 		pr_devel("PSL timebase %s - delta: 0x%016llx\n",
75 			 (tb_to_ns(delta) < 16000) ? "synchronized" :
76 			 "not synchronized", tb_to_ns(delta));
77 	}
78 	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
79 }
80 
81 static ssize_t reset_adapter_store(struct device *device,
82 				   struct device_attribute *attr,
83 				   const char *buf, size_t count)
84 {
85 	struct cxl *adapter = to_cxl_adapter(device);
86 	int rc;
87 	int val;
88 
89 	rc = sscanf(buf, "%i", &val);
90 	if ((rc != 1) || (val != 1 && val != -1))
91 		return -EINVAL;
92 
93 	/*
94 	 * See if we can lock the context mapping that's only allowed
95 	 * when there are no contexts attached to the adapter. Once
96 	 * taken this will also prevent any context from getting activated.
97 	 */
98 	if (val == 1) {
99 		rc =  cxl_adapter_context_lock(adapter);
100 		if (rc)
101 			goto out;
102 
103 		rc = cxl_ops->adapter_reset(adapter);
104 		/* In case reset failed release context lock */
105 		if (rc)
106 			cxl_adapter_context_unlock(adapter);
107 
108 	} else if (val == -1) {
109 		/* Perform a forced adapter reset */
110 		rc = cxl_ops->adapter_reset(adapter);
111 	}
112 
113 out:
114 	return rc ? rc : count;
115 }
116 
117 static ssize_t load_image_on_perst_show(struct device *device,
118 				 struct device_attribute *attr,
119 				 char *buf)
120 {
121 	struct cxl *adapter = to_cxl_adapter(device);
122 
123 	if (!adapter->perst_loads_image)
124 		return scnprintf(buf, PAGE_SIZE, "none\n");
125 
126 	if (adapter->perst_select_user)
127 		return scnprintf(buf, PAGE_SIZE, "user\n");
128 	return scnprintf(buf, PAGE_SIZE, "factory\n");
129 }
130 
131 static ssize_t load_image_on_perst_store(struct device *device,
132 				 struct device_attribute *attr,
133 				 const char *buf, size_t count)
134 {
135 	struct cxl *adapter = to_cxl_adapter(device);
136 	int rc;
137 
138 	if (!strncmp(buf, "none", 4))
139 		adapter->perst_loads_image = false;
140 	else if (!strncmp(buf, "user", 4)) {
141 		adapter->perst_select_user = true;
142 		adapter->perst_loads_image = true;
143 	} else if (!strncmp(buf, "factory", 7)) {
144 		adapter->perst_select_user = false;
145 		adapter->perst_loads_image = true;
146 	} else
147 		return -EINVAL;
148 
149 	if ((rc = cxl_update_image_control(adapter)))
150 		return rc;
151 
152 	return count;
153 }
154 
155 static ssize_t perst_reloads_same_image_show(struct device *device,
156 				 struct device_attribute *attr,
157 				 char *buf)
158 {
159 	struct cxl *adapter = to_cxl_adapter(device);
160 
161 	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
162 }
163 
164 static ssize_t perst_reloads_same_image_store(struct device *device,
165 				 struct device_attribute *attr,
166 				 const char *buf, size_t count)
167 {
168 	struct cxl *adapter = to_cxl_adapter(device);
169 	int rc;
170 	int val;
171 
172 	rc = sscanf(buf, "%i", &val);
173 	if ((rc != 1) || !(val == 1 || val == 0))
174 		return -EINVAL;
175 
176 	adapter->perst_same_image = (val == 1 ? true : false);
177 	return count;
178 }
179 
/*
 * Attributes exposed in the adapter's sysfs directory. Creation is
 * filtered at runtime through cxl_ops->support_attributes() in
 * cxl_sysfs_adapter_add(), so not every entry necessarily appears on
 * every platform (native vs. guest).
 */
static struct device_attribute adapter_attrs[] = {
	__ATTR_RO(caia_version),
	__ATTR_RO(psl_revision),
	__ATTR_RO(base_image),
	__ATTR_RO(image_loaded),
	__ATTR_RO(psl_timebase_synced),
	__ATTR_RW(load_image_on_perst),
	__ATTR_RW(perst_reloads_same_image),
	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
};
190 
191 
192 /*********  AFU master specific attributes  **********************************/
193 
194 static ssize_t mmio_size_show_master(struct device *device,
195 				     struct device_attribute *attr,
196 				     char *buf)
197 {
198 	struct cxl_afu *afu = to_afu_chardev_m(device);
199 
200 	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
201 }
202 
203 static ssize_t pp_mmio_off_show(struct device *device,
204 				struct device_attribute *attr,
205 				char *buf)
206 {
207 	struct cxl_afu *afu = to_afu_chardev_m(device);
208 
209 	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
210 }
211 
212 static ssize_t pp_mmio_len_show(struct device *device,
213 				struct device_attribute *attr,
214 				char *buf)
215 {
216 	struct cxl_afu *afu = to_afu_chardev_m(device);
217 
218 	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
219 }
220 
/* Attributes for the AFU master character device (filtered at add time) */
static struct device_attribute afu_master_attrs[] = {
	__ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
	__ATTR_RO(pp_mmio_off),
	__ATTR_RO(pp_mmio_len),
};
226 
227 
228 /*********  AFU attributes  **************************************************/
229 
230 static ssize_t mmio_size_show(struct device *device,
231 			      struct device_attribute *attr,
232 			      char *buf)
233 {
234 	struct cxl_afu *afu = to_cxl_afu(device);
235 
236 	if (afu->pp_size)
237 		return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
238 	return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
239 }
240 
241 static ssize_t reset_store_afu(struct device *device,
242 			       struct device_attribute *attr,
243 			       const char *buf, size_t count)
244 {
245 	struct cxl_afu *afu = to_cxl_afu(device);
246 	int rc;
247 
248 	/* Not safe to reset if it is currently in use */
249 	mutex_lock(&afu->contexts_lock);
250 	if (!idr_is_empty(&afu->contexts_idr)) {
251 		rc = -EBUSY;
252 		goto err;
253 	}
254 
255 	if ((rc = cxl_ops->afu_reset(afu)))
256 		goto err;
257 
258 	rc = count;
259 err:
260 	mutex_unlock(&afu->contexts_lock);
261 	return rc;
262 }
263 
264 static ssize_t irqs_min_show(struct device *device,
265 			     struct device_attribute *attr,
266 			     char *buf)
267 {
268 	struct cxl_afu *afu = to_cxl_afu(device);
269 
270 	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
271 }
272 
273 static ssize_t irqs_max_show(struct device *device,
274 				  struct device_attribute *attr,
275 				  char *buf)
276 {
277 	struct cxl_afu *afu = to_cxl_afu(device);
278 
279 	return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
280 }
281 
282 static ssize_t irqs_max_store(struct device *device,
283 				  struct device_attribute *attr,
284 				  const char *buf, size_t count)
285 {
286 	struct cxl_afu *afu = to_cxl_afu(device);
287 	ssize_t ret;
288 	int irqs_max;
289 
290 	ret = sscanf(buf, "%i", &irqs_max);
291 	if (ret != 1)
292 		return -EINVAL;
293 
294 	if (irqs_max < afu->pp_irqs)
295 		return -EINVAL;
296 
297 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
298 		if (irqs_max > afu->adapter->user_irqs)
299 			return -EINVAL;
300 	} else {
301 		/* pHyp sets a per-AFU limit */
302 		if (irqs_max > afu->guest->max_ints)
303 			return -EINVAL;
304 	}
305 
306 	afu->irqs_max = irqs_max;
307 	return count;
308 }
309 
310 static ssize_t modes_supported_show(struct device *device,
311 				    struct device_attribute *attr, char *buf)
312 {
313 	struct cxl_afu *afu = to_cxl_afu(device);
314 	char *p = buf, *end = buf + PAGE_SIZE;
315 
316 	if (afu->modes_supported & CXL_MODE_DEDICATED)
317 		p += scnprintf(p, end - p, "dedicated_process\n");
318 	if (afu->modes_supported & CXL_MODE_DIRECTED)
319 		p += scnprintf(p, end - p, "afu_directed\n");
320 	return (p - buf);
321 }
322 
323 static ssize_t prefault_mode_show(struct device *device,
324 				  struct device_attribute *attr,
325 				  char *buf)
326 {
327 	struct cxl_afu *afu = to_cxl_afu(device);
328 
329 	switch (afu->prefault_mode) {
330 	case CXL_PREFAULT_WED:
331 		return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
332 	case CXL_PREFAULT_ALL:
333 		return scnprintf(buf, PAGE_SIZE, "all\n");
334 	default:
335 		return scnprintf(buf, PAGE_SIZE, "none\n");
336 	}
337 }
338 
339 static ssize_t prefault_mode_store(struct device *device,
340 			  struct device_attribute *attr,
341 			  const char *buf, size_t count)
342 {
343 	struct cxl_afu *afu = to_cxl_afu(device);
344 	enum prefault_modes mode = -1;
345 
346 	if (!strncmp(buf, "none", 4))
347 		mode = CXL_PREFAULT_NONE;
348 	else {
349 		if (!radix_enabled()) {
350 
351 			/* only allowed when not in radix mode */
352 			if (!strncmp(buf, "work_element_descriptor", 23))
353 				mode = CXL_PREFAULT_WED;
354 			if (!strncmp(buf, "all", 3))
355 				mode = CXL_PREFAULT_ALL;
356 		} else {
357 			dev_err(device, "Cannot prefault with radix enabled\n");
358 		}
359 	}
360 
361 	if (mode == -1)
362 		return -EINVAL;
363 
364 	afu->prefault_mode = mode;
365 	return count;
366 }
367 
368 static ssize_t mode_show(struct device *device,
369 			 struct device_attribute *attr,
370 			 char *buf)
371 {
372 	struct cxl_afu *afu = to_cxl_afu(device);
373 
374 	if (afu->current_mode == CXL_MODE_DEDICATED)
375 		return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
376 	if (afu->current_mode == CXL_MODE_DIRECTED)
377 		return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
378 	return scnprintf(buf, PAGE_SIZE, "none\n");
379 }
380 
/*
 * Switch the AFU between programming models ("dedicated_process",
 * "afu_directed" or "none"). Only allowed while no contexts are attached.
 * Returns count on success, -EBUSY if the AFU is in use, -EINVAL for an
 * unrecognized mode string, or the error from the deactivate/activate ops.
 */
static ssize_t mode_store(struct device *device, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct cxl_afu *afu = to_cxl_afu(device);
	int old_mode, mode = -1;
	int rc = -EBUSY;

	/* can't change this if we have a user */
	mutex_lock(&afu->contexts_lock);
	if (!idr_is_empty(&afu->contexts_idr))
		goto err;

	/* The mode strings are prefixes of each other's namespace; all three
	 * comparisons are mutually exclusive so plain ifs are safe here. */
	if (!strncmp(buf, "dedicated_process", 17))
		mode = CXL_MODE_DEDICATED;
	if (!strncmp(buf, "afu_directed", 12))
		mode = CXL_MODE_DIRECTED;
	if (!strncmp(buf, "none", 4))
		mode = 0;

	if (mode == -1) {
		rc = -EINVAL;
		goto err;
	}

	/*
	 * afu_deactivate_mode needs to be done outside the lock, prevent
	 * other contexts coming in before we are ready:
	 */
	old_mode = afu->current_mode;
	afu->current_mode = 0;
	afu->num_procs = 0;

	mutex_unlock(&afu->contexts_lock);

	/* NOTE: no unlock on these error paths — the lock was dropped above */
	if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
		return rc;
	if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
		return rc;

	return count;
err:
	mutex_unlock(&afu->contexts_lock);
	return rc;
}
425 
426 static ssize_t api_version_show(struct device *device,
427 				struct device_attribute *attr,
428 				char *buf)
429 {
430 	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
431 }
432 
433 static ssize_t api_version_compatible_show(struct device *device,
434 					   struct device_attribute *attr,
435 					   char *buf)
436 {
437 	return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
438 }
439 
440 static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
441 			       struct bin_attribute *bin_attr, char *buf,
442 			       loff_t off, size_t count)
443 {
444 	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
445 
446 	return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
447 }
448 
/*
 * Attributes exposed in each AFU's sysfs directory. Like the adapter
 * attributes, creation is filtered through cxl_ops->support_attributes().
 */
static struct device_attribute afu_attrs[] = {
	__ATTR_RO(mmio_size),
	__ATTR_RO(irqs_min),
	__ATTR_RW(irqs_max),
	__ATTR_RO(modes_supported),
	__ATTR_RW(mode),
	__ATTR_RW(prefault_mode),
	__ATTR_RO(api_version),
	__ATTR_RO(api_version_compatible),
	__ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};
460 
461 int cxl_sysfs_adapter_add(struct cxl *adapter)
462 {
463 	struct device_attribute *dev_attr;
464 	int i, rc;
465 
466 	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
467 		dev_attr = &adapter_attrs[i];
468 		if (cxl_ops->support_attributes(dev_attr->attr.name,
469 						CXL_ADAPTER_ATTRS)) {
470 			if ((rc = device_create_file(&adapter->dev, dev_attr)))
471 				goto err;
472 		}
473 	}
474 	return 0;
475 err:
476 	for (i--; i >= 0; i--) {
477 		dev_attr = &adapter_attrs[i];
478 		if (cxl_ops->support_attributes(dev_attr->attr.name,
479 						CXL_ADAPTER_ATTRS))
480 			device_remove_file(&adapter->dev, dev_attr);
481 	}
482 	return rc;
483 }
484 
485 void cxl_sysfs_adapter_remove(struct cxl *adapter)
486 {
487 	struct device_attribute *dev_attr;
488 	int i;
489 
490 	for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
491 		dev_attr = &adapter_attrs[i];
492 		if (cxl_ops->support_attributes(dev_attr->attr.name,
493 						CXL_ADAPTER_ATTRS))
494 			device_remove_file(&adapter->dev, dev_attr);
495 	}
496 }
497 
/*
 * One sysfs "crN" directory per AFU configuration record, exposing the
 * record's PCI-style identification plus a raw binary "config" file.
 * Lifetime is tied to the embedded kobject; freed by
 * release_afu_config_record() via kobject_put().
 */
struct afu_config_record {
	struct kobject kobj;		/* sysfs directory, owns this struct */
	struct bin_attribute config_attr; /* raw "config" binary file */
	struct list_head list;		/* entry in afu->crs */
	int cr;				/* configuration record index */
	u16 device;			/* PCI device ID from the record */
	u16 vendor;			/* PCI vendor ID from the record */
	u32 class;			/* PCI class code (revision stripped) */
};

#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
509 
510 static ssize_t vendor_show(struct kobject *kobj,
511 			   struct kobj_attribute *attr, char *buf)
512 {
513 	struct afu_config_record *cr = to_cr(kobj);
514 
515 	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
516 }
517 
518 static ssize_t device_show(struct kobject *kobj,
519 			   struct kobj_attribute *attr, char *buf)
520 {
521 	struct afu_config_record *cr = to_cr(kobj);
522 
523 	return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
524 }
525 
526 static ssize_t class_show(struct kobject *kobj,
527 			  struct kobj_attribute *attr, char *buf)
528 {
529 	struct afu_config_record *cr = to_cr(kobj);
530 
531 	return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
532 }
533 
/*
 * sysfs read handler for the raw "config" binary file of a configuration
 * record. The backend only reads 64-bit aligned doublewords, so each
 * iteration fetches the aligned doubleword containing 'off' and copies
 * the bytes the caller asked for out of it.
 */
static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t off, size_t count)
{
	struct afu_config_record *cr = to_cr(kobj);
	struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));

	u64 i, j, val, rc;

	for (i = 0; i < count;) {
		/* Read the aligned doubleword; on error fake all-ones data */
		rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
		if (rc)
			val = ~0ULL;
		/* Copy bytes from the current offset up to the dword boundary */
		for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
			buf[i] = (val >> (j * 8)) & 0xff;
	}

	return count;
}
553 
/* Default read-only attributes present in every "crN" directory */
static struct kobj_attribute vendor_attribute =
	__ATTR_RO(vendor);
static struct kobj_attribute device_attribute =
	__ATTR_RO(device);
static struct kobj_attribute class_attribute =
	__ATTR_RO(class);

static struct attribute *afu_cr_attrs[] = {
	&vendor_attribute.attr,
	&device_attribute.attr,
	&class_attribute.attr,
	NULL,
};
567 
/* kobject release: the config record is freed when its refcount drops */
static void release_afu_config_record(struct kobject *kobj)
{
	kfree(to_cr(kobj));
}
574 
/* ktype for the "crN" kobjects; ties the attrs above to the release hook */
static struct kobj_type afu_config_record_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_afu_config_record,
	.default_attrs = afu_cr_attrs,
};
580 
581 static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
582 {
583 	struct afu_config_record *cr;
584 	int rc;
585 
586 	cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
587 	if (!cr)
588 		return ERR_PTR(-ENOMEM);
589 
590 	cr->cr = cr_idx;
591 
592 	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
593 	if (rc)
594 		goto err;
595 	rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
596 	if (rc)
597 		goto err;
598 	rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
599 	if (rc)
600 		goto err;
601 	cr->class >>= 8;
602 
603 	/*
604 	 * Export raw AFU PCIe like config record. For now this is read only by
605 	 * root - we can expand that later to be readable by non-root and maybe
606 	 * even writable provided we have a good use-case. Once we support
607 	 * exposing AFUs through a virtual PHB they will get that for free from
608 	 * Linux' PCI infrastructure, but until then it's not clear that we
609 	 * need it for anything since the main use case is just identifying
610 	 * AFUs, which can be done via the vendor, device and class attributes.
611 	 */
612 	sysfs_bin_attr_init(&cr->config_attr);
613 	cr->config_attr.attr.name = "config";
614 	cr->config_attr.attr.mode = S_IRUSR;
615 	cr->config_attr.size = afu->crs_len;
616 	cr->config_attr.read = afu_read_config;
617 
618 	rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
619 				  &afu->dev.kobj, "cr%i", cr->cr);
620 	if (rc)
621 		goto err;
622 
623 	rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
624 	if (rc)
625 		goto err1;
626 
627 	rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
628 	if (rc)
629 		goto err2;
630 
631 	return cr;
632 err2:
633 	sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
634 err1:
635 	kobject_put(&cr->kobj);
636 	return ERR_PTR(rc);
637 err:
638 	kfree(cr);
639 	return ERR_PTR(rc);
640 }
641 
642 void cxl_sysfs_afu_remove(struct cxl_afu *afu)
643 {
644 	struct device_attribute *dev_attr;
645 	struct afu_config_record *cr, *tmp;
646 	int i;
647 
648 	/* remove the err buffer bin attribute */
649 	if (afu->eb_len)
650 		device_remove_bin_file(&afu->dev, &afu->attr_eb);
651 
652 	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
653 		dev_attr = &afu_attrs[i];
654 		if (cxl_ops->support_attributes(dev_attr->attr.name,
655 						CXL_AFU_ATTRS))
656 			device_remove_file(&afu->dev, &afu_attrs[i]);
657 	}
658 
659 	list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
660 		sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
661 		kobject_put(&cr->kobj);
662 	}
663 }
664 
665 int cxl_sysfs_afu_add(struct cxl_afu *afu)
666 {
667 	struct device_attribute *dev_attr;
668 	struct afu_config_record *cr;
669 	int i, rc;
670 
671 	INIT_LIST_HEAD(&afu->crs);
672 
673 	for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
674 		dev_attr = &afu_attrs[i];
675 		if (cxl_ops->support_attributes(dev_attr->attr.name,
676 						CXL_AFU_ATTRS)) {
677 			if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
678 				goto err;
679 		}
680 	}
681 
682 	/* conditionally create the add the binary file for error info buffer */
683 	if (afu->eb_len) {
684 		sysfs_attr_init(&afu->attr_eb.attr);
685 
686 		afu->attr_eb.attr.name = "afu_err_buff";
687 		afu->attr_eb.attr.mode = S_IRUGO;
688 		afu->attr_eb.size = afu->eb_len;
689 		afu->attr_eb.read = afu_eb_read;
690 
691 		rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
692 		if (rc) {
693 			dev_err(&afu->dev,
694 				"Unable to create eb attr for the afu. Err(%d)\n",
695 				rc);
696 			goto err;
697 		}
698 	}
699 
700 	for (i = 0; i < afu->crs_num; i++) {
701 		cr = cxl_sysfs_afu_new_cr(afu, i);
702 		if (IS_ERR(cr)) {
703 			rc = PTR_ERR(cr);
704 			goto err1;
705 		}
706 		list_add(&cr->list, &afu->crs);
707 	}
708 
709 	return 0;
710 
711 err1:
712 	cxl_sysfs_afu_remove(afu);
713 	return rc;
714 err:
715 	/* reset the eb_len as we havent created the bin attr */
716 	afu->eb_len = 0;
717 
718 	for (i--; i >= 0; i--) {
719 		dev_attr = &afu_attrs[i];
720 		if (cxl_ops->support_attributes(dev_attr->attr.name,
721 						CXL_AFU_ATTRS))
722 		device_remove_file(&afu->dev, &afu_attrs[i]);
723 	}
724 	return rc;
725 }
726 
727 int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
728 {
729 	struct device_attribute *dev_attr;
730 	int i, rc;
731 
732 	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
733 		dev_attr = &afu_master_attrs[i];
734 		if (cxl_ops->support_attributes(dev_attr->attr.name,
735 						CXL_AFU_MASTER_ATTRS)) {
736 			if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
737 				goto err;
738 		}
739 	}
740 
741 	return 0;
742 
743 err:
744 	for (i--; i >= 0; i--) {
745 		dev_attr = &afu_master_attrs[i];
746 		if (cxl_ops->support_attributes(dev_attr->attr.name,
747 						CXL_AFU_MASTER_ATTRS))
748 			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
749 	}
750 	return rc;
751 }
752 
753 void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
754 {
755 	struct device_attribute *dev_attr;
756 	int i;
757 
758 	for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
759 		dev_attr = &afu_master_attrs[i];
760 		if (cxl_ops->support_attributes(dev_attr->attr.name,
761 						CXL_AFU_MASTER_ATTRS))
762 			device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
763 	}
764 }
765