// SPDX-License-Identifier: GPL-2.0
/*
 * The generic EDAC scrub driver controls the memory scrubbers in the
 * system. The common sysfs scrub interface abstracts the control of
 * arbitrary scrubbing implementations into a unified set of functions.
 *
 * Copyright (c) 2024-2025 HiSilicon Limited.
 */

#include <linux/edac.h>
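
/*
 * Illustrative sketch (not part of this driver): a client scrub driver is
 * expected to supply a struct edac_scrub_ops whose callbacks match the way
 * this file invokes them -- "read"/"get" callbacks fill in a value through a
 * pointer, "write"/"set" callbacks take the value directly, and every
 * callback receives the parent device plus the driver-private pointer kept
 * in the feature context. The my_scrub_* names below are made up; only the
 * ops member names and argument shapes are taken from this file.
 *
 *    static int my_scrub_read_addr(struct device *dev, void *drv_data, u64 *addr)
 *    {
 *            struct my_scrub *ms = drv_data;
 *
 *            *addr = ms->start;      // report the current scrub start address
 *            return 0;
 *    }
 *
 *    static int my_scrub_write_addr(struct device *dev, void *drv_data, u64 addr)
 *    {
 *            struct my_scrub *ms = drv_data;
 *
 *            ms->start = addr;       // program a new scrub start address
 *            return 0;
 *    }
 *
 *    static const struct edac_scrub_ops my_scrub_ops = {
 *            .read_addr  = my_scrub_read_addr,
 *            .write_addr = my_scrub_write_addr,
 *            // callbacks left NULL make scrub_attr_visible() below hide or
 *            // downgrade the matching sysfs attribute
 *    };
 */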

enum edac_scrub_attributes {
        SCRUB_ADDRESS,
        SCRUB_SIZE,
        SCRUB_ENABLE_BACKGROUND,
        SCRUB_MIN_CYCLE_DURATION,
        SCRUB_MAX_CYCLE_DURATION,
        SCRUB_CUR_CYCLE_DURATION,
        SCRUB_MAX_ATTRS
};

struct edac_scrub_dev_attr {
        struct device_attribute dev_attr;
        u8 instance;
};

struct edac_scrub_context {
        char name[EDAC_FEAT_NAME_LEN];
        struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
        struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
        struct attribute_group group;
};

#define TO_SCRUB_DEV_ATTR(_dev_attr) \
        container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)

#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
static ssize_t attrib##_show(struct device *ras_feat_dev, \
                             struct device_attribute *attr, char *buf) \
{ \
        u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
        struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
        const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
        type data; \
        int ret; \
\
        ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
        if (ret) \
                return ret; \
\
        return sysfs_emit(buf, format, data); \
}

EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
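
/*
 * For reference, the first instantiation above,
 * EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n"), expands to roughly
 * the following sysfs show handler:
 *
 *    static ssize_t addr_show(struct device *ras_feat_dev,
 *                             struct device_attribute *attr, char *buf)
 *    {
 *            u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance;
 *            struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
 *            const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
 *            u64 data;
 *            int ret;
 *
 *            ret = ops->read_addr(ras_feat_dev->parent, ctx->scrub[inst].private, &data);
 *            if (ret)
 *                    return ret;
 *
 *            return sysfs_emit(buf, "0x%llx\n", data);
 *    }
 */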

#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
static ssize_t attrib##_store(struct device *ras_feat_dev, \
                              struct device_attribute *attr, \
                              const char *buf, size_t len) \
{ \
        u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
        struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
        const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
        type data; \
        int ret; \
\
        ret = conv_func(buf, 0, &data); \
        if (ret < 0) \
                return ret; \
\
        ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
        if (ret) \
                return ret; \
\
        return len; \
}

EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
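
/*
 * From userspace each generated show/store pair backs a plain sysfs file, so
 * (path shown for illustration only) reprogramming the scrub start address
 * looks like:
 *
 *    echo 0x10000000 > /sys/.../<ras-feature-dev>/scrub0/addr
 *
 * The written string is parsed by conv_func (kstrtou64()/kstrtoul()) before
 * being handed to the driver callback.
 */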
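
/*
 * Decide, per scrub instance, which sysfs attributes are visible and with
 * what mode: for example, an instance whose ops implement read_addr but not
 * write_addr gets a read-only (0444) "addr" file, and one that implements
 * neither has the file hidden entirely (mode 0).
 */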
static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
{
        struct device *ras_feat_dev = kobj_to_dev(kobj);
        struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
        u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
        struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
        const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;

        switch (attr_id) {
        case SCRUB_ADDRESS:
                if (ops->read_addr) {
                        if (ops->write_addr)
                                return a->mode;
                        else
                                return 0444;
                }
                break;
        case SCRUB_SIZE:
                if (ops->read_size) {
                        if (ops->write_size)
                                return a->mode;
                        else
                                return 0444;
                }
                break;
        case SCRUB_ENABLE_BACKGROUND:
                if (ops->get_enabled_bg) {
                        if (ops->set_enabled_bg)
                                return a->mode;
                        else
                                return 0444;
                }
                break;
        case SCRUB_MIN_CYCLE_DURATION:
                if (ops->get_min_cycle)
                        return a->mode;
                break;
        case SCRUB_MAX_CYCLE_DURATION:
                if (ops->get_max_cycle)
                        return a->mode;
                break;
        case SCRUB_CUR_CYCLE_DURATION:
                if (ops->get_cycle_duration) {
                        if (ops->set_cycle_duration)
                                return a->mode;
                        else
                                return 0444;
                }
                break;
        default:
                break;
        }

        return 0;
}

#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
        ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
                                        .instance = _instance })

#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
        ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
                                        .instance = _instance })

#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
        ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
                                        .instance = _instance })

static int scrub_create_desc(struct device *scrub_dev,
                             const struct attribute_group **attr_groups, u8 instance)
{
        struct edac_scrub_context *scrub_ctx;
        struct attribute_group *group;
        int i;
        struct edac_scrub_dev_attr dev_attr[] = {
                [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
                [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
                [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
                [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
                [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
                [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
        };

        scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
        if (!scrub_ctx)
                return -ENOMEM;

        group = &scrub_ctx->group;
        for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
                memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
                sysfs_attr_init(&scrub_ctx->scrub_dev_attr[i].dev_attr.attr);
                scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
        }
        sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
        group->name = scrub_ctx->name;
        group->attrs = scrub_ctx->scrub_attrs;
        group->is_visible = scrub_attr_visible;

        attr_groups[0] = group;

        return 0;
}

/**
 * edac_scrub_get_desc - get EDAC scrub descriptors
 * @scrub_dev: client device, with scrub support
 * @attr_groups: pointer to attribute group container
 * @instance: device's scrub instance number
 *
 * Return:
 * * %0       - Success.
 * * %-EINVAL - Invalid parameters passed.
 * * %-ENOMEM - Dynamic memory allocation failed.
 */
int edac_scrub_get_desc(struct device *scrub_dev,
                        const struct attribute_group **attr_groups, u8 instance)
{
        if (!scrub_dev || !attr_groups)
                return -EINVAL;

        return scrub_create_desc(scrub_dev, attr_groups, instance);
}
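
/*
 * Usage sketch (illustrative; makes assumptions beyond this file): a RAS
 * feature driver that keeps a struct edac_dev_feat_ctx as the drvdata of
 * scrub_dev -- with ctx->scrub[instance].scrub_ops and .private filled in,
 * as the show/store handlers above require -- can fetch the "scrubN" group
 * and expose it, e.g. with sysfs_create_group(). In-tree, the EDAC feature
 * registration core is expected to collect these groups when the RAS
 * feature device is created; the direct call below is only a minimal sketch.
 *
 *    const struct attribute_group *groups[2] = {};
 *    int ret;
 *
 *    ret = edac_scrub_get_desc(scrub_dev, groups, 0);
 *    if (ret)
 *            return ret;
 *
 *    ret = sysfs_create_group(&scrub_dev->kobj, groups[0]);
 *    if (ret)
 *            return ret;
 */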