// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"

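/*
 * sysfs interface for QAT devices. The attributes below are exposed in
 * the "qat" group under the accelerator's PCI device, e.g.
 * /sys/bus/pci/devices/<BDF>/qat/state. The ABI is described in
 * Documentation/ABI/testing/sysfs-driver-qat.
 */
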
#define UNSET_RING_NUM -1

static const char * const state_operations[] = {
	[DEV_DOWN] = "down",
	[DEV_UP] = "up",
};

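/*
 * state_show() - Report "up" if the device has been started, "down"
 * otherwise.
 */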
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct adf_accel_dev *accel_dev;
	char *state;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	state = adf_dev_started(accel_dev) ? "up" : "down";
	return sysfs_emit(buf, "%s\n", state);
}

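/*
 * state_store() - Bring the device up or down when "up" or "down" is
 * written to the attribute, e.g.:
 *
 *   # echo up > /sys/bus/pci/devices/<BDF>/qat/state
 *
 * The write is rejected with -EBUSY while the device is in reset or
 * still in use.
 */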
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	u32 accel_id;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	accel_id = accel_dev->accel_id;

	if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
		dev_info(dev, "Device qat_dev%d is busy\n", accel_id);
		return -EBUSY;
	}

	ret = sysfs_match_string(state_operations, buf);
	if (ret < 0)
		return ret;

	switch (ret) {
	case DEV_DOWN:
		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);

		if (!adf_dev_started(accel_dev)) {
			dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
				 accel_id);

			break;
		}

		ret = adf_dev_down(accel_dev);
		if (ret)
			return ret;

		break;
	case DEV_UP:
		dev_info(dev, "Starting device qat_dev%d\n", accel_id);

		ret = adf_dev_up(accel_dev, true);
		if (ret == -EALREADY) {
			break;
		} else if (ret) {
			dev_err(dev, "Failed to start device qat_dev%d\n",
				accel_id);
			adf_dev_down(accel_dev);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}

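/*
 * cfg_services_show() - Report the services (e.g. "sym;asym" or "dc")
 * enabled in the device configuration.
 */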
static ssize_t cfg_services_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%s\n", services);
}

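/* Persist the selected services string in the device configuration. */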
static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev,
				       const char *services)
{
	return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					   ADF_SERVICES_ENABLED, services,
					   ADF_STR);
}

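/*
 * cfg_services_store() - Reconfigure the enabled services. The value
 * written must match an entry in adf_cfg_services[] and the device must
 * be down for the write to be accepted.
 */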
static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	int ret;

	ret = sysfs_match_string(adf_cfg_services, buf);
	if (ret < 0)
		return ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]);
	if (ret < 0)
		return ret;

	hw_data = GET_HW_DATA(accel_dev);

	/* Update the capabilities mask after a change in configuration.
	 * A call to this function is required as capabilities are, at the
	 * moment, tied to the configuration.
	 */
	hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
	if (!hw_data->accel_capabilities_mask)
		return -EINVAL;

	return count;
}

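/*
 * pm_idle_enabled_show() - Report whether idle power management is
 * enabled. Reads as "1" (enabled) when the key is absent from the
 * configuration.
 */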
static ssize_t pm_idle_enabled_show(struct device *dev, struct device_attribute *attr,
				    char *buf)
{
	char pm_idle_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
	struct adf_accel_dev *accel_dev;
	int ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_PM_IDLE_SUPPORT, pm_idle_enabled);
	if (ret)
		return sysfs_emit(buf, "1\n");

	return sysfs_emit(buf, "%s\n", pm_idle_enabled);
}

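/*
 * pm_idle_enabled_store() - Enable or disable idle power management.
 * Accepts any value understood by kstrtobool(); the device must be down.
 */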
static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long pm_idle_enabled_cfg_val;
	struct adf_accel_dev *accel_dev;
	bool pm_idle_enabled;
	int ret;

	ret = kstrtobool(buf, &pm_idle_enabled);
	if (ret)
		return ret;

	pm_idle_enabled_cfg_val = pm_idle_enabled;
	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	if (adf_dev_started(accel_dev)) {
		dev_info(dev, "Device qat_dev%d must be down to set pm_idle_enabled.\n",
			 accel_dev->accel_id);
		return -EINVAL;
	}

	ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
					  ADF_PM_IDLE_SUPPORT, &pm_idle_enabled_cfg_val,
					  ADF_DEC);
	if (ret)
		return ret;

	return count;
}
static DEVICE_ATTR_RW(pm_idle_enabled);

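/* auto_reset_show() - Report whether automatic reset on error is on. */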
static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr,
			       char *buf)
{
	char *auto_reset;
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	auto_reset = accel_dev->autoreset_on_error ? "on" : "off";

	return sysfs_emit(buf, "%s\n", auto_reset);
}

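/*
 * auto_reset_store() - Turn automatic device reset on error on or off.
 * Takes effect immediately; the device does not need to be down.
 */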
static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	bool enabled = false;
	int ret;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	accel_dev->autoreset_on_error = enabled;

	return count;
}
static DEVICE_ATTR_RW(auto_reset);

static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);

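/*
 * rp2srv_show() - Report the service ("dc", "sym" or "asym") mapped to
 * the ring pair previously selected via a write to this attribute;
 * returns -EINVAL until a ring pair has been selected.
 */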
static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct adf_hw_device_data *hw_data;
	struct adf_accel_dev *accel_dev;
	enum adf_cfg_service_type svc;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	hw_data = GET_HW_DATA(accel_dev);

	if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
		return -EINVAL;

	down_read(&accel_dev->sysfs.lock);
	svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num %
					      hw_data->num_banks_per_vf);
	up_read(&accel_dev->sysfs.lock);

	switch (svc) {
	case COMP:
		return sysfs_emit(buf, "%s\n", ADF_CFG_DC);
	case SYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
	case ASYM:
		return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
	default:
		break;
	}
	return -EINVAL;
}

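/*
 * rp2srv_store() - Select the ring pair whose service mapping is
 * reported by subsequent reads, e.g.:
 *
 *   # echo 4 > /sys/bus/pci/devices/<BDF>/qat/rp2srv
 *   # cat /sys/bus/pci/devices/<BDF>/qat/rp2srv
 *
 * The index must be lower than the number of banks on the device.
 */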
static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct adf_accel_dev *accel_dev;
	int num_rings, ret;
	unsigned int ring;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &ring);
	if (ret)
		return ret;

	num_rings = GET_MAX_BANKS(accel_dev);
	if (ring >= num_rings) {
		dev_err(&GET_DEV(accel_dev),
			"Device does not support more than %u ring pairs\n",
			num_rings);
		return -EINVAL;
	}

	down_write(&accel_dev->sysfs.lock);
	accel_dev->sysfs.ring_num = ring;
	up_write(&accel_dev->sysfs.lock);

	return count;
}
static DEVICE_ATTR_RW(rp2srv);

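/* num_rps_show() - Report the number of ring pairs the device supports. */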
static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct adf_accel_dev *accel_dev;

	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev));
}
static DEVICE_ATTR_RO(num_rps);

static struct attribute *qat_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_cfg_services.attr,
	&dev_attr_pm_idle_enabled.attr,
	&dev_attr_rp2srv.attr,
	&dev_attr_num_rps.attr,
	&dev_attr_auto_reset.attr,
	NULL,
};

static struct attribute_group qat_group = {
	.attrs = qat_attrs,
	.name = "qat",
};

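/*
 * adf_sysfs_init() - Create the "qat" attribute group under the PCI
 * device and reset the ring pair selector used by rp2srv.
 */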
int adf_sysfs_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = devm_device_add_group(&GET_DEV(accel_dev), &qat_group);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to create qat attribute group: %d\n", ret);
	}

	accel_dev->sysfs.ring_num = UNSET_RING_NUM;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_sysfs_init);