// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>

#include "adf_accel_devices.h"
#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_cnv_dbgfs.h"
#include "qat_compression.h"

#define CNV_DEBUGFS_FILENAME		"cnv_errors"
#define CNV_MIN_PADDING			16

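/*
 * The latest error value retrieved through adf_get_cnv_stats() is a 16 bit
 * word: bits 0-11 carry error specific information and bits 12-15 carry the
 * error type (enum cnv_error_type). For the length delta errors the info
 * field is a 12 bit signed value; for decompression and translation errors
 * it is an 8 bit signed slice error code. Types beyond the known ones are
 * clamped to CNV_ERR_TYPE_UNKNOWN when decoding.
 */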
#define CNV_ERR_INFO_MASK		GENMASK(11, 0)
#define CNV_ERR_TYPE_MASK		GENMASK(15, 12)
#define CNV_SLICE_ERR_MASK		GENMASK(7, 0)
#define CNV_SLICE_ERR_SIGN_BIT_INDEX	7
#define CNV_DELTA_ERR_SIGN_BIT_INDEX	11

enum cnv_error_type {
	CNV_ERR_TYPE_NONE,
	CNV_ERR_TYPE_CHECKSUM,
	CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH,
	CNV_ERR_TYPE_DECOMPRESSION,
	CNV_ERR_TYPE_TRANSLATION,
	CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH,
	CNV_ERR_TYPE_UNKNOWN,
	CNV_ERR_TYPES_COUNT
};

#define CNV_ERROR_TYPE_GET(latest_err)	\
	min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN)

#define CNV_GET_DELTA_ERR_INFO(latest_error)	\
	sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX)

#define CNV_GET_SLICE_ERR_INFO(latest_error)	\
	sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX)

#define CNV_GET_DEFAULT_ERR_INFO(latest_error)	\
	u16_get_bits(latest_error, CNV_ERR_INFO_MASK)

enum cnv_fields {
	CNV_ERR_COUNT,
	CNV_LATEST_ERR,
	CNV_FIELDS_COUNT
};

static const char * const cnv_field_names[CNV_FIELDS_COUNT] = {
	[CNV_ERR_COUNT] = "Total Errors",
	[CNV_LATEST_ERR] = "Last Error",
};

static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = {
	[CNV_ERR_TYPE_NONE] = "No Error",
	[CNV_ERR_TYPE_CHECKSUM] = "Checksum Error",
	[CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P",
	[CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error",
	[CNV_ERR_TYPE_TRANSLATION] = "Xlat Error",
	[CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C",
	[CNV_ERR_TYPE_UNKNOWN] = "Unknown Error",
};

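/*
 * Per-AE snapshot of the CNV error counters. is_comp_ae is cleared for
 * engines for which no CNV stats could be retrieved, i.e. engines not
 * running the compression service; those entries are skipped when printing.
 */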
struct ae_cnv_errors {
	u16 ae;
	u16 err_cnt;
	u16 latest_err;
	bool is_comp_ae;
};

struct cnv_err_stats {
	u16 ae_count;
	struct ae_cnv_errors ae_cnv_errors[];
};

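/*
 * Decode the error specific bits of the latest error word according to the
 * reported error type: length errors carry a 12 bit signed delta, slice
 * (decompression/translation) errors an 8 bit signed code, and any other
 * type the raw 12 bit info field.
 */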
static s16 get_err_info(u8 error_type, u16 latest)
{
	switch (error_type) {
	case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH:
	case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH:
		return CNV_GET_DELTA_ERR_INFO(latest);
	case CNV_ERR_TYPE_DECOMPRESSION:
	case CNV_ERR_TYPE_TRANSLATION:
		return CNV_GET_SLICE_ERR_INFO(latest);
	default:
		return CNV_GET_DEFAULT_ERR_INFO(latest);
	}
}

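/*
 * seq_file iteration: position 0 maps to SEQ_START_TOKEN (the header row),
 * positions 1..ae_count map to entry pos - 1 of the ae_cnv_errors table
 * attached to the seq_file private data.
 */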
static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos)
{
	struct cnv_err_stats *err_stats = sfile->private;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	if (*pos > err_stats->ae_count)
		return NULL;

	return &err_stats->ae_cnv_errors[*pos - 1];
}

static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v,
				     loff_t *pos)
{
	struct cnv_err_stats *err_stats = sfile->private;

	(*pos)++;

	if (*pos > err_stats->ae_count)
		return NULL;

	return &err_stats->ae_cnv_errors[*pos - 1];
}

static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v)
{
}

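/*
 * Print the header row (on SEQ_START_TOKEN) or one row per compression AE.
 * With columns padded to CNV_MIN_PADDING, the file reads roughly like this
 * (spacing approximate, values illustrative only):
 *
 * AE     Total Errors      Last Error
 * 0:                0        No Error [0]
 * 1:                2  Checksum Error [0]
 */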
static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v)
{
	struct ae_cnv_errors *ae_errors;
	unsigned int i;
	s16 err_info;
	u8 err_type;

	if (v == SEQ_START_TOKEN) {
		seq_puts(sfile, "AE ");
		for (i = 0; i < CNV_FIELDS_COUNT; ++i)
			seq_printf(sfile, " %*s", CNV_MIN_PADDING,
				   cnv_field_names[i]);
	} else {
		ae_errors = v;

		if (!ae_errors->is_comp_ae)
			return 0;

		err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err);
		err_info = get_err_info(err_type, ae_errors->latest_err);

		seq_printf(sfile, "%d:", ae_errors->ae);
		seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt);
		seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING,
			   cnv_error_names[err_type], err_info);
	}
	seq_putc(sfile, '\n');

	return 0;
}

static const struct seq_operations qat_cnv_errors_sops = {
	.start = qat_cnv_errors_seq_start,
	.next = qat_cnv_errors_seq_next,
	.stop = qat_cnv_errors_seq_stop,
	.show = qat_cnv_errors_seq_show,
};

/**
 * cnv_err_stats_alloc() - Get CNV stats for the provided device.
 * @accel_dev: Pointer to a QAT acceleration device
 *
 * Allocates and populates a table of CNV error statistics for each non-admin
 * AE available through the supplied acceleration device. The caller becomes
 * the owner of this memory and is responsible for freeing it with kfree().
 *
 * Return: a pointer to a dynamically allocated struct cnv_err_stats on
 * success, or an error pointer (ERR_PTR) on failure.
 */
static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct cnv_err_stats *err_stats;
	unsigned long ae_count;
	unsigned long ae_mask;
	size_t err_stats_size;
	unsigned long ae;
	unsigned int i;
	u16 latest_err;
	u16 err_cnt;
	int ret;

	if (!adf_dev_started(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
		return ERR_PTR(-EBUSY);
	}

	/* Ignore the admin AEs */
	ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
	ae_count = hweight_long(ae_mask);
	if (unlikely(!ae_count))
		return ERR_PTR(-EINVAL);

	err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count);
	err_stats = kmalloc(err_stats_size, GFP_KERNEL);
	if (!err_stats)
		return ERR_PTR(-ENOMEM);

	err_stats->ae_count = ae_count;

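	/*
	 * Query the CNV counters of each non-admin AE via adf_get_cnv_stats().
	 * If the query fails for an AE, keep its slot but mark it as a
	 * non-compression engine so that the show callback skips it.
	 */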
	i = 0;
	for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err);
		if (ret) {
			dev_dbg(&GET_DEV(accel_dev),
				"Failed to get CNV stats for ae %ld, [%d].\n",
				ae, ret);
			err_stats->ae_cnv_errors[i++].is_comp_ae = false;
			continue;
		}
		err_stats->ae_cnv_errors[i].is_comp_ae = true;
		err_stats->ae_cnv_errors[i].latest_err = latest_err;
		err_stats->ae_cnv_errors[i].err_cnt = err_cnt;
		err_stats->ae_cnv_errors[i].ae = ae;
		i++;
	}

	return err_stats;
}

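/*
 * On open, take a snapshot of the CNV error counters and attach it to the
 * seq_file private data; the snapshot is freed in the release callback.
 */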
static int qat_cnv_errors_file_open(struct inode *inode, struct file *file)
{
	struct adf_accel_dev *accel_dev = inode->i_private;
	struct seq_file *cnv_errors_seq_file;
	struct cnv_err_stats *cnv_err_stats;
	int ret;

	cnv_err_stats = cnv_err_stats_alloc(accel_dev);
	if (IS_ERR(cnv_err_stats))
		return PTR_ERR(cnv_err_stats);

	ret = seq_open(file, &qat_cnv_errors_sops);
	if (unlikely(ret)) {
		kfree(cnv_err_stats);
		return ret;
	}

	cnv_errors_seq_file = file->private_data;
	cnv_errors_seq_file->private = cnv_err_stats;
	return ret;
}

static int qat_cnv_errors_file_release(struct inode *inode, struct file *file)
{
	struct seq_file *cnv_errors_seq_file = file->private_data;

	kfree(cnv_errors_seq_file->private);
	cnv_errors_seq_file->private = NULL;

	return seq_release(inode, file);
}

static const struct file_operations qat_cnv_fops = {
	.owner = THIS_MODULE,
	.open = qat_cnv_errors_file_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = qat_cnv_errors_file_release,
};

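/*
 * Fallback read handler used when the device has no acceleration engine
 * configured for compression: the file only returns a fixed message.
 */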
static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count,
				 loff_t *pos)
{
	char *file_msg = "No engine configured for comp\n";

	return simple_read_from_buffer(buf, count, pos, file_msg,
				       strlen(file_msg));
}

static const struct file_operations qat_cnv_no_comp_fops = {
	.owner = THIS_MODULE,
	.read = no_comp_file_read,
};

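/*
 * Create the "cnv_errors" entry (mode 0400) under the device debugfs
 * directory. Devices without compression capability get the stub fops above
 * instead of the seq_file based one. The exact path depends on how
 * accel_dev->debugfs_dir was created, typically something like
 * /sys/kernel/debug/qat_<device>/cnv_errors.
 */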
void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev)
{
	const struct file_operations *fops;
	void *data;

	if (adf_hw_dev_has_compression(accel_dev)) {
		fops = &qat_cnv_fops;
		data = accel_dev;
	} else {
		fops = &qat_cnv_no_comp_fops;
		data = NULL;
	}

	accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400,
						     accel_dev->debugfs_dir,
						     data, fops);
}

void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
	debugfs_remove(accel_dev->cnv_dbgfile);
	accel_dev->cnv_dbgfile = NULL;
}