// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Intel Corporation. */
#define dev_fmt(fmt) "Telemetry: " fmt

#include <asm/errno.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_telemetry.h"

#define TL_IS_ZERO(input)	((input) == 0)

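/* Check whether the loaded firmware advertises telemetry support. */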
static bool is_tl_supported(struct adf_accel_dev *accel_dev)
{
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;

	return fw_caps & TL_CAPABILITY_BIT;
}

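/*
 * Reject a telemetry layout that is missing required counter tables: the
 * device and ring pair counter tables must be present and non-empty, and
 * the slice utilization and execution tables must be present.
 */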
static int validate_tl_data(struct adf_tl_hw_data *tl_data)
{
	if (!tl_data->dev_counters ||
	    TL_IS_ZERO(tl_data->num_dev_counters) ||
	    !tl_data->sl_util_counters ||
	    !tl_data->sl_exec_counters ||
	    !tl_data->rp_counters ||
	    TL_IS_ZERO(tl_data->num_rp_counters))
		return -EOPNOTSUPP;

	return 0;
}

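/*
 * Sanity check the slice counts reported by the firmware. The struct is
 * scanned as a flat array of u8, which assumes it is composed of exactly
 * ADF_TL_SL_CNT_COUNT consecutive u8 fields.
 */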
static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
				      u8 max_slices_per_type)
{
	u8 *sl_counter = (u8 *)slice_count;
	int i;

	for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
		if (sl_counter[i] > max_slices_per_type)
			return -EINVAL;
	}

	return 0;
}

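/*
 * Allocate everything telemetry needs: the context struct, the ring pair
 * index table, the array of history snapshot buffers and the DMA-coherent
 * region that the firmware fills with counter data. Failures unwind in
 * reverse allocation order.
 */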
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	struct adf_telemetry *telemetry;
	int node = dev_to_node(dev);
	void *tl_data_regs;
	unsigned int i;

	telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
	if (!telemetry)
		return -ENOMEM;

	telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
						  sizeof(*telemetry->rp_num_indexes),
						  GFP_KERNEL);
	if (!telemetry->rp_num_indexes)
		goto err_free_tl;

	telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
						  sizeof(*telemetry->regs_hist_buff),
						  GFP_KERNEL);
	if (!telemetry->regs_hist_buff)
		goto err_free_rp_indexes;

	telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
						  &telemetry->regs_data_p,
						  GFP_KERNEL);
	if (!telemetry->regs_data)
		goto err_free_regs_hist_buff;

	for (i = 0; i < tl_data->num_hbuff; i++) {
		tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
		if (!tl_data_regs)
			goto err_free_dma;

		telemetry->regs_hist_buff[i] = tl_data_regs;
	}

	accel_dev->telemetry = telemetry;

	return 0;

err_free_dma:
	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);

	/* Free only the history buffers allocated so far. */
	while (i--)
		kfree(telemetry->regs_hist_buff[i]);

err_free_regs_hist_buff:
	kfree(telemetry->regs_hist_buff);
err_free_rp_indexes:
	kfree(telemetry->rp_num_indexes);
err_free_tl:
	kfree(telemetry);

	return -ENOMEM;
}

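/* Free all telemetry resources, in the reverse order of adf_tl_alloc_mem(). */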
static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t regs_sz = tl_data->layout_sz;
	unsigned int i;

	for (i = 0; i < tl_data->num_hbuff; i++)
		kfree(telemetry->regs_hist_buff[i]);

	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
			  telemetry->regs_data_p);

	kfree(telemetry->regs_hist_buff);
	kfree(telemetry->rp_num_indexes);
	kfree(telemetry);
	accel_dev->telemetry = NULL;
}

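/* Delay before the next telemetry snapshot, in jiffies. */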
static unsigned long get_next_timeout(void)
{
	return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
}

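/* Copy the live DMA region into the current slot of the history ring. */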
static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
{
	void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
	void *src = telemetry->regs_data;

	memcpy(dst, src, size);
}

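/*
 * Periodic worker: snapshot the DMA region into the history ring. The
 * message counter embedded in the layout (at msg_cnt_off) indicates
 * whether the firmware has produced new data; it is read again after the
 * copy, and the snapshot is redone once if it changed mid-copy.
 */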
static void tl_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct adf_telemetry *telemetry;
	struct adf_tl_hw_data *tl_data;
	u32 msg_cnt, old_msg_cnt;
	size_t layout_sz;
	u32 *regs_data;
	size_t id;

	delayed_work = to_delayed_work(work);
	telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
	tl_data = &GET_TL_DATA(telemetry->accel_dev);
	regs_data = telemetry->regs_data;

	id = tl_data->msg_cnt_off / sizeof(*regs_data);
	layout_sz = tl_data->layout_sz;

	/* Telemetry was stopped; do not re-arm the worker. */
	if (!atomic_read(&telemetry->state)) {
		cancel_delayed_work_sync(&telemetry->work_ctx);
		return;
	}

	/* Skip the snapshot if the firmware has not produced new data. */
	msg_cnt = regs_data[id];
	old_msg_cnt = msg_cnt;
	if (msg_cnt == telemetry->msg_cnt)
		goto out;

	mutex_lock(&telemetry->regs_hist_lock);

	snapshot_regs(telemetry, layout_sz);

	/* If the firmware updated the region mid-copy, take the snapshot again. */
	msg_cnt = regs_data[id];
	if (old_msg_cnt != msg_cnt)
		snapshot_regs(telemetry, layout_sz);

	telemetry->msg_cnt = msg_cnt;
	telemetry->hb_num++;
	telemetry->hb_num %= telemetry->hbuffs;

	mutex_unlock(&telemetry->regs_hist_lock);

out:
	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
}

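/* Stop the snapshot worker and request that the firmware stop telemetry. */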
int adf_tl_halt(struct adf_accel_dev *accel_dev)
{
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	int ret;

	cancel_delayed_work_sync(&telemetry->work_ctx);
	atomic_set(&telemetry->state, 0);

	ret = adf_send_admin_tl_stop(accel_dev);
	if (ret)
		dev_err(dev, "failed to stop telemetry\n");

	return ret;
}

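/*
 * Request that the firmware start writing telemetry into the DMA region,
 * validate the slice counts it reports, then arm the periodic snapshot
 * worker. Note that @state doubles as the number of history buffers to
 * rotate through.
 */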
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t layout_sz = tl_data->layout_sz;
	int ret;

	ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
				      layout_sz, telemetry->rp_num_indexes,
				      &telemetry->slice_cnt);
	if (ret) {
		dev_err(dev, "failed to start telemetry\n");
		return ret;
	}

	ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
	if (ret) {
		dev_err(dev, "invalid value returned by FW\n");
		adf_send_admin_tl_stop(accel_dev);
		return ret;
	}

	telemetry->hbuffs = state;
	atomic_set(&telemetry->state, state);

	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());

	return 0;
}

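/*
 * Validate the device's telemetry layout, allocate the backing memory and
 * initialize the locks, the delayed work item and the ring pair index
 * table (all entries start out disabled).
 */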
int adf_tl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
	struct device *dev = &GET_DEV(accel_dev);
	struct adf_telemetry *telemetry;
	unsigned int i;
	int ret;

	ret = validate_tl_data(tl_data);
	if (ret)
		return ret;

	ret = adf_tl_alloc_mem(accel_dev);
	if (ret) {
		dev_err(dev, "failed to initialize: %d\n", ret);
		return ret;
	}

	telemetry = accel_dev->telemetry;
	telemetry->accel_dev = accel_dev;

	mutex_init(&telemetry->wr_lock);
	mutex_init(&telemetry->regs_hist_lock);
	INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);

	for (i = 0; i < max_rp; i++)
		telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;

	return 0;
}

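/*
 * Called on device start: if the loaded firmware does not advertise
 * telemetry support, release the memory reserved by adf_tl_init() and
 * report the feature as unsupported.
 */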
int adf_tl_start(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);

	if (!accel_dev->telemetry)
		return -EOPNOTSUPP;

	if (!is_tl_supported(accel_dev)) {
		dev_info(dev, "feature not supported by FW\n");
		adf_tl_free_mem(accel_dev);
		return -EOPNOTSUPP;
	}

	return 0;
}

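/* Called on device stop: halt telemetry if it is currently running. */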
void adf_tl_stop(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	if (atomic_read(&accel_dev->telemetry->state))
		adf_tl_halt(accel_dev);
}

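/* Called on device shutdown: release telemetry memory, if any. */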
void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
{
	if (!accel_dev->telemetry)
		return;

	adf_tl_free_mem(accel_dev);
}
310