xref: /linux/drivers/crypto/intel/qat/qat_common/adf_telemetry.c (revision 189f164e573e18d9f8876dbd3ad8fcbe11f93037)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2023 Intel Corporation. */
3 #define dev_fmt(fmt) "Telemetry: " fmt
4 
5 #include <asm/errno.h>
6 #include <linux/atomic.h>
7 #include <linux/device.h>
8 #include <linux/dev_printk.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/jiffies.h>
11 #include <linux/kernel.h>
12 #include <linux/mutex.h>
13 #include <linux/slab.h>
14 #include <linux/string.h>
15 #include <linux/workqueue.h>
16 
17 #include "adf_admin.h"
18 #include "adf_accel_devices.h"
19 #include "adf_common_drv.h"
20 #include "adf_telemetry.h"
21 
22 #define TL_IS_ZERO(input)	((input) == 0)
23 
is_tl_supported(struct adf_accel_dev * accel_dev)24 static bool is_tl_supported(struct adf_accel_dev *accel_dev)
25 {
26 	u16 fw_caps =  GET_HW_DATA(accel_dev)->fw_capabilities;
27 
28 	return fw_caps & TL_CAPABILITY_BIT;
29 }
30 
validate_tl_data(struct adf_tl_hw_data * tl_data)31 static int validate_tl_data(struct adf_tl_hw_data *tl_data)
32 {
33 	if (!tl_data->dev_counters ||
34 	    TL_IS_ZERO(tl_data->num_dev_counters) ||
35 	    !tl_data->sl_util_counters ||
36 	    !tl_data->sl_exec_counters ||
37 	    !tl_data->rp_counters ||
38 	    TL_IS_ZERO(tl_data->num_rp_counters))
39 		return -EOPNOTSUPP;
40 
41 	return 0;
42 }
43 
/*
 * Validate the per-slice-type counters returned by firmware.
 * The structure is scanned as a flat array of u8 counters; any entry
 * above the device maximum indicates corrupt firmware data.
 */
static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
				      u8 max_slices_per_type)
{
	const u8 *counters = (u8 *)slice_count;
	int idx;

	for (idx = 0; idx < ADF_TL_SL_CNT_COUNT; idx++)
		if (counters[idx] > max_slices_per_type)
			return -EINVAL;

	return 0;
}
57 
adf_tl_alloc_mem(struct adf_accel_dev * accel_dev)58 static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
59 {
60 	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
61 	struct device *dev = &GET_DEV(accel_dev);
62 	size_t regs_sz = tl_data->layout_sz;
63 	struct adf_telemetry *telemetry;
64 	int node = dev_to_node(dev);
65 	void *tl_data_regs;
66 	unsigned int i;
67 
68 	telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
69 	if (!telemetry)
70 		return -ENOMEM;
71 
72 	telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
73 						  sizeof(*telemetry->rp_num_indexes),
74 						  GFP_KERNEL);
75 	if (!telemetry->rp_num_indexes)
76 		goto err_free_tl;
77 
78 	telemetry->regs_hist_buff = kmalloc_objs(*telemetry->regs_hist_buff,
79 						 tl_data->num_hbuff);
80 	if (!telemetry->regs_hist_buff)
81 		goto err_free_rp_indexes;
82 
83 	telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
84 						  &telemetry->regs_data_p,
85 						  GFP_KERNEL);
86 	if (!telemetry->regs_data)
87 		goto err_free_regs_hist_buff;
88 
89 	for (i = 0; i < tl_data->num_hbuff; i++) {
90 		tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
91 		if (!tl_data_regs)
92 			goto err_free_dma;
93 
94 		telemetry->regs_hist_buff[i] = tl_data_regs;
95 	}
96 
97 	accel_dev->telemetry = telemetry;
98 
99 	return 0;
100 
101 err_free_dma:
102 	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
103 			  telemetry->regs_data_p);
104 
105 	while (i--)
106 		kfree(telemetry->regs_hist_buff[i]);
107 
108 err_free_regs_hist_buff:
109 	kfree(telemetry->regs_hist_buff);
110 err_free_rp_indexes:
111 	kfree(telemetry->rp_num_indexes);
112 err_free_tl:
113 	kfree(telemetry);
114 
115 	return -ENOMEM;
116 }
117 
adf_tl_free_mem(struct adf_accel_dev * accel_dev)118 static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
119 {
120 	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
121 	struct adf_telemetry *telemetry = accel_dev->telemetry;
122 	struct device *dev = &GET_DEV(accel_dev);
123 	size_t regs_sz = tl_data->layout_sz;
124 	unsigned int i;
125 
126 	for (i = 0; i < tl_data->num_hbuff; i++)
127 		kfree(telemetry->regs_hist_buff[i]);
128 
129 	dma_free_coherent(dev, regs_sz, telemetry->regs_data,
130 			  telemetry->regs_data_p);
131 
132 	kfree(telemetry->regs_hist_buff);
133 	kfree(telemetry->rp_num_indexes);
134 	kfree(telemetry);
135 	accel_dev->telemetry = NULL;
136 }
137 
get_next_timeout(void)138 static unsigned long get_next_timeout(void)
139 {
140 	return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
141 }
142 
/* Copy the live firmware counter region into the current history slot. */
static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
{
	memcpy(telemetry->regs_hist_buff[telemetry->hb_num],
	       telemetry->regs_data, size);
}
150 
/*
 * Periodic telemetry sampler, run on the misc workqueue.
 *
 * Reads the firmware's message counter out of the DMA region, and when it
 * has advanced since the last pass, snapshots the whole counter layout into
 * the current history buffer under regs_hist_lock. The counter is re-read
 * after the copy: if it moved while we were copying, the snapshot is taken
 * again so the stored buffer is internally consistent. Re-queues itself
 * until telemetry->state is cleared.
 */
static void tl_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct adf_telemetry *telemetry;
	struct adf_tl_hw_data *tl_data;
	u32 msg_cnt, old_msg_cnt;
	size_t layout_sz;
	u32 *regs_data;
	size_t id;

	delayed_work = to_delayed_work(work);
	telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
	tl_data = &GET_TL_DATA(telemetry->accel_dev);
	regs_data = telemetry->regs_data;

	/* Index of the message counter within the u32-viewed DMA region. */
	id = tl_data->msg_cnt_off / sizeof(*regs_data);
	layout_sz = tl_data->layout_sz;

	/* Telemetry disabled: stop rescheduling ourselves. */
	if (!atomic_read(&telemetry->state)) {
		cancel_delayed_work_sync(&telemetry->work_ctx);
		return;
	}

	msg_cnt = regs_data[id];
	old_msg_cnt = msg_cnt;
	/* No new firmware data since the last pass; just re-arm the timer. */
	if (msg_cnt == telemetry->msg_cnt)
		goto out;

	mutex_lock(&telemetry->regs_hist_lock);

	snapshot_regs(telemetry, layout_sz);

	/* Check if data changed while updating it */
	msg_cnt = regs_data[id];
	if (old_msg_cnt != msg_cnt)
		snapshot_regs(telemetry, layout_sz);

	telemetry->msg_cnt = msg_cnt;
	/* Advance to the next slot of the circular history buffer. */
	telemetry->hb_num++;
	telemetry->hb_num %= telemetry->hbuffs;

	mutex_unlock(&telemetry->regs_hist_lock);

out:
	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
}
197 
adf_tl_halt(struct adf_accel_dev * accel_dev)198 int adf_tl_halt(struct adf_accel_dev *accel_dev)
199 {
200 	struct adf_telemetry *telemetry = accel_dev->telemetry;
201 	struct device *dev = &GET_DEV(accel_dev);
202 	int ret;
203 
204 	cancel_delayed_work_sync(&telemetry->work_ctx);
205 	atomic_set(&telemetry->state, 0);
206 
207 	ret = adf_send_admin_tl_stop(accel_dev);
208 	if (ret)
209 		dev_err(dev, "failed to stop telemetry\n");
210 
211 	return ret;
212 }
213 
adf_set_cmdq_cnt(struct adf_accel_dev * accel_dev,struct adf_tl_hw_data * tl_data)214 static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
215 			     struct adf_tl_hw_data *tl_data)
216 {
217 	struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
218 
219 	slice_cnt = &accel_dev->telemetry->slice_cnt;
220 	cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
221 
222 	cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
223 	cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
224 	cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
225 	cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
226 	cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
227 	cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
228 	cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
229 }
230 
/*
 * Start telemetry collection.
 *
 * Sends the TL_START admin command handing the firmware the DMA region and
 * the ring-pair index table; the firmware fills in slice_cnt, which is then
 * validated (and telemetry stopped again if the values are bogus). On
 * success the derived command-queue counts are computed, @state is recorded
 * as the number of active history buffers, and the sampling work is queued.
 *
 * Returns 0 on success or a negative error code.
 */
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
	struct adf_telemetry *telemetry = accel_dev->telemetry;
	struct device *dev = &GET_DEV(accel_dev);
	size_t layout_sz = tl_data->layout_sz;
	int ret;

	ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
				      layout_sz, telemetry->rp_num_indexes,
				      &telemetry->slice_cnt);
	if (ret) {
		dev_err(dev, "failed to start telemetry\n");
		return ret;
	}

	/* Firmware-provided slice counts must stay within device limits. */
	ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
	if (ret) {
		dev_err(dev, "invalid value returned by FW\n");
		adf_send_admin_tl_stop(accel_dev);
		return ret;
	}

	adf_set_cmdq_cnt(accel_dev, tl_data);

	/* @state doubles as the history-buffer count used by the sampler. */
	telemetry->hbuffs = state;
	atomic_set(&telemetry->state, state);

	adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());

	return 0;
}
263 
adf_tl_init(struct adf_accel_dev * accel_dev)264 int adf_tl_init(struct adf_accel_dev *accel_dev)
265 {
266 	struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
267 	u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
268 	struct device *dev = &GET_DEV(accel_dev);
269 	struct adf_telemetry *telemetry;
270 	unsigned int i;
271 	int ret;
272 
273 	ret = validate_tl_data(tl_data);
274 	if (ret)
275 		return ret;
276 
277 	ret = adf_tl_alloc_mem(accel_dev);
278 	if (ret) {
279 		dev_err(dev, "failed to initialize: %d\n", ret);
280 		return ret;
281 	}
282 
283 	telemetry = accel_dev->telemetry;
284 	telemetry->accel_dev = accel_dev;
285 
286 	mutex_init(&telemetry->wr_lock);
287 	mutex_init(&telemetry->regs_hist_lock);
288 	INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);
289 
290 	for (i = 0; i < max_rp; i++)
291 		telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;
292 
293 	return 0;
294 }
295 
adf_tl_start(struct adf_accel_dev * accel_dev)296 int adf_tl_start(struct adf_accel_dev *accel_dev)
297 {
298 	struct device *dev = &GET_DEV(accel_dev);
299 
300 	if (!accel_dev->telemetry)
301 		return -EOPNOTSUPP;
302 
303 	if (!is_tl_supported(accel_dev)) {
304 		dev_info(dev, "feature not supported by FW\n");
305 		adf_tl_free_mem(accel_dev);
306 		return -EOPNOTSUPP;
307 	}
308 
309 	return 0;
310 }
311 
adf_tl_stop(struct adf_accel_dev * accel_dev)312 void adf_tl_stop(struct adf_accel_dev *accel_dev)
313 {
314 	if (!accel_dev->telemetry)
315 		return;
316 
317 	if (atomic_read(&accel_dev->telemetry->state))
318 		adf_tl_halt(accel_dev);
319 }
320 
adf_tl_shutdown(struct adf_accel_dev * accel_dev)321 void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
322 {
323 	if (!accel_dev->telemetry)
324 		return;
325 
326 	adf_tl_free_mem(accel_dev);
327 }
328