/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/conf.h>
#include <sys/capsicum.h>
#include <sys/kdb.h>
#include <sys/condvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#define ADF_UIO_GET_NAME(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->name)
#define ADF_UIO_GET_TYPE(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->type)
#define ADF_UIO_GET_BAR(accel_dev)                                             \
	(GET_HW_DATA(accel_dev)->get_etr_bar_id(GET_HW_DATA(accel_dev)))

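/*
 * Each accel device is exposed to user space as a character device.  Only
 * the ioctl and mmap_single entry points are implemented; all other cdev
 * operations use the defaults.
 */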
static d_ioctl_t adf_uio_ioctl;
static d_mmap_single_t adf_uio_mmap_single;

static struct cdevsw adf_uio_cdevsw = { .d_ioctl = adf_uio_ioctl,
					.d_mmap_single = adf_uio_mmap_single,
					.d_version = D_VERSION,
					.d_name = "qat" };

struct adf_uio_open_bundle {
	struct adf_uio_control_accel *accel;
	int bundle;
	struct file **mem_files;
	int num_mem_files;
};

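/*
 * cdevpriv destructor: runs when the last reference to an open handle goes
 * away.  It cleans up any orphaned rings on the bundle, drops the held
 * memory-fd references, removes this process's ring bookkeeping, and
 * releases the device reference taken in adf_alloc_bundle().
 */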
static void
adf_release_bundle(void *arg)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings, *tmp;
	int i = 0;

	handle = arg;
	accel = handle->accel;
	bundle = &accel->bundle[handle->bundle];

	mutex_lock(&bundle->lock);
	adf_uio_do_cleanup_orphan(bundle->hardware_bundle_number, accel);
	mutex_unlock(&bundle->lock);

	for (i = 0; i < handle->num_mem_files; i++) {
		/*
		 * Similar to the garbage collection of orphaned file
		 * descriptor references in UNIX domain socket control
		 * messages, the current thread isn't relevant to the
		 * file descriptor reference being released.  In
		 * particular, the current thread does not hold any
		 * advisory file locks on these file descriptors.
		 */
		fdrop(handle->mem_files[i], NULL);
	}
	free(handle->mem_files, M_QAT);

	mtx_lock(&accel->lock);

	mutex_lock(&bundle->list_lock);
	list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
	{
		if (instance_rings->user_pid == curproc->p_pid) {
			list_del(&instance_rings->list);
			free(instance_rings, M_QAT);
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	adf_dev_put(accel->accel_dev);
	accel->num_handles--;
	free(handle, M_QAT);
	if (!accel->num_handles) {
		/*
		 * The wakeup takes effect only after accel->lock is
		 * released; adf_uio_remove() re-checks num_handles once
		 * cv_timedwait_sig() returns.
		 */
		cv_broadcast(&accel->cleanup_ok);
	}
	mtx_unlock(&accel->lock);
}

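/*
 * Take a reference on the file underlying a caller-supplied memory fd and
 * stash it in the per-open handle.  The reference is held until the handle
 * is released in adf_release_bundle().
 */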
static int
adf_add_mem_fd(struct adf_accel_dev *accel_dev, int mem_fd)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct file *fp, **new_files;
	cap_rights_t rights;
	int error = -1, old_count = 0;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	error = fget(curthread, mem_fd, cap_rights_init(&rights), &fp);
	if (error) {
		printf("QAT: Failed to fetch file pointer - line: %d\n",
		       __LINE__);
		return (error);
	}

	accel = accel_dev->accel;
	mtx_lock(&accel->lock);
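	/*
	 * Grow the mem_files array.  malloc(M_WAITOK) may sleep, so the
	 * lock is dropped around the allocation and the count re-checked
	 * afterwards; if another thread changed it in the meantime, the
	 * allocation is retried with the new size.
	 */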
	for (;;) {
		old_count = handle->num_mem_files;
		mtx_unlock(&accel->lock);
		new_files = malloc((old_count + 1) * sizeof(*new_files),
				   M_QAT,
				   M_WAITOK);
		mtx_lock(&accel->lock);
		if (old_count == handle->num_mem_files) {
			if (old_count != 0) {
				memcpy(new_files,
				       handle->mem_files,
				       old_count * sizeof(*new_files));
				free(handle->mem_files, M_QAT);
			}
			handle->mem_files = new_files;
			new_files[old_count] = fp;
			handle->num_mem_files++;
			break;
		} else
			free(new_files, M_QAT);
	}
	mtx_unlock(&accel->lock);
	return (0);
}

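/*
 * Build a VM object backed by the physical CSR window of one ring bundle so
 * that user space can mmap() the bundle registers.  The object is marked
 * uncacheable, as required for device registers.
 */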
static vm_object_t
adf_uio_map_bar(struct adf_accel_dev *accel_dev, uint8_t bank_offset)
{
	unsigned int ring_bundle_size, offset;
	struct sglist *sg = NULL;
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	vm_object_t obj;

	ring_bundle_size = csr_info->ring_bundle_size;
	offset = bank_offset * ring_bundle_size;

	sg = sglist_alloc(1, M_WAITOK);

	/*
	 * Starting with newer HW there is an additional offset for the
	 * bundle CSRs.
	 */
	sglist_append_phys(sg,
			   accel->bar->base_addr + offset +
			       csr_info->csr_addr_offset,
			   ring_bundle_size);

	obj = vm_pager_allocate(
	    OBJT_SG, sg, ring_bundle_size, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		VM_OBJECT_WLOCK(obj);
		vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
	}
	sglist_free(sg);

	return (obj);
}

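/*
 * Attach per-open state for one ring bundle: allocate the handle, take a
 * reference on the accel device, and register adf_release_bundle() as the
 * cdevpriv destructor so the state is torn down when the descriptor is
 * closed.
 */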
static int
adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	int error;

	if (bundle_nr < 0 || bundle_nr >= GET_MAX_BANKS(accel_dev)) {
		printf("ERROR in %s (%d) %d\n", __func__, bundle_nr, __LINE__);
		return (EINVAL);
	}

	accel = accel_dev->accel;
	handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
	if (!handle) {
		printf("ERROR in adf_alloc_bundle %d\n", __LINE__);
		return (ENOMEM);
	}
	handle->accel = accel;
	handle->bundle = bundle_nr;

	mtx_lock(&accel->lock);
	adf_dev_get(accel_dev);
	accel->num_handles++;
	mtx_unlock(&accel->lock);

	error = devfs_set_cdevpriv(handle, adf_release_bundle);
	if (error) {
		adf_release_bundle(handle);
		device_printf(GET_DEV(accel_dev),
			      "ERROR in adf_alloc_bundle %d\n",
			      __LINE__);
		return (error);
	}

	return (0);
}

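/*
 * ioctl entry point.  IOCTL_GET_BUNDLE_SIZE and IOCTL_GET_ACCEL_TYPE report
 * device geometry, IOCTL_ALLOC_BUNDLE binds this open file to a ring bundle,
 * and IOCTL_ADD_MEM_FD holds a reference on a caller-supplied memory fd for
 * the lifetime of the handle.
 */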
static int
adf_uio_ioctl(struct cdev *dev,
	      u_long cmd,
	      caddr_t data,
	      int fflag,
	      struct thread *td)
{
	struct adf_accel_dev *accel_dev = dev->si_drv1;
	struct adf_hw_csr_info *csr_info = NULL;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return (EFAULT);
	}

	csr_info = &accel_dev->hw_device->csr_info;

	switch (cmd) {
	case IOCTL_GET_BUNDLE_SIZE:
		*(uint32_t *)data = csr_info->ring_bundle_size;
		break;
	case IOCTL_ALLOC_BUNDLE:
		return (adf_alloc_bundle(accel_dev, *(int *)data));
	case IOCTL_GET_ACCEL_TYPE:
		*(uint32_t *)data = ADF_UIO_GET_TYPE(accel_dev);
		break;
	case IOCTL_ADD_MEM_FD:
		return (adf_add_mem_fd(accel_dev, *(int *)data));
	default:
		return (ENOTTY);
	}
	return (0);
}

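/*
 * d_mmap_single handler: record the calling pid in the bundle's ring-owner
 * list and hand the VM system a referenced vm_object covering the bundle's
 * CSR window.
 */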
static int
adf_uio_mmap_single(struct cdev *dev,
		    vm_ooffset_t *offset,
		    vm_size_t size,
		    struct vm_object **object,
		    int nprot)
{
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings;
	int error;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	if (!handle->accel) {
		printf("QAT: Error - no accel in handle\n");
		return (EINVAL);
	}
	accel = handle->accel;

	if (!accel->accel_dev) {
		printf("QAT: Error - no accel_dev in accel\n");
		return (EINVAL);
	}

	bundle = &accel->bundle[handle->bundle];
	if (!bundle->obj) {
		printf("QAT: Error - no vm_object in bundle\n");
		return (EINVAL);
	}

	/* Add the calling pid to the bundle list. */
	instance_rings =
	    malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
	if (!instance_rings) {
		printf("QAT: Memory allocation error - line: %d\n", __LINE__);
		return (ENOMEM);
	}
	instance_rings->user_pid = curproc->p_pid;
	instance_rings->ring_mask = 0;
	mutex_lock(&bundle->list_lock);
	list_add_tail(&instance_rings->list, &bundle->list);
	mutex_unlock(&bundle->list_lock);

	vm_object_reference(bundle->obj);
	*object = bundle->obj;
	return (0);
}

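/*
 * Initialize per-bundle bookkeeping.  User-space bundle i is mapped onto the
 * last nb_bundles hardware banks, i.e. bank GET_MAX_BANKS() - nb_bundles + i.
 */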
static inline void
adf_uio_init_accel_ctrl(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	struct qat_uio_bundle_dev *priv;
	unsigned int i;

	accel->nb_bundles = nb_bundles;
	accel->total_used_bundles = 0;

	for (i = 0; i < nb_bundles; i++) {
		/* Initialize the bundle. */
		bundle = &accel->bundle[i];
		priv = &bundle->uio_priv;
		bundle->hardware_bundle_number =
		    GET_MAX_BANKS(accel_dev) - nb_bundles + i;

		INIT_LIST_HEAD(&bundle->list);
		priv->bundle = bundle;
		priv->accel = accel;

		mutex_init(&bundle->lock);
		mutex_init(&bundle->list_lock);
		if (!accel->bar)
			printf("ERROR: bar not defined in accel\n");
		else
			bundle->csr_addr = (void *)accel->bar->virt_addr;
	}
}

/**
 * Initialize the bundle VM objects (BAR mappings) at device start.
 */
static inline void
adf_uio_init_bundle_dev(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	for (i = 0; i < nb_bundles; i++) {
		bundle = &accel->bundle[i];
		bundle->obj =
		    adf_uio_map_bar(accel_dev, bundle->hardware_bundle_number);
		if (!bundle->obj) {
			device_printf(GET_DEV(accel_dev),
				      "ERROR in %s %d\n",
				      __func__,
				      __LINE__);
		}
	}
}

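/*
 * Create the user-space interface for one accel device: a character device
 * node named after the device unit, plus one mmap-able VM object per ring
 * bundle.  If no user bundles are configured, this is a no-op.
 */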
int
adf_uio_register(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = NULL;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
	int nb_bundles;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return (EFAULT);
	}

	if (adf_cfg_get_param_value(
		accel_dev, ADF_GENERAL_SEC, ADF_FIRST_USER_BUNDLE, val)) {
		nb_bundles = 0;
	} else {
		nb_bundles = GET_MAX_BANKS(accel_dev);
	}

	if (nb_bundles) {
		accel = malloc(sizeof(*accel) +
				   nb_bundles *
				       sizeof(struct adf_uio_control_bundle),
			       M_QAT,
			       M_WAITOK | M_ZERO);
		mtx_init(&accel->lock, "qat uio", NULL, MTX_DEF);
		accel->accel_dev = accel_dev;
		accel->bar = accel_dev->accel_pci_dev.pci_bars +
		    ADF_UIO_GET_BAR(accel_dev);

		adf_uio_init_accel_ctrl(accel, accel_dev, nb_bundles);
		accel->cdev = make_dev(&adf_uio_cdevsw,
				       0,
				       UID_ROOT,
				       GID_WHEEL,
				       0600,
				       "%s",
				       device_get_nameunit(GET_DEV(accel_dev)));
		if (accel->cdev == NULL) {
			mtx_destroy(&accel->lock);
			goto fail_clean;
		}
		accel->cdev->si_drv1 = accel_dev;
		accel_dev->accel = accel;
		cv_init(&accel->cleanup_ok, "uio_accel_cv");

		adf_uio_init_bundle_dev(accel, accel_dev, nb_bundles);
	}
	return (0);
fail_clean:
	free(accel, M_QAT);
	device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n");
	return (ENODEV);
}

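/*
 * Tear down the user-space interface.  The device node is destroyed first
 * so no new opens can arrive, then the code waits (re-checking every three
 * seconds) for all outstanding handles to be released before freeing the
 * accel state.
 */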
void
adf_uio_remove(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	if (accel) {
		/* Unmap all bars. */
		for (i = 0; i < accel->nb_bundles; i++) {
			bundle = &accel->bundle[i];
			vm_object_deallocate(bundle->obj);
		}

		destroy_dev(accel->cdev);
		mtx_lock(&accel->lock);
		while (accel->num_handles) {
			cv_timedwait_sig(&accel->cleanup_ok,
					 &accel->lock,
					 3 * hz);
		}
		mtx_unlock(&accel->lock);
		mtx_destroy(&accel->lock);
		cv_destroy(&accel->cleanup_ok);
		free(accel, M_QAT);
		accel_dev->accel = NULL;
	}
}