xref: /freebsd/sys/dev/qat/qat_common/adf_freebsd_uio.c (revision ccfd87fe2ac0e2e6aeb1911a7d7cce6712a8564f)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/conf.h>
#include <sys/capsicum.h>
#include <sys/kdb.h>
#include <sys/condvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

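/*
 * Convenience accessors for the per-device hardware data: the device
 * class name and type, and the BAR that exposes the ring (ETR) CSRs.
 */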
#define ADF_UIO_GET_NAME(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->name)
#define ADF_UIO_GET_TYPE(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->type)
#define ADF_UIO_GET_BAR(accel_dev)                                             \
	(GET_HW_DATA(accel_dev)->get_etr_bar_id(GET_HW_DATA(accel_dev)))

static d_ioctl_t adf_uio_ioctl;
static d_mmap_single_t adf_uio_mmap_single;

static struct cdevsw adf_uio_cdevsw = { .d_ioctl = adf_uio_ioctl,
					.d_mmap_single = adf_uio_mmap_single,
					.d_version = D_VERSION,
					.d_name = "qat" };

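/*
 * Per-open state, attached to the character device via cdevpriv.  It
 * records which bundle this descriptor claimed and holds references
 * to any memory file descriptors registered through IOCTL_ADD_MEM_FD;
 * those references are dropped in adf_release_bundle().
 */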
struct adf_uio_open_bundle {
	struct adf_uio_control_accel *accel;
	int bundle;
	struct file **mem_files;
	int num_mem_files;
};

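/*
 * cdevpriv destructor, invoked when the last reference to the open
 * descriptor goes away.  It cleans up any orphaned rings on the
 * bundle, drops the memory file references taken in adf_add_mem_fd(),
 * unlinks the instance-ring entry belonging to the current process,
 * and releases the accel device reference taken in adf_alloc_bundle().
 */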
static void
adf_release_bundle(void *arg)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings, *tmp;
	int i = 0;

	handle = arg;
	accel = handle->accel;
	bundle = &accel->bundle[handle->bundle];

	mutex_lock(&bundle->lock);
	adf_uio_do_cleanup_orphan(bundle->hardware_bundle_number, accel);
	mutex_unlock(&bundle->lock);

	for (i = 0; i < handle->num_mem_files; i++) {
		/*
		 * Similar to the garbage collection of orphaned file
		 * descriptor references in UNIX domain socket control
		 * messages, the current thread isn't relevant to the
		 * file descriptor reference being released.  In
		 * particular, the current thread does not hold any
		 * advisory file locks on these file descriptors.
		 */
		fdrop(handle->mem_files[i], NULL);
	}
	free(handle->mem_files, M_QAT);

	mtx_lock(&accel->lock);

	mutex_lock(&bundle->list_lock);
	list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
	{
		if (instance_rings->user_pid == curproc->p_pid) {
			list_del(&instance_rings->list);
			free(instance_rings, M_QAT);
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	adf_dev_put(accel->accel_dev);
	accel->num_handles--;
	free(handle, M_QAT);
	if (!accel->num_handles) {
		cv_broadcast(&accel->cleanup_ok);
		/*
		 * Waiters can only proceed once accel->lock is
		 * released below.
		 */
	}
	mtx_unlock(&accel->lock);
}

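/*
 * IOCTL_ADD_MEM_FD handler: take a reference on the file backing
 * mem_fd and append it to the handle's file array, growing the array
 * by one.  The references are released in adf_release_bundle().
 */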
static int
adf_add_mem_fd(struct adf_accel_dev *accel_dev, int mem_fd)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct file *fp, **new_files;
	cap_rights_t rights;
	int error = -1, old_count = 0;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	error = fget(curthread, mem_fd, cap_rights_init(&rights), &fp);
	if (error) {
		printf("%s: failed to fetch file pointer (line %d)\n",
		       __func__,
		       __LINE__);
		return (error);
	}

	accel = accel_dev->accel;
	mtx_lock(&accel->lock);
	for (;;) {
		/*
		 * Drop the lock while allocating, then retry if another
		 * thread grew the array in the meantime.
		 */
		old_count = handle->num_mem_files;
		mtx_unlock(&accel->lock);
		new_files = malloc((old_count + 1) * sizeof(*new_files),
				   M_QAT,
				   M_WAITOK);
		mtx_lock(&accel->lock);
		if (old_count == handle->num_mem_files) {
			if (old_count != 0) {
				memcpy(new_files,
				       handle->mem_files,
				       old_count * sizeof(*new_files));
				free(handle->mem_files, M_QAT);
			}
			handle->mem_files = new_files;
			new_files[old_count] = fp;
			handle->num_mem_files++;
			break;
		} else
			free(new_files, M_QAT);
	}
	mtx_unlock(&accel->lock);
	return (0);
}

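/*
 * Wrap the physical CSR window of one hardware bundle in an OBJT_SG
 * VM object so it can be handed out by adf_uio_mmap_single().  The
 * object's pages are marked uncacheable, as is required for device
 * registers.
 */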
static vm_object_t
adf_uio_map_bar(struct adf_accel_dev *accel_dev, uint8_t bank_offset)
{
	unsigned int ring_bundle_size, offset;
	struct sglist *sg = NULL;
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	vm_object_t obj;

	ring_bundle_size = csr_info->ring_bundle_size;
	offset = bank_offset * ring_bundle_size;

	sg = sglist_alloc(1, M_WAITOK);

	/*
	 * Newer hardware generations add an extra offset for the
	 * bundle CSRs.
	 */
	sglist_append_phys(sg,
			   accel->bar->base_addr + offset +
			       csr_info->csr_addr_offset,
			   ring_bundle_size);

	obj = vm_pager_allocate(
	    OBJT_SG, sg, ring_bundle_size, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		VM_OBJECT_WLOCK(obj);
		vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
	}
	sglist_free(sg);

	return (obj);
}

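/*
 * IOCTL_ALLOC_BUNDLE handler: claim bundle_nr for the calling
 * descriptor by allocating the per-open handle, taking a reference on
 * the accel device, and registering adf_release_bundle() as the
 * cdevpriv destructor.
 */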
static int
adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	int error;

	if (bundle_nr < 0 || bundle_nr >= GET_MAX_BANKS(accel_dev)) {
		printf("%s: invalid bundle number %d (line %d)\n",
		       __func__,
		       bundle_nr,
		       __LINE__);
		return (EINVAL);
	}

	accel = accel_dev->accel;
	/* M_WAITOK allocations cannot fail, so no NULL check is needed. */
	handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
	handle->accel = accel;
	handle->bundle = bundle_nr;

	mtx_lock(&accel->lock);
	adf_dev_get(accel_dev);
	accel->num_handles++;
	mtx_unlock(&accel->lock);

	error = devfs_set_cdevpriv(handle, adf_release_bundle);
	if (error) {
		adf_release_bundle(handle);
		device_printf(GET_DEV(accel_dev),
			      "%s: devfs_set_cdevpriv failed (line %d)\n",
			      __func__,
			      __LINE__);
		return (error);
	}

	return (0);
}

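/*
 * Character device ioctl entry point.  A userspace consumer queries
 * the bundle size and accelerator type, claims a bundle, and then
 * mmap()s the bundle CSRs.  A minimal sketch of that sequence
 * (illustrative only: it assumes the IOCTL_* request codes from
 * adf_uio.h, omits error handling, and the device node name comes
 * from device_get_nameunit()):
 *
 *	int fd = open("/dev/qat0", O_RDWR);
 *	uint32_t size;
 *	ioctl(fd, IOCTL_GET_BUNDLE_SIZE, &size);
 *	int bundle = 0;
 *	ioctl(fd, IOCTL_ALLOC_BUNDLE, &bundle);
 *	void *csrs = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 */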
static int
adf_uio_ioctl(struct cdev *dev,
	      u_long cmd,
	      caddr_t data,
	      int fflag,
	      struct thread *td)
{
	struct adf_accel_dev *accel_dev = dev->si_drv1;
	struct adf_hw_csr_info *csr_info = NULL;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return (EFAULT);
	}

	csr_info = &accel_dev->hw_device->csr_info;

	switch (cmd) {
	case IOCTL_GET_BUNDLE_SIZE:
		*(uint32_t *)data = csr_info->ring_bundle_size;
		break;
	case IOCTL_ALLOC_BUNDLE:
		return (adf_alloc_bundle(accel_dev, *(int *)data));
	case IOCTL_GET_ACCEL_TYPE:
		*(uint32_t *)data = ADF_UIO_GET_TYPE(accel_dev);
		break;
	case IOCTL_ADD_MEM_FD:
		return (adf_add_mem_fd(accel_dev, *(int *)data));
	default:
		return (ENOTTY);
	}
	return (0);
}

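/*
 * d_mmap_single handler: return the pre-created VM object for the
 * bundle claimed by this descriptor and record the calling pid in the
 * bundle's instance-ring list.  The reference taken here is consumed
 * by the VM system when the mapping is destroyed.
 */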
static int
adf_uio_mmap_single(struct cdev *dev,
		    vm_ooffset_t *offset,
		    vm_size_t size,
		    struct vm_object **object,
		    int nprot)
{
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings;
	int error;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	if (!handle->accel) {
		printf("QAT: Error - no accel in handle\n");
		return (EINVAL);
	}
	accel = handle->accel;

	if (!accel->accel_dev) {
		printf("QAT: Error - no accel_dev in accel\n");
		return (EINVAL);
	}

	bundle = &accel->bundle[handle->bundle];
	if (!bundle->obj) {
		printf("QAT: Error - no vm_object in bundle\n");
		return (EINVAL);
	}

	/*
	 * Add this pid to the bundle list.  M_WAITOK allocations cannot
	 * fail, so no NULL check is needed.
	 */
	instance_rings =
	    malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
	instance_rings->user_pid = curproc->p_pid;
	instance_rings->ring_mask = 0;
	mutex_lock(&bundle->list_lock);
	list_add_tail(&instance_rings->list, &bundle->list);
	mutex_unlock(&bundle->list_lock);

	vm_object_reference(bundle->obj);
	*object = bundle->obj;
	return (0);
}

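/*
 * Initialize the accel control structure: nb_bundles user bundles,
 * mapped onto the highest-numbered hardware banks of the device.
 */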
static inline void
adf_uio_init_accel_ctrl(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	struct qat_uio_bundle_dev *priv;
	unsigned int i;

	accel->nb_bundles = nb_bundles;
	accel->total_used_bundles = 0;

	for (i = 0; i < nb_bundles; i++) {
		/* Initialize the bundle. */
		bundle = &accel->bundle[i];
		priv = &bundle->uio_priv;
		bundle->hardware_bundle_number =
		    GET_MAX_BANKS(accel_dev) - nb_bundles + i;

		INIT_LIST_HEAD(&bundle->list);
		priv->bundle = bundle;
		priv->accel = accel;

		mutex_init(&bundle->lock);
		mutex_init(&bundle->list_lock);
		if (!accel->bar)
			printf("ERROR: bar not defined in accel\n");
		else
			bundle->csr_addr = (void *)accel->bar->virt_addr;
	}
}

/**
 * Map the bundle BARs when the device starts.
 */
static inline void
adf_uio_init_bundle_dev(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	for (i = 0; i < nb_bundles; i++) {
		bundle = &accel->bundle[i];
		bundle->obj =
		    adf_uio_map_bar(accel_dev, bundle->hardware_bundle_number);
		if (!bundle->obj) {
			device_printf(GET_DEV(accel_dev),
				      "%s: failed to map bundle %u (line %d)\n",
				      __func__,
				      i,
				      __LINE__);
		}
	}
}

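/*
 * Create the character device for accel_dev and set up one
 * mmap()-able bundle per user bank.  Undone by adf_uio_remove().
 */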
int
adf_uio_register(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = NULL;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
	int nb_bundles;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return (EFAULT);
	}

	if (adf_cfg_get_param_value(
		accel_dev, ADF_GENERAL_SEC, ADF_FIRST_USER_BUNDLE, val)) {
		nb_bundles = 0;
	} else {
		nb_bundles = GET_MAX_BANKS(accel_dev);
	}

	if (nb_bundles) {
		accel = malloc(sizeof(*accel) +
				   nb_bundles *
				       sizeof(struct adf_uio_control_bundle),
			       M_QAT,
			       M_WAITOK | M_ZERO);
		mtx_init(&accel->lock, "qat uio", NULL, MTX_DEF);
		accel->accel_dev = accel_dev;
		accel->bar = accel_dev->accel_pci_dev.pci_bars +
		    ADF_UIO_GET_BAR(accel_dev);

		adf_uio_init_accel_ctrl(accel, accel_dev, nb_bundles);
		accel->cdev = make_dev(&adf_uio_cdevsw,
				       0,
				       UID_ROOT,
				       GID_WHEEL,
				       0600,
				       "%s",
				       device_get_nameunit(GET_DEV(accel_dev)));
		if (accel->cdev == NULL) {
			mtx_destroy(&accel->lock);
			goto fail_clean;
		}
		accel->cdev->si_drv1 = accel_dev;
		accel_dev->accel = accel;
		cv_init(&accel->cleanup_ok, "uio_accel_cv");

		adf_uio_init_bundle_dev(accel, accel_dev, nb_bundles);
	}
	return (0);
fail_clean:
	free(accel, M_QAT);
	device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n");
	return (ENODEV);
}

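/*
 * Tear down the state created by adf_uio_register(): release the
 * bundle VM objects, destroy the device node, and wait for the open
 * handles to drain before freeing the accel structure.
 */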
void
adf_uio_remove(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	if (accel) {
		/*
		 * Unmap all of the bundle BARs, skipping any bundle
		 * whose BAR mapping failed in adf_uio_init_bundle_dev().
		 */
		for (i = 0; i < accel->nb_bundles; i++) {
			bundle = &accel->bundle[i];
			if (bundle->obj != NULL)
				vm_object_deallocate(bundle->obj);
		}

		destroy_dev(accel->cdev);
		mtx_lock(&accel->lock);
		while (accel->num_handles) {
			cv_timedwait_sig(&accel->cleanup_ok,
					 &accel->lock,
					 3 * hz);
		}
		mtx_unlock(&accel->lock);
		mtx_destroy(&accel->lock);
		cv_destroy(&accel->cleanup_ok);
		free(accel, M_QAT);
		accel_dev->accel = NULL;
	}
}