/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/conf.h>
#include <sys/capsicum.h>
#include <sys/kdb.h>
#include <sys/condvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#define ADF_UIO_GET_NAME(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->name)
#define ADF_UIO_GET_TYPE(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->type)
#define ADF_UIO_GET_BAR(accel_dev) \
	(GET_HW_DATA(accel_dev)->get_etr_bar_id(GET_HW_DATA(accel_dev)))

static d_ioctl_t adf_uio_ioctl;
static d_mmap_single_t adf_uio_mmap_single;

static struct cdevsw adf_uio_cdevsw = { .d_ioctl = adf_uio_ioctl,
					.d_mmap_single = adf_uio_mmap_single,
					.d_version = D_VERSION,
					.d_name = "qat" };

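/*
 * Expected user-space flow (a sketch only; the real consumer is the QAT
 * user-space library, and the "/dev/qat0" node name below is hypothetical:
 * the node is named after the device's nameunit, see adf_uio_register()):
 *
 *	fd = open("/dev/qat0", O_RDWR);
 *	ioctl(fd, IOCTL_GET_BUNDLE_SIZE, &bundle_size);
 *	ioctl(fd, IOCTL_ALLOC_BUNDLE, &bundle_nr);
 *	csr = mmap(NULL, bundle_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 */
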
struct adf_uio_open_bundle {
	struct adf_uio_control_accel *accel;
	int bundle;
	struct file **mem_files;
	int num_mem_files;
};

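/*
 * cdevpriv destructor, invoked when the last reference to a per-open
 * handle goes away.  It reclaims any rings the process left behind,
 * drops the file references taken by IOCTL_ADD_MEM_FD, removes the
 * process's instance-ring entry, and wakes adf_uio_remove() once the
 * final handle is gone.
 */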
static void
adf_release_bundle(void *arg)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings, *tmp;
	int i = 0;

	handle = arg;
	accel = handle->accel;
	bundle = &accel->bundle[handle->bundle];

	mutex_lock(&bundle->lock);
	adf_uio_do_cleanup_orphan(bundle->hardware_bundle_number, accel);
	mutex_unlock(&bundle->lock);

	for (i = 0; i < handle->num_mem_files; i++) {
		/*
		 * Similar to the garbage collection of orphaned file
		 * descriptor references in UNIX domain socket control
		 * messages, the current thread isn't relevant to the
		 * file descriptor reference being released.  In
		 * particular, the current thread does not hold any
		 * advisory file locks on these file descriptors.
		 */
		fdrop(handle->mem_files[i], NULL);
	}
	free(handle->mem_files, M_QAT);

	mtx_lock(&accel->lock);

	mutex_lock(&bundle->list_lock);
	list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
	{
		if (instance_rings->user_pid == curproc->p_pid) {
			list_del(&instance_rings->list);
			free(instance_rings, M_QAT);
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	adf_dev_put(accel->accel_dev);
	accel->num_handles--;
	free(handle, M_QAT);
	if (!accel->num_handles) {
		cv_broadcast(&accel->cleanup_ok);
		/* The wakeup takes effect once accel->lock is released. */
	}
	mtx_unlock(&accel->lock);
}

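/*
 * IOCTL_ADD_MEM_FD handler: take a reference on the caller-supplied
 * file descriptor and record it in the per-open handle so the backing
 * memory outlives the descriptor table of the opening process.  The
 * allocation loop below drops accel->lock around the M_WAITOK malloc()
 * and retries if another thread grew the array in the meantime.
 */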
static int
adf_add_mem_fd(struct adf_accel_dev *accel_dev, int mem_fd)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	struct file *fp, **new_files;
	cap_rights_t rights;
	int error = -1, old_count = 0;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	error = fget(curthread, mem_fd, cap_rights_init(&rights), &fp);
	if (error) {
		printf("Failed to fetch file pointer from current process %d\n",
		       __LINE__);
		return (error);
	}

	accel = accel_dev->accel;
	mtx_lock(&accel->lock);
	for (;;) {
		old_count = handle->num_mem_files;
		mtx_unlock(&accel->lock);
		new_files = malloc((old_count + 1) * sizeof(*new_files),
				   M_QAT,
				   M_WAITOK);
		mtx_lock(&accel->lock);
		if (old_count == handle->num_mem_files) {
			if (old_count != 0) {
				memcpy(new_files,
				       handle->mem_files,
				       old_count * sizeof(*new_files));
				free(handle->mem_files, M_QAT);
			}
			handle->mem_files = new_files;
			new_files[old_count] = fp;
			handle->num_mem_files++;
			break;
		} else
			free(new_files, M_QAT);
	}
	mtx_unlock(&accel->lock);
	return (0);
}

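/*
 * Create an OBJT_SG VM object backed by the physical CSR window of one
 * ring bundle so it can be handed out through mmap().  The memory
 * attribute is set to uncacheable because the pages cover device
 * registers.
 */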
static vm_object_t
adf_uio_map_bar(struct adf_accel_dev *accel_dev, uint8_t bank_offset)
{
	unsigned int ring_bundle_size, offset;
	struct sglist *sg = NULL;
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
	vm_object_t obj;

	ring_bundle_size = csr_info->ring_bundle_size;
	offset = bank_offset * ring_bundle_size;

	sg = sglist_alloc(1, M_WAITOK);

	/*
	 * Newer hardware adds an extra offset for the bundle CSRs.
	 */
	sglist_append_phys(sg,
			   accel->bar->base_addr + offset +
			       csr_info->csr_addr_offset,
			   ring_bundle_size);

	obj = vm_pager_allocate(
	    OBJT_SG, sg, ring_bundle_size, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		VM_OBJECT_WLOCK(obj);
		vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
	}
	sglist_free(sg);

	return obj;
}

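/*
 * IOCTL_ALLOC_BUNDLE handler: validate the requested bundle number,
 * allocate a per-open handle, pin the accel device, and attach the
 * handle to the open file with devfs_set_cdevpriv() so that
 * adf_release_bundle() runs when the file is closed.
 */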
static int
adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
{
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_open_bundle *handle = NULL;
	int error;

	if (bundle_nr < 0 || bundle_nr >= GET_MAX_BANKS(accel_dev)) {
		printf("ERROR in %s (%d) %d\n", __func__, bundle_nr, __LINE__);
		return EINVAL;
	}

	accel = accel_dev->accel;
	handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
	handle->accel = accel;
	handle->bundle = bundle_nr;

	mtx_lock(&accel->lock);
	adf_dev_get(accel_dev);
	accel->num_handles++;
	mtx_unlock(&accel->lock);

	error = devfs_set_cdevpriv(handle, adf_release_bundle);
	if (error) {
		adf_release_bundle(handle);
		device_printf(GET_DEV(accel_dev),
			      "ERROR in adf_alloc_bundle %d\n",
			      __LINE__);
		return (error);
	}

	return (0);
}

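/*
 * Character-device ioctl entry point: queries for bundle size and
 * accelerator type complete inline; bundle allocation and memory-fd
 * registration are delegated to the helpers above.
 */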
static int
adf_uio_ioctl(struct cdev *dev,
	      u_long cmd,
	      caddr_t data,
	      int fflag,
	      struct thread *td)
{
	struct adf_accel_dev *accel_dev = dev->si_drv1;
	struct adf_hw_csr_info *csr_info = NULL;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return EFAULT;
	}

	csr_info = &accel_dev->hw_device->csr_info;

	switch (cmd) {
	case IOCTL_GET_BUNDLE_SIZE:
		*(uint32_t *)data = csr_info->ring_bundle_size;
		break;
	case IOCTL_ALLOC_BUNDLE:
		return (adf_alloc_bundle(accel_dev, *(int *)data));
	case IOCTL_GET_ACCEL_TYPE:
		*(uint32_t *)data = ADF_UIO_GET_TYPE(accel_dev);
		break;
	case IOCTL_ADD_MEM_FD:
		return (adf_add_mem_fd(accel_dev, *(int *)data));
	default:
		return (ENOTTY);
	}
	return (0);
}

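/*
 * mmap() entry point: hand back an extra reference to the VM object
 * that was created for the caller's bundle at registration time, and
 * record the calling pid so the process's rings can be reclaimed if it
 * exits without cleaning up.
 */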
static int
adf_uio_mmap_single(struct cdev *dev,
		    vm_ooffset_t *offset,
		    vm_size_t size,
		    struct vm_object **object,
		    int nprot)
{
	struct adf_uio_open_bundle *handle = NULL;
	struct adf_uio_control_accel *accel = NULL;
	struct adf_uio_control_bundle *bundle = NULL;
	struct adf_uio_instance_rings *instance_rings;
	int error;

	error = devfs_get_cdevpriv((void **)&handle);
	if (error)
		return (error);

	if (!handle->accel) {
		printf("QAT: Error - no accel in handle\n");
		return EINVAL;
	}
	accel = handle->accel;

	if (!accel->accel_dev) {
		printf("QAT: Error - no accel_dev in accel\n");
		return EINVAL;
	}

	bundle = &accel->bundle[handle->bundle];
	if (!bundle->obj) {
		printf("QAT: Error - no vm_object in bundle\n");
		return EINVAL;
	}

	/* Add the calling pid to the bundle list. */
	instance_rings =
	    malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
	instance_rings->user_pid = curproc->p_pid;
	instance_rings->ring_mask = 0;
	mutex_lock(&bundle->list_lock);
	list_add_tail(&instance_rings->list, &bundle->list);
	mutex_unlock(&bundle->list_lock);

	vm_object_reference(bundle->obj);
	*object = bundle->obj;
	return (0);
}

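/*
 * Populate the per-bundle control structures: map each logical bundle
 * index to its hardware bank number, initialize the instance-ring list
 * and locks, and record the CSR base address.
 */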
static inline void
adf_uio_init_accel_ctrl(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	struct qat_uio_bundle_dev *priv;
	unsigned int i;

	accel->nb_bundles = nb_bundles;
	accel->total_used_bundles = 0;

	for (i = 0; i < nb_bundles; i++) {
		/* Initialize the bundle. */
		bundle = &accel->bundle[i];
		priv = &bundle->uio_priv;
		bundle->hardware_bundle_number =
		    GET_MAX_BANKS(accel_dev) - nb_bundles + i;

		INIT_LIST_HEAD(&bundle->list);
		priv->bundle = bundle;
		priv->accel = accel;

		mutex_init(&bundle->lock);
		mutex_init(&bundle->list_lock);
		if (!accel->bar)
			printf("ERROR: bar not defined in accel\n");
		else
			bundle->csr_addr = (void *)accel->bar->virt_addr;
	}
}

/**
 * Create the VM objects backing each bundle's BAR region at device
 * start.
 */
static inline void
adf_uio_init_bundle_dev(struct adf_uio_control_accel *accel,
			struct adf_accel_dev *accel_dev,
			unsigned int nb_bundles)
{
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	for (i = 0; i < nb_bundles; i++) {
		bundle = &accel->bundle[i];
		bundle->obj =
		    adf_uio_map_bar(accel_dev, bundle->hardware_bundle_number);
		if (!bundle->obj) {
			device_printf(GET_DEV(accel_dev),
				      "ERROR in %s %d\n",
				      __func__,
				      __LINE__);
		}
	}
}

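/*
 * Create the character device and per-bundle control state for an
 * accel device.  User-space bundles are exposed only when the device
 * configuration defines ADF_FIRST_USER_BUNDLE; otherwise no device
 * node is created.
 */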
int
adf_uio_register(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = NULL;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
	int nb_bundles;

	if (!accel_dev) {
		printf("%s - accel_dev is NULL\n", __func__);
		return EFAULT;
	}

	if (adf_cfg_get_param_value(
		accel_dev, ADF_GENERAL_SEC, ADF_FIRST_USER_BUNDLE, val)) {
		nb_bundles = 0;
	} else {
		nb_bundles = GET_MAX_BANKS(accel_dev);
	}

	if (nb_bundles) {
		accel = malloc(sizeof(*accel) +
			       nb_bundles *
				   sizeof(struct adf_uio_control_bundle),
			       M_QAT,
			       M_WAITOK | M_ZERO);
		mtx_init(&accel->lock, "qat uio", NULL, MTX_DEF);
		accel->accel_dev = accel_dev;
		accel->bar = accel_dev->accel_pci_dev.pci_bars +
		    ADF_UIO_GET_BAR(accel_dev);

		adf_uio_init_accel_ctrl(accel, accel_dev, nb_bundles);
		accel->cdev = make_dev(&adf_uio_cdevsw,
				       0,
				       UID_ROOT,
				       GID_WHEEL,
				       0600,
				       "%s",
				       device_get_nameunit(GET_DEV(accel_dev)));
		if (accel->cdev == NULL) {
			mtx_destroy(&accel->lock);
			goto fail_clean;
		}
		accel->cdev->si_drv1 = accel_dev;
		accel_dev->accel = accel;
		cv_init(&accel->cleanup_ok, "uio_accel_cv");

		adf_uio_init_bundle_dev(accel, accel_dev, nb_bundles);
	}
	return 0;
fail_clean:
	free(accel, M_QAT);
	device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n");
	return ENODEV;
}

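/*
 * Tear down the UIO state for an accel device: drop the bundle VM
 * objects, destroy the device node, then wait (re-checking every three
 * seconds) until every open handle has run its destructor before
 * freeing the control structure.
 */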
void
adf_uio_remove(struct adf_accel_dev *accel_dev)
{
	struct adf_uio_control_accel *accel = accel_dev->accel;
	struct adf_uio_control_bundle *bundle;
	unsigned int i;

	if (accel) {
		/* Unmap all bars. */
		for (i = 0; i < accel->nb_bundles; i++) {
			bundle = &accel->bundle[i];
			vm_object_deallocate(bundle->obj);
		}

		destroy_dev(accel->cdev);
		mtx_lock(&accel->lock);
		while (accel->num_handles) {
			cv_timedwait_sig(&accel->cleanup_ok,
					 &accel->lock,
					 3 * hz);
		}
		mtx_unlock(&accel->lock);
		mtx_destroy(&accel->lock);
		cv_destroy(&accel->cleanup_ok);
		free(accel, M_QAT);
		accel_dev->accel = NULL;
	}
}