1*78ee8d1cSJulian Grajkowski /* SPDX-License-Identifier: BSD-3-Clause */
2*78ee8d1cSJulian Grajkowski /* Copyright(c) 2007-2022 Intel Corporation */
3*78ee8d1cSJulian Grajkowski #include "qat_freebsd.h"
4*78ee8d1cSJulian Grajkowski #include "adf_cfg.h"
5*78ee8d1cSJulian Grajkowski #include "adf_common_drv.h"
6*78ee8d1cSJulian Grajkowski #include "adf_accel_devices.h"
7*78ee8d1cSJulian Grajkowski #include "icp_qat_uclo.h"
8*78ee8d1cSJulian Grajkowski #include "icp_qat_fw.h"
9*78ee8d1cSJulian Grajkowski #include "icp_qat_fw_init_admin.h"
10*78ee8d1cSJulian Grajkowski #include "adf_cfg_strings.h"
11*78ee8d1cSJulian Grajkowski #include "adf_transport_access_macros.h"
12*78ee8d1cSJulian Grajkowski #include "adf_transport_internal.h"
13*78ee8d1cSJulian Grajkowski #include <sys/mutex.h>
14*78ee8d1cSJulian Grajkowski #include "adf_cfg.h"
15*78ee8d1cSJulian Grajkowski #include "adf_common_drv.h"
16*78ee8d1cSJulian Grajkowski
#define ADF_AE_PAIR 2
#define PKE_SLICES_PER_AE_PAIR 5

/* Global registry of accel devices bound to the driver. */
static LIST_HEAD(accel_table);
/* VF id translation entries (struct vf_id_map), one per registered device. */
static LIST_HEAD(vfs_table);
/* Protects accel_table, vfs_table, num_devices and id_map. */
static DEFINE_MUTEX(table_lock);
/* Count of device ids currently allocated from id_map. */
static uint32_t num_devices;
/* In-use flags for device ids; index is the accel_id. */
static u8 id_map[ADF_MAX_DEVICES];

/*
 * Mapping between a VF's PCI address and its driver-internal ids.
 * Entries with bdf == ~0 represent a PF on the host (or a VF on a
 * guest) rather than a real VF — see adf_devmgr_add_dev().
 */
struct vf_id_map {
	u32 bdf;       /* PCI bus/device/function; ~0 for non-VF entries */
	u32 id;        /* "real" id assigned when first bound */
	u32 fake_id;   /* user-visible id; may diverge after unbind */
	bool attached; /* false once the device has been detached */
	struct list_head list;
};
33*78ee8d1cSJulian Grajkowski
34*78ee8d1cSJulian Grajkowski /**
35*78ee8d1cSJulian Grajkowski * adf_get_vf_real_id() - Translate fake to real device id
36*78ee8d1cSJulian Grajkowski *
37*78ee8d1cSJulian Grajkowski * The "real" id is assigned to a device when it is initially
38*78ee8d1cSJulian Grajkowski * bound to the driver.
39*78ee8d1cSJulian Grajkowski * The "fake" id is usually the same as the real id, but
40*78ee8d1cSJulian Grajkowski * can change when devices are unbound from the qat driver,
41*78ee8d1cSJulian Grajkowski * perhaps to assign the device to a guest.
42*78ee8d1cSJulian Grajkowski */
43*78ee8d1cSJulian Grajkowski static int
adf_get_vf_real_id(u32 fake)44*78ee8d1cSJulian Grajkowski adf_get_vf_real_id(u32 fake)
45*78ee8d1cSJulian Grajkowski {
46*78ee8d1cSJulian Grajkowski struct list_head *itr;
47*78ee8d1cSJulian Grajkowski
48*78ee8d1cSJulian Grajkowski list_for_each(itr, &vfs_table)
49*78ee8d1cSJulian Grajkowski {
50*78ee8d1cSJulian Grajkowski struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);
51*78ee8d1cSJulian Grajkowski if (ptr->fake_id == fake)
52*78ee8d1cSJulian Grajkowski return ptr->id;
53*78ee8d1cSJulian Grajkowski }
54*78ee8d1cSJulian Grajkowski return -1;
55*78ee8d1cSJulian Grajkowski }
56*78ee8d1cSJulian Grajkowski
57*78ee8d1cSJulian Grajkowski /**
58*78ee8d1cSJulian Grajkowski * adf_clean_vf_map() - Cleans VF id mapings
59*78ee8d1cSJulian Grajkowski *
60*78ee8d1cSJulian Grajkowski * Function cleans internal ids for virtual functions.
61*78ee8d1cSJulian Grajkowski * @vf: flag indicating whether mappings is cleaned
62*78ee8d1cSJulian Grajkowski * for vfs only or for vfs and pfs
63*78ee8d1cSJulian Grajkowski */
64*78ee8d1cSJulian Grajkowski void
adf_clean_vf_map(bool vf)65*78ee8d1cSJulian Grajkowski adf_clean_vf_map(bool vf)
66*78ee8d1cSJulian Grajkowski {
67*78ee8d1cSJulian Grajkowski struct vf_id_map *map;
68*78ee8d1cSJulian Grajkowski struct list_head *ptr, *tmp;
69*78ee8d1cSJulian Grajkowski
70*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
71*78ee8d1cSJulian Grajkowski list_for_each_safe(ptr, tmp, &vfs_table)
72*78ee8d1cSJulian Grajkowski {
73*78ee8d1cSJulian Grajkowski map = list_entry(ptr, struct vf_id_map, list);
74*78ee8d1cSJulian Grajkowski if (map->bdf != -1) {
75*78ee8d1cSJulian Grajkowski id_map[map->id] = 0;
76*78ee8d1cSJulian Grajkowski num_devices--;
77*78ee8d1cSJulian Grajkowski }
78*78ee8d1cSJulian Grajkowski
79*78ee8d1cSJulian Grajkowski if (vf && map->bdf == -1)
80*78ee8d1cSJulian Grajkowski continue;
81*78ee8d1cSJulian Grajkowski
82*78ee8d1cSJulian Grajkowski list_del(ptr);
83*78ee8d1cSJulian Grajkowski free(map, M_QAT);
84*78ee8d1cSJulian Grajkowski }
85*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
86*78ee8d1cSJulian Grajkowski }
87*78ee8d1cSJulian Grajkowski
88*78ee8d1cSJulian Grajkowski /**
89*78ee8d1cSJulian Grajkowski * adf_devmgr_update_class_index() - Update internal index
90*78ee8d1cSJulian Grajkowski * @hw_data: Pointer to internal device data.
91*78ee8d1cSJulian Grajkowski *
92*78ee8d1cSJulian Grajkowski * Function updates internal dev index for VFs
93*78ee8d1cSJulian Grajkowski */
94*78ee8d1cSJulian Grajkowski void
adf_devmgr_update_class_index(struct adf_hw_device_data * hw_data)95*78ee8d1cSJulian Grajkowski adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
96*78ee8d1cSJulian Grajkowski {
97*78ee8d1cSJulian Grajkowski struct adf_hw_device_class *class = hw_data->dev_class;
98*78ee8d1cSJulian Grajkowski struct list_head *itr;
99*78ee8d1cSJulian Grajkowski int i = 0;
100*78ee8d1cSJulian Grajkowski
101*78ee8d1cSJulian Grajkowski list_for_each(itr, &accel_table)
102*78ee8d1cSJulian Grajkowski {
103*78ee8d1cSJulian Grajkowski struct adf_accel_dev *ptr =
104*78ee8d1cSJulian Grajkowski list_entry(itr, struct adf_accel_dev, list);
105*78ee8d1cSJulian Grajkowski
106*78ee8d1cSJulian Grajkowski if (ptr->hw_device->dev_class == class)
107*78ee8d1cSJulian Grajkowski ptr->hw_device->instance_id = i++;
108*78ee8d1cSJulian Grajkowski
109*78ee8d1cSJulian Grajkowski if (i == class->instances)
110*78ee8d1cSJulian Grajkowski break;
111*78ee8d1cSJulian Grajkowski }
112*78ee8d1cSJulian Grajkowski }
113*78ee8d1cSJulian Grajkowski
114*78ee8d1cSJulian Grajkowski static unsigned int
adf_find_free_id(void)115*78ee8d1cSJulian Grajkowski adf_find_free_id(void)
116*78ee8d1cSJulian Grajkowski {
117*78ee8d1cSJulian Grajkowski unsigned int i;
118*78ee8d1cSJulian Grajkowski
119*78ee8d1cSJulian Grajkowski for (i = 0; i < ADF_MAX_DEVICES; i++) {
120*78ee8d1cSJulian Grajkowski if (!id_map[i]) {
121*78ee8d1cSJulian Grajkowski id_map[i] = 1;
122*78ee8d1cSJulian Grajkowski return i;
123*78ee8d1cSJulian Grajkowski }
124*78ee8d1cSJulian Grajkowski }
125*78ee8d1cSJulian Grajkowski return ADF_MAX_DEVICES + 1;
126*78ee8d1cSJulian Grajkowski }
127*78ee8d1cSJulian Grajkowski
128*78ee8d1cSJulian Grajkowski /**
129*78ee8d1cSJulian Grajkowski * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
130*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
131*78ee8d1cSJulian Grajkowski * @pf: Corresponding PF if the accel_dev is a VF
132*78ee8d1cSJulian Grajkowski *
133*78ee8d1cSJulian Grajkowski * Function adds acceleration device to the acceleration framework.
134*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
135*78ee8d1cSJulian Grajkowski *
136*78ee8d1cSJulian Grajkowski * Return: 0 on success, error code otherwise.
137*78ee8d1cSJulian Grajkowski */
138*78ee8d1cSJulian Grajkowski int
adf_devmgr_add_dev(struct adf_accel_dev * accel_dev,struct adf_accel_dev * pf)139*78ee8d1cSJulian Grajkowski adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
140*78ee8d1cSJulian Grajkowski {
141*78ee8d1cSJulian Grajkowski struct list_head *itr;
142*78ee8d1cSJulian Grajkowski int ret = 0;
143*78ee8d1cSJulian Grajkowski
144*78ee8d1cSJulian Grajkowski if (num_devices == ADF_MAX_DEVICES) {
145*78ee8d1cSJulian Grajkowski device_printf(GET_DEV(accel_dev),
146*78ee8d1cSJulian Grajkowski "Only support up to %d devices\n",
147*78ee8d1cSJulian Grajkowski ADF_MAX_DEVICES);
148*78ee8d1cSJulian Grajkowski return EFAULT;
149*78ee8d1cSJulian Grajkowski }
150*78ee8d1cSJulian Grajkowski
151*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
152*78ee8d1cSJulian Grajkowski
153*78ee8d1cSJulian Grajkowski /* PF on host or VF on guest */
154*78ee8d1cSJulian Grajkowski if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
155*78ee8d1cSJulian Grajkowski struct vf_id_map *map;
156*78ee8d1cSJulian Grajkowski
157*78ee8d1cSJulian Grajkowski list_for_each(itr, &accel_table)
158*78ee8d1cSJulian Grajkowski {
159*78ee8d1cSJulian Grajkowski struct adf_accel_dev *ptr =
160*78ee8d1cSJulian Grajkowski list_entry(itr, struct adf_accel_dev, list);
161*78ee8d1cSJulian Grajkowski
162*78ee8d1cSJulian Grajkowski if (ptr == accel_dev) {
163*78ee8d1cSJulian Grajkowski ret = EEXIST;
164*78ee8d1cSJulian Grajkowski goto unlock;
165*78ee8d1cSJulian Grajkowski }
166*78ee8d1cSJulian Grajkowski }
167*78ee8d1cSJulian Grajkowski
168*78ee8d1cSJulian Grajkowski list_add_tail(&accel_dev->list, &accel_table);
169*78ee8d1cSJulian Grajkowski accel_dev->accel_id = adf_find_free_id();
170*78ee8d1cSJulian Grajkowski if (accel_dev->accel_id > ADF_MAX_DEVICES) {
171*78ee8d1cSJulian Grajkowski ret = EFAULT;
172*78ee8d1cSJulian Grajkowski goto unlock;
173*78ee8d1cSJulian Grajkowski }
174*78ee8d1cSJulian Grajkowski num_devices++;
175*78ee8d1cSJulian Grajkowski map = malloc(sizeof(*map), M_QAT, GFP_KERNEL);
176*78ee8d1cSJulian Grajkowski if (!map) {
177*78ee8d1cSJulian Grajkowski ret = ENOMEM;
178*78ee8d1cSJulian Grajkowski goto unlock;
179*78ee8d1cSJulian Grajkowski }
180*78ee8d1cSJulian Grajkowski map->bdf = ~0;
181*78ee8d1cSJulian Grajkowski map->id = accel_dev->accel_id;
182*78ee8d1cSJulian Grajkowski map->fake_id = map->id;
183*78ee8d1cSJulian Grajkowski map->attached = true;
184*78ee8d1cSJulian Grajkowski list_add_tail(&map->list, &vfs_table);
185*78ee8d1cSJulian Grajkowski } else if (accel_dev->is_vf && pf) {
186*78ee8d1cSJulian Grajkowski ret = ENOTSUP;
187*78ee8d1cSJulian Grajkowski goto unlock;
188*78ee8d1cSJulian Grajkowski }
189*78ee8d1cSJulian Grajkowski unlock:
190*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
191*78ee8d1cSJulian Grajkowski return ret;
192*78ee8d1cSJulian Grajkowski }
193*78ee8d1cSJulian Grajkowski
194*78ee8d1cSJulian Grajkowski struct list_head *
adf_devmgr_get_head(void)195*78ee8d1cSJulian Grajkowski adf_devmgr_get_head(void)
196*78ee8d1cSJulian Grajkowski {
197*78ee8d1cSJulian Grajkowski return &accel_table;
198*78ee8d1cSJulian Grajkowski }
199*78ee8d1cSJulian Grajkowski
200*78ee8d1cSJulian Grajkowski /**
201*78ee8d1cSJulian Grajkowski * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
202*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
203*78ee8d1cSJulian Grajkowski * @pf: Corresponding PF if the accel_dev is a VF
204*78ee8d1cSJulian Grajkowski *
205*78ee8d1cSJulian Grajkowski * Function removes acceleration device from the acceleration framework.
206*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
207*78ee8d1cSJulian Grajkowski *
208*78ee8d1cSJulian Grajkowski * Return: void
209*78ee8d1cSJulian Grajkowski */
210*78ee8d1cSJulian Grajkowski void
adf_devmgr_rm_dev(struct adf_accel_dev * accel_dev,struct adf_accel_dev * pf)211*78ee8d1cSJulian Grajkowski adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
212*78ee8d1cSJulian Grajkowski {
213*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
214*78ee8d1cSJulian Grajkowski if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
215*78ee8d1cSJulian Grajkowski id_map[accel_dev->accel_id] = 0;
216*78ee8d1cSJulian Grajkowski num_devices--;
217*78ee8d1cSJulian Grajkowski }
218*78ee8d1cSJulian Grajkowski list_del(&accel_dev->list);
219*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
220*78ee8d1cSJulian Grajkowski }
221*78ee8d1cSJulian Grajkowski
222*78ee8d1cSJulian Grajkowski struct adf_accel_dev *
adf_devmgr_get_first(void)223*78ee8d1cSJulian Grajkowski adf_devmgr_get_first(void)
224*78ee8d1cSJulian Grajkowski {
225*78ee8d1cSJulian Grajkowski struct adf_accel_dev *dev = NULL;
226*78ee8d1cSJulian Grajkowski
227*78ee8d1cSJulian Grajkowski if (!list_empty(&accel_table))
228*78ee8d1cSJulian Grajkowski dev =
229*78ee8d1cSJulian Grajkowski list_first_entry(&accel_table, struct adf_accel_dev, list);
230*78ee8d1cSJulian Grajkowski return dev;
231*78ee8d1cSJulian Grajkowski }
232*78ee8d1cSJulian Grajkowski
233*78ee8d1cSJulian Grajkowski /**
234*78ee8d1cSJulian Grajkowski * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
235*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to pci device.
236*78ee8d1cSJulian Grajkowski *
237*78ee8d1cSJulian Grajkowski * Function returns acceleration device associated with the given pci device.
238*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
239*78ee8d1cSJulian Grajkowski *
240*78ee8d1cSJulian Grajkowski * Return: pointer to accel_dev or NULL if not found.
241*78ee8d1cSJulian Grajkowski */
242*78ee8d1cSJulian Grajkowski struct adf_accel_dev *
adf_devmgr_pci_to_accel_dev(device_t pci_dev)243*78ee8d1cSJulian Grajkowski adf_devmgr_pci_to_accel_dev(device_t pci_dev)
244*78ee8d1cSJulian Grajkowski {
245*78ee8d1cSJulian Grajkowski struct list_head *itr;
246*78ee8d1cSJulian Grajkowski
247*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
248*78ee8d1cSJulian Grajkowski list_for_each(itr, &accel_table)
249*78ee8d1cSJulian Grajkowski {
250*78ee8d1cSJulian Grajkowski struct adf_accel_dev *ptr =
251*78ee8d1cSJulian Grajkowski list_entry(itr, struct adf_accel_dev, list);
252*78ee8d1cSJulian Grajkowski
253*78ee8d1cSJulian Grajkowski if (ptr->accel_pci_dev.pci_dev == pci_dev) {
254*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
255*78ee8d1cSJulian Grajkowski return ptr;
256*78ee8d1cSJulian Grajkowski }
257*78ee8d1cSJulian Grajkowski }
258*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
259*78ee8d1cSJulian Grajkowski return NULL;
260*78ee8d1cSJulian Grajkowski }
261*78ee8d1cSJulian Grajkowski
262*78ee8d1cSJulian Grajkowski struct adf_accel_dev *
adf_devmgr_get_dev_by_id(uint32_t id)263*78ee8d1cSJulian Grajkowski adf_devmgr_get_dev_by_id(uint32_t id)
264*78ee8d1cSJulian Grajkowski {
265*78ee8d1cSJulian Grajkowski struct list_head *itr;
266*78ee8d1cSJulian Grajkowski int real_id;
267*78ee8d1cSJulian Grajkowski
268*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
269*78ee8d1cSJulian Grajkowski real_id = adf_get_vf_real_id(id);
270*78ee8d1cSJulian Grajkowski if (real_id < 0)
271*78ee8d1cSJulian Grajkowski goto unlock;
272*78ee8d1cSJulian Grajkowski
273*78ee8d1cSJulian Grajkowski id = real_id;
274*78ee8d1cSJulian Grajkowski
275*78ee8d1cSJulian Grajkowski list_for_each(itr, &accel_table)
276*78ee8d1cSJulian Grajkowski {
277*78ee8d1cSJulian Grajkowski struct adf_accel_dev *ptr =
278*78ee8d1cSJulian Grajkowski list_entry(itr, struct adf_accel_dev, list);
279*78ee8d1cSJulian Grajkowski if (ptr->accel_id == id) {
280*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
281*78ee8d1cSJulian Grajkowski return ptr;
282*78ee8d1cSJulian Grajkowski }
283*78ee8d1cSJulian Grajkowski }
284*78ee8d1cSJulian Grajkowski unlock:
285*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
286*78ee8d1cSJulian Grajkowski return NULL;
287*78ee8d1cSJulian Grajkowski }
288*78ee8d1cSJulian Grajkowski
289*78ee8d1cSJulian Grajkowski int
adf_devmgr_verify_id(uint32_t * id)290*78ee8d1cSJulian Grajkowski adf_devmgr_verify_id(uint32_t *id)
291*78ee8d1cSJulian Grajkowski {
292*78ee8d1cSJulian Grajkowski struct adf_accel_dev *accel_dev;
293*78ee8d1cSJulian Grajkowski
294*78ee8d1cSJulian Grajkowski if (*id == ADF_CFG_ALL_DEVICES)
295*78ee8d1cSJulian Grajkowski return 0;
296*78ee8d1cSJulian Grajkowski
297*78ee8d1cSJulian Grajkowski accel_dev = adf_devmgr_get_dev_by_id(*id);
298*78ee8d1cSJulian Grajkowski if (!accel_dev)
299*78ee8d1cSJulian Grajkowski return ENODEV;
300*78ee8d1cSJulian Grajkowski
301*78ee8d1cSJulian Grajkowski /* Correct the id if real and fake differ */
302*78ee8d1cSJulian Grajkowski *id = accel_dev->accel_id;
303*78ee8d1cSJulian Grajkowski return 0;
304*78ee8d1cSJulian Grajkowski }
305*78ee8d1cSJulian Grajkowski
306*78ee8d1cSJulian Grajkowski static int
adf_get_num_dettached_vfs(void)307*78ee8d1cSJulian Grajkowski adf_get_num_dettached_vfs(void)
308*78ee8d1cSJulian Grajkowski {
309*78ee8d1cSJulian Grajkowski struct list_head *itr;
310*78ee8d1cSJulian Grajkowski int vfs = 0;
311*78ee8d1cSJulian Grajkowski
312*78ee8d1cSJulian Grajkowski mutex_lock(&table_lock);
313*78ee8d1cSJulian Grajkowski list_for_each(itr, &vfs_table)
314*78ee8d1cSJulian Grajkowski {
315*78ee8d1cSJulian Grajkowski struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);
316*78ee8d1cSJulian Grajkowski if (ptr->bdf != ~0 && !ptr->attached)
317*78ee8d1cSJulian Grajkowski vfs++;
318*78ee8d1cSJulian Grajkowski }
319*78ee8d1cSJulian Grajkowski mutex_unlock(&table_lock);
320*78ee8d1cSJulian Grajkowski return vfs;
321*78ee8d1cSJulian Grajkowski }
322*78ee8d1cSJulian Grajkowski
323*78ee8d1cSJulian Grajkowski void
adf_devmgr_get_num_dev(uint32_t * num)324*78ee8d1cSJulian Grajkowski adf_devmgr_get_num_dev(uint32_t *num)
325*78ee8d1cSJulian Grajkowski {
326*78ee8d1cSJulian Grajkowski *num = num_devices - adf_get_num_dettached_vfs();
327*78ee8d1cSJulian Grajkowski }
328*78ee8d1cSJulian Grajkowski
329*78ee8d1cSJulian Grajkowski /**
330*78ee8d1cSJulian Grajkowski * adf_dev_in_use() - Check whether accel_dev is currently in use
331*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
332*78ee8d1cSJulian Grajkowski *
333*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
334*78ee8d1cSJulian Grajkowski *
335*78ee8d1cSJulian Grajkowski * Return: 1 when device is in use, 0 otherwise.
336*78ee8d1cSJulian Grajkowski */
337*78ee8d1cSJulian Grajkowski int
adf_dev_in_use(struct adf_accel_dev * accel_dev)338*78ee8d1cSJulian Grajkowski adf_dev_in_use(struct adf_accel_dev *accel_dev)
339*78ee8d1cSJulian Grajkowski {
340*78ee8d1cSJulian Grajkowski return atomic_read(&accel_dev->ref_count) != 0;
341*78ee8d1cSJulian Grajkowski }
342*78ee8d1cSJulian Grajkowski
343*78ee8d1cSJulian Grajkowski /**
344*78ee8d1cSJulian Grajkowski * adf_dev_get() - Increment accel_dev reference count
345*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
346*78ee8d1cSJulian Grajkowski *
347*78ee8d1cSJulian Grajkowski * Increment the accel_dev refcount and if this is the first time
348*78ee8d1cSJulian Grajkowski * incrementing it during this period the accel_dev is in use,
349*78ee8d1cSJulian Grajkowski * increment the module refcount too.
350*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
351*78ee8d1cSJulian Grajkowski *
352*78ee8d1cSJulian Grajkowski * Return: void
353*78ee8d1cSJulian Grajkowski */
354*78ee8d1cSJulian Grajkowski void
adf_dev_get(struct adf_accel_dev * accel_dev)355*78ee8d1cSJulian Grajkowski adf_dev_get(struct adf_accel_dev *accel_dev)
356*78ee8d1cSJulian Grajkowski {
357*78ee8d1cSJulian Grajkowski if (atomic_add_return(1, &accel_dev->ref_count) == 1)
358*78ee8d1cSJulian Grajkowski device_busy(GET_DEV(accel_dev));
359*78ee8d1cSJulian Grajkowski }
360*78ee8d1cSJulian Grajkowski
361*78ee8d1cSJulian Grajkowski /**
362*78ee8d1cSJulian Grajkowski * adf_dev_put() - Decrement accel_dev reference count
363*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
364*78ee8d1cSJulian Grajkowski *
365*78ee8d1cSJulian Grajkowski * Decrement the accel_dev refcount and if this is the last time
366*78ee8d1cSJulian Grajkowski * decrementing it during this period the accel_dev is in use,
367*78ee8d1cSJulian Grajkowski * decrement the module refcount too.
368*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
369*78ee8d1cSJulian Grajkowski *
370*78ee8d1cSJulian Grajkowski * Return: void
371*78ee8d1cSJulian Grajkowski */
372*78ee8d1cSJulian Grajkowski void
adf_dev_put(struct adf_accel_dev * accel_dev)373*78ee8d1cSJulian Grajkowski adf_dev_put(struct adf_accel_dev *accel_dev)
374*78ee8d1cSJulian Grajkowski {
375*78ee8d1cSJulian Grajkowski if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
376*78ee8d1cSJulian Grajkowski device_unbusy(GET_DEV(accel_dev));
377*78ee8d1cSJulian Grajkowski }
378*78ee8d1cSJulian Grajkowski
379*78ee8d1cSJulian Grajkowski /**
380*78ee8d1cSJulian Grajkowski * adf_devmgr_in_reset() - Check whether device is in reset
381*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
382*78ee8d1cSJulian Grajkowski *
383*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
384*78ee8d1cSJulian Grajkowski *
385*78ee8d1cSJulian Grajkowski * Return: 1 when the device is being reset, 0 otherwise.
386*78ee8d1cSJulian Grajkowski */
387*78ee8d1cSJulian Grajkowski int
adf_devmgr_in_reset(struct adf_accel_dev * accel_dev)388*78ee8d1cSJulian Grajkowski adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
389*78ee8d1cSJulian Grajkowski {
390*78ee8d1cSJulian Grajkowski return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
391*78ee8d1cSJulian Grajkowski }
392*78ee8d1cSJulian Grajkowski
393*78ee8d1cSJulian Grajkowski /**
394*78ee8d1cSJulian Grajkowski * adf_dev_started() - Check whether device has started
395*78ee8d1cSJulian Grajkowski * @accel_dev: Pointer to acceleration device.
396*78ee8d1cSJulian Grajkowski *
397*78ee8d1cSJulian Grajkowski * To be used by QAT device specific drivers.
398*78ee8d1cSJulian Grajkowski *
399*78ee8d1cSJulian Grajkowski * Return: 1 when the device has started, 0 otherwise
400*78ee8d1cSJulian Grajkowski */
401*78ee8d1cSJulian Grajkowski int
adf_dev_started(struct adf_accel_dev * accel_dev)402*78ee8d1cSJulian Grajkowski adf_dev_started(struct adf_accel_dev *accel_dev)
403*78ee8d1cSJulian Grajkowski {
404*78ee8d1cSJulian Grajkowski return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
405*78ee8d1cSJulian Grajkowski }
406