/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>

#define ADF_AE_PAIR 2
#define PKE_SLICES_PER_AE_PAIR 5

static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
static u8 id_map[ADF_MAX_DEVICES];

struct vf_id_map {
	u32 bdf;
	u32 id;
	u32 fake_id;
	bool attached;
	struct list_head list;
};

/**
 * adf_get_vf_real_id() - Translate fake to real device id
 * @fake: Fake (logical) device id to translate.
 *
 * The "real" id is assigned to a device when it is initially
 * bound to the driver.
 * The "fake" id is usually the same as the real id, but
 * can change when devices are unbound from the qat driver,
 * perhaps to assign the device to a guest.
 *
 * Return: real id on success, -1 if no mapping exists.
 */
static int
adf_get_vf_real_id(u32 fake)
{
	struct list_head *itr;

	list_for_each(itr, &vfs_table)
	{
		struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);
		if (ptr->fake_id == fake)
			return ptr->id;
	}
	return -1;
}

/**
 * adf_clean_vf_map() - Clean VF id mappings
 * @vf: Flag indicating whether mappings are cleaned
 *	for VFs only or for both VFs and PFs.
 *
 * Function cleans internal ids for virtual functions.
 */
void
adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map;
	struct list_head *ptr, *tmp;

	mutex_lock(&table_lock);
	list_for_each_safe(ptr, tmp, &vfs_table)
	{
		map = list_entry(ptr, struct vf_id_map, list);
		if (map->bdf != -1) {
			id_map[map->id] = 0;
			num_devices--;
		}

		if (vf && map->bdf == -1)
			continue;

		list_del(ptr);
		free(map, M_QAT);
	}
	mutex_unlock(&table_lock);
}

/**
 * adf_devmgr_update_class_index() - Update internal index
 * @hw_data: Pointer to internal device data.
 *
 * Function updates internal dev index for VFs.
 */
void
adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
	struct adf_hw_device_class *class = hw_data->dev_class;
	struct list_head *itr;
	int i = 0;

	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);

		if (ptr->hw_device->dev_class == class)
			ptr->hw_device->instance_id = i++;

		if (i == class->instances)
			break;
	}
}

static unsigned int
adf_find_free_id(void)
{
	unsigned int i;

	for (i = 0; i < ADF_MAX_DEVICES; i++) {
		if (!id_map[i]) {
			id_map[i] = 1;
			return i;
		}
	}
	return ADF_MAX_DEVICES + 1;
}
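/*
 * Illustrative sketch only, not part of the upstream file: one way a
 * caller already holding table_lock could combine adf_get_vf_real_id()
 * with the id_map allocation bitmap above. The function name is
 * hypothetical; only adf_get_vf_real_id(), id_map and ADF_MAX_DEVICES
 * come from this file.
 */
static inline int
example_resolve_fake_id(u32 fake_id)
{
	int real_id = adf_get_vf_real_id(fake_id);

	/* -1 means no VF mapping exists for this fake id. */
	if (real_id < 0 || real_id >= ADF_MAX_DEVICES)
		return -1;

	/* id_map[real_id] is non-zero while the real id is allocated. */
	return id_map[real_id] ? real_id : -1;
}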
/**
 * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function adds acceleration device to the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
{
	struct list_head *itr;
	int ret = 0;

	if (num_devices == ADF_MAX_DEVICES) {
		device_printf(GET_DEV(accel_dev),
			      "Only support up to %d devices\n",
			      ADF_MAX_DEVICES);
		return EFAULT;
	}

	mutex_lock(&table_lock);

	/* PF on host or VF on guest */
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		struct vf_id_map *map;

		list_for_each(itr, &accel_table)
		{
			struct adf_accel_dev *ptr =
			    list_entry(itr, struct adf_accel_dev, list);

			if (ptr == accel_dev) {
				ret = EEXIST;
				goto unlock;
			}
		}

		list_add_tail(&accel_dev->list, &accel_table);
		accel_dev->accel_id = adf_find_free_id();
		if (accel_dev->accel_id > ADF_MAX_DEVICES) {
			ret = EFAULT;
			goto unlock;
		}
		num_devices++;
		map = malloc(sizeof(*map), M_QAT, GFP_KERNEL);
		if (!map) {
			ret = ENOMEM;
			goto unlock;
		}
		map->bdf = ~0;
		map->id = accel_dev->accel_id;
		map->fake_id = map->id;
		map->attached = true;
		list_add_tail(&map->list, &vfs_table);
	} else if (accel_dev->is_vf && pf) {
		ret = ENOTSUP;
		goto unlock;
	}
unlock:
	mutex_unlock(&table_lock);
	return ret;
}

struct list_head *
adf_devmgr_get_head(void)
{
	return &accel_table;
}

/**
 * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
 * @accel_dev: Pointer to acceleration device.
 * @pf: Corresponding PF if the accel_dev is a VF
 *
 * Function removes acceleration device from the acceleration framework.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf)
{
	mutex_lock(&table_lock);
	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
		id_map[accel_dev->accel_id] = 0;
		num_devices--;
	}
	list_del(&accel_dev->list);
	mutex_unlock(&table_lock);
}

struct adf_accel_dev *
adf_devmgr_get_first(void)
{
	struct adf_accel_dev *dev = NULL;

	if (!list_empty(&accel_table))
		dev = list_first_entry(&accel_table, struct adf_accel_dev, list);
	return dev;
}
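/*
 * Minimal usage sketch (hypothetical, not part of this file): a
 * device-specific attach path registering a host PF with the framework.
 * Only adf_devmgr_add_dev() and adf_devmgr_rm_dev() come from this
 * file; example_attach_pf is illustrative.
 */
static int
example_attach_pf(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Host PF (or guest VF): no parent PF pointer is passed. */
	ret = adf_devmgr_add_dev(accel_dev, NULL);
	if (ret)
		return ret;

	/*
	 * Device-specific initialization would follow here; if it
	 * fails, the registration is undone with
	 * adf_devmgr_rm_dev(accel_dev, NULL).
	 */
	return 0;
}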
/**
 * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
 * @pci_dev: Pointer to PCI device.
 *
 * Function returns acceleration device associated with the given pci device.
 * To be used by QAT device specific drivers.
 *
 * Return: pointer to accel_dev or NULL if not found.
 */
struct adf_accel_dev *
adf_devmgr_pci_to_accel_dev(device_t pci_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);

		if (ptr->accel_pci_dev.pci_dev == pci_dev) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
	mutex_unlock(&table_lock);
	return NULL;
}

struct adf_accel_dev *
adf_devmgr_get_dev_by_id(uint32_t id)
{
	struct list_head *itr;
	int real_id;

	mutex_lock(&table_lock);
	real_id = adf_get_vf_real_id(id);
	if (real_id < 0)
		goto unlock;

	id = real_id;

	list_for_each(itr, &accel_table)
	{
		struct adf_accel_dev *ptr =
		    list_entry(itr, struct adf_accel_dev, list);
		if (ptr->accel_id == id) {
			mutex_unlock(&table_lock);
			return ptr;
		}
	}
unlock:
	mutex_unlock(&table_lock);
	return NULL;
}

int
adf_devmgr_verify_id(uint32_t *id)
{
	struct adf_accel_dev *accel_dev;

	if (*id == ADF_CFG_ALL_DEVICES)
		return 0;

	accel_dev = adf_devmgr_get_dev_by_id(*id);
	if (!accel_dev)
		return ENODEV;

	/* Correct the id if real and fake differ */
	*id = accel_dev->accel_id;
	return 0;
}

static int
adf_get_num_detached_vfs(void)
{
	struct list_head *itr;
	int vfs = 0;

	mutex_lock(&table_lock);
	list_for_each(itr, &vfs_table)
	{
		struct vf_id_map *ptr = list_entry(itr, struct vf_id_map, list);
		if (ptr->bdf != ~0 && !ptr->attached)
			vfs++;
	}
	mutex_unlock(&table_lock);
	return vfs;
}

void
adf_devmgr_get_num_dev(uint32_t *num)
{
	*num = num_devices - adf_get_num_detached_vfs();
}

/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int
adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	return atomic_read(&accel_dev->ref_count) != 0;
}

/**
 * adf_dev_get() - Increment accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Increment the accel_dev refcount and if this is the first time
 * incrementing it during this period the accel_dev is in use,
 * increment the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_dev_get(struct adf_accel_dev *accel_dev)
{
	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
		device_busy(GET_DEV(accel_dev));
}

/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and if this is the last time
 * decrementing it during this period the accel_dev is in use,
 * decrement the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void
adf_dev_put(struct adf_accel_dev *accel_dev)
{
	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
		device_unbusy(GET_DEV(accel_dev));
}
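/*
 * Reference-count usage sketch (hypothetical, not part of this file):
 * a caller pins the device across a window where it must not detach.
 * Only adf_dev_get() and adf_dev_put() come from this file.
 */
static void
example_use_device(struct adf_accel_dev *accel_dev)
{
	adf_dev_get(accel_dev);	/* first reference also busies the device */

	/* ... work that relies on accel_dev staying bound ... */

	adf_dev_put(accel_dev);	/* last reference unbusies the device */
}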
/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int
adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
}

/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise.
 */
int
adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
}
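/*
 * Status-gating sketch (hypothetical, not part of this file): callers
 * submitting work would typically require the device to be started and
 * not mid-reset. Only adf_dev_started() and adf_devmgr_in_reset() come
 * from this file.
 */
static bool
example_dev_ready(struct adf_accel_dev *accel_dev)
{
	/* Ready only when started and no reset is in flight. */
	return adf_dev_started(accel_dev) && !adf_devmgr_in_reset(accel_dev);
}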