// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_compression.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_compression;

void qat_compression_put_instance(struct qat_compression_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	struct list_head *list_ptr, *tmp;
	int i;

	list_for_each_safe(list_ptr, tmp, &accel_dev->compression_list) {
		inst = list_entry(list_ptr,
				  struct qat_compression_instance, list);

		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_compression_put_instance(inst);

		if (inst->dc_tx)
			adf_remove_ring(inst->dc_tx);

		if (inst->dc_rx)
			adf_remove_ring(inst->dc_rx);

		list_del(list_ptr);
		kfree(inst);
	}
	return 0;
}

struct qat_compression_instance *qat_compression_get_instance_node(int node)
{
	struct qat_compression_instance *inst = NULL;
	struct adf_accel_dev *accel_dev = NULL;
	unsigned long best = ~0;
	struct list_head *itr;

	list_for_each(itr, adf_devmgr_get_head()) {
		struct adf_accel_dev *tmp_dev;
		unsigned long ctr;
		int tmp_dev_node;

		tmp_dev = list_entry(itr, struct adf_accel_dev, list);
		tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));

		if ((node == tmp_dev_node || tmp_dev_node < 0) &&
		    adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each(itr, adf_devmgr_get_head()) {
			struct adf_accel_dev *tmp_dev;

			tmp_dev = list_entry(itr, struct adf_accel_dev, list);
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->compression_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each(itr, &accel_dev->compression_list) {
		struct qat_compression_instance *tmp_inst;
		unsigned long ctr;

		tmp_inst = list_entry(itr, struct qat_compression_instance, list);
		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}
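/*
 * Illustrative usage sketch (not part of this file): a consumer of the
 * compression service, such as an acomp algorithm implementation, would
 * typically pair the get/put helpers above around its use of an instance.
 * The numa_node_id() call and the request-submission step below are
 * assumptions for illustration only.
 *
 *	struct qat_compression_instance *inst;
 *
 *	inst = qat_compression_get_instance_node(numa_node_id());
 *	if (!inst)
 *		return -ENODEV;
 *
 *	// ... build a firmware request and submit it on inst->dc_tx; the
 *	// response arrives on inst->dc_rx and is handled by
 *	// qat_comp_alg_callback() ...
 *
 *	qat_compression_put_instance(inst);
 */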
static int qat_compression_create_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_compression_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long num_inst, num_msg_dc;
	unsigned long bank;
	int msg_size;
	int ret;
	int i;

	INIT_LIST_HEAD(&accel_dev->compression_list);
	strscpy(key, ADF_NUM_DC, sizeof(key));
	ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
	if (ret)
		return ret;

	ret = kstrtoul(val, 10, &num_inst);
	if (ret)
		return ret;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst) {
			ret = -ENOMEM;
			goto err;
		}

		list_add_tail(&inst->list, &accel_dev->compression_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;

		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			return ret;

		ret = kstrtoul(val, 10, &bank);
		if (ret)
			return ret;

		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
		ret = adf_cfg_get_param_value(accel_dev, SEC, key, val);
		if (ret)
			return ret;

		ret = kstrtoul(val, 10, &num_msg_dc);
		if (ret)
			return ret;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, NULL, 0, &inst->dc_tx);
		if (ret)
			return ret;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
		ret = adf_create_ring(accel_dev, SEC, bank, num_msg_dc,
				      msg_size, key, qat_comp_alg_callback, 0,
				      &inst->dc_rx);
		if (ret)
			return ret;

		inst->dc_data = accel_dev->dc_data;
		INIT_LIST_HEAD(&inst->backlog.list);
		spin_lock_init(&inst->backlog.lock);
	}
	return 0;
err:
	qat_compression_free_instances(accel_dev);
	return ret;
}

static int qat_compression_alloc_dc_data(struct adf_accel_dev *accel_dev)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t obuff_p = DMA_MAPPING_ERROR;
	size_t ovf_buff_sz = QAT_COMP_MAX_SKID;
	struct adf_dc_data *dc_data = NULL;
	u8 *obuff = NULL;

	dc_data = kzalloc_node(sizeof(*dc_data), GFP_KERNEL, dev_to_node(dev));
	if (!dc_data)
		goto err;

	obuff = kzalloc_node(ovf_buff_sz, GFP_KERNEL, dev_to_node(dev));
	if (!obuff)
		goto err;

	obuff_p = dma_map_single(dev, obuff, ovf_buff_sz, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, obuff_p)))
		goto err;

	dc_data->ovf_buff = obuff;
	dc_data->ovf_buff_p = obuff_p;
	dc_data->ovf_buff_sz = ovf_buff_sz;

	accel_dev->dc_data = dc_data;

	return 0;

err:
	accel_dev->dc_data = NULL;
	kfree(obuff);
	kfree(dc_data);
	return -ENOMEM;
}

static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
{
	struct adf_dc_data *dc_data = accel_dev->dc_data;
	struct device *dev = &GET_DEV(accel_dev);

	if (!dc_data)
		return;

	dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
			 DMA_BIDIRECTIONAL);
	kfree_sensitive(dc_data->ovf_buff);
	kfree(dc_data);
	accel_dev->dc_data = NULL;
}

static int qat_compression_init(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = qat_compression_alloc_dc_data(accel_dev);
	if (ret)
		return ret;

	ret = qat_compression_create_instances(accel_dev);
	if (ret)
		qat_free_dc_data(accel_dev);

	return ret;
}

static int qat_compression_shutdown(struct adf_accel_dev *accel_dev)
{
	qat_free_dc_data(accel_dev);
	return qat_compression_free_instances(accel_dev);
}
static int qat_compression_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_compression_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_compression_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

int qat_compression_register(void)
{
	memset(&qat_compression, 0, sizeof(qat_compression));
	qat_compression.event_hld = qat_compression_event_handler;
	qat_compression.name = "qat_compression";
	return adf_service_register(&qat_compression);
}

int qat_compression_unregister(void)
{
	return adf_service_unregister(&qat_compression);
}
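/*
 * Illustrative sketch (assumption, not part of this file): the service is
 * expected to be registered once while the driver core is loaded and
 * unregistered on the way out, along the lines of:
 *
 *	static int __init example_init(void)
 *	{
 *		return qat_compression_register();
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		qat_compression_unregister();
 *	}
 *
 * The example_init()/example_exit() names are hypothetical; only
 * qat_compression_register() and qat_compression_unregister() come from
 * this file.
 */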