/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from the fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};

static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request Inflight Count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR:%d request Still Pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A given
	 * session, however, must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
							     struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
							     struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
		chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
	return u_ctx;
}
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	/* If the skb's decrypted bit is set, it is a NIC TLS packet;
	 * otherwise it is an IPsec packet.
	 */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return chcr_ktls_xmit(skb, dev);
#endif
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	return chcr_ipsec_xmit(skb, dev);
#endif
	return 0;
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */

static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	// Move u_ctx to the inactive_dev list
	chcr_dev_move(u_ctx);
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			// Already initialised.
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void)
{
	struct uld_ctx *u_ctx, *tmp;

	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	mutex_unlock(&drv_data.drv_mutex);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	rtnl_lock();
	update_netdev_features();
	rtnl_unlock();
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);

	/* Remove all devices from the list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);