/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE		BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED	BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN	32
#define HFI1_VNIC_SDMA_DESC_WTRMRK	64

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;

	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;
};

static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically contiguous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      frag->page_offset,
				      skb_frag_size(frag));
		if (unlikely(ret))
			goto bail_txadd;
	}

	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add pbc */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}

/* set up the last plen bytes of pad */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

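	/*
	 * Hand the request to the SDMA engine. If the engine's descriptor
	 * ring is full, hfi1_vnic_sdma_sleep() defers this Tx queue and
	 * -EBUSY is returned; the skb is then kept (see tx_err below) so
	 * the caller can retry after wakeup.
	 */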
	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && ret != -ECOMM))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;
	return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list, to be woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	if (list_empty(&vnic_sdma->wait.list)) {
		iowait_get_priority(wait->iow);
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}

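/*
 * State handoff: hfi1_vnic_sdma_sleep() above marks the queue
 * HFI1_VNIC_SDMA_Q_DEFERRED; hfi1_vnic_sdma_wakeup() below marks it
 * HFI1_VNIC_SDMA_Q_ACTIVE again, and hfi1_vnic_send_dma() only posts
 * while the queue is active.
 */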
/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the
 * Tx queue's wait structure was previously added to the sdma engine's
 * dmawait list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}

void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}

int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}

void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}
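
/*
 * Lifecycle note: hfi1_vnic_txreq_init() must have created the txreq
 * cache before hfi1_vnic_send_dma() can allocate from it, and
 * hfi1_vnic_txreq_deinit() may only run once all in-flight requests
 * have completed, since vnic_sdma_complete() frees each tx back into
 * the cache being destroyed.
 */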