/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ksynch.h>

#include <sys/ib/clients/eoib/eib_impl.h>

eib_chan_t *
eib_chan_init(void)
{
	eib_chan_t *chan;

	/*
	 * Allocate an eib_chan_t to hold the admin qp state and
	 * initialize its locks and condition variables
	 */
	chan = kmem_zalloc(sizeof (eib_chan_t), KM_SLEEP);

	mutex_init(&chan->ch_pkey_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&chan->ch_cep_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&chan->ch_tx_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&chan->ch_rx_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&chan->ch_vhub_lock, NULL, MUTEX_DRIVER, NULL);

	cv_init(&chan->ch_cep_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&chan->ch_tx_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&chan->ch_rx_cv, NULL, CV_DEFAULT, NULL);

	return (chan);
}

void
eib_chan_fini(eib_chan_t *chan)
{
	if (chan) {
		cv_destroy(&chan->ch_rx_cv);
		cv_destroy(&chan->ch_tx_cv);
		cv_destroy(&chan->ch_cep_cv);

		mutex_destroy(&chan->ch_vhub_lock);
		mutex_destroy(&chan->ch_rx_lock);
		mutex_destroy(&chan->ch_tx_lock);
		mutex_destroy(&chan->ch_cep_lock);
		mutex_destroy(&chan->ch_pkey_lock);

		kmem_free(chan, sizeof (eib_chan_t));
	}
}

int
eib_chan_post_rx(eib_t *ss, eib_chan_t *chan, uint_t *n_posted)
{
	eib_wqe_t *rwqes[EIB_RWR_CHUNK_SZ];
	ibt_status_t ret;
	uint_t n_got = 0;
	uint_t n_good = 0;
	uint_t limit = 0;
	uint_t room = 0;
	uint_t chunk_sz;
	int wndx;
	int i;

	/*
	 * We don't want to post beyond the maximum rwqe size for this channel
	 */
	room = chan->ch_max_rwqes - chan->ch_rx_posted;
	limit = (room > chan->ch_rwqe_bktsz) ? chan->ch_rwqe_bktsz : room;

	for (wndx = 0; wndx < limit; wndx += chunk_sz) {
		/*
		 * Grab a chunk of rwqes
		 */
		chunk_sz = ((limit - wndx) < EIB_RWR_CHUNK_SZ) ?
		    (limit - wndx) : EIB_RWR_CHUNK_SZ;

		/*
		 * When eib_chan_post_rx() is called to post a bunch of rwqes,
		 * it is either during the vnic setup or when we're refilling
		 * the data channel. Neither situation is important enough for
		 * us to grab the wqes reserved for sending keepalives of
		 * previously established vnics.
		 */
		ret = eib_rsrc_grab_rwqes(ss, rwqes, chunk_sz, &n_got,
		    EIB_WPRI_LO);
		if (ret != EIB_E_SUCCESS)
			break;

		/*
		 * Post work requests from the rwqes we just grabbed
		 */
		for (i = 0; i < n_got; i++) {
			eib_wqe_t *rwqe = rwqes[i];

			ret = eib_chan_post_recv(ss, chan, rwqe);
			if (ret == EIB_E_SUCCESS) {
				n_good++;
			} else if (rwqe->qe_mp) {
				freemsg(rwqe->qe_mp);
			} else {
				eib_rsrc_return_rwqe(ss, rwqe, NULL);
			}
		}

		/*
		 * If we got fewer rwqes than we asked for during the grab
		 * earlier, we'll stop asking for more and quit now.
		 */
		if (n_got < chunk_sz)
			break;
	}

	/*
	 * If we posted absolutely nothing, we return failure; otherwise
	 * we return success.
	 */
	if (n_good == 0)
		return (EIB_E_FAILURE);

	if (n_posted)
		*n_posted = n_good;

	return (EIB_E_SUCCESS);
}

/*ARGSUSED*/
int
eib_chan_post_recv(eib_t *ss, eib_chan_t *chan, eib_wqe_t *rwqe)
{
	ibt_status_t ret;
	uint8_t *mp_base;
	size_t mp_len;

	rwqe->qe_sgl.ds_va = (ib_vaddr_t)(uintptr_t)rwqe->qe_cpbuf;
	rwqe->qe_sgl.ds_len = rwqe->qe_bufsz;

	/*
	 * If this channel has receive buffer alignment restrictions, make
	 * sure the requirements are met
	 */
	if (chan->ch_ip_hdr_align) {
		rwqe->qe_sgl.ds_va += chan->ch_ip_hdr_align;
		rwqe->qe_sgl.ds_len -= chan->ch_ip_hdr_align;
	}

	/*
	 * If the receive buffer for this channel needs to have an mblk
	 * allocated, do it
	 */
	if (chan->ch_alloc_mp) {
		mp_base = (uint8_t *)(uintptr_t)(rwqe->qe_sgl.ds_va);
		mp_len = rwqe->qe_sgl.ds_len;

		rwqe->qe_mp = desballoc(mp_base, mp_len, 0, &rwqe->qe_frp);
		if (rwqe->qe_mp == NULL) {
			EIB_DPRINTF_ERR(ss->ei_instance, "eib_chan_post_recv: "
			    "desballoc(base=0x%llx, len=0x%llx) failed",
			    mp_base, mp_len);
			return (EIB_E_FAILURE);
		}
	}

	/*
	 * Check if the recv queue is already full or if we can post one more
	 */
	mutex_enter(&chan->ch_rx_lock);
	if (chan->ch_rx_posted > (chan->ch_max_rwqes - 1)) {
		EIB_DPRINTF_ERR(ss->ei_instance, "eib_chan_post_recv: "
		    "too many rwqes posted already, posted=0x%lx, max=0x%lx",
		    chan->ch_rx_posted, chan->ch_max_rwqes);
		mutex_exit(&chan->ch_rx_lock);
		return (EIB_E_FAILURE);
	}

	rwqe->qe_vnic_inst = chan->ch_vnic_inst;
	rwqe->qe_chan = chan;
	rwqe->qe_info |= EIB_WQE_FLG_POSTED_TO_HCA;

	ret = ibt_post_recv(chan->ch_chan, &(rwqe->qe_wr.recv), 1, NULL);
	if (ret != IBT_SUCCESS) {
		EIB_DPRINTF_ERR(ss->ei_instance, "eib_chan_post_recv: "
		    "ibt_post_recv() failed, ret=%d", ret);
		mutex_exit(&chan->ch_rx_lock);
		return (EIB_E_FAILURE);
	}
	chan->ch_rx_posted++;
	mutex_exit(&chan->ch_rx_lock);

	return (EIB_E_SUCCESS);
}
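
/*
 * The sketch below is illustrative only and is not part of the driver: it
 * shows one plausible calling sequence for the routines in this file, based
 * solely on how they are used above.  The function name
 * eib_chan_usage_sketch() and the EIB_CHAN_USAGE_SKETCH guard are
 * hypothetical, the channel parameters assigned here are placeholder values,
 * and the IBT channel handle (ch_chan) is assumed to have been created by
 * the real channel setup code before any rwqes are posted.
 */
#ifdef	EIB_CHAN_USAGE_SKETCH
static int
eib_chan_usage_sketch(eib_t *ss)
{
	eib_chan_t *chan;
	uint_t n_posted = 0;

	/*
	 * Allocate the channel state and its locks/cvs.  In the real driver
	 * the setup path fills in the channel parameters and ch_chan before
	 * posting; the assignments below are placeholders for illustration.
	 */
	chan = eib_chan_init();

	chan->ch_max_rwqes = 512;	/* placeholder rx queue depth */
	chan->ch_rwqe_bktsz = 256;	/* placeholder per-call bucket size */
	chan->ch_ip_hdr_align = 0;	/* no rx buffer alignment fixup */
	chan->ch_alloc_mp = B_TRUE;	/* wrap rx buffers in mblks */

	/*
	 * Fill the receive queue in EIB_RWR_CHUNK_SZ-sized chunks; the call
	 * succeeds as long as at least one rwqe was posted.
	 */
	if (eib_chan_post_rx(ss, chan, &n_posted) != EIB_E_SUCCESS) {
		eib_chan_fini(chan);
		return (EIB_E_FAILURE);
	}

	/*
	 * Teardown: the real driver drains and reclaims posted rwqes before
	 * freeing the channel; that step is omitted from this sketch.
	 */
	eib_chan_fini(chan);

	return (EIB_E_SUCCESS);
}
#endif	/* EIB_CHAN_USAGE_SKETCH */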