1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Source file containing the implementation of Driver buffer management 29 * and related helper functions 30 */ 31 #include <oce_impl.h> 32 33 static ddi_dma_attr_t oce_dma_buf_attr = { 34 DMA_ATTR_V0, /* version number */ 35 0x0000000000000000ull, /* low address */ 36 0xFFFFFFFFFFFFFFFFull, /* high address */ 37 0x00000000FFFFFFFFull, /* dma counter max */ 38 OCE_DMA_ALIGNMENT, /* alignment */ 39 0x00000FFF, /* burst sizes */ 40 0x00000001, /* minimum transfer size */ 41 0x00000000FFFFFFFFull, /* maximum transfer size */ 42 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */ 43 1, /* scatter/gather list length */ 44 0x00000001, /* granularity */ 45 0 /* DMA flags */ 46 }; 47 48 static ddi_device_acc_attr_t oce_dma_buf_accattr = { 49 DDI_DEVICE_ATTR_V0, 50 DDI_NEVERSWAP_ACC, 51 DDI_STRICTORDER_ACC, 52 }; 53 54 55 /* 56 * function to allocate a dma buffer for mapping memory va-pa 57 * 58 * dev - software handle to device 59 * size - size of the memory to map 60 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING 61 * 62 * 
return pointer to a oce_dma_buf_t structure handling the map 63 * NULL => failure 64 */ 65 oce_dma_buf_t * 66 oce_alloc_dma_buffer(struct oce_dev *dev, 67 uint32_t size, uint32_t flags) 68 { 69 oce_dma_buf_t *dbuf; 70 ddi_dma_cookie_t cookie; 71 uint32_t count; 72 size_t actual_len; 73 int ret = 0; 74 75 ASSERT(size > 0); 76 77 dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_SLEEP); 78 79 /* allocate dma handle */ 80 ret = ddi_dma_alloc_handle(dev->dip, &oce_dma_buf_attr, 81 DDI_DMA_SLEEP, NULL, &dbuf->dma_handle); 82 if (ret != DDI_SUCCESS) { 83 oce_log(dev, CE_WARN, MOD_CONFIG, "%s", 84 "Failed to allocate DMA handle"); 85 goto alloc_fail; 86 } 87 /* allocate the DMA-able memory */ 88 ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr, 89 flags, DDI_DMA_SLEEP, NULL, &dbuf->base, 90 &actual_len, &dbuf->acc_handle); 91 if (ret != DDI_SUCCESS) { 92 oce_log(dev, CE_WARN, MOD_CONFIG, "%s", 93 "Failed to allocate DMA memory"); 94 goto alloc_fail; 95 } 96 97 /* bind handle */ 98 ret = ddi_dma_addr_bind_handle(dbuf->dma_handle, 99 (struct as *)0, dbuf->base, actual_len, 100 DDI_DMA_RDWR | flags, 101 DDI_DMA_SLEEP, NULL, &cookie, &count); 102 if (ret != DDI_DMA_MAPPED) { 103 oce_log(dev, CE_WARN, MOD_CONFIG, "%s", 104 "Failed to bind dma handle"); 105 goto alloc_fail; 106 } 107 bzero(dbuf->base, actual_len); 108 dbuf->addr = cookie.dmac_laddress; 109 dbuf->size = actual_len; 110 /* usable length */ 111 dbuf->len = size; 112 dbuf->num_pages = OCE_NUM_PAGES(size); 113 return (dbuf); 114 alloc_fail: 115 oce_free_dma_buffer(dev, dbuf); 116 return (NULL); 117 } /* oce_dma_alloc_buffer */ 118 119 /* 120 * function to delete a dma buffer 121 * 122 * dev - software handle to device 123 * dbuf - dma obj to delete 124 * 125 * return none 126 */ 127 void 128 oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf) 129 { 130 _NOTE(ARGUNUSED(dev)); 131 132 if (dbuf == NULL) { 133 return; 134 } 135 if (dbuf->dma_handle != NULL) { 136 (void) 
ddi_dma_unbind_handle(dbuf->dma_handle); 137 } 138 if (dbuf->acc_handle != NULL) { 139 ddi_dma_mem_free(&dbuf->acc_handle); 140 } 141 if (dbuf->dma_handle != NULL) { 142 ddi_dma_free_handle(&dbuf->dma_handle); 143 } 144 kmem_free(dbuf, sizeof (oce_dma_buf_t)); 145 } /* oce_free_dma_buffer */ 146 147 /* 148 * function to create a ring buffer 149 * 150 * dev - software handle to the device 151 * num_items - number of items in the ring 152 * item_size - size of an individual item in the ring 153 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING for ring memory 154 * 155 * return pointer to a ring_buffer structure, NULL on failure 156 */ 157 oce_ring_buffer_t * 158 create_ring_buffer(struct oce_dev *dev, 159 uint32_t num_items, uint32_t item_size, uint32_t flags) 160 { 161 oce_ring_buffer_t *ring; 162 uint32_t size; 163 164 /* allocate the ring buffer */ 165 ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_SLEEP); 166 167 /* get the dbuf defining the ring */ 168 size = num_items * item_size; 169 ring->dbuf = oce_alloc_dma_buffer(dev, size, flags); 170 if (ring->dbuf == NULL) { 171 oce_log(dev, CE_WARN, MOD_CONFIG, "%s", 172 "Ring buffer allocation failed"); 173 goto dbuf_fail; 174 } 175 176 /* fill the rest of the ring */ 177 ring->num_items = num_items; 178 ring->item_size = item_size; 179 ring->num_used = 0; 180 return (ring); 181 182 dbuf_fail: 183 kmem_free(ring, sizeof (oce_ring_buffer_t)); 184 return (NULL); 185 } /* create_ring_buffer */ 186 187 /* 188 * function to destroy a ring buffer 189 * 190 * dev - software handle to teh device 191 * ring - the ring buffer to delete 192 * 193 * return none 194 */ 195 void 196 destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring) 197 { 198 ASSERT(dev != NULL); 199 ASSERT(ring != NULL); 200 201 /* free the dbuf associated with the ring */ 202 oce_free_dma_buffer(dev, ring->dbuf); 203 ring->dbuf = NULL; 204 205 /* free the ring itself */ 206 kmem_free(ring, sizeof (oce_ring_buffer_t)); 207 } /* destroy_ring_buffer 
*/


/*
 * function to enable the fma flags
 * fm_caps - FM capability flags
 *
 * return none
 */

void
oce_set_dma_fma_flags(int fm_caps)
{
	/* nothing to configure when the device has no FM capabilities */
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	/* select access-error checking mode for future buffer allocations */
	oce_dma_buf_accattr.devacc_attr_access =
	    DDI_FM_ACC_ERR_CAP(fm_caps) ? DDI_FLAGERR_ACC : DDI_DEFAULT_ACC;

	/* and DMA-handle error reporting likewise */
	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		oce_dma_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		oce_dma_buf_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_dma_fma_flags */