/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved.  */

/*
 * Source file containing the implementation of driver buffer management
 * and related helper functions.
 */
#include <oce_impl.h>

static ddi_dma_attr_t oce_dma_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x00000FFF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	0			/* DMA flags */
};

static ddi_device_acc_attr_t oce_dma_buf_accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC,
};


/*
 * function to allocate a DMA buffer for mapping memory va-pa
 *
 * dev - software handle to device
 * size - size of the memory to map
 * dma_attr - DMA attributes to use; NULL selects the driver defaults
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING
 *
 * return pointer to an oce_dma_buf_t structure describing the mapping,
 *      NULL => failure
 */
oce_dma_buf_t *
oce_alloc_dma_buffer(struct oce_dev *dev,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags)
{
	oce_dma_buf_t  *dbuf;
	ddi_dma_cookie_t cookie;
	uint32_t count;
	size_t actual_len;
	int ret = 0;

	ASSERT(size > 0);
	/* if NULL use default */
	if (dma_attr == NULL) {
		dma_attr = &oce_dma_buf_attr;
	}

	dbuf = kmem_zalloc(sizeof (oce_dma_buf_t), KM_NOSLEEP);
	if (dbuf == NULL) {
		return (NULL);
	}

	/* allocate dma handle */
	ret = ddi_dma_alloc_handle(dev->dip, dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &dbuf->dma_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA handle");
		goto handle_fail;
	}
	/* allocate the DMA-able memory */
	ret = ddi_dma_mem_alloc(dbuf->dma_handle, size, &oce_dma_buf_accattr,
	    flags, DDI_DMA_DONTWAIT, NULL, &dbuf->base,
	    &actual_len, &dbuf->acc_handle);
	if (ret != DDI_SUCCESS) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to allocate DMA memory");
		goto alloc_fail;
	}

	/* bind handle */
	ret = ddi_dma_addr_bind_handle(dbuf->dma_handle,
	    (struct as *)0, dbuf->base, actual_len,
	    DDI_DMA_RDWR | flags,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count);
	if (ret != DDI_DMA_MAPPED) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Failed to bind dma handle");
		goto bind_fail;
	}
	bzero(dbuf->base, actual_len);
	dbuf->addr = cookie.dmac_laddress;
	dbuf->size = actual_len;
	/* usable length */
	dbuf->len  = size;
	dbuf->num_pages = OCE_NUM_PAGES(size);
	return (dbuf);

bind_fail:
	ddi_dma_mem_free(&dbuf->acc_handle);
alloc_fail:
	ddi_dma_free_handle(&dbuf->dma_handle);
handle_fail:
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
	return (NULL);
} /* oce_alloc_dma_buffer */
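
/*
 * Illustrative sketch, kept under #if 0 so it is not compiled into the
 * driver: a caller needing non-default DMA constraints can pass its own
 * ddi_dma_attr_t instead of NULL. The helper name and the alignment value
 * below are hypothetical; oce_alloc_dma_buffer() and oce_dma_buf_attr are
 * the ones defined in this file.
 */
#if 0
static oce_dma_buf_t *
oce_example_alloc_aligned(struct oce_dev *dev, uint32_t size)
{
	ddi_dma_attr_t attr;

	/* start from the driver defaults and tighten the alignment */
	attr = oce_dma_buf_attr;
	attr.dma_attr_align = 4096;	/* hypothetical alignment requirement */

	/* consistent mapping; dma_attr_sgllen == 1 keeps it to one cookie */
	return (oce_alloc_dma_buffer(dev, size, &attr, DDI_DMA_CONSISTENT));
}
#endif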

/*
 * function to delete a dma buffer
 *
 * dev - software handle to device
 * dbuf - DMA buffer object to delete
 *
 * return none
 */
void
oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf)
{
	_NOTE(ARGUNUSED(dev));

	if (dbuf == NULL) {
		return;
	}
	if (dbuf->dma_handle != NULL) {
		(void) ddi_dma_unbind_handle(dbuf->dma_handle);
	}
	if (dbuf->acc_handle != NULL) {
		ddi_dma_mem_free(&dbuf->acc_handle);
	}
	if (dbuf->dma_handle != NULL) {
		ddi_dma_free_handle(&dbuf->dma_handle);
	}
	kmem_free(dbuf, sizeof (oce_dma_buf_t));
} /* oce_free_dma_buffer */
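
/*
 * Illustrative sketch, kept under #if 0 so it is not compiled into the
 * driver: the typical lifecycle of a buffer returned by
 * oce_alloc_dma_buffer(). The function name, payload and length are
 * hypothetical; dbuf->base, dbuf->addr and dbuf->dma_handle are the
 * fields filled in by the allocation routine above.
 */
#if 0
static int
oce_example_dma_lifecycle(struct oce_dev *dev, void *payload, uint32_t len)
{
	oce_dma_buf_t *dbuf;

	/* default attributes (NULL) with a consistent mapping */
	dbuf = oce_alloc_dma_buffer(dev, len, NULL, DDI_DMA_CONSISTENT);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* copy the payload into the kernel virtual mapping */
	bcopy(payload, dbuf->base, len);

	/* flush CPU writes before handing dbuf->addr to the device */
	(void) ddi_dma_sync(dbuf->dma_handle, 0, len, DDI_DMA_SYNC_FORDEV);

	/* ... post dbuf->addr (the device-visible address) to hardware ... */

	/* unbind, free the DMA memory and handle, then free the wrapper */
	oce_free_dma_buffer(dev, dbuf);
	return (DDI_SUCCESS);
}
#endif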

/*
 * function to create a ring buffer
 *
 * dev - software handle to the device
 * num_items - number of items in the ring
 * item_size - size of an individual item in the ring
 * flags - DDI_DMA_CONSISTENT/DDI_DMA_STREAMING for ring memory
 *
 * return pointer to a ring_buffer structure, NULL on failure
 */
oce_ring_buffer_t *
create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size, uint32_t flags)
{
	oce_ring_buffer_t *ring;
	uint32_t size;

	/* allocate the ring buffer */
	ring = kmem_zalloc(sizeof (oce_ring_buffer_t), KM_NOSLEEP);
	if (ring == NULL) {
		return (NULL);
	}

	/* get the dbuf defining the ring */
	size = num_items * item_size;
	ring->dbuf = oce_alloc_dma_buffer(dev, size, NULL, flags);
	if (ring->dbuf == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "Ring buffer allocation failed");
		goto dbuf_fail;
	}

	/* fill the rest of the ring */
	ring->num_items = num_items;
	ring->item_size = item_size;
	ring->num_used  = 0;
	return (ring);

dbuf_fail:
	kmem_free(ring, sizeof (oce_ring_buffer_t));
	return (NULL);
} /* create_ring_buffer */

/*
 * function to destroy a ring buffer
 *
 * dev - software handle to the device
 * ring - the ring buffer to delete
 *
 * return none
 */
void
destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring)
{
	ASSERT(dev != NULL);
	ASSERT(ring != NULL);

	/* free the dbuf associated with the ring */
	oce_free_dma_buffer(dev, ring->dbuf);
	ring->dbuf = NULL;

	/* free the ring itself */
	kmem_free(ring, sizeof (oce_ring_buffer_t));
} /* destroy_ring_buffer */
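
/*
 * Illustrative sketch, kept under #if 0 so it is not compiled into the
 * driver: creating a ring, locating one item inside its backing DMA
 * buffer, and tearing the ring down. The item count, item size and
 * function name are hypothetical; num_items, item_size and dbuf are the
 * oce_ring_buffer_t fields initialized by create_ring_buffer() above.
 */
#if 0
static void
oce_example_ring_usage(struct oce_dev *dev)
{
	oce_ring_buffer_t *ring;
	caddr_t item;

	/* 256 descriptors of 16 bytes each, backed by consistent memory */
	ring = create_ring_buffer(dev, 256, 16, DDI_DMA_CONSISTENT);
	if (ring == NULL) {
		return;
	}

	/* the i-th item lives at byte offset (i * item_size) in the dbuf */
	item = ring->dbuf->base + (10 * ring->item_size);
	bzero(item, ring->item_size);

	/* release the backing dbuf and then the ring descriptor itself */
	destroy_ring_buffer(dev, ring);
}
#endif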


/*
 * function to set the FMA flags in the DMA and access attributes
 *
 * fm_caps - FM capability flags
 *
 * return none
 */

void
oce_set_dma_fma_flags(int fm_caps)
{
	if (fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	oce_dma_buf_accattr.devacc_attr_access = DDI_DEFAULT_ACC;

	if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
		oce_dma_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		oce_dma_buf_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}
} /* oce_set_dma_fma_flags */
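
/*
 * Illustrative sketch, kept under #if 0 so it is not compiled into the
 * driver: oce_set_dma_fma_flags() is intended to run once the FMA
 * capabilities are known (typically at attach time), so that buffers
 * allocated afterwards pick up DDI_DMA_FLAGERR and DDI_DEFAULT_ACC as
 * appropriate. The function name, the fm_capabilities argument and the
 * 4 KB size are hypothetical.
 */
#if 0
static oce_dma_buf_t *
oce_example_attach_alloc(struct oce_dev *dev, int fm_capabilities)
{
	/*
	 * Update the shared attributes first: with DDI_FM_NOT_CAPABLE this
	 * is a no-op, otherwise the DMA and access attributes above are
	 * adjusted so that the allocation below inherits them.
	 */
	oce_set_dma_fma_flags(fm_capabilities);

	return (oce_alloc_dma_buffer(dev, 4096, NULL, DDI_DMA_CONSISTENT));
}
#endif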
246