/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#include "xge-os-pal.h"
#include "xgehal-mm.h"
#include "xge-debug.h"

/*
 * __hal_mempool_grow
 *
 * Grows the mempool by up to %num_allocate memblocks and reports the
 * number actually allocated through %num_allocated.
 */
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
        int *num_allocated)
{
    int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
    int n_items = mempool->items_per_memblock;

    *num_allocated = 0;

    if ((mempool->memblocks_allocated + num_allocate) >
            mempool->memblocks_max) {
        xge_debug_mm(XGE_ERR, "%s",
            "__hal_mempool_grow: cannot grow any more");
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    for (i = mempool->memblocks_allocated;
         i < mempool->memblocks_allocated + num_allocate; i++) {
        int j;
        int is_last =
            ((mempool->memblocks_allocated + num_allocate - 1) == i);
        xge_hal_mempool_dma_t *dma_object =
            mempool->memblocks_dma_arr + i;
        void *the_memblock;
        int dma_flags;

        dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
        dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
        dma_flags |= XGE_OS_DMA_STREAMING;
#endif

        /* allocate DMA-capable memblock */
        mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
                mempool->memblock_size,
                dma_flags,
                &dma_object->handle,
                &dma_object->acc_handle);
        if (mempool->memblocks_arr[i] == NULL) {
            xge_debug_mm(XGE_ERR,
                "memblock[%d]: out of DMA memory", i);
            return XGE_HAL_ERR_OUT_OF_MEMORY;
        }
        xge_os_memzero(mempool->memblocks_arr[i],
            mempool->memblock_size);
        the_memblock = mempool->memblocks_arr[i];

        /* allocate the memblock's private part. Each DMA memblock
         * has space reserved for the items' private usage, if the
         * mempool's user requested it. Each time the mempool grows,
         * it allocates a new memblock and its private part at once,
         * which helps to minimize memory usage. */
        mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
                mempool->items_priv_size * n_items);
        if (mempool->memblocks_priv_arr[i] == NULL) {
            xge_os_dma_free(mempool->pdev,
                the_memblock,
                mempool->memblock_size,
                &dma_object->acc_handle,
                &dma_object->handle);
            xge_debug_mm(XGE_ERR,
                "memblock_priv[%d]: out of virtual memory, "
                "requested %d(%d:%d) bytes", i,
                mempool->items_priv_size * n_items,
                mempool->items_priv_size, n_items);
            return XGE_HAL_ERR_OUT_OF_MEMORY;
        }
        xge_os_memzero(mempool->memblocks_priv_arr[i],
            mempool->items_priv_size * n_items);

        /* map memblock to physical memory */
        dma_object->addr = xge_os_dma_map(mempool->pdev,
                dma_object->handle,
                the_memblock,
                mempool->memblock_size,
                XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
                XGE_OS_DMA_CONSISTENT
#else
                XGE_OS_DMA_STREAMING
#endif
                );
        if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
            xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
                mempool->items_priv_size * n_items);
            xge_os_dma_free(mempool->pdev,
                the_memblock,
                mempool->memblock_size,
                &dma_object->acc_handle,
                &dma_object->handle);
            return XGE_HAL_ERR_OUT_OF_MAPPING;
        }

        /* fill the items hash array */
        for (j = 0; j < n_items; j++) {
            int index = i * n_items + j;

            if (first_time && index >= mempool->items_initial) {
                break;
            }

            mempool->items_arr[index] =
                ((char *)the_memblock + j * mempool->item_size);

            /* let the caller do additional per-item setup */
            if (mempool->item_func_alloc != NULL) {
                xge_hal_status_e status;

                if ((status = mempool->item_func_alloc(
                        mempool,
                        the_memblock,
                        i,
                        dma_object,
                        mempool->items_arr[index],
                        index,
                        is_last,
                        mempool->userdata)) != XGE_HAL_OK) {

                    if (mempool->item_func_free != NULL) {
                        int k;

                        for (k = 0; k < j; k++) {

                            index = i * n_items + k;

                            (void)mempool->item_func_free(
                                mempool, the_memblock,
                                i, dma_object,
                                mempool->items_arr[index],
                                index, is_last,
                                mempool->userdata);
                        }
                    }

                    xge_os_free(mempool->pdev,
                        mempool->memblocks_priv_arr[i],
                        mempool->items_priv_size * n_items);
                    xge_os_dma_unmap(mempool->pdev,
                        dma_object->handle,
                        dma_object->addr,
                        mempool->memblock_size,
                        XGE_OS_DMA_DIR_BIDIRECTIONAL);
                    xge_os_dma_free(mempool->pdev,
                        the_memblock,
                        mempool->memblock_size,
                        &dma_object->acc_handle,
                        &dma_object->handle);
                    return status;
                }
            }

            mempool->items_current = index + 1;
        }

        xge_debug_mm(XGE_TRACE,
            "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
            "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
            (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
            (unsigned long long)dma_object->addr);

        (*num_allocated)++;

        if (first_time && mempool->items_current ==
                mempool->items_initial) {
            break;
        }
    }

    /* increment the actual number of allocated memblocks */
    mempool->memblocks_allocated += *num_allocated;

    return XGE_HAL_OK;
}
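
/*
 * A minimal usage sketch for __hal_mempool_grow() (illustrative only, not
 * part of the original driver). "pool" is assumed to be a mempool
 * previously returned by __hal_mempool_create().
 *
 *     int got = 0;
 *     xge_hal_status_e rc;
 *
 *     rc = __hal_mempool_grow(pool, 1, &got);
 *     if (rc != XGE_HAL_OK) {
 *         // memblocks_max reached, or the allocation/mapping failed
 *     } else {
 *         // "got" memblocks were added; pool->items_current now
 *         // reflects the newly initialized items
 *     }
 */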

/*
 * __hal_mempool_create
 * @pdev: PCI device handle used for all OS memory and DMA allocations.
 * @memblock_size: Size in bytes of one DMA-able memory block.
 * @item_size: Size in bytes of one item.
 * @items_priv_size: Per-item private (caller-owned) area size in bytes.
 * @items_initial: Number of items to pre-allocate at create time.
 * @items_max: Maximum number of items the pool can ever hold.
 * @item_func_alloc: Optional per-item callback invoked when an item is
 *      carved out of a memblock.
 * @item_func_free: Optional per-item callback invoked when an item is
 *      released.
 * @userdata: Opaque context passed to the per-item callbacks.
 *
 * Creates a memory pool object. The pool may grow but will never shrink.
 * It consists of dynamically allocated blocks, together large enough to
 * hold %items_initial items. The memory is DMA-able, but the client must
 * map/unmap it before interoperating with the device.
 * See also: xge_os_dma_map(), xge_os_dma_unmap(), xge_hal_status_e{}.
 */
xge_hal_mempool_t *
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
        int items_priv_size, int items_initial, int items_max,
        xge_hal_mempool_item_f item_func_alloc,
        xge_hal_mempool_item_f item_func_free, void *userdata)
{
    xge_hal_status_e status;
    int memblocks_to_allocate;
    xge_hal_mempool_t *mempool;
    int allocated;

    if (memblock_size < item_size) {
        xge_debug_mm(XGE_ERR,
            "memblock_size %d < item_size %d: misconfiguration",
            memblock_size, item_size);
        return NULL;
    }

    mempool = (xge_hal_mempool_t *)
        xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
    if (mempool == NULL) {
        xge_debug_mm(XGE_ERR, "mempool allocation failure");
        return NULL;
    }
    xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));

    mempool->pdev = pdev;
    mempool->memblock_size = memblock_size;
    mempool->items_max = items_max;
    mempool->items_initial = items_initial;
    mempool->item_size = item_size;
    mempool->items_priv_size = items_priv_size;
    mempool->item_func_alloc = item_func_alloc;
    mempool->item_func_free = item_func_free;
    mempool->userdata = userdata;

    mempool->memblocks_allocated = 0;

    mempool->items_per_memblock = memblock_size / item_size;

    mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
        mempool->items_per_memblock;

    /* allocate array of memblocks */
    mempool->memblocks_arr = (void **) xge_os_malloc(mempool->pdev,
        sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_arr,
        sizeof(void *) * mempool->memblocks_max);

    /* allocate array of private parts of items per memblock */
    mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
        sizeof(void *) * mempool->memblocks_max);
    if (mempool->memblocks_priv_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_priv_arr,
        sizeof(void *) * mempool->memblocks_max);

    /* allocate array of memblocks DMA objects */
    mempool->memblocks_dma_arr =
        (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
            sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
    if (mempool->memblocks_dma_arr == NULL) {
        xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->memblocks_dma_arr,
        sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

    /* allocate hash array of items */
    mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
        sizeof(void *) * mempool->items_max);
    if (mempool->items_arr == NULL) {
        xge_debug_mm(XGE_ERR, "items_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);

    mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
        sizeof(void *) * mempool->items_max);
    if (mempool->shadow_items_arr == NULL) {
        xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }
    xge_os_memzero(mempool->shadow_items_arr,
        sizeof(void *) * mempool->items_max);

    /* calculate initial number of memblocks */
    memblocks_to_allocate = (mempool->items_initial +
        mempool->items_per_memblock - 1) /
        mempool->items_per_memblock;

    xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
        "%d items per memblock", memblocks_to_allocate,
        mempool->items_per_memblock);

    /* pre-allocate the mempool */
    status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
    xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
        sizeof(void *) * mempool->items_max);
    if (status != XGE_HAL_OK) {
        xge_debug_mm(XGE_ERR, "mempool_grow failure");
        __hal_mempool_destroy(mempool);
        return NULL;
    }

    xge_debug_mm(XGE_TRACE,
        "total: allocated %dk of DMA-capable memory",
        mempool->memblock_size * allocated / 1024);

    return mempool;
}
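
/*
 * Sizing example (illustrative only, not from the original driver): with
 * memblock_size = 8192 and item_size = 128, each memblock holds
 * 8192 / 128 = 64 items; items_max = 1000 then rounds up to
 * memblocks_max = (1000 + 64 - 1) / 64 = 16 memblocks, and
 * items_initial = 256 makes __hal_mempool_create() pre-allocate
 * (256 + 64 - 1) / 64 = 4 memblocks.
 */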

/*
 * __hal_mempool_destroy
 */
void
__hal_mempool_destroy(xge_hal_mempool_t *mempool)
{
    int i, j;

    for (i = 0; i < mempool->memblocks_allocated; i++) {
        xge_hal_mempool_dma_t *dma_object;

        xge_assert(mempool->memblocks_arr[i]);
        xge_assert(mempool->memblocks_dma_arr + i);

        dma_object = mempool->memblocks_dma_arr + i;

        for (j = 0; j < mempool->items_per_memblock; j++) {
            int index = i * mempool->items_per_memblock + j;

            /* skip the last, partially filled (if any) memblock */
            if (index >= mempool->items_current) {
                break;
            }

            /* let the caller do additional per-item cleanup */
            if (mempool->item_func_free != NULL) {

                mempool->item_func_free(mempool,
                    mempool->memblocks_arr[i],
                    i, dma_object,
                    mempool->shadow_items_arr[index],
                    index, /* unused */ -1,
                    mempool->userdata);
            }
        }

        xge_os_dma_unmap(mempool->pdev,
            dma_object->handle, dma_object->addr,
            mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);

        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
            mempool->items_priv_size * mempool->items_per_memblock);

        xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
            mempool->memblock_size, &dma_object->acc_handle,
            &dma_object->handle);
    }

    if (mempool->items_arr) {
        xge_os_free(mempool->pdev, mempool->items_arr,
            sizeof(void *) * mempool->items_max);
    }

    if (mempool->shadow_items_arr) {
        xge_os_free(mempool->pdev, mempool->shadow_items_arr,
            sizeof(void *) * mempool->items_max);
    }

    if (mempool->memblocks_dma_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
            sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
    }

    if (mempool->memblocks_priv_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
            sizeof(void *) * mempool->memblocks_max);
    }

    if (mempool->memblocks_arr) {
        xge_os_free(mempool->pdev, mempool->memblocks_arr,
            sizeof(void *) * mempool->memblocks_max);
    }

    xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}
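
/*
 * A minimal create/destroy usage sketch (illustrative only, not part of
 * the original driver). The exact xge_hal_mempool_item_f signature is
 * assumed from the call sites above; "my_pdev" and "my_item_init" are
 * hypothetical.
 *
 *     static xge_hal_status_e
 *     my_item_init(xge_hal_mempool_t *pool, void *memblock,
 *             int memblock_index, xge_hal_mempool_dma_t *dma_object,
 *             void *item, int index, int is_last, void *userdata)
 *     {
 *         // e.g. record the item's DMA address in its private area
 *         return XGE_HAL_OK;
 *     }
 *
 *     xge_hal_mempool_t *pool;
 *
 *     pool = __hal_mempool_create(my_pdev,
 *             8192,              // memblock_size
 *             128,               // item_size
 *             32,                // items_priv_size
 *             256,               // items_initial
 *             1000,              // items_max
 *             my_item_init,      // item_func_alloc
 *             NULL,              // item_func_free
 *             NULL);             // userdata
 *     if (pool == NULL) {
 *         // out of memory or misconfiguration
 *     }
 *     ...
 *     __hal_mempool_destroy(pool);
 */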

#ifdef XGEHAL_RNIC

/*
 * __hal_allocate_dma_register
 *
 * Allocates and maps DMA-able memory for a register.
 */
xge_hal_status_e
__hal_allocate_dma_register(pci_dev_h pdev, int size,
        void **dma_register, xge_hal_mempool_dma_t *dma_object)
{
    int dma_flags;

    dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
    dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
    dma_flags |= XGE_OS_DMA_STREAMING;
#endif

    xge_os_memzero(dma_object, sizeof(xge_hal_mempool_dma_t));

    /* allocate DMA-capable memblock */
    *dma_register = xge_os_dma_malloc(pdev,
            size,
            dma_flags,
            &dma_object->handle,
            &dma_object->acc_handle);
    if (*dma_register == NULL) {
        xge_debug_mm(XGE_ERR, "dma_register: out of DMA memory");
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    xge_os_memzero(*dma_register, size);

    /* map memblock to physical memory */
    dma_object->addr = xge_os_dma_map(pdev,
            dma_object->handle,
            *dma_register,
            size,
            XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
            XGE_OS_DMA_CONSISTENT
#else
            XGE_OS_DMA_STREAMING
#endif
            );
    if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
        xge_os_dma_free(pdev,
            *dma_register,
            size,
            &dma_object->acc_handle,
            &dma_object->handle);
        return XGE_HAL_ERR_OUT_OF_MAPPING;
    }

    xge_debug_mm(XGE_TRACE,
        "dmareg: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
        "dma_addr 0x"XGE_OS_LLXFMT, size / 1024,
        (unsigned long long)(ulong_t)*dma_register,
        (unsigned long long)dma_object->addr);

    return XGE_HAL_OK;
}

/*
 * __hal_free_dma_register
 */
void
__hal_free_dma_register(pci_dev_h pdev, int size,
        void *dma_register, xge_hal_mempool_dma_t *dma_object)
{
    xge_os_dma_unmap(pdev,
        dma_object->handle, dma_object->addr,
        size, XGE_OS_DMA_DIR_BIDIRECTIONAL);

    xge_os_dma_free(pdev, dma_register, size,
        &dma_object->acc_handle, &dma_object->handle);
}

#endif
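
/*
 * A minimal usage sketch for the XGEHAL_RNIC helpers above (illustrative
 * only, not part of the original driver, and applicable only when
 * XGEHAL_RNIC is defined); "my_pdev" is hypothetical.
 *
 *     xge_hal_mempool_dma_t dma_obj;
 *     void *reg_block = NULL;
 *
 *     if (__hal_allocate_dma_register(my_pdev, 4096, &reg_block,
 *             &dma_obj) == XGE_HAL_OK) {
 *         // program the device with dma_obj.addr, use reg_block ...
 *         __hal_free_dma_register(my_pdev, 4096, reg_block, &dma_obj);
 *     }
 */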