/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"

/*
 * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
 * filled in the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}

/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page_size is the largest possible page size.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by
	 * the page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}
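
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): a typical caller sizes the pas array with
 * ib_umem_num_dma_blocks() for the chosen page_size and then fills it with
 * mlx5_ib_populate_pas() above. The helper name example_alloc_and_fill_pas()
 * is hypothetical, and kvcalloc() is assumed to be reachable through the
 * existing includes.
 */
#if 0
static __be64 *example_alloc_and_fill_pas(struct ib_umem *umem,
					  unsigned long page_size,
					  u64 access_flags)
{
	/* One __be64 entry per DMA block of page_size bytes. */
	size_t npas = ib_umem_num_dma_blocks(umem, page_size);
	__be64 *pas;

	pas = kvcalloc(npas, sizeof(*pas), GFP_KERNEL);
	if (!pas)
		return NULL;

	/* Each entry is a big-endian block address OR'd with access_flags. */
	mlx5_ib_populate_pas(umem, page_size, pas, access_flags);
	return pas;
}
#endif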