/*-
 * Copyright (c) 2013-2020, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <dev/mlx5/mlx5_ib/mlx5_ib.h>

/* Scan a umem and find the largest page size at which the region can be
 * described as a set of aligned, contiguous compound pages.
 *
 * @umem: umem object to scan
 * @addr: ib virtual address requested by the user
 * @max_page_shift: high limit for page_shift - 0 means no limit
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont, int *order)
{
	unsigned long tmp;
	unsigned long m;
	u64 base = ~0, p = 0;
	u64 len, pfn;
	int i = 0;
	struct scatterlist *sg;
	int entry;

	/* The alignment of the requested virtual address bounds how large
	 * the compound page shift can be.
	 */
	addr = addr >> PAGE_SHIFT;
	tmp = (unsigned long)addr;
	m = find_first_bit(&tmp, BITS_PER_LONG);
	if (max_page_shift)
		m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
		if (base + p != pfn) {
			/* If either the offset or the new
			 * base is unaligned, update m.
			 */
			tmp = (unsigned long)(pfn | p);
			if (!IS_ALIGNED(tmp, 1 << m))
				m = find_first_bit(&tmp, BITS_PER_LONG);

			base = pfn;
			p = 0;
		}

		p += len;
		i += len;
	}

	if (i) {
		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);

		if (order)
			*order = ilog2(roundup_pow_of_two(i) >> m);

		*ncont = DIV_ROUND_UP(i, (1 << m));
	} else {
		m = 0;

		if (order)
			*order = 0;

		*ncont = 0;
	}
	*shift = PAGE_SHIFT + m;
	*count = i;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Convert an ODP umem DMA address into an MTT entry, translating the ODP
 * permission bits into the corresponding MTT access bits.
 */
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;

	return mtt_entry;
}
#endif
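
/*
 * Illustrative usage sketch (a hedged example, not code from this driver):
 * a memory-registration path would typically call mlx5_ib_cont_pages() on
 * a pinned umem to pick the MTT page size before sizing the translation
 * table.  "umem" and "start" are assumed caller-side names:
 *
 *	int npages, page_shift, ncont, order;
 *
 *	mlx5_ib_cont_pages(umem, start, 0, &npages, &page_shift,
 *			   &ncont, &order);
 *	// The region is now describable as ncont compound pages of
 *	// (1 << page_shift) bytes; npages counts PAGE_SIZE units.
 */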

/*
 * Populate the given array with bus addresses from the umem.
 *
 * dev - mlx5_ib device
 * umem - umem to use to fill the pages
 * page_shift - determines the page size used in the resulting array
 * offset - offset into the umem to start from,
 *	    only implemented for ODP umems
 * num_pages - total number of pages to fill
 * pas - bus addresses array to fill
 * access_flags - access flags to set on all present pages;
 *		  use enum mlx5_ib_mtt_access_flags for this.
 */
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags)
{
	unsigned long umem_page_shift = ilog2(umem->page_size);
	int shift = page_shift - umem_page_shift;
	int mask = (1 << shift) - 1;
	int i, k;
	u64 cur = 0;
	u64 base;
	int len;
	struct scatterlist *sg;
	int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	const bool odp = umem->odp_data != NULL;

	if (odp) {
		WARN_ON(shift != 0);
		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));

		for (i = 0; i < num_pages; ++i) {
			dma_addr_t pa = umem->odp_data->dma_list[offset + i];

			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
		}
		return;
	}
#endif

	i = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> umem_page_shift;
		base = sg_dma_address(sg);
		for (k = 0; k < len; k++) {
			/* Emit an entry only at the start of each
			 * (1 << shift) run of umem pages.
			 */
			if (!(i & mask)) {
				cur = base + (k << umem_page_shift);
				cur |= access_flags;

				pas[i >> shift] = cpu_to_be64(cur);
				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
					    i >> shift, (long long)be64_to_cpu(pas[i >> shift]));
			} else
				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
					    (long long)(base + (k << umem_page_shift)));
			i++;
		}
	}
}

void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags)
{
	__mlx5_ib_populate_pas(dev, umem, page_shift, 0,
			       ib_umem_num_pages(umem), pas,
			       access_flags);
}

int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
	u64 page_size;
	u64 page_mask;
	u64 off_size;
	u64 off_mask;
	u64 buf_off;

	page_size = (u64)1 << page_shift;
	page_mask = page_size - 1;
	buf_off = addr & page_mask;
	off_size = page_size >> 6;
	off_mask = off_size - 1;

	/* The offset is expressed in units of 1/64th of a page, so the
	 * buffer must start on such a boundary.
	 */
	if (buf_off & off_mask)
		return -EINVAL;

	*offset = buf_off >> ilog2(off_size);
	return 0;
}
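
/*
 * Illustrative usage sketch (a hedged example, not code from this driver):
 * a queue-creation path would typically validate the user buffer with
 * mlx5_ib_get_buf_offset() and then fill the translation array with
 * mlx5_ib_populate_pas().  "buf_addr", "page_shift" and "pas" are assumed
 * caller-side names, and the access flags shown are one plausible choice:
 *
 *	u32 offset;
 *
 *	if (mlx5_ib_get_buf_offset(buf_addr, page_shift, &offset))
 *		return -EINVAL;	// not aligned to page_size / 64
 *
 *	mlx5_ib_populate_pas(dev, umem, page_shift, pas,
 *			     MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE);
 */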