/* xref: /freebsd/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c (revision 580744621f33383027108364dcadad718df46ffe) */
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"

/* mlx5_ib_cont_pages - scan a umem for the largest usable compound page size
 * @umem: umem object to scan
 * @addr: ib virtual address requested by the user
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order)
{
	unsigned long tmp;
	unsigned long m;
	int i, k;
	u64 base = 0;
	int p = 0;
	int skip;
	int mask;
	u64 len;
	u64 pfn;
	struct scatterlist *sg;
	int entry;
	unsigned long page_shift = ilog2(umem->page_size);

	/* With ODP we must always match OS page size. */
	if (umem->odp_data) {
		*count = ib_umem_page_count(umem);
		*shift = PAGE_SHIFT;
		*ncont = *count;
		if (order)
			*order = ilog2(roundup_pow_of_two(*count));

		return;
	}

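	/*
	 * A block of 2^m pages is only usable if the user's IB virtual
	 * address is aligned to it; start from the alignment of the
	 * page-index part of the address and narrow m down while
	 * scanning the physical layout below.
	 */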
	addr = addr >> page_shift;
	tmp = (unsigned long)addr;
	m = find_first_bit(&tmp, BITS_PER_LONG);
	skip = 1 << m;
	mask = skip - 1;
	i = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> page_shift;
		pfn = sg_dma_address(sg) >> page_shift;
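		/*
		 * Walk every OS page in this DMA segment, tracking the
		 * largest naturally aligned, physically contiguous block
		 * size that still holds.
		 */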
		for (k = 0; k < len; k++) {
			if (!(i & mask)) {
				tmp = (unsigned long)pfn;
				m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
				skip = 1 << m;
				mask = skip - 1;
				base = pfn;
				p = 0;
			} else {
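				/*
				 * Inside a candidate block: a physical
				 * discontinuity limits the block to the
				 * largest power of two dividing the pages
				 * seen so far, and a new candidate block
				 * starts at this pfn.
				 */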
				if (base + p != pfn) {
					tmp = (unsigned long)p;
					m = find_first_bit(&tmp, BITS_PER_LONG);
					skip = 1 << m;
					mask = skip - 1;
					base = pfn;
					p = 0;
				}
			}
			p++;
			i++;
		}
	}

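	/*
	 * Clamp the block size to the total number of pages and report
	 * the result in units of 2^m OS pages.
	 */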
	if (i) {
		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);

		if (order)
			*order = ilog2(roundup_pow_of_two(i) >> m);

		*ncont = DIV_ROUND_UP(i, (1 << m));
	} else {
		m = 0;

		if (order)
			*order = 0;

		*ncont = 0;
	}
	*shift = page_shift + m;
	*count = i;
}

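/*
 * Typical calling sequence (an illustrative sketch only; the local
 * variable names below are hypothetical, not defined in this file):
 *
 *	int npages, page_shift, ncont, order;
 *
 *	mlx5_ib_cont_pages(umem, virt_addr, &npages, &page_shift,
 *			   &ncont, &order);
 *	... size the device translation table from ncont ...
 *	mlx5_ib_populate_pas(dev, umem, page_shift, pas, access_flags);
 */
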
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
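/*
 * Translate an ODP dma_list entry into an MTT entry: keep the DMA address
 * bits and map the ODP read/write permission bits onto their MTT
 * equivalents.
 */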
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;

	return mtt_entry;
}
#endif

/*
 * Populate the given array with bus addresses from the umem.
 *
 * dev - mlx5_ib device
 * umem - umem to use to fill the pages
 * page_shift - determines the page size used in the resulting array
 * offset - offset into the umem to start from,
 *          only implemented for ODP umems
 * num_pages - total number of pages to fill
 * pas - bus addresses array to fill
 * access_flags - access flags to set on all present pages;
 *                use enum mlx5_ib_mtt_access_flags for this.
 */
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags)
{
	unsigned long umem_page_shift = ilog2(umem->page_size);
	int shift = page_shift - umem_page_shift;
	int mask = (1 << shift) - 1;
	int i, k;
	u64 cur = 0;
	u64 base;
	int len;
	struct scatterlist *sg;
	int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	const bool odp = umem->odp_data != NULL;

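	/*
	 * For ODP the page size always matches the OS page size (see
	 * mlx5_ib_cont_pages()), so each dma_list entry maps exactly one
	 * page and is translated directly into an MTT entry.
	 */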
	if (odp) {
		WARN_ON(shift != 0);
		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));

		for (i = 0; i < num_pages; ++i) {
			dma_addr_t pa = umem->odp_data->dma_list[offset + i];

			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
		}
		return;
	}
#endif

	i = 0;
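	/*
	 * Emit one address per compound page: only every 2^shift'th OS
	 * page opens a new entry, with the access flags OR'ed into the
	 * low bits of the address.
	 */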
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> umem_page_shift;
		base = sg_dma_address(sg);
		for (k = 0; k < len; k++) {
			if (!(i & mask)) {
				cur = base + (k << umem_page_shift);
				cur |= access_flags;

				pas[i >> shift] = cpu_to_be64(cur);
				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
					    i >> shift, (long long)be64_to_cpu(pas[i >> shift]));
			} else
				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
					    (long long)(base + (k << umem_page_shift)));
			i++;
		}
	}
}

void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags)
{
	return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
				      ib_umem_num_pages(umem), pas,
				      access_flags);
}
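
/*
 * mlx5_ib_get_buf_offset - compute the offset of a buffer within its page
 * in units of page_size / 64, the granularity consumed by the device's
 * page_offset fields; returns -EINVAL if the offset is not a multiple of
 * that unit.
 */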
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
	u64 page_size;
	u64 page_mask;
	u64 off_size;
	u64 off_mask;
	u64 buf_off;

	page_size = (u64)1 << page_shift;
	page_mask = page_size - 1;
	buf_off = addr & page_mask;
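	/* The offset must be expressible in units of 1/64 of a page. */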
	off_size = page_size >> 6;
	off_mask = off_size - 1;

	if (buf_off & off_mask)
		return -EINVAL;

	*offset = buf_off >> ilog2(off_size);
	return 0;
}