xref: /freebsd/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c (revision 3806950135d2c8633ec0764e8807eacc87cf3e10)
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

CTASSERT(sizeof(uintptr_t) == sizeof(unsigned long));

/* Scan the umem and find the largest contiguous, aligned block size
 * (compound page size) that can be used to describe it.
 *
 * @umem: umem object to scan
 * @addr: ib virtual address requested by the user
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compound pages
 * @order: log2 of the number of compound pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order)
{
	unsigned long tmp;
	unsigned long m;
	int i, k;
	u64 base = 0;
	int p = 0;
	int skip;
	int mask;
	u64 len;
	u64 pfn;
	struct scatterlist *sg;
	int entry;
	unsigned long page_shift = ilog2(umem->page_size);

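	/*
	 * Seed the block size with the alignment of the requested I/O
	 * virtual address: the lowest set bit of (addr >> page_shift)
	 * bounds how many umem pages a single block may merge.
	 */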
	addr = addr >> page_shift;
	tmp = (uintptr_t)addr;
	m = find_first_bit(&tmp, 8 * sizeof(tmp));
	skip = 1 << m;
	mask = skip - 1;
	i = 0;

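	/*
	 * Walk the DMA-mapped scatterlist, counting umem pages in i and
	 * shrinking m whenever a block starts on a less aligned pfn or the
	 * pfn run breaks in the middle of a block.
	 */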
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> page_shift;
		pfn = sg_dma_address(sg) >> page_shift;
		for (k = 0; k < len; k++) {
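			/*
			 * At a block boundary (i is a multiple of the current
			 * block size) start a new candidate block; the
			 * alignment of its first pfn may further limit the
			 * block size.
			 */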
			if (!(i & mask)) {
				tmp = (uintptr_t)pfn;
				m = min_t(unsigned long, m,
					  find_first_bit(&tmp, 8 * sizeof(tmp)));
				skip = 1 << m;
				mask = skip - 1;
				base = pfn;
				p = 0;
			} else {
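				/*
				 * Inside a block: if the pfn run breaks, only
				 * p pages were contiguous, so drop the block
				 * size to the largest power of two dividing p
				 * and restart the block at this pfn.
				 */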
				if (base + p != pfn) {
					tmp = (uintptr_t)p;
					m = find_first_bit(&tmp, 8 * sizeof(tmp));
					skip = 1 << m;
					mask = skip - 1;
					base = pfn;
					p = 0;
				}
			}
			p++;
			i++;
		}
	}

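	/*
	 * i now holds the total number of umem pages.  Clamp m so a single
	 * block cannot exceed the region (rounded up to a power of two),
	 * then report the block count (*ncont), its log2 (*order), the
	 * resulting page shift and the raw page count.
	 */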
	if (i) {
		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);

		if (order)
			*order = ilog2(roundup_pow_of_two(i) >> m);

		*ncont = DIV_ROUND_UP(i, (1 << m));
	} else {
		m = 0;

		if (order)
			*order = 0;

		*ncont = 0;
	}
	*shift = page_shift + m;
	*count = i;
}

/*
 * Populate the given array with bus addresses from the umem.
 *
 * dev - mlx5_ib device
 * umem - umem to use to fill the pages
 * page_shift - determines the page size used in the resulting array
 * offset - offset into the umem to start from,
 *          only implemented for ODP umems
 * pas - bus addresses array to fill
 * access_flags - access flags to set on all present pages.
 *		  use enum mlx5_ib_mtt_access_flags for this.
 */
static void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset,
			    __be64 *pas, int access_flags)
{
	unsigned long umem_page_shift = ilog2(umem->page_size);
	int shift = page_shift - umem_page_shift;
	int mask = (1 << shift) - 1;
	int i, k;
	u64 cur = 0;
	u64 base;
	int len;
	struct scatterlist *sg;
	int entry;

	i = 0;
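	/*
	 * Walk the scatterlist in umem-page units; every 2^shift umem pages
	 * start a new target page, whose bus address (with the access flags
	 * OR-ed into the low bits) is written to the next pas[] entry.
	 */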
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> umem_page_shift;
		base = sg_dma_address(sg);
		for (k = 0; k < len; k++) {
			if (!(i & mask)) {
				cur = base + (k << umem_page_shift);
				cur |= access_flags;

				pas[i >> shift] = cpu_to_be64(cur);
				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
					    i >> shift, (unsigned long long)
					    be64_to_cpu(pas[i >> shift]));
			} else
				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
					    (unsigned long long)
					    (base + (k << umem_page_shift)));
			i++;
		}
	}
}

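/* Populate @pas for the whole umem, starting at offset zero. */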
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int access_flags)
{
	__mlx5_ib_populate_pas(dev, umem, page_shift, 0, pas, access_flags);
}

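/*
 * Express the offset of @addr within its page (of size 1 << page_shift) in
 * units of 1/64 of that page.  Returns -EINVAL if the offset is not a
 * multiple of this granularity.
 */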
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
{
	u64 page_size;
	u64 page_mask;
	u64 off_size;
	u64 off_mask;
	u64 buf_off;

	page_size = (u64)1 << page_shift;
	page_mask = page_size - 1;
	buf_off = addr & page_mask;
	off_size = page_size >> 6;
	off_mask = off_size - 1;

	if (buf_off & off_mask)
		return -EINVAL;

	*offset = (u32)(buf_off >> ilog2(off_size));
	return 0;
}
188