/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"

/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}

/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int nchunks;
	int ret;
	int entry;
	struct scatterlist *sg;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	umem = ib_umem_get(pd->uobject->context, start,
			   length, access_flags, 0);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

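	/* Count the PAGE_SIZE-sized chunks backing the DMA-mapped umem. */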
	nchunks = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
		nchunks += sg_dma_len(sg) >> PAGE_SHIFT;

	if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 nchunks);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = nchunks;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}

/**
 * pvrdma_alloc_mr - allocate a memory region
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise an ERR_PTR-encoded errno.
 */
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}

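/*
 * For context, a typical fast-registration flow as seen from an upper-layer
 * caller (a hedged sketch; the caller-side setup of pd, sg and nents is
 * assumed and not part of this driver):
 *
 *	struct ib_mr *frmr;
 *	int n;
 *
 *	frmr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);  // pvrdma_alloc_mr
 *	n = ib_map_mr_sg(frmr, sg, nents, NULL, PAGE_SIZE); // pvrdma_map_mr_sg
 *	// ... post an IB_WR_REG_MR work request to activate the mapping,
 *	// and eventually ib_dereg_mr(frmr) -> pvrdma_dereg_mr.
 */
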
/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 *
 * @return: 0 on success.
 */
int pvrdma_dereg_mr(struct ib_mr *ibmr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

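	/* Release host-side resources even if the device command failed. */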
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}

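/*
 * pvrdma_set_page - ib_sg_to_pages() callback; records one page address in
 * the MR's page list, or fails once the list is full.
 */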
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

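/**
 * pvrdma_map_mr_sg - map a scatterlist into a fast-register memory region
 * @ibmr: memory region
 * @sg: scatterlist to map
 * @sg_nents: number of entries in the scatterlist
 * @sg_offset: offset in bytes into the first scatterlist entry
 *
 * @return: number of scatterlist entries mapped, otherwise a negative errno.
 */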
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}