xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/mem.c (revision 6574b8ed19b093f0af09501d2c9676c28993cb97)
/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

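/*
 * Write len bytes at data (or zeroes if data is NULL) into adapter memory
 * at addr (expressed in 32-byte units).  The transfer is split into
 * FW_ULPTX_WR work requests carrying at most C4IW_MAX_INLINE_SIZE bytes of
 * inline data each; only the last one requests a completion, which we then
 * wait for.
 */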
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
	else
		cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (-ENOMEM);
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
						    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
				       V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
						      16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx)
			return -ENOMEM;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
			V_FW_RI_TPTE_STAGSTATE(stag_state) |
			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->adap->vres.stag.start >> 5),
				sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

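/*
 * Copy pbl_size page-list entries (8 bytes each) into adapter memory at
 * byte address pbl_addr within the PBL region.
 */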
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	     __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

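/*
 * Clear the TPT entry for stag and release the stag index back to the
 * TPT resource table.
 */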
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

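/*
 * Allocate a memory-window stag (FW_RI_STAG_MW) owned by pdid and write
 * its TPT entry.
 */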
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

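/*
 * Allocate an NSMR stag for pdid and write its TPT entry in the invalid
 * state; the actual mapping is supplied later (e.g. by a fast-register WR).
 */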
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

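/*
 * Record the new stag in the MR, mark it valid and insert it into the
 * device's mmid table (keyed by stag >> 8).
 */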
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

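/*
 * Allocate a stag, write a valid NSMR TPT entry from mhp's attributes and
 * finish the registration; the TPT entry is torn down again if that last
 * step fails.
 */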
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
		      struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	return ret;
}

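/*
 * Rewrite the TPT entry for an existing stag from the updated attributes
 * in mhp.  Fails if the new mapping needs more PBL entries than were
 * originally allocated.
 */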
static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			  struct c4iw_mr *mhp, int shift, int npages)
{
	u32 stag;
	int ret;

	if (npages > mhp->attr.pbl_size)
		return -ENOMEM;

	stag = mhp->attr.stag;
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);

	return ret;
}

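/*
 * Reserve room for npages 8-byte page-list entries in the adapter's PBL
 * pool and record the address and size in the MR attributes.
 */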
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

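/*
 * Validate a physical buffer list, pick the largest page shift that covers
 * every buffer and build a big-endian page list for it.  Returns the total
 * length, page count, shift and a kmalloc'd page list the caller must free.
 */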
static int build_phys_page_list(struct ib_phys_buf *buffer_list,
				int num_phys_buf, u64 *iova_start,
				u64 *total_size, int *npages,
				int *shift, __be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGBE,
	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
	    (unsigned long long)*total_size, *npages);

	return 0;
}

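/*
 * Re-register a physical MR, optionally changing its PD, access rights
 * and/or translation.  The new TPT entry is written from a scratch copy of
 * the MR; the in-memory attributes are updated only after that succeeds.
 */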
int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
			     int num_phys_buf, int acc, u64 *iova_start)
{
	struct c4iw_mr mh, *mhp;
	struct c4iw_pd *php;
	struct c4iw_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(mr);
	rhp = mhp->rhp;
	php = to_c4iw_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_c4iw_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
					 IB_ACCESS_MW_BIND;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
						iova_start,
						&total_size, &npages,
						&shift, &page_list);
		if (ret)
			return ret;
	}

	ret = reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

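/*
 * Register a list of physical buffers: build the page list, allocate and
 * write the PBL, then write the TPT entry.
 */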
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf, int acc, u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
					&total_size, &npages, &shift,
					&page_list);
	if (ret)
		goto err;

	ret = alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;	/* no PBL was allocated, nothing to free */
	}

	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
			     npages);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}

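/*
 * Allocate a DMA MR covering all of memory (VA based, zero offset,
 * length ~0) with the requested access rights.
 */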
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0UL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

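/*
 * Register a userspace memory region: pin the pages with ib_umem_get(),
 * copy their DMA addresses into the PBL one page of entries at a time and
 * then write the TPT entry.
 */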
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata, int mr_id)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = write_pbl(&mhp->rhp->rdev,
					      pages,
					      mhp->attr.pbl_addr + (n << 3), i);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				     mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

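/*
 * Allocate a memory window: get an MW stag, write its TPT entry and insert
 * the handle into the mmid table.
 */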
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

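/* Free a memory window: remove its mmid handle and release its stag. */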
int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

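/*
 * Allocate an MR for fast registration: reserve a PBL of pbl_depth entries
 * and an NSMR stag; the actual mapping is supplied later by a fast-register
 * work request.
 */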
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

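/*
 * Allocate a physically contiguous, 4KB-aligned buffer to hold a
 * fast-register page list and record its bus address.
 */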
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
						     int page_list_len)
{
	struct c4iw_fr_page_list *c4pl;
	struct c4iw_dev *dev = to_c4iw_dev(device);
	bus_addr_t dma_addr;
	int size = sizeof *c4pl + page_list_len * sizeof(u64);

	c4pl = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
	if (c4pl == NULL)
		return ERR_PTR(-ENOMEM);
	dma_addr = vtophys(c4pl);

	pci_unmap_addr_set(c4pl, mapping, dma_addr);
	c4pl->dma_addr = dma_addr;
	c4pl->dev = dev;
	c4pl->size = size;
	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
	c4pl->ibpl.max_page_list_len = page_list_len;

	return &c4pl->ibpl;
}

void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

	contigfree(c4pl, c4pl->size, M_DEVBUF);
}

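/*
 * Deregister an MR: remove its mmid handle, clear the TPT entry and free
 * the PBL, kernel buffer and umem backing it, if any.
 */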
int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
#endif