/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

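/*
 * T4 and T5 adapters cannot handle memory regions of 8GB or larger;
 * later adapter generations are not subject to this check.
 */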
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		length >= 8*1024*1024*1024ULL;
}

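/*
 * Write a 32B-aligned chunk of adapter memory with a single ULP_TX_MEM_WRITE
 * work request whose DSGL points at already DMA-mapped host memory.  When
 * wr_waitp is non-NULL the WR requests a completion and the caller's wait
 * object is used to wait for it; otherwise the WR is fire-and-forget.
 */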
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       struct sk_buff *skb,
				       struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;

	addr &= 0x7FFFFFF;

	if (wr_waitp)
		c4iw_init_wr_wait(wr_waitp);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = __skb_put_zero(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
			(wr_waitp ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			       T5_ULP_MEMIO_ORDER_V(1) |
			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	if (wr_waitp)
		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	else
		ret = c4iw_ofld_send(rdev, skb);
	return ret;
}

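/*
 * Write len bytes into adapter memory using ULP_TX_MEM_WRITE work requests
 * that carry the data as immediate (inline) payload, at most
 * C4IW_MAX_INLINE_SIZE bytes per WR.  A NULL data pointer zeroes the region.
 * Only the final WR requests a completion, which is waited for via wr_waitp.
 */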
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb,
				  struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	pr_debug("addr 0x%x len %u\n", addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(wr_waitp);
	for (i = 0; i < num_wqe; i++) {

		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
					 roundup(copy_len, T4_ULPTX_MIN_IO),
				 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = __skb_put_zero(skb, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		if (i == (num_wqe-1))
			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
						 __func__);
		else
			ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			break;
		skb = NULL;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	return ret;
}

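/*
 * DMA-map the source buffer and push it to adapter memory in aligned chunks
 * of up to T4_ULPTX_MAX_DMA bytes; any remainder at or below inline_threshold
 * is written with the inline path.  Only the last WR carries a completion.
 */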
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						 skb, remain ? NULL : wr_waitp);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
					     wr_waitp);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Write len bytes of data into addr (32B aligned address).
 * If data is NULL, clear len bytes of memory to zero.
 */
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					      wr_waitp);
		goto out;
	}

	if (len <= inline_threshold) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					      wr_waitp);
		goto out;
	}

	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
	if (ret) {
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					      wr_waitp);
	}
out:
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
	int err;
	struct fw_ri_tpte *tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
	if (!tpt)
		return -ENOMEM;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			kfree(tpt);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
		 stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(tpt, 0, sizeof(*tpt));
	else {
		tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO))|
			FW_RI_TPTE_PS_V(page_size));
		tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
		tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt->va_hi = cpu_to_be32((u32)(to >> 32));
		tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt->len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(*tpt), tpt, skb, wr_waitp);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	kfree(tpt);
	return err;
}

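/* Write a physical buffer list (pbl_size 64-bit entries) at pbl_addr. */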
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
	int err;

	pr_debug("*pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		 pbl_addr, rdev->lldi.vr->pbl.start,
		 pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
				wr_waitp);
	return err;
}

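/* Clear the TPT entry for a stag that is being deregistered. */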
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb,
		     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb, wr_waitp);
}

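/* Allocate a fresh stag and write an initial (not yet valid) NSMR TPT entry. */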
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr,
			 struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}

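/*
 * Mark the MR valid, derive lkey/rkey and length from the completed stag,
 * and insert the MR into the device's MR xarray indexed by stag index.
 */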
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	mhp->ibmr.length = mhp->attr.len;
	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}

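/* Write the TPT entry for a host MR and make it visible via finish_mem_reg(). */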
static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
		      struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
			      mhp->wr_waitp);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

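/* Reserve PBL pool space for npages page addresses (8 bytes each). */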
static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

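/*
 * Register a DMA MR covering all of memory (zero-based, length ~0ULL) with
 * the requested access rights.
 */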
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	pr_debug("ib_pd %p\n", pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL, mhp->wr_waitp);
	if (ret)
		goto err_free_skb;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err_dereg_mem;
	return &mhp->ibmr;
err_dereg_mem:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

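/*
 * Register a user memory region: pin the pages with ib_umem_get(), stream
 * their DMA addresses into the PBL one page-sized chunk at a time, then
 * write the TPT entry via register_mem().
 */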
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = -ENOMEM;
	struct ib_block_iter biter;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	pr_debug("ib_pd %p\n", pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		goto err_free_mhp;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb)
		goto err_free_wr_wait;

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mhp->umem))
		goto err_free_skb;

	shift = PAGE_SHIFT;

	n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err_umem_release;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl_free;
	}

	i = n = 0;

	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i,
					mhp->wr_waitp);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i,
				mhp->wr_waitp);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl_free;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl_free;

	return &mhp->ibmr;

err_pbl_free:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
err_umem_release:
	ib_umem_release(mhp->umem);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(err);
}

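/*
 * Allocate a fast-register MR: a coherent buffer for the page list
 * (mhp->mpl), PBL space, and a stag.  The TPT entry is written in the
 * invalid state (attr.state == 0) until the MR is fast-registered.
 */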
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err_free_dma;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
			    mhp->wr_waitp);
	if (ret)
		goto err_free_pbl;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_dereg;
	}

	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmr);
err_dereg:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			      mhp->attr.pbl_size << 3);
err_free_dma:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

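/* ib_sg_to_pages() callback: append one page address to the MR's page list. */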
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

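/* Map an SG list into the MR's page list via the c4iw_set_page() callback. */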
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

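/*
 * Deregister an MR: remove it from the MR xarray, free the fast-register
 * page list if present, clear the TPT entry, and release the PBL, umem,
 * and wait object.
 */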
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	pr_debug("ib_mr %p\n", ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	ib_umem_release(mhp->umem);
	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
	c4iw_put_wr_wait(mhp->wr_waitp);
	kfree(mhp);
	return 0;
}

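/* Mark the MR for this rkey invalid, e.g. after an invalidate completes. */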
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}