/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_user.h"
#include "hns_roce_hem.h"

/**
 * hns_roce_addrconf_ifid_eui48 - Build the EUI-64 interface ID of the
 * default GID from a MAC address.
 * @eui: pointer to the eight interface-ID bytes of the GID (gid->raw[8..15])
 * @vlan_id: VLAN ID; a value >= 0x1000 means no VLAN
 * @dev: net device
 * Description:
 *    MAC converts to GID as follows:
 *        gid[0..7] = fe80 0000 0000 0000
 *        gid[8] = mac[0] ^ 2    (universal/local bit flipped)
 *        gid[9] = mac[1]
 *        gid[10] = mac[2]
 *        gid[11] = vlan_id >> 8, or 0xff when there is no VLAN
 *        gid[12] = vlan_id & 0xff, or 0xfe when there is no VLAN
 *        gid[13] = mac[3]
 *        gid[14] = mac[4]
 *        gid[15] = mac[5]
 */
static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
					 struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}

static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
	memset(gid, 0, sizeof(*gid));
	gid->raw[0] = 0xFE;
	gid->raw[1] = 0x80;
	hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
}

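/*
 * Worked example (illustrative, not part of the driver): for MAC
 * 00:11:22:33:44:55 on an untagged netdev (vlan_id passed as 0xffff),
 * the default GID built above is
 *
 *	fe80:0000:0000:0000:0211:22ff:fe33:4455
 *
 * i.e. the fe80::/64 link-local prefix followed by the EUI-64
 * interface ID with the universal/local bit flipped.
 */
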
/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, in the range 0 .. caps.num_ports - 1
 * @gid_index: gid index within the port, in the range 0 .. gid_table_len - 1
 * Description:
 *    N ports share one flat gid table, interleaved as follows:
 *		GID[0][0], GID[1][0], ..., GID[N - 1][0],
 *		GID[0][1], GID[1][1], ..., GID[N - 1][1],
 *		And so on
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}

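/*
 * Worked example (illustrative): with caps.num_ports == 2 the flat
 * iboe.gid_table holds GID[port0][0], GID[port1][0], GID[port0][1],
 * GID[port1][1], ... so hns_get_gid_index(hr_dev, 1, 2) returns
 * 2 * 2 + 1 = 5.
 */
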
static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
			    union ib_gid *gid)
{
	struct device *dev = &hr_dev->pdev->dev;
	u8 gid_idx = 0;

	if (gid_index >= hr_dev->caps.gid_table_len[port]) {
		dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
			gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
		return -EINVAL;
	}

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
		return -EINVAL;

	memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));

	hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);

	return 0;
}

static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i = 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
		return;

	for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
{
	u8 phy_port = hr_dev->iboe.phy_port[port];
	enum ib_mtu tmp;

	tmp = iboe_get_mtu(mtu);
	if (!tmp)
		tmp = IB_MTU_256;

	hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
}

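/*
 * Note (illustrative): iboe_get_mtu() subtracts the RoCE transport
 * headers from the netdev MTU before rounding down to an IB MTU enum,
 * so a standard 1500-byte Ethernet MTU typically yields IB_MTU_1024;
 * a zero result falls back to IB_MTU_256 above.
 */
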
static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
{
	struct ib_event event;

	/* Refresh gid in ib_cache */
	event.device = &hr_dev->ib_dev;
	event.element.port_num = port + 1;
	event.event = IB_EVENT_GID_CHANGE;
	ib_dispatch_event(&event);
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct net_device *netdev;
	unsigned long flags;
	union ib_gid gid;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "port(%d) can't find netdev\n", port);
		return -ENODEV;
	}

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		hns_roce_make_default_gid(netdev, &gid);
		ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
		if (!ret)
			hns_roce_update_gids(hr_dev, port);
		break;
	case NETDEV_DOWN:
		/*
		 * The v1 engine only supports taking all ports down
		 * together, so nothing is done per port here.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	u8 port = 0;
	int ret = 0;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

static void hns_roce_addr_event(int event, struct net_device *event_netdev,
				struct hns_roce_dev *hr_dev, union ib_gid *gid)
{
	struct hns_roce_ib_iboe *iboe = NULL;
	int gid_table_len = 0;
	unsigned long flags;
	union ib_gid zgid;
	u8 gid_idx = 0;
	u8 port = 0;
	int i = 0;
	int free;
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
				      rdma_vlan_dev_real_dev(event_netdev) :
				      event_netdev;

	if (event != NETDEV_UP && event != NETDEV_DOWN)
		return;

	iboe = &hr_dev->iboe;
	while (port < hr_dev->caps.num_ports) {
		if (real_dev == iboe->netdevs[port])
			break;
		port++;
	}

	if (port >= hr_dev->caps.num_ports) {
		dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
		return;
	}

	memset(zgid.raw, 0, sizeof(zgid.raw));
	free = -1;
	gid_table_len = hr_dev->caps.gid_table_len[port];

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	for (i = 0; i < gid_table_len; i++) {
		gid_idx = hns_get_gid_index(hr_dev, port, i);
		if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
			    sizeof(gid->raw)))
			break;
		if (free < 0 && !memcmp(zgid.raw, iboe->gid_table[gid_idx].raw,
					sizeof(zgid.raw)))
			free = i;
	}

	if (i >= gid_table_len) {
		if (free < 0) {
			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
			dev_dbg(&hr_dev->pdev->dev,
				"gid_index overflow, port(%d)\n", port);
			return;
		}
		if (!hns_roce_set_gid(hr_dev, port, free, gid))
			hns_roce_update_gids(hr_dev, port);
	} else if (event == NETDEV_DOWN) {
		if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
			hns_roce_update_gids(hr_dev, port);
	}

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
}

static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct hns_roce_dev *hr_dev;
	struct net_device *dev = ifa->ifa_dev->dev;
	union ib_gid gid;

	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);

	hns_roce_addr_event(event, dev, hr_dev, &gid);

	return NOTIFY_DONE;
}

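/*
 * Worked example (illustrative): the IPv4 address 192.168.1.9 becomes
 * the v4-mapped GID ::ffff:192.168.1.9, i.e. raw bytes
 * 00 .. 00 ff ff c0 a8 01 09, which hns_roce_addr_event() above then
 * installs in (NETDEV_UP) or clears from (NETDEV_DOWN) the port's
 * GID table.
 */
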
static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
{
	struct in_ifaddr *ifa_list = NULL;
	union ib_gid gid = {{0}};
	u32 ipaddr = 0;
	int index = 0;
	int ret = 0;
	u8 i = 0;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		hns_roce_set_mtu(hr_dev, i,
				 ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);

		if (hr_dev->iboe.netdevs[i]->ip_ptr) {
			ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
			index = 1;
			while (ifa_list) {
				ipaddr = ifa_list->ifa_address;
				ipv6_addr_set_v4mapped(ipaddr,
						       (struct in6_addr *)&gid);
				ret = hns_roce_set_gid(hr_dev, i, index, &gid);
				if (ret)
					break;
				index++;
				ifa_list = ifa_list->ifa_next;
			}
			hns_roce_update_gids(hr_dev, i);
		}
	}

	return ret;
}

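/*
 * Note: GID index 0 on each port is reserved for the MAC-derived
 * default GID (installed by handle_en_event() above); the IP-derived
 * GIDs written by hns_roce_setup_mtu_gids() therefore start at
 * index 1.
 */
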
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->sys_image_guid = hr_dev->sys_image_guid;
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN |
				  IB_DEVICE_LOCAL_DMA_LKEY;
	props->max_sge = hr_dev->caps.max_sq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	assert(port_num > 0);
	port = port_num - 1;

	memset(props, 0, sizeof(*props));

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "find netdev %d failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
			      union ib_gid *gid)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	u8 gid_idx = 0;
	u8 port;

	if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
	    index >= hr_dev->caps.gid_table_len[port_num - 1]) {
		dev_err(dev,
			"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
			port_num, index, hr_dev->caps.num_ports,
			hr_dev->caps.gid_table_len[port_num - 1] - 1);
		return -EINVAL;
	}

	port = port_num - 1;
	gid_idx = hns_get_gid_index(hr_dev, port, index);
	if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
		dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
			port_num, index, HNS_ROCE_MAX_GID_NUM);
		return -EINVAL;
	}

	memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
	       HNS_ROCE_GID_SIZE);

	return 0;
}

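/*
 * RoCE exposes a single full-membership partition key; PKEY_ID
 * (defined in hns_roce_device.h) is the default 0xffff key.
 */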
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
				struct ib_port_modify *props)
{
	return 0;
}

static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
						   struct ib_udata *udata)
{
	int ret = 0;
	struct hns_roce_ucontext *context;
	struct hns_roce_ib_alloc_ucontext_resp resp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	resp.qp_tab_size = hr_dev->caps.num_qps;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret)
		goto error_fail_copy_to_udata;

	return &context->ibucontext;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	kfree(context);

	return ERR_PTR(ret);
}

static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
	kfree(context);

	return 0;
}

static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_hr_ucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else {
		return -EINVAL;
	}

	return 0;
}

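/*
 * Illustrative userspace sketch (not part of the driver): the provider
 * library maps the UAR doorbell page by mmap()ing the uverbs command
 * fd at offset 0, e.g.
 *
 *	db = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, cmd_fd, 0);
 *
 * Any other page offset is rejected by hns_roce_mmap() above with
 * -EINVAL.
 */
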
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = hns_roce_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	unregister_inetaddr_notifier(&iboe->nb_inet);
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = &hr_dev->pdev->dev;

	iboe = &hr_dev->iboe;
	/*
	 * Initialize the lock before ib_register_device() makes
	 * hns_roce_query_port(), which takes it, reachable.
	 */
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;
	strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);

	ib_dev->owner			= THIS_MODULE;
	ib_dev->node_type		= RDMA_NODE_IB_CA;
	ib_dev->dma_device		= dev;

	ib_dev->phys_port_cnt		= hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey		= hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors	= hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_abi_ver		= 1;
	ib_dev->uverbs_cmd_mask		=
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	/* HCA, device and port queries */
	ib_dev->modify_device		= hns_roce_modify_device;
	ib_dev->query_device		= hns_roce_query_device;
	ib_dev->query_port		= hns_roce_query_port;
	ib_dev->modify_port		= hns_roce_modify_port;
	ib_dev->get_link_layer		= hns_roce_get_link_layer;
	ib_dev->query_gid		= hns_roce_query_gid;
	ib_dev->query_pkey		= hns_roce_query_pkey;
	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
	ib_dev->mmap			= hns_roce_mmap;

	/* PD */
	ib_dev->alloc_pd		= hns_roce_alloc_pd;
	ib_dev->dealloc_pd		= hns_roce_dealloc_pd;

	/* AH */
	ib_dev->create_ah		= hns_roce_create_ah;
	ib_dev->query_ah		= hns_roce_query_ah;
	ib_dev->destroy_ah		= hns_roce_destroy_ah;

	/* QP */
	ib_dev->create_qp		= hns_roce_create_qp;
	ib_dev->modify_qp		= hns_roce_modify_qp;
	ib_dev->query_qp		= hr_dev->hw->query_qp;
	ib_dev->destroy_qp		= hr_dev->hw->destroy_qp;
	ib_dev->post_send		= hr_dev->hw->post_send;
	ib_dev->post_recv		= hr_dev->hw->post_recv;

	/* CQ */
	ib_dev->create_cq		= hns_roce_ib_create_cq;
	ib_dev->destroy_cq		= hns_roce_ib_destroy_cq;
	ib_dev->req_notify_cq		= hr_dev->hw->req_notify_cq;
	ib_dev->poll_cq			= hr_dev->hw->poll_cq;

	/* MR */
	ib_dev->get_dma_mr		= hns_roce_get_dma_mr;
	ib_dev->reg_user_mr		= hns_roce_reg_user_mr;
	ib_dev->dereg_mr		= hns_roce_dereg_mr;

	/* OTHERS */
	ib_dev->get_port_immutable	= hns_roce_port_immutable;

	ret = ib_register_device(ib_dev, NULL);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_gids(hr_dev);
	if (ret) {
		dev_err(dev, "roce_setup_mtu_gids failed!\n");
		goto error_failed_setup_mtu_gids;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_gids;
	}

	iboe->nb_inet.notifier_call = hns_roce_inet_event;
	ret = register_inetaddr_notifier(&iboe->nb_inet);
	if (ret) {
		dev_err(dev, "register inet addr notifier failed!\n");
		goto error_failed_register_inetaddr_notifier;
	}

	return 0;

error_failed_register_inetaddr_notifier:
	unregister_netdevice_notifier(&iboe->nb);

error_failed_setup_mtu_gids:
	ib_unregister_device(ib_dev);

	return ret;
}

static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);

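/*
 * Illustrative DT fragment (assumed shape; the binding document for
 * "hisilicon,hns-roce-v1" is authoritative):
 *
 *	roce: roce@0 {
 *		compatible = "hisilicon,hns-roce-v1";
 *		reg = <...>;
 *		eth-handle = <&eth0 &eth1>;
 *		interrupt-names = "...";
 *		interrupts = <...>;
 *	};
 *
 * hns_roce_get_cfg() below consumes "eth-handle" to find the RoCE
 * capable netdevs and "interrupt-names" plus the IRQ resources for
 * the event queues.
 */
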
static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);

static int hns_roce_node_match(struct device *dev, void *fwnode)
{
	return dev->fwnode == fwnode;
}

static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	/* get the 'device' corresponding to the matching 'fwnode' */
	dev = bus_find_device(&platform_bus_type, NULL,
			      fwnode, hns_roce_node_match);
	/* get the platform device */
	return dev ? to_platform_device(dev) : NULL;
}

static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	int i;
	int ret;
	u8 phy_port;
	int port_cnt = 0;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *net_node;
	struct net_device *netdev = NULL;
	struct platform_device *pdev = NULL;
	struct resource *res;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (struct hns_roce_hw *)acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "memory resource not found!\n");
		return -EINVAL;
	}
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct acpi_reference_args args;
			struct fwnode_handle *fwnode;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			fwnode = acpi_fwnode_handle(args.adev);
			pdev = hns_roce_find_pdev(fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0) {
			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = &hr_dev->pdev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
				      hr_dev->caps.num_mtt_segs, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_irrl;
	}

	return 0;

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

err_unmap_mtt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return: 0 on success, negative errno on failure
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = &hr_dev->pdev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->cq_db_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	return 0;

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}

/**
 * hns_roce_probe - RoCE driver entry point
 * @pdev: pointer to platform device
 * Return: 0 on success, negative errno on failure
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
		sizeof(struct hns_roce_dev) - sizeof(struct ib_device));

	hr_dev->pdev = pdev;
	platform_set_drvdata(pdev, hr_dev);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hr_dev->hw->reset(hr_dev, true);
	if (ret) {
		dev_err(dev, "Reset RoCE engine failed!\n");
		goto error_failed_get_cfg;
	}

	hr_dev->hw->hw_profile(hr_dev);

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_init_eq_table(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_err(dev, "Switch to event-driven cmd failed!\n");
			goto error_failed_use_event;
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	ret = hr_dev->hw->hw_init(hr_dev);
	if (ret) {
		dev_err(dev, "hw_init failed!\n");
		goto error_failed_engine_init;
	}

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

error_failed_use_event:
	hns_roce_cleanup_eq_table(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	ret = hr_dev->hw->reset(hr_dev, false);
	if (ret)
		dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");

error_failed_get_cfg:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

	hns_roce_unregister_device(hr_dev);
	hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hns_roce_cleanup_eq_table(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	hr_dev->hw->reset(hr_dev, false);

	ib_dealloc_device(&hr_dev->ib_dev);

	return 0;
}

static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");