// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 * Copyright (C) 2018, LG Electronics.
 * Copyright (c) 2025 Stefan Metzmacher
 */
7
8 #include "internal.h"
9
smbdirect_ib_device_rdma_capable_node_type(struct ib_device * ib_dev)10 static u8 smbdirect_ib_device_rdma_capable_node_type(struct ib_device *ib_dev)
11 {
12 if (!smbdirect_frwr_is_supported(&ib_dev->attrs))
13 return RDMA_NODE_UNSPECIFIED;
14
15 switch (ib_dev->node_type) {
16 case RDMA_NODE_IB_CA: /* Infiniband, RoCE v1 and v2 */
17 case RDMA_NODE_RNIC: /* iWarp */
18 return ib_dev->node_type;
19 }
20
21 return RDMA_NODE_UNSPECIFIED;
22 }
23
/*
 * ib_client "add" callback, invoked by the RDMA core for each ib_device
 * (including devices already present at ib_register_client() time).
 *
 * Logs the device's capabilities, and for RDMA-capable devices
 * (IB_CA or RNIC per smbdirect_ib_device_rdma_capable_node_type())
 * records a struct smbdirect_device on the global devices list.
 *
 * Returns 0 on success (including the "ignored" case) or -ENOMEM.
 */
static int smbdirect_ib_client_add(struct ib_device *ib_dev)
{
	u8 node_type = smbdirect_ib_device_rdma_capable_node_type(ib_dev);
	struct smbdirect_device *sdev;
	const char *node_str;
	const char *action;
	u32 pidx;

	/* Map the classified node_type to log strings. */
	switch (node_type) {
	case RDMA_NODE_IB_CA:
		node_str = "IB_CA";
		action = "added";
		break;
	case RDMA_NODE_RNIC:
		node_str = "RNIC";
		action = "added";
		break;
	case RDMA_NODE_UNSPECIFIED:
		node_str = "UNSPECIFIED";
		action = "ignored";
		break;
	default:
		/* Should not happen; normalize so the check below ignores it. */
		node_str = "UNKNOWN";
		action = "ignored";
		node_type = RDMA_NODE_UNSPECIFIED;
		break;
	}

	/* Always log the device, even when it is ignored. */
	pr_info("ib_dev[%.*s]: %s: %s %s=%u %s=0x%llx %s=0x%llx %s=0x%llx\n",
		IB_DEVICE_NAME_MAX,
		ib_dev->name,
		action,
		node_str,
		"max_fast_reg_page_list_len",
		ib_dev->attrs.max_fast_reg_page_list_len,
		"device_cap_flags",
		ib_dev->attrs.device_cap_flags,
		"kernel_cap_flags",
		ib_dev->attrs.kernel_cap_flags,
		"page_size_cap",
		ib_dev->attrs.page_size_cap);

	/* Non-capable devices are only logged, never tracked. */
	if (node_type == RDMA_NODE_UNSPECIFIED)
		return 0;

	/* Detailed attribute dump for capable devices. */
	pr_info("ib_dev[%.*s]: %s=%u %s=%u %s=%u %s=%u %s=%u %s=%u %s=%u %s=%u %s=%u\n",
		IB_DEVICE_NAME_MAX,
		ib_dev->name,
		"num_ports",
		rdma_end_port(ib_dev),
		"max_qp_rd_atom",
		ib_dev->attrs.max_qp_rd_atom,
		"max_qp_init_rd_atom",
		ib_dev->attrs.max_qp_init_rd_atom,
		"max_sgl_rd",
		ib_dev->attrs.max_sgl_rd,
		"max_sge_rd",
		ib_dev->attrs.max_sge_rd,
		"max_cqe",
		ib_dev->attrs.max_cqe,
		"max_qp_wr",
		ib_dev->attrs.max_qp_wr,
		"max_send_sge",
		ib_dev->attrs.max_send_sge,
		"max_recv_sge",
		ib_dev->attrs.max_recv_sge);

	/* Per-port protocol summary (iWarp vs IB vs RoCE v1/v2). */
	rdma_for_each_port(ib_dev, pidx) {
		const struct ib_port_immutable *ib_pi =
			ib_port_immutable_read(ib_dev, pidx);
		/* ib_pi may be NULL; log zero flags in that case. */
		u32 core_cap_flags = ib_pi ? ib_pi->core_cap_flags : 0;

		pr_info("ib_dev[%.*s]PORT[%u]: %s=%u %s=%u %s=%u %s=%u %s=%u %s=0x%x\n",
			IB_DEVICE_NAME_MAX,
			ib_dev->name,
			pidx,
			"iwarp",
			rdma_protocol_iwarp(ib_dev, pidx),
			"ib",
			rdma_protocol_ib(ib_dev, pidx),
			"roce",
			rdma_protocol_roce(ib_dev, pidx),
			"v1",
			rdma_protocol_roce_eth_encap(ib_dev, pidx),
			"v2",
			rdma_protocol_roce_udp_encap(ib_dev, pidx),
			"core_cap_flags",
			core_cap_flags);
	}

	/* Track the device; the name is copied because ib_dev may be renamed. */
	sdev = kzalloc_obj(*sdev);
	if (!sdev)
		return -ENOMEM;
	sdev->ib_dev = ib_dev;
	snprintf(sdev->ib_name, ARRAY_SIZE(sdev->ib_name), "%.*s",
		 IB_DEVICE_NAME_MAX, ib_dev->name);

	write_lock(&smbdirect_globals.devices.lock);
	list_add(&sdev->list, &smbdirect_globals.devices.list);
	write_unlock(&smbdirect_globals.devices.lock);

	return 0;
}
127
smbdirect_ib_client_remove(struct ib_device * ib_dev,void * client_data)128 static void smbdirect_ib_client_remove(struct ib_device *ib_dev, void *client_data)
129 {
130 struct smbdirect_device *sdev, *tmp;
131
132 write_lock(&smbdirect_globals.devices.lock);
133 list_for_each_entry_safe(sdev, tmp, &smbdirect_globals.devices.list, list) {
134 if (sdev->ib_dev == ib_dev) {
135 list_del(&sdev->list);
136 pr_info("ib_dev[%.*s] removed\n",
137 IB_DEVICE_NAME_MAX, sdev->ib_name);
138 kfree(sdev);
139 break;
140 }
141 }
142 write_unlock(&smbdirect_globals.devices.lock);
143 }
144
smbdirect_ib_client_rename(struct ib_device * ib_dev,void * client_data)145 static void smbdirect_ib_client_rename(struct ib_device *ib_dev, void *client_data)
146 {
147 struct smbdirect_device *sdev;
148
149 write_lock(&smbdirect_globals.devices.lock);
150 list_for_each_entry(sdev, &smbdirect_globals.devices.list, list) {
151 if (sdev->ib_dev == ib_dev) {
152 pr_info("ib_dev[%.*s] renamed to [%.*s]\n",
153 IB_DEVICE_NAME_MAX, sdev->ib_name,
154 IB_DEVICE_NAME_MAX, ib_dev->name);
155 snprintf(sdev->ib_name, ARRAY_SIZE(sdev->ib_name), "%.*s",
156 IB_DEVICE_NAME_MAX, ib_dev->name);
157 break;
158 }
159 }
160 write_unlock(&smbdirect_globals.devices.lock);
161 }
162
/*
 * Registration with the RDMA core: these callbacks keep the global
 * smbdirect device list in sync with ib_device add/remove/rename events.
 */
static struct ib_client smbdirect_ib_client = {
	.name = "smbdirect_ib_client",
	.add = smbdirect_ib_client_add,
	.remove = smbdirect_ib_client_remove,
	.rename = smbdirect_ib_client_rename,
};
169
smbdirect_netdev_find_rdma_capable_node_type(struct net_device * netdev)170 static u8 smbdirect_netdev_find_rdma_capable_node_type(struct net_device *netdev)
171 {
172 struct smbdirect_device *sdev;
173 u8 node_type = RDMA_NODE_UNSPECIFIED;
174
175 read_lock(&smbdirect_globals.devices.lock);
176 list_for_each_entry(sdev, &smbdirect_globals.devices.list, list) {
177 u32 pi;
178
179 rdma_for_each_port(sdev->ib_dev, pi) {
180 struct net_device *ndev;
181
182 ndev = ib_device_get_netdev(sdev->ib_dev, pi);
183 if (!ndev)
184 continue;
185
186 if (ndev == netdev) {
187 dev_put(ndev);
188 node_type = sdev->ib_dev->node_type;
189 goto out;
190 }
191 dev_put(ndev);
192 }
193 }
194 out:
195 read_unlock(&smbdirect_globals.devices.lock);
196
197 if (node_type == RDMA_NODE_UNSPECIFIED) {
198 struct ib_device *ibdev;
199
200 ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
201 if (ibdev) {
202 node_type = smbdirect_ib_device_rdma_capable_node_type(ibdev);
203 ib_device_put(ibdev);
204 }
205 }
206
207 return node_type;
208 }
209
/*
 * Returns RDMA_NODE_UNSPECIFIED when the netdev has
 * no support for smbdirect capable rdma.
 *
 * Otherwise RDMA_NODE_RNIC is returned for iWarp devices
 * and RDMA_NODE_IB_CA for Infiniband and RoCE (v1 and v2).
 */
smbdirect_netdev_rdma_capable_node_type(struct net_device * netdev)217 u8 smbdirect_netdev_rdma_capable_node_type(struct net_device *netdev)
218 {
219 struct net_device *lower_dev;
220 struct list_head *iter;
221 u8 node_type = RDMA_NODE_UNSPECIFIED;
222
223 node_type = smbdirect_netdev_find_rdma_capable_node_type(netdev);
224 if (node_type != RDMA_NODE_UNSPECIFIED)
225 return node_type;
226
227 /* check if netdev is bridge or VLAN */
228 if (netif_is_bridge_master(netdev) || netdev->priv_flags & IFF_802_1Q_VLAN)
229 netdev_for_each_lower_dev(netdev, lower_dev, iter) {
230 node_type = smbdirect_netdev_find_rdma_capable_node_type(lower_dev);
231 if (node_type != RDMA_NODE_UNSPECIFIED)
232 return node_type;
233 }
234
235 /* check if netdev is IPoIB safely without layer violation */
236 if (netdev->type == ARPHRD_INFINIBAND)
237 return RDMA_NODE_IB_CA;
238
239 return RDMA_NODE_UNSPECIFIED;
240 }
241 EXPORT_SYMBOL_GPL(smbdirect_netdev_rdma_capable_node_type);
242
smbdirect_devices_init(void)243 __init int smbdirect_devices_init(void)
244 {
245 int ret;
246
247 rwlock_init(&smbdirect_globals.devices.lock);
248 INIT_LIST_HEAD(&smbdirect_globals.devices.list);
249
250 ret = ib_register_client(&smbdirect_ib_client);
251 if (ret) {
252 pr_crit("failed to ib_register_client: %d %1pe\n",
253 ret, SMBDIRECT_DEBUG_ERR_PTR(ret));
254 return ret;
255 }
256
257 return 0;
258 }
259
/*
 * Module exit: free all tracked devices, then unregister from the
 * RDMA core.
 */
__exit void smbdirect_devices_exit(void)
{
	struct smbdirect_device *sdev, *tmp;

	/*
	 * On exit we just cleanup so that
	 * smbdirect_ib_client_remove() won't
	 * print removals of devices.
	 */
	write_lock(&smbdirect_globals.devices.lock);
	list_for_each_entry_safe(sdev, tmp, &smbdirect_globals.devices.list, list) {
		list_del(&sdev->list);
		kfree(sdev);
	}
	write_unlock(&smbdirect_globals.devices.lock);

	/*
	 * This triggers the "remove" callback for each remaining device,
	 * which finds an empty list and stays silent.
	 */
	ib_unregister_client(&smbdirect_ib_client);
}
278