/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/if_vlan.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int             table_len;
        u16             table[] __counted_by(table_len);
};

struct ib_update_work {
        struct work_struct work;
        struct ib_event    event;
        bool enforce_security;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
        GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
};
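
/*
 * These bits are OR'ed together to tell find_gid() which fields of
 * struct ib_gid_attr must match. For example, a lookup by value and
 * type passes GID_ATTR_FIND_MASK_GID | GID_ATTR_FIND_MASK_GID_TYPE,
 * and additionally sets GID_ATTR_FIND_MASK_NETDEV when the entry must
 * be bound to a specific netdev.
 */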

enum gid_table_entry_state {
        GID_TABLE_ENTRY_INVALID         = 1,
        GID_TABLE_ENTRY_VALID           = 2,
        /*
         * Indicates that the entry is pending removal; there may still
         * be active users of this GID entry. Once the last user drops
         * its reference, the entry is detached from the table.
         */
        GID_TABLE_ENTRY_PENDING_DEL     = 3,
};

struct roce_gid_ndev_storage {
        struct rcu_head rcu_head;
        struct net_device *ndev;
};

struct ib_gid_table_entry {
        struct kref                     kref;
        struct work_struct              del_work;
        struct ib_gid_attr              attr;
        void                            *context;
        /* Store the ndev pointer here so that the netdev reference can
         * be released later from the call_rcu callback, by which time
         * the gid_table_entry and attr may already have been freed.
         * ndev_storage itself is freed by the rcu callback.
         */
        struct roce_gid_ndev_storage    *ndev_storage;
        enum gid_table_entry_state      state;
};

struct ib_gid_table {
        int                             sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Find out whether this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Deletion requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         */
        /* Any writer to data_vec must hold this lock and the write side of
         * rwlock. Readers must hold only rwlock. All writers must be in a
         * sleepable context.
         */
        struct mutex                    lock;
        /* rwlock protects data_vec[ix]->state and entry pointer.
         */
        rwlock_t                        rwlock;
        struct ib_gid_table_entry       **data_vec;
        /* bit field, each bit indicates the index of default GID */
        u32                             default_gid_indices;
};

static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
{
        struct ib_event event;

        event.device            = ib_dev;
        event.element.port_num  = port;
        event.event             = IB_EVENT_GID_CHANGE;

        ib_dispatch_event_clients(&event);
}

static const char * const gid_type_str[] = {
        /* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
         * user space compatibility reasons.
         */
        [IB_GID_TYPE_IB]                = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE]              = "IB/RoCE v1",
        [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
};

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
{
        if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
                return gid_type_str[gid_type];

        return "Invalid GID type";
}
EXPORT_SYMBOL(ib_cache_gid_type_str);

/** rdma_is_zero_gid - Check if given GID is zero or not.
 * @gid: GID to check
 * Returns true if given GID is zero, returns false otherwise.
 */
bool rdma_is_zero_gid(const union ib_gid *gid)
{
        return !memcmp(gid, &zgid, sizeof(*gid));
}
EXPORT_SYMBOL(rdma_is_zero_gid);

/** is_gid_index_default - Check if a given index belongs to
 * reserved default GIDs or not.
 * @table: GID table pointer
 * @index: Index to check in GID table
 * Returns true if index is one of the reserved default GID indices,
 * otherwise returns false.
 */
static bool is_gid_index_default(const struct ib_gid_table *table,
                                 unsigned int index)
{
        return index < 32 && (BIT(index) & table->default_gid_indices);
}
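
/*
 * For example, with default_gid_indices == 0x3 (set up by
 * gid_table_reserve_default()), table indices 0 and 1 are the reserved
 * default-GID slots and every other index is available for regular
 * GID entries.
 */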

int ib_cache_gid_parse_type_str(const char *buf)
{
        unsigned int i;
        size_t len;
        int err = -EINVAL;

        len = strlen(buf);
        if (len == 0)
                return -EINVAL;

        if (buf[len - 1] == '\n')
                len--;

        for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
                if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
                    len == strlen(gid_type_str[i])) {
                        err = i;
                        break;
                }

        return err;
}
EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
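
/*
 * A minimal usage sketch: a sysfs-style store handler can accept either
 * "RoCE v2" or "RoCE v2\n" (the trailing newline is stripped above) and
 * both parse to IB_GID_TYPE_ROCE_UDP_ENCAP; an unrecognized string
 * yields -EINVAL.
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (gid_type < 0)
 *		return gid_type;	// -EINVAL
 */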

static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
{
        return device->port_data[port].cache.gid;
}

static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
{
        return !entry;
}

static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
{
        return entry && entry->state == GID_TABLE_ENTRY_VALID;
}

static void schedule_free_gid(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                container_of(kref, struct ib_gid_table_entry, kref);

        queue_work(ib_wq, &entry->del_work);
}

static void put_gid_ndev(struct rcu_head *head)
{
        struct roce_gid_ndev_storage *storage =
                container_of(head, struct roce_gid_ndev_storage, rcu_head);

        WARN_ON(!storage->ndev);
        /* At this point it is safe to release the netdev reference, as
         * all callers working on gid_attr->ndev are done using this
         * netdev.
         */
        dev_put(storage->ndev);
        kfree(storage);
}

static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        struct ib_device *device = entry->attr.device;
        u32 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
                port_num, entry->attr.index, entry->attr.gid.raw);

        write_lock_irq(&table->rwlock);

        /*
         * Only clear the table slot if it still points to this entry;
         * if a new entry was stored at this index by the time we get
         * here, don't overwrite it.
         */
        if (entry == table->data_vec[entry->attr.index])
                table->data_vec[entry->attr.index] = NULL;
        /* Now this index is ready to be allocated */
        write_unlock_irq(&table->rwlock);

        if (entry->ndev_storage)
                call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
        kfree(entry);
}

static void free_gid_entry(struct kref *kref)
{
        struct ib_gid_table_entry *entry =
                container_of(kref, struct ib_gid_table_entry, kref);

        free_gid_entry_locked(entry);
}

/**
 * free_gid_work - Release reference to the GID entry
 * @work: Work structure to refer to GID entry which needs to be
 * deleted.
 *
 * free_gid_work() frees the entry from the HCA's hardware table
 * if the provider supports it. It releases the reference to the
 * netdevice.
 */
static void free_gid_work(struct work_struct *work)
{
        struct ib_gid_table_entry *entry =
                container_of(work, struct ib_gid_table_entry, del_work);
        struct ib_device *device = entry->attr.device;
        u32 port_num = entry->attr.port_num;
        struct ib_gid_table *table = rdma_gid_table(device, port_num);

        mutex_lock(&table->lock);
        free_gid_entry_locked(entry);
        mutex_unlock(&table->lock);
}

static struct ib_gid_table_entry *
alloc_gid_entry(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        struct net_device *ndev;

        entry = kzalloc_obj(*entry);
        if (!entry)
                return NULL;

        ndev = rcu_dereference_protected(attr->ndev, 1);
        if (ndev) {
                entry->ndev_storage = kzalloc_obj(*entry->ndev_storage);
                if (!entry->ndev_storage) {
                        kfree(entry);
                        return NULL;
                }
                dev_hold(ndev);
                entry->ndev_storage->ndev = ndev;
        }
        kref_init(&entry->kref);
        memcpy(&entry->attr, attr, sizeof(*attr));
        INIT_WORK(&entry->del_work, free_gid_work);
        entry->state = GID_TABLE_ENTRY_INVALID;
        return entry;
}

static void store_gid_entry(struct ib_gid_table *table,
                            struct ib_gid_table_entry *entry)
{
        entry->state = GID_TABLE_ENTRY_VALID;

        dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
                __func__, entry->attr.port_num, entry->attr.index,
                entry->attr.gid.raw);

        lockdep_assert_held(&table->lock);
        write_lock_irq(&table->rwlock);
        table->data_vec[entry->attr.index] = entry;
        write_unlock_irq(&table->rwlock);
}

static void get_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_get(&entry->kref);
}

static void put_gid_entry(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, schedule_free_gid);
}

static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
{
        kref_put(&entry->kref, free_gid_entry);
}
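
/*
 * Entry lifecycle in brief: alloc_gid_entry() starts the kref at 1 and
 * store_gid_entry() publishes the entry in data_vec. Lookups that return
 * an ib_gid_attr take an extra reference via get_gid_entry(). When the
 * last reference is dropped, put_gid_entry() defers the actual free to
 * an ib_wq work item (a sleepable context that can take table->lock),
 * while put_gid_entry_locked() frees in place for callers that already
 * hold table->lock.
 */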

static int add_roce_gid(struct ib_gid_table_entry *entry)
{
        const struct ib_gid_attr *attr = &entry->attr;
        int ret;

        if (!attr->ndev) {
                dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
                        __func__, attr->port_num, attr->index);
                return -EINVAL;
        }
        if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
                ret = attr->device->ops.add_gid(attr, &entry->context);
                if (ret) {
                        dev_err(&attr->device->dev,
                                "%s GID add failed port=%u index=%u\n",
                                __func__, attr->port_num, attr->index);
                        return ret;
                }
        }
        return 0;
}

/**
 * del_gid - Delete GID table entry
 *
 * @ib_dev: IB device whose GID entry to be deleted
 * @port: Port number of the IB device
 * @table: GID table of the IB device for a port
 * @ix: GID entry index to delete
 *
 */
static void del_gid(struct ib_device *ib_dev, u32 port,
                    struct ib_gid_table *table, int ix)
{
        struct roce_gid_ndev_storage *ndev_storage;
        struct ib_gid_table_entry *entry;

        lockdep_assert_held(&table->lock);

        dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
                ix, table->data_vec[ix]->attr.gid.raw);

        write_lock_irq(&table->rwlock);
        entry = table->data_vec[ix];
        entry->state = GID_TABLE_ENTRY_PENDING_DEL;
        /*
         * For non-RoCE protocols, the GID entry slot is immediately
         * ready for reuse.
         */
        if (!rdma_protocol_roce(ib_dev, port))
                table->data_vec[ix] = NULL;
        write_unlock_irq(&table->rwlock);

        if (rdma_cap_roce_gid_table(ib_dev, port))
                ib_dev->ops.del_gid(&entry->attr, &entry->context);

        ndev_storage = entry->ndev_storage;
        if (ndev_storage) {
                entry->ndev_storage = NULL;
                rcu_assign_pointer(entry->attr.ndev, NULL);
                call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
        }

        put_gid_entry_locked(entry);
}

/**
 * add_modify_gid - Add or modify GID table entry
 *
 * @table: GID table in which GID to be added or modified
 * @attr: Attributes of the GID
 *
 * Returns 0 on success or an appropriate error code. It accepts zero
 * GID addition for non-RoCE ports on HCAs that report them as valid
 * GIDs. However, such zero GIDs are not added to the cache.
 */
static int add_modify_gid(struct ib_gid_table *table,
                          const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry;
        int ret = 0;

        /*
         * Invalidate any old entry in the table to make it safe to write to
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
                del_gid(attr->device, attr->port_num, table, attr->index);

        /*
         * Some HCAs report multiple GID entries with only one valid GID, and
         * leave the other unused entries as the zero GID. Convert zero GIDs
         * to empty table entries instead of storing them.
         */
        if (rdma_is_zero_gid(&attr->gid))
                return 0;

        entry = alloc_gid_entry(attr);
        if (!entry)
                return -ENOMEM;

        if (rdma_protocol_roce(attr->device, attr->port_num)) {
                ret = add_roce_gid(entry);
                if (ret)
                        goto done;
        }

        store_gid_entry(table, entry);
        return 0;

done:
        put_gid_entry(entry);
        return ret;
}

/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask, int *pempty)
{
        int i = 0;
        int found = -1;
        int empty = pempty ? -1 : 0;

        while (i < table->sz && (found < 0 || empty < 0)) {
                struct ib_gid_table_entry *data = table->data_vec[i];
                struct ib_gid_attr *attr;
                int curr_index = i;

                i++;

                /* find_gid() is used during GID addition, where it is
                 * expected to return a free, non-duplicate entry slot.
                 * A free slot is looked up and returned only when pempty
                 * is set, so search for one only if requested.
                 */
                if (pempty && empty < 0) {
                        if (is_gid_entry_free(data) &&
                            default_gid ==
                                is_gid_index_default(table, curr_index)) {
                                /*
                                 * Found an invalid (free) entry; allocate it.
                                 * If a default GID is requested, then our
                                 * found slot must be one of the DEFAULT
                                 * reserved slots or we fail.
                                 * This ensures that only DEFAULT reserved
                                 * slots are used for default property GIDs.
                                 */
                                empty = curr_index;
                        }
                }

                /*
                 * Additionally, find_gid() is used to find a valid entry
                 * during lookup operations; so ignore the entries which
                 * are marked as pending for removal and the entries which
                 * are marked as invalid.
                 */
                if (!is_gid_entry_valid(data))
                        continue;

                if (found >= 0)
                        continue;

                attr = &data->attr;
                if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
                    attr->gid_type != val->gid_type)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &data->attr.gid, sizeof(*gid)))
                        continue;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        continue;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    is_gid_index_default(table, curr_index) != default_gid)
                        continue;

                found = curr_index;
        }

        if (pempty)
                *pempty = empty;

        return found;
}

static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}
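
/*
 * The default GID is the IPv6 link-local address derived from the MAC:
 * addrconf_ifid_eui48() builds a modified EUI-64 interface ID (ff:fe
 * inserted in the middle, universal/local bit flipped). For example, a
 * netdev with MAC 00:11:22:33:44:55 yields the default GID
 * fe80:0000:0000:0000:0211:22ff:fe33:4455.
 */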

static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
                              union ib_gid *gid, struct ib_gid_attr *attr,
                              unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int empty;
        int ix;

        /* Do not allow adding a zero GID, per IB spec version 1.3,
         * section 4.1.1 point (6) and sections 12.7.10 and 12.7.20.
         */
        if (rdma_is_zero_gid(gid))
                return -EINVAL;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, &empty);
        if (ix >= 0)
                goto out_unlock;

        if (empty < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }
        attr->device = ib_dev;
        attr->index = empty;
        attr->port_num = port;
        attr->gid = *gid;
        ret = add_modify_gid(table, attr);
        if (!ret)
                dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n",
                                    __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_NETDEV;

        return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
}
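
/*
 * A minimal caller sketch, in the style of the RoCE GID management code
 * (local variable names are illustrative only): fill in the GID value and
 * the attribute fields that identify it, then let the cache find a free
 * slot and program the HCA.
 *
 *	struct ib_gid_attr gid_attr = {
 *		.ndev		= ndev,
 *		.gid_type	= IB_GID_TYPE_ROCE,
 *	};
 *	union ib_gid gid;
 *
 *	ipv6_addr_set_v4mapped(ipv4_addr, (struct in6_addr *)&gid);
 *	ib_cache_gid_add(ib_dev, port, &gid, &gid_attr);
 */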

static int
_ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
                  union ib_gid *gid, struct ib_gid_attr *attr,
                  unsigned long mask, bool default_gid)
{
        struct ib_gid_table *table;
        int ret = 0;
        int ix;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, default_gid, mask, NULL);
        if (ix < 0) {
                ret = -EINVAL;
                goto out_unlock;
        }

        del_gid(ib_dev, port, table, ix);
        dispatch_gid_change_event(ib_dev, port);

out_unlock:
        mutex_unlock(&table->lock);
        if (ret)
                pr_debug("%s: can't delete gid %pI6 error=%d\n",
                         __func__, gid->raw, ret);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE |
                             GID_ATTR_FIND_MASK_DEFAULT |
                             GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table *table;
        int ix;
        bool deleted = false;

        table = rdma_gid_table(ib_dev, port);

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++) {
                if (is_gid_entry_valid(table->data_vec[ix]) &&
                    table->data_vec[ix]->attr.ndev == ndev) {
                        del_gid(ib_dev, port, table, ix);
                        deleted = true;
                }
        }

        mutex_unlock(&table->lock);

        if (deleted)
                dispatch_gid_change_event(ib_dev, port);

        return 0;
}

/**
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the specified
 * GID value in the local software cache.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns the SGID attributes with a valid reference held if the GID is
 * found, or an ERR_PTR on error.
 * The caller must invoke rdma_put_gid_attr() to release the reference.
 */
const struct ib_gid_attr *
rdma_find_gid_by_port(struct ib_device *ib_dev,
                      const union ib_gid *gid,
                      enum ib_gid_type gid_type,
                      u32 port, struct net_device *ndev)
{
        int local_index;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
        const struct ib_gid_attr *attr;
        unsigned long flags;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-ENOENT);

        table = rdma_gid_table(ib_dev, port);

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        read_lock_irqsave(&table->rwlock, flags);
        local_index = find_gid(table, gid, &val, false, mask, NULL);
        if (local_index >= 0) {
                get_gid_entry(table->data_vec[local_index]);
                attr = &table->data_vec[local_index]->attr;
                read_unlock_irqrestore(&table->rwlock, flags);
                return attr;
        }

        read_unlock_irqrestore(&table->rwlock, flags);
        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid_by_port);
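
/*
 * A minimal usage sketch: look up a RoCE v2 GID on a given port and
 * drop the reference when done.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid, IB_GID_TYPE_ROCE_UDP_ENCAP,
 *				     port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	// ... use attr->index, attr->gid_type, etc. ...
 *	rdma_put_gid_attr(attr);
 */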

/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
 * @filter: The filter function is executed on any matching GID in the table.
 *   If the filter function returns true, the corresponding index is returned;
 *   otherwise, we continue searching the GID table. It is guaranteed that
 *   while the filter is executed, the ndev field is valid and the structure
 *   won't change. The filter is executed in an atomic context and must not
 *   be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches for the specified GID value
 * for which the filter function returns true in the port's GID table.
 *
 */
const struct ib_gid_attr *rdma_find_gid_by_filter(
        struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
        bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
                       void *),
        void *context)
{
        const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
        struct ib_gid_table *table;
        unsigned long flags;
        unsigned int i;

        if (!rdma_is_port_valid(ib_dev, port))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(ib_dev, port);

        read_lock_irqsave(&table->rwlock, flags);
        for (i = 0; i < table->sz; i++) {
                struct ib_gid_table_entry *entry = table->data_vec[i];

                if (!is_gid_entry_valid(entry))
                        continue;

                if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
                        continue;

                if (filter(gid, &entry->attr, context)) {
                        get_gid_entry(entry);
                        res = &entry->attr;
                        break;
                }
        }
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}
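
/*
 * A sketch of a filter callback (hypothetical, for illustration): accept
 * only entries of a given GID type. The callback runs under the table's
 * read lock, so it must not sleep.
 *
 *	static bool match_gid_type(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr,
 *				   void *context)
 *	{
 *		enum ib_gid_type *type = context;
 *
 *		return attr->gid_type == *type;
 *	}
 */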

static struct ib_gid_table *alloc_gid_table(int sz)
{
        struct ib_gid_table *table = kzalloc_obj(*table);

        if (!table)
                return NULL;

        table->data_vec = kzalloc_objs(*table->data_vec, sz);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;
        rwlock_init(&table->rwlock);
        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_device *device,
                              struct ib_gid_table *table)
{
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; i++) {
                if (is_gid_entry_free(table->data_vec[i]))
                        continue;

                WARN_ONCE(true,
                          "GID entry ref leak for dev %s index %d ref=%u\n",
                          dev_name(&device->dev), i,
                          kref_read(&table->data_vec[i]->kref));
        }

        mutex_destroy(&table->lock);
        kfree(table->data_vec);
        kfree(table);
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
                                   struct ib_gid_table *table)
{
        int i;

        if (!table)
                return;

        mutex_lock(&table->lock);
        for (i = 0; i < table->sz; ++i) {
                if (is_gid_entry_valid(table->data_vec[i]))
                        del_gid(ib_dev, port, table, i);
        }
        mutex_unlock(&table->lock);
}

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
                                  struct net_device *ndev,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
{
        union ib_gid gid = { };
        struct ib_gid_attr gid_attr;
        unsigned int gid_type;
        unsigned long mask;

        mask = GID_ATTR_FIND_MASK_GID_TYPE |
               GID_ATTR_FIND_MASK_DEFAULT |
               GID_ATTR_FIND_MASK_NETDEV;
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
                if (1UL << gid_type & ~gid_type_mask)
                        continue;

                gid_attr.gid_type = gid_type;

                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
                        make_default_gid(ndev, &gid);
                        __ib_cache_gid_add(ib_dev, port, &gid,
                                           &gid_attr, mask, true);
                } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
                        _ib_cache_gid_del(ib_dev, port, &gid,
                                          &gid_attr, mask, true);
                }
        }
}

static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
                                      struct ib_gid_table *table)
{
        unsigned int i;
        unsigned long roce_gid_type_mask;
        unsigned int num_default_gids;

        roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        num_default_gids = hweight_long(roce_gid_type_mask);
        /* Reserve starting indices for default GIDs */
        for (i = 0; i < num_default_gids && i < table->sz; i++)
                table->default_gid_indices |= BIT(i);
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        u32 p;

        rdma_for_each_port (ib_dev, p) {
                release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
                ib_dev->port_data[p].cache.gid = NULL;
        }
}

static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table *table;
        u32 rdma_port;

        rdma_for_each_port (ib_dev, rdma_port) {
                table = alloc_gid_table(
                        ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
                if (!table)
                        goto rollback_table_setup;

                gid_table_reserve_default(ib_dev, rdma_port, table);
                ib_dev->port_data[rdma_port].cache.gid = table;
        }
        return 0;

rollback_table_setup:
        gid_table_release_one(ib_dev);
        return -ENOMEM;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        u32 p;

        rdma_for_each_port (ib_dev, p)
                cleanup_gid_table_port(ib_dev, p,
                                       ib_dev->port_data[p].cache.gid);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);

        if (err)
                return err;

        rdma_roce_rescan_device(ib_dev);

        return err;
}

/**
 * rdma_query_gid - Read the GID content from the GID software cache
 * @device: Device to query the GID
 * @port_num: Port number of the device
 * @index: Index of the GID table entry to read
 * @gid: Pointer to GID where to store the entry's GID
 *
 * rdma_query_gid() only reads the GID entry content for the requested
 * device, port and index. It works for IB, RoCE and iWARP link layers.
 * It doesn't hold any reference to the GID table entry in the HCA or
 * software cache.
 *
 * Returns 0 on success or an appropriate error code.
 *
 */
int rdma_query_gid(struct ib_device *device, u32 port_num,
                   int index, union ib_gid *gid)
{
        struct ib_gid_table *table;
        unsigned long flags;
        int res;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        table = rdma_gid_table(device, port_num);
        read_lock_irqsave(&table->rwlock, flags);

        if (index < 0 || index >= table->sz) {
                res = -EINVAL;
                goto done;
        }

        if (!is_gid_entry_valid(table->data_vec[index])) {
                res = -ENOENT;
                goto done;
        }

        memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
        res = 0;

done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return res;
}
EXPORT_SYMBOL(rdma_query_gid);
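
/*
 * A minimal usage sketch: copy out the GID at index 0 of a port. No
 * reference is taken and no attribute information is returned.
 *
 *	union ib_gid gid;
 *	int ret;
 *
 *	ret = rdma_query_gid(device, port_num, 0, &gid);
 *	if (ret)
 *		return ret;
 */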

/**
 * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
 * @attr: Pointer to the GID attribute
 *
 * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
 * to the SGID attr. Callers are required to already be holding the reference
 * to an existing GID entry.
 *
 * Returns the HW GID context
 *
 */
void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
{
        return container_of(attr, struct ib_gid_table_entry, attr)->context;
}
EXPORT_SYMBOL(rdma_read_gid_hw_context);

/**
 * rdma_find_gid - Returns SGID attributes if the matching GID is found.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * rdma_find_gid() searches for the specified GID value in the software cache.
 *
 * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
 * error. The caller must invoke rdma_put_gid_attr() to release the reference.
 *
 */
const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
                                        const union ib_gid *gid,
                                        enum ib_gid_type gid_type,
                                        struct net_device *ndev)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID |
                             GID_ATTR_FIND_MASK_GID_TYPE;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
        u32 p;

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        rdma_for_each_port(device, p) {
                struct ib_gid_table *table;
                unsigned long flags;
                int index;

                table = device->port_data[p].cache.gid;
                read_lock_irqsave(&table->rwlock, flags);
                index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
                if (index >= 0) {
                        const struct ib_gid_attr *attr;

                        get_gid_entry(table->data_vec[index]);
                        attr = &table->data_vec[index]->attr;
                        read_unlock_irqrestore(&table->rwlock, flags);
                        return attr;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(rdma_find_gid);

int ib_get_cached_pkey(struct ib_device *device,
                       u32 port_num,
                       int index,
                       u16 *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache_lock, flags);

        cache = device->port_data[port_num].cache.pkey;

        if (!cache || index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
                                 u64 *sn_pfx)
{
        unsigned long flags;

        read_lock_irqsave(&device->cache_lock, flags);
        *sn_pfx = device->port_data[port_num].cache.subnet_prefix;
        read_unlock_irqrestore(&device->cache_lock, flags);
}
EXPORT_SYMBOL(ib_get_cached_subnet_prefix);

int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
                        u16 pkey, u16 *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache_lock, flags);

        cache = device->port_data[port_num].cache.pkey;
        if (!cache) {
                ret = -EINVAL;
                goto err;
        }

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else {
                                partial_ix = i;
                        }
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

err:
        read_unlock_irqrestore(&device->cache_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
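
/*
 * The P_Key matching above follows the IB convention: the low 15 bits are
 * the partition number and bit 15 is the membership bit (set for full
 * members, clear for limited members). For example, 0xffff and 0x7fff both
 * name the default partition, but only 0xffff denotes full membership, so
 * a full-member match is preferred and a limited-member match recorded in
 * partial_ix is returned only as a fallback.
 */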

int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache_lock, flags);
        *lmc = device->port_data[port_num].cache.lmc;
        read_unlock_irqrestore(&device->cache_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
                             enum ib_port_state *port_state)
{
        unsigned long flags;
        int ret = 0;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        read_lock_irqsave(&device->cache_lock, flags);
        *port_state = device->port_data[port_num].cache.port_state;
        read_unlock_irqrestore(&device->cache_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_port_state);

/**
 * rdma_get_gid_attr - Returns GID attributes for a port of a device
 * at a requested gid_index, if a valid GID entry exists.
 * @device: The device to query.
 * @port_num: The port number on the device where the GID value
 *   is to be queried.
 * @index: Index of the GID table entry whose attributes are to
 *   be queried.
 *
 * rdma_get_gid_attr() acquires a reference on the GID attribute from the
 * cached GID table. The caller must invoke rdma_put_gid_attr() to release
 * the reference, regardless of link layer.
 *
 * Returns a pointer to a valid GID attribute or ERR_PTR for the appropriate
 * error code.
 */
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
{
        const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
        struct ib_gid_table *table;
        unsigned long flags;

        if (!rdma_is_port_valid(device, port_num))
                return ERR_PTR(-EINVAL);

        table = rdma_gid_table(device, port_num);
        if (index < 0 || index >= table->sz)
                return ERR_PTR(-EINVAL);

        read_lock_irqsave(&table->rwlock, flags);
        if (!is_gid_entry_valid(table->data_vec[index]))
                goto done;

        get_gid_entry(table->data_vec[index]);
        attr = &table->data_vec[index]->attr;
done:
        read_unlock_irqrestore(&table->rwlock, flags);
        return attr;
}
EXPORT_SYMBOL(rdma_get_gid_attr);

/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 *   Entries array must be allocated to hold max_entries number of entries.
 *
 * Returns number of entries on success or appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
                             struct ib_uverbs_gid_entry *entries,
                             size_t max_entries)
{
        const struct ib_gid_attr *gid_attr;
        ssize_t num_entries = 0, ret;
        struct ib_gid_table *table;
        u32 port_num, i;
        struct net_device *ndev;
        unsigned long flags;

        rdma_for_each_port(device, port_num) {
                table = rdma_gid_table(device, port_num);
                read_lock_irqsave(&table->rwlock, flags);
                for (i = 0; i < table->sz; i++) {
                        if (!is_gid_entry_valid(table->data_vec[i]))
                                continue;
                        if (num_entries >= max_entries) {
                                ret = -EINVAL;
                                goto err;
                        }

                        gid_attr = &table->data_vec[i]->attr;

                        memcpy(&entries->gid, &gid_attr->gid,
                               sizeof(gid_attr->gid));
                        entries->gid_index = gid_attr->index;
                        entries->port_num = gid_attr->port_num;
                        entries->gid_type = gid_attr->gid_type;
                        ndev = rcu_dereference_protected(
                                gid_attr->ndev,
                                lockdep_is_held(&table->rwlock));
                        if (ndev)
                                entries->netdev_ifindex = ndev->ifindex;

                        num_entries++;
                        entries++;
                }
                read_unlock_irqrestore(&table->rwlock, flags);
        }

        return num_entries;
err:
        read_unlock_irqrestore(&table->rwlock, flags);
        return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);
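
/*
 * A minimal usage sketch: size the array for every possible entry (e.g.
 * the sum of the ports' gid_tbl_len), then snapshot the tables of all
 * ports in one call.
 *
 *	struct ib_uverbs_gid_entry *entries;
 *	ssize_t n;
 *
 *	entries = kcalloc(max_entries, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	n = rdma_query_gid_table(device, entries, max_entries);
 *	// n is the count of valid entries copied, or a negative errno.
 */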

/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr: Pointer to the GID attribute whose reference
 *   needs to be released.
 *
 * rdma_put_gid_attr() must be used to release a reference that was
 * acquired via rdma_get_gid_attr() or any other API which returns a
 * pointer to an ib_gid_attr, regardless of the link layer being IB
 * or RoCE.
 *
 */
void rdma_put_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        put_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_put_gid_attr);

/**
 * rdma_hold_gid_attr - Get reference to existing GID attribute
 *
 * @attr: Pointer to the GID attribute whose reference
 *   needs to be taken.
 *
 * Increase the reference count to a GID attribute to keep it from being
 * freed. Callers are required to already be holding a reference to attribute.
 *
 */
void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);

        get_gid_entry(entry);
}
EXPORT_SYMBOL(rdma_hold_gid_attr);

/**
 * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
 * which must be in UP state.
 *
 * @attr: Pointer to the GID attribute
 *
 * Returns a pointer to the netdevice if a netdevice was attached to the
 * GID and the netdevice is in UP state. The caller must hold the RCU lock
 * as this API reads netdev flags which can change while the netdevice
 * migrates to a different net namespace. Returns ERR_PTR with an error
 * code otherwise.
 *
 */
struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
{
        struct ib_gid_table_entry *entry =
                container_of(attr, struct ib_gid_table_entry, attr);
        struct ib_device *device = entry->attr.device;
        struct net_device *ndev = ERR_PTR(-EINVAL);
        u32 port_num = entry->attr.port_num;
        struct ib_gid_table *table;
        unsigned long flags;
        bool valid;

        table = rdma_gid_table(device, port_num);

        read_lock_irqsave(&table->rwlock, flags);
        valid = is_gid_entry_valid(table->data_vec[attr->index]);
        if (valid) {
                ndev = rcu_dereference(attr->ndev);
                if (!ndev)
                        ndev = ERR_PTR(-ENODEV);
        }
        read_unlock_irqrestore(&table->rwlock, flags);
        return ndev;
}
EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);

static int get_lower_dev_vlan(struct net_device *lower_dev,
                              struct netdev_nested_priv *priv)
{
        u16 *vlan_id = (u16 *)priv->data;

        if (is_vlan_dev(lower_dev))
                *vlan_id = vlan_dev_vlan_id(lower_dev);

        /* We are interested only in the first-level vlan device, so
         * always return 1 to stop iterating over next-level devices.
         */
        return 1;
}

/**
 * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
 * of a GID entry.
 *
 * @attr: GID attribute pointer whose L2 fields to be read
 * @vlan_id: Pointer to vlan id to fill up if the GID entry has
 *   vlan id. It is optional.
 * @smac: Pointer to smac to fill up for a GID entry. It is optional.
 *
 * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
 * (if the gid entry has a vlan) and the source MAC, or returns an error.
 */
int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
                            u16 *vlan_id, u8 *smac)
{
        struct netdev_nested_priv priv = {
                .data = (void *)vlan_id,
        };
        struct net_device *ndev;

        rcu_read_lock();
        ndev = rcu_dereference(attr->ndev);
        if (!ndev) {
                rcu_read_unlock();
                return -ENODEV;
        }
        if (smac)
                ether_addr_copy(smac, ndev->dev_addr);
        if (vlan_id) {
                *vlan_id = 0xffff;
                if (is_vlan_dev(ndev)) {
                        *vlan_id = vlan_dev_vlan_id(ndev);
                } else {
                        /* If the netdev is an upper device whose lower
                         * device is a vlan device, use the vlan id of
                         * the lower vlan device for this gid entry.
                         */
                        netdev_walk_all_lower_dev_rcu(attr->ndev,
                                                      get_lower_dev_vlan,
                                                      &priv);
                }
        }
        rcu_read_unlock();
        return 0;
}
EXPORT_SYMBOL(rdma_read_gid_l2_fields);
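
/*
 * A minimal usage sketch: read the L2 addressing of a held GID entry.
 * A vlan_id of 0xffff after a successful return means the entry is
 * untagged.
 *
 *	u8 smac[ETH_ALEN];
 *	u16 vlan_id;
 *	int ret;
 *
 *	ret = rdma_read_gid_l2_fields(attr, &vlan_id, smac);
 *	if (ret)
 *		return ret;
 */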

static int config_non_roce_gid_cache(struct ib_device *device,
                                     u32 port, struct ib_port_attr *tprops)
{
        struct ib_gid_attr gid_attr = {};
        struct ib_gid_table *table;
        int ret = 0;
        int i;

        gid_attr.device = device;
        gid_attr.port_num = port;
        table = rdma_gid_table(device, port);

        mutex_lock(&table->lock);
        for (i = 0; i < tprops->gid_tbl_len; ++i) {
                if (!device->ops.query_gid)
                        continue;
                ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
                if (ret) {
                        dev_warn(&device->dev,
                                 "query_gid failed (%d) for index %d\n", ret,
                                 i);
                        goto err;
                }

                if (rdma_protocol_iwarp(device, port)) {
                        struct net_device *ndev;

                        ndev = ib_device_get_netdev(device, port);
                        if (!ndev)
                                continue;
                        RCU_INIT_POINTER(gid_attr.ndev, ndev);
                        dev_put(ndev);
                }

                gid_attr.index = i;
                tprops->subnet_prefix =
                        be64_to_cpu(gid_attr.gid.global.subnet_prefix);
                add_modify_gid(table, &gid_attr);
        }
err:
        mutex_unlock(&table->lock);
        return ret;
}

static int
ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
                bool update_pkeys, bool enforce_security)
{
        struct ib_port_attr *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL;
        struct ib_pkey_cache *old_pkey_cache = NULL;
        int i;
        int ret;

        if (!rdma_is_port_valid(device, port))
                return -EINVAL;

        tprops = kmalloc_obj(*tprops);
        if (!tprops)
                return -ENOMEM;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
                goto err;
        }

        if (!rdma_protocol_roce(device, port) && update_gids) {
                ret = config_non_roce_gid_cache(device, port,
                                                tprops);
                if (ret)
                        goto err;
        }

        update_pkeys &= !!tprops->pkey_tbl_len;

        if (update_pkeys) {
                pkey_cache = kmalloc_flex(*pkey_cache, table,
                                          tprops->pkey_tbl_len);
                if (!pkey_cache) {
                        ret = -ENOMEM;
                        goto err;
                }

                pkey_cache->table_len = tprops->pkey_tbl_len;

                for (i = 0; i < pkey_cache->table_len; ++i) {
                        ret = ib_query_pkey(device, port, i,
                                            pkey_cache->table + i);
                        if (ret) {
                                dev_warn(&device->dev,
                                         "ib_query_pkey failed (%d) for index %d\n",
                                         ret, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache_lock);

        if (update_pkeys) {
                old_pkey_cache = device->port_data[port].cache.pkey;
                device->port_data[port].cache.pkey = pkey_cache;
        }
        device->port_data[port].cache.lmc = tprops->lmc;

        if (device->port_data[port].cache.port_state != IB_PORT_NOP &&
            device->port_data[port].cache.port_state != tprops->state)
                ibdev_info(device, "Port: %d Link %s\n", port,
                           ib_port_state_to_str(tprops->state));

        device->port_data[port].cache.port_state = tprops->state;

        device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
        write_unlock_irq(&device->cache_lock);

        if (enforce_security)
                ib_security_cache_change(device,
                                         port,
                                         tprops->subnet_prefix);

        kfree(old_pkey_cache);
        kfree(tprops);
        return 0;

err:
        kfree(pkey_cache);
        kfree(tprops);
        return ret;
}

static void ib_cache_event_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);
        int ret;

        /* Before distributing the cache update event, first sync
         * the cache.
         */
        ret = ib_cache_update(work->event.device, work->event.element.port_num,
                              work->event.event == IB_EVENT_GID_CHANGE ||
                              work->event.event == IB_EVENT_CLIENT_REREGISTER,
                              work->event.event == IB_EVENT_PKEY_CHANGE,
                              work->enforce_security);

        /* The GID event is already notified for individual GID entries by
         * dispatch_gid_change_event(). Hence, notify only for the rest of
         * the events.
         */
        if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
                ib_dispatch_event_clients(&work->event);

        kfree(work);
}

static void ib_generic_event_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_dispatch_event_clients(&work->event);
        kfree(work);
}

static bool is_cache_update_event(const struct ib_event *event)
{
        return (event->event == IB_EVENT_PORT_ERR ||
                event->event == IB_EVENT_PORT_ACTIVE ||
                event->event == IB_EVENT_LID_CHANGE ||
                event->event == IB_EVENT_PKEY_CHANGE ||
                event->event == IB_EVENT_CLIENT_REREGISTER ||
                event->event == IB_EVENT_GID_CHANGE);
}

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(const struct ib_event *event)
{
        struct ib_update_work *work;

        work = kzalloc_obj(*work, GFP_ATOMIC);
        if (!work)
                return;

        if (is_cache_update_event(event))
                INIT_WORK(&work->work, ib_cache_event_task);
        else
                INIT_WORK(&work->work, ib_generic_event_task);

        work->event = *event;
        if (event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_GID_CHANGE)
                work->enforce_security = true;

        queue_work(ib_wq, &work->work);
}
EXPORT_SYMBOL(ib_dispatch_event);
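
/*
 * A minimal driver-side sketch: report a port becoming active. Because
 * this may be called from IRQ context, the dispatch itself only queues
 * work (note the GFP_ATOMIC allocation above).
 *
 *	struct ib_event ev;
 *
 *	ev.device = ib_dev;
 *	ev.element.port_num = port;
 *	ev.event = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&ev);
 */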

int ib_cache_setup_one(struct ib_device *device)
{
        u32 p;
        int err;

        err = gid_table_setup_one(device);
        if (err)
                return err;

        rdma_for_each_port (device, p) {
                err = ib_cache_update(device, p, true, true, true);
                if (err) {
                        gid_table_cleanup_one(device);
                        return err;
                }
        }

        return 0;
}

void ib_cache_release_one(struct ib_device *device)
{
        u32 p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        rdma_for_each_port (device, p)
                kfree(device->port_data[p].cache.pkey);

        gid_table_release_one(device);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function waits for all in-progress workqueue
         * elements and cleans up the GID cache. This function should be
         * called after the device was removed from the devices list and
         * all clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);

        /*
         * Flush the wq a second time for any pending GID delete work.
         */
        flush_workqueue(ib_wq);
}