xref: /linux/drivers/infiniband/core/cache.c (revision 973403ca3553f0367a6982687f5f0ee4212e9ab9)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Intel Corporation. All rights reserved.
4  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/if_vlan.h>
37 #include <linux/errno.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <linux/netdevice.h>
41 #include <net/addrconf.h>
42 
43 #include <rdma/ib_cache.h>
44 
45 #include "core_priv.h"
46 
47 struct ib_pkey_cache {
48 	int             table_len;
49 	u16             table[] __counted_by(table_len);
50 };
51 
52 struct ib_update_work {
53 	struct work_struct work;
54 	struct ib_event event;
55 	bool enforce_security;
56 };
57 
58 union ib_gid zgid;
59 EXPORT_SYMBOL(zgid);
60 
61 enum gid_attr_find_mask {
62 	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
63 	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
64 	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
65 	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
66 };
67 
68 enum gid_table_entry_state {
69 	GID_TABLE_ENTRY_INVALID		= 1,
70 	GID_TABLE_ENTRY_VALID		= 2,
71 	/*
72 	 * Indicates that the entry is pending removal; there may
73 	 * still be active users of this GID entry.
74 	 * When the last user releases its reference, the
75 	 * entry is detached from the table.
76 	 */
77 	GID_TABLE_ENTRY_PENDING_DEL	= 3,
78 };
79 
80 struct roce_gid_ndev_storage {
81 	struct rcu_head rcu_head;
82 	struct net_device *ndev;
83 };
84 
85 struct ib_gid_table_entry {
86 	struct kref			kref;
87 	struct work_struct		del_work;
88 	struct ib_gid_attr		attr;
89 	void				*context;
90 	/* Store the ndev pointer so the reference can be released later
91 	 * from call_rcu context, because by that time gid_table_entry
92 	 * and attr may already have been freed. So keep a copy of it.
93 	 * ndev_storage is freed by the rcu callback.
94 	 */
95 	struct roce_gid_ndev_storage	*ndev_storage;
96 	enum gid_table_entry_state	state;
97 };
98 
99 struct ib_gid_table {
100 	int				sz;
101 	/* In RoCE, adding a GID to the table requires:
102 	 * (a) Find if this GID already exists.
103 	 * (b) Find a free space.
104 	 * (c) Write the new GID.
105 	 *
106 	 * Deletion requires a different set of operations:
107 	 * (a) Find the GID.
108 	 * (b) Delete it.
109 	 *
110 	 */
111 	/* Any writer to data_vec must hold this lock and the write side of
112 	 * rwlock. Readers must hold only rwlock. All writers must be in a
113 	 * sleepable context.
114 	 */
115 	struct mutex			lock;
116 	/* rwlock protects data_vec[ix]->state and entry pointer.
117 	 */
118 	rwlock_t			rwlock;
119 	/* bit field, each bit indicates the index of default GID */
120 	u32				default_gid_indices;
121 	struct ib_gid_table_entry	*data_vec[] __counted_by(sz);
122 };
123 
124 static void dispatch_gid_change_event(struct ib_device *ib_dev, u32 port)
125 {
126 	struct ib_event event;
127 
128 	event.device		= ib_dev;
129 	event.element.port_num	= port;
130 	event.event		= IB_EVENT_GID_CHANGE;
131 
132 	ib_dispatch_event_clients(&event);
133 }
134 
135 static const char * const gid_type_str[] = {
136 	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
137 	 * user space compatibility reasons.
138 	 */
139 	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
140 	[IB_GID_TYPE_ROCE]	= "IB/RoCE v1",
141 	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
142 };
143 
144 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
145 {
146 	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
147 		return gid_type_str[gid_type];
148 
149 	return "Invalid GID type";
150 }
151 EXPORT_SYMBOL(ib_cache_gid_type_str);
152 
153 /**
154  * rdma_is_zero_gid - Check if the given GID is zero.
155  * @gid: GID to check. Returns true if @gid is zero, false otherwise.
156  */
157 bool rdma_is_zero_gid(const union ib_gid *gid)
158 {
159 	return !memcmp(gid, &zgid, sizeof(*gid));
160 }
161 EXPORT_SYMBOL(rdma_is_zero_gid);
162 
163 /**
164  * is_gid_index_default - Check if a given index is a reserved default GID index.
165  * @table:	GID table pointer
166  * @index:	Index to check in the GID table
167  * Returns true if the index is one of the reserved default GID indices,
168  * false otherwise.
169  */
170 static bool is_gid_index_default(const struct ib_gid_table *table,
171 				 unsigned int index)
172 {
173 	return index < 32 && (BIT(index) & table->default_gid_indices);
174 }
175 
176 int ib_cache_gid_parse_type_str(const char *buf)
177 {
178 	unsigned int i;
179 	size_t len;
180 	int err = -EINVAL;
181 
182 	len = strlen(buf);
183 	if (len == 0)
184 		return -EINVAL;
185 
186 	if (buf[len - 1] == '\n')
187 		len--;
188 
189 	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
190 		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
191 		    len == strlen(gid_type_str[i])) {
192 			err = i;
193 			break;
194 		}
195 
196 	return err;
197 }
198 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
199 
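/*
 * Usage sketch (illustrative, not part of this file): a sysfs store
 * handler could parse a user-supplied GID type string and echo back the
 * canonical name; the buffer contents here are hypothetical.
 *
 *	int gid_type = ib_cache_gid_parse_type_str("RoCE v2\n");
 *
 *	if (gid_type < 0)
 *		return gid_type;
 *	pr_debug("gid type %d (%s)\n", gid_type,
 *		 ib_cache_gid_type_str(gid_type));
 */
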
200 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u32 port)
201 {
202 	return device->port_data[port].cache.gid;
203 }
204 
205 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
206 {
207 	return !entry;
208 }
209 
210 static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
211 {
212 	return entry && entry->state == GID_TABLE_ENTRY_VALID;
213 }
214 
215 static void schedule_free_gid(struct kref *kref)
216 {
217 	struct ib_gid_table_entry *entry =
218 			container_of(kref, struct ib_gid_table_entry, kref);
219 
220 	queue_work(ib_wq, &entry->del_work);
221 }
222 
223 static void put_gid_ndev(struct rcu_head *head)
224 {
225 	struct roce_gid_ndev_storage *storage =
226 		container_of(head, struct roce_gid_ndev_storage, rcu_head);
227 
228 	WARN_ON(!storage->ndev);
229 	/* At this point it's safe to release the netdev reference,
230 	 * as all callers working on gid_attr->ndev are done
231 	 * using this netdev.
232 	 */
233 	dev_put(storage->ndev);
234 	kfree(storage);
235 }
236 
237 static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
238 {
239 	struct ib_device *device = entry->attr.device;
240 	u32 port_num = entry->attr.port_num;
241 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
242 
243 	dev_dbg(&device->dev, "%s port=%u index=%u gid %pI6\n", __func__,
244 		port_num, entry->attr.index, entry->attr.gid.raw);
245 
246 	write_lock_irq(&table->rwlock);
247 
248 	/*
249 	 * The only way to avoid overwriting NULL in the table is
250 	 * to check whether it is the same entry in the table.
251 	 * If a new entry was added at this index by the time we free here,
252 	 * don't overwrite the table entry.
253 	 */
254 	if (entry == table->data_vec[entry->attr.index])
255 		table->data_vec[entry->attr.index] = NULL;
256 	/* Now this index is ready to be allocated */
257 	write_unlock_irq(&table->rwlock);
258 
259 	if (entry->ndev_storage)
260 		call_rcu(&entry->ndev_storage->rcu_head, put_gid_ndev);
261 	kfree(entry);
262 }
263 
264 static void free_gid_entry(struct kref *kref)
265 {
266 	struct ib_gid_table_entry *entry =
267 			container_of(kref, struct ib_gid_table_entry, kref);
268 
269 	free_gid_entry_locked(entry);
270 }
271 
272 /**
273  * free_gid_work - Release reference to the GID entry
274  * @work: Work structure referring to the GID entry which needs to be
275  * deleted.
276  *
277  * free_gid_work() frees the entry from the HCA's hardware table
278  * if the provider supports it, and releases the reference to the netdevice.
279  */
280 static void free_gid_work(struct work_struct *work)
281 {
282 	struct ib_gid_table_entry *entry =
283 		container_of(work, struct ib_gid_table_entry, del_work);
284 	struct ib_device *device = entry->attr.device;
285 	u32 port_num = entry->attr.port_num;
286 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
287 
288 	mutex_lock(&table->lock);
289 	free_gid_entry_locked(entry);
290 	mutex_unlock(&table->lock);
291 }
292 
293 static struct ib_gid_table_entry *
294 alloc_gid_entry(const struct ib_gid_attr *attr)
295 {
296 	struct ib_gid_table_entry *entry;
297 	struct net_device *ndev;
298 
299 	entry = kzalloc_obj(*entry);
300 	if (!entry)
301 		return NULL;
302 
303 	ndev = rcu_dereference_protected(attr->ndev, 1);
304 	if (ndev) {
305 		entry->ndev_storage = kzalloc_obj(*entry->ndev_storage);
306 		if (!entry->ndev_storage) {
307 			kfree(entry);
308 			return NULL;
309 		}
310 		dev_hold(ndev);
311 		entry->ndev_storage->ndev = ndev;
312 	}
313 	kref_init(&entry->kref);
314 	memcpy(&entry->attr, attr, sizeof(*attr));
315 	INIT_WORK(&entry->del_work, free_gid_work);
316 	entry->state = GID_TABLE_ENTRY_INVALID;
317 	return entry;
318 }
319 
320 static void store_gid_entry(struct ib_gid_table *table,
321 			    struct ib_gid_table_entry *entry)
322 {
323 	entry->state = GID_TABLE_ENTRY_VALID;
324 
325 	dev_dbg(&entry->attr.device->dev, "%s port=%u index=%u gid %pI6\n",
326 		__func__, entry->attr.port_num, entry->attr.index,
327 		entry->attr.gid.raw);
328 
329 	lockdep_assert_held(&table->lock);
330 	write_lock_irq(&table->rwlock);
331 	table->data_vec[entry->attr.index] = entry;
332 	write_unlock_irq(&table->rwlock);
333 }
334 
335 static void get_gid_entry(struct ib_gid_table_entry *entry)
336 {
337 	kref_get(&entry->kref);
338 }
339 
340 static void put_gid_entry(struct ib_gid_table_entry *entry)
341 {
342 	kref_put(&entry->kref, schedule_free_gid);
343 }
344 
345 static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
346 {
347 	kref_put(&entry->kref, free_gid_entry);
348 }
349 
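/*
 * Reference-counting sketch: every stored entry holds one kref taken by
 * alloc_gid_entry(). Lookup paths take extra references with
 * get_gid_entry() under the read lock and drop them with put_gid_entry(),
 * which defers the final free to an ib_wq work item; put_gid_entry_locked()
 * is the variant for callers already holding table->lock and frees in place:
 *
 *	get_gid_entry(entry);
 *	... use entry->attr safely ...
 *	put_gid_entry(entry);	(may schedule free_gid_work())
 */
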
350 static int add_roce_gid(struct ib_gid_table_entry *entry)
351 {
352 	const struct ib_gid_attr *attr = &entry->attr;
353 	int ret;
354 
355 	if (!attr->ndev) {
356 		dev_err(&attr->device->dev, "%s NULL netdev port=%u index=%u\n",
357 			__func__, attr->port_num, attr->index);
358 		return -EINVAL;
359 	}
360 	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
361 		ret = attr->device->ops.add_gid(attr, &entry->context);
362 		if (ret) {
363 			dev_err(&attr->device->dev,
364 				"%s GID add failed port=%u index=%u\n",
365 				__func__, attr->port_num, attr->index);
366 			return ret;
367 		}
368 	}
369 	return 0;
370 }
371 
372 /**
373  * del_gid - Delete GID table entry
374  *
375  * @ib_dev:	IB device whose GID entry is to be deleted
376  * @port:	Port number of the IB device
377  * @table:	GID table of the IB device for a port
378  * @ix:		GID entry index to delete
379  *
380  */
381 static void del_gid(struct ib_device *ib_dev, u32 port,
382 		    struct ib_gid_table *table, int ix)
383 {
384 	struct roce_gid_ndev_storage *ndev_storage;
385 	struct ib_gid_table_entry *entry;
386 
387 	lockdep_assert_held(&table->lock);
388 
389 	dev_dbg(&ib_dev->dev, "%s port=%u index=%d gid %pI6\n", __func__, port,
390 		ix, table->data_vec[ix]->attr.gid.raw);
391 
392 	write_lock_irq(&table->rwlock);
393 	entry = table->data_vec[ix];
394 	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
395 	/*
396 	 * For non-RoCE protocols, the GID entry slot is immediately ready for reuse.
397 	 */
398 	if (!rdma_protocol_roce(ib_dev, port))
399 		table->data_vec[ix] = NULL;
400 	write_unlock_irq(&table->rwlock);
401 
402 	if (rdma_cap_roce_gid_table(ib_dev, port))
403 		ib_dev->ops.del_gid(&entry->attr, &entry->context);
404 
405 	ndev_storage = entry->ndev_storage;
406 	if (ndev_storage) {
407 		entry->ndev_storage = NULL;
408 		rcu_assign_pointer(entry->attr.ndev, NULL);
409 		call_rcu(&ndev_storage->rcu_head, put_gid_ndev);
410 	}
411 
412 	put_gid_entry_locked(entry);
413 }
414 
415 /**
416  * add_modify_gid - Add or modify GID table entry
417  *
418  * @table:	GID table in which GID to be added or modified
419  * @attr:	Attributes of the GID
420  *
421  * Returns 0 on success or an appropriate error code. It accepts zero
422  * GID additions for non-RoCE ports from HCAs that report them as valid
423  * GIDs. However, such zero GIDs are not added to the cache.
424  */
425 static int add_modify_gid(struct ib_gid_table *table,
426 			  const struct ib_gid_attr *attr)
427 {
428 	struct ib_gid_table_entry *entry;
429 	int ret = 0;
430 
431 	/*
432 	 * Invalidate any old entry in the table to make it safe to write to
433 	 * this index.
434 	 */
435 	if (is_gid_entry_valid(table->data_vec[attr->index]))
436 		del_gid(attr->device, attr->port_num, table, attr->index);
437 
438 	/*
439 	 * Some HCAs report multiple GID entries with only one valid GID, and
440 	 * leave the other unused entries as the zero GID. Convert zero GIDs to
441 	 * empty table entries instead of storing them.
442 	 */
443 	if (rdma_is_zero_gid(&attr->gid))
444 		return 0;
445 
446 	entry = alloc_gid_entry(attr);
447 	if (!entry)
448 		return -ENOMEM;
449 
450 	if (rdma_protocol_roce(attr->device, attr->port_num)) {
451 		ret = add_roce_gid(entry);
452 		if (ret)
453 			goto done;
454 	}
455 
456 	store_gid_entry(table, entry);
457 	return 0;
458 
459 done:
460 	put_gid_entry(entry);
461 	return ret;
462 }
463 
464 /* The caller must hold the read side of rwlock, or table->lock */
465 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
466 		    const struct ib_gid_attr *val, bool default_gid,
467 		    unsigned long mask, int *pempty)
468 {
469 	int i = 0;
470 	int found = -1;
471 	int empty = pempty ? -1 : 0;
472 
473 	while (i < table->sz && (found < 0 || empty < 0)) {
474 		struct ib_gid_table_entry *data = table->data_vec[i];
475 		struct ib_gid_attr *attr;
476 		int curr_index = i;
477 
478 		i++;
479 
480 		/* find_gid() is used during GID addition, where it is expected
481 		 * to return a free entry slot which is not a duplicate.
482 		 * A free entry slot is requested and returned only if pempty is
483 		 * set, so look up a free slot only when requested.
484 		 */
485 		if (pempty && empty < 0) {
486 			if (is_gid_entry_free(data) &&
487 			    default_gid ==
488 				is_gid_index_default(table, curr_index)) {
489 				/*
490 				 * Found an invalid (free) entry; allocate it.
491 				 * If default GID is requested, then our
492 				 * found slot must be one of the DEFAULT
493 				 * reserved slots or we fail.
494 				 * This ensures that only DEFAULT reserved
495 				 * slots are used for default property GIDs.
496 				 */
497 				empty = curr_index;
498 			}
499 		}
500 
501 		/*
502 		 * Additionally, find_gid() is used to find a valid entry during
503 		 * lookup operations; so ignore the entries which are marked as
504 		 * pending for removal and the entries which are marked as
505 		 * invalid.
506 		 */
507 		if (!is_gid_entry_valid(data))
508 			continue;
509 
510 		if (found >= 0)
511 			continue;
512 
513 		attr = &data->attr;
514 		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
515 		    attr->gid_type != val->gid_type)
516 			continue;
517 
518 		if (mask & GID_ATTR_FIND_MASK_GID &&
519 		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
520 			continue;
521 
522 		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
523 		    attr->ndev != val->ndev)
524 			continue;
525 
526 		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
527 		    is_gid_index_default(table, curr_index) != default_gid)
528 			continue;
529 
530 		found = curr_index;
531 	}
532 
533 	if (pempty)
534 		*pempty = empty;
535 
536 	return found;
537 }
538 
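/*
 * Matching sketch: callers compose gid_attr_find_mask bits to choose which
 * attributes find_gid() compares. A duplicate check before addition (as in
 * ib_cache_gid_add() below) could look like:
 *
 *	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 *			     GID_ATTR_FIND_MASK_GID_TYPE |
 *			     GID_ATTR_FIND_MASK_NETDEV;
 *	int empty;
 *	int ix = find_gid(table, gid, attr, false, mask, &empty);
 *
 * ix >= 0 means a duplicate already exists; otherwise empty (if >= 0) is a
 * free slot suitable for the new entry.
 */
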
539 static void make_default_gid(struct net_device *dev, union ib_gid *gid)
540 {
541 	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
542 	addrconf_ifid_eui48(&gid->raw[8], dev);
543 }
544 
545 static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
546 			      union ib_gid *gid, struct ib_gid_attr *attr,
547 			      unsigned long mask, bool default_gid)
548 {
549 	struct ib_gid_table *table;
550 	int ret = 0;
551 	int empty;
552 	int ix;
553 
554 	/* Do not allow adding a zero GID, per IB spec version 1.3,
555 	 * section 4.1.1 point (6), section 12.7.10 and
556 	 * section 12.7.20.
557 	 */
558 	if (rdma_is_zero_gid(gid))
559 		return -EINVAL;
560 
561 	table = rdma_gid_table(ib_dev, port);
562 
563 	mutex_lock(&table->lock);
564 
565 	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
566 	if (ix >= 0)
567 		goto out_unlock;
568 
569 	if (empty < 0) {
570 		ret = -ENOSPC;
571 		goto out_unlock;
572 	}
573 	attr->device = ib_dev;
574 	attr->index = empty;
575 	attr->port_num = port;
576 	attr->gid = *gid;
577 	ret = add_modify_gid(table, attr);
578 	if (!ret)
579 		dispatch_gid_change_event(ib_dev, port);
580 
581 out_unlock:
582 	mutex_unlock(&table->lock);
583 	if (ret)
584 		pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n",
585 				    __func__, gid->raw, ret);
586 	return ret;
587 }
588 
589 int ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
590 		     union ib_gid *gid, struct ib_gid_attr *attr)
591 {
592 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
593 			     GID_ATTR_FIND_MASK_GID_TYPE |
594 			     GID_ATTR_FIND_MASK_NETDEV;
595 
596 	return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
597 }
598 
599 static int
600 _ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
601 		  union ib_gid *gid, struct ib_gid_attr *attr,
602 		  unsigned long mask, bool default_gid)
603 {
604 	struct ib_gid_table *table;
605 	int ret = 0;
606 	int ix;
607 
608 	table = rdma_gid_table(ib_dev, port);
609 
610 	mutex_lock(&table->lock);
611 
612 	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
613 	if (ix < 0) {
614 		ret = -EINVAL;
615 		goto out_unlock;
616 	}
617 
618 	del_gid(ib_dev, port, table, ix);
619 	dispatch_gid_change_event(ib_dev, port);
620 
621 out_unlock:
622 	mutex_unlock(&table->lock);
623 	if (ret)
624 		pr_debug("%s: can't delete gid %pI6 error=%d\n",
625 			 __func__, gid->raw, ret);
626 	return ret;
627 }
628 
629 int ib_cache_gid_del(struct ib_device *ib_dev, u32 port,
630 		     union ib_gid *gid, struct ib_gid_attr *attr)
631 {
632 	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
633 			     GID_ATTR_FIND_MASK_GID_TYPE |
634 			     GID_ATTR_FIND_MASK_DEFAULT  |
635 			     GID_ATTR_FIND_MASK_NETDEV;
636 
637 	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
638 }
639 
640 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
641 				     struct net_device *ndev)
642 {
643 	struct ib_gid_table *table;
644 	int ix;
645 	bool deleted = false;
646 
647 	table = rdma_gid_table(ib_dev, port);
648 
649 	mutex_lock(&table->lock);
650 
651 	for (ix = 0; ix < table->sz; ix++) {
652 		if (is_gid_entry_valid(table->data_vec[ix]) &&
653 		    table->data_vec[ix]->attr.ndev == ndev) {
654 			del_gid(ib_dev, port, table, ix);
655 			deleted = true;
656 		}
657 	}
658 
659 	mutex_unlock(&table->lock);
660 
661 	if (deleted)
662 		dispatch_gid_change_event(ib_dev, port);
663 
664 	return 0;
665 }
666 
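/*
 * Usage sketch (hypothetical caller): RoCE GID management typically calls
 * this from a netdevice notifier so that all GIDs bound to a disappearing
 * netdev are dropped in one pass:
 *
 *	case NETDEV_UNREGISTER:
 *		ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
 *		break;
 */
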
667 /**
668  * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
669  * a valid GID entry for the given search parameters. It searches for the specified
670  * GID value in the local software cache.
671  * @ib_dev: The device to query.
672  * @gid: The GID value to search for.
673  * @gid_type: The GID type to search for.
674  * @port: The port number of the device where the GID value should be searched.
675  * @ndev: In RoCE, the net device of the device. NULL means ignore.
676  *
677  * Returns the SGID attributes with a valid reference if the GID is found,
678  * or returns an ERR_PTR on error.
679  * The caller must invoke rdma_put_gid_attr() to release the reference.
680  */
681 const struct ib_gid_attr *
682 rdma_find_gid_by_port(struct ib_device *ib_dev,
683 		      const union ib_gid *gid,
684 		      enum ib_gid_type gid_type,
685 		      u32 port, struct net_device *ndev)
686 {
687 	int local_index;
688 	struct ib_gid_table *table;
689 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
690 			     GID_ATTR_FIND_MASK_GID_TYPE;
691 	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
692 	const struct ib_gid_attr *attr;
693 	unsigned long flags;
694 
695 	if (!rdma_is_port_valid(ib_dev, port))
696 		return ERR_PTR(-ENOENT);
697 
698 	table = rdma_gid_table(ib_dev, port);
699 
700 	if (ndev)
701 		mask |= GID_ATTR_FIND_MASK_NETDEV;
702 
703 	read_lock_irqsave(&table->rwlock, flags);
704 	local_index = find_gid(table, gid, &val, false, mask, NULL);
705 	if (local_index >= 0) {
706 		get_gid_entry(table->data_vec[local_index]);
707 		attr = &table->data_vec[local_index]->attr;
708 		read_unlock_irqrestore(&table->rwlock, flags);
709 		return attr;
710 	}
711 
712 	read_unlock_irqrestore(&table->rwlock, flags);
713 	return ERR_PTR(-ENOENT);
714 }
715 EXPORT_SYMBOL(rdma_find_gid_by_port);
716 
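/*
 * Usage sketch: the returned attribute is reference counted and must be
 * released with rdma_put_gid_attr(); the port and GID type below are
 * hypothetical.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid,
 *				     IB_GID_TYPE_ROCE_UDP_ENCAP, 1, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->index, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */
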
717 /**
718  * rdma_find_gid_by_filter - Returns the GID table attribute where a
719  * specified GID value occurs
720  * @ib_dev: The device to query.
721  * @gid: The GID value to search for.
722  * @port: The port number of the device where the GID value could be
723  *   searched.
724  * @filter: The filter function is executed on any matching GID in the table.
725  *   If the filter function returns true, the corresponding index is returned,
726  *   otherwise, we continue searching the GID table. It's guaranteed that
727  *   while filter is executed, ndev field is valid and the structure won't
728  *   while filter is executed, the ndev field is valid and the structure won't
729  * @context: Private data to pass into the call-back.
730  *
731  * rdma_find_gid_by_filter() searches the port's GID table for the specified
732  * GID value for which the filter function returns true.
733  *
734  */
735 const struct ib_gid_attr *rdma_find_gid_by_filter(
736 	struct ib_device *ib_dev, const union ib_gid *gid, u32 port,
737 	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
738 		       void *),
739 	void *context)
740 {
741 	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
742 	struct ib_gid_table *table;
743 	unsigned long flags;
744 	unsigned int i;
745 
746 	if (!rdma_is_port_valid(ib_dev, port))
747 		return ERR_PTR(-EINVAL);
748 
749 	table = rdma_gid_table(ib_dev, port);
750 
751 	read_lock_irqsave(&table->rwlock, flags);
752 	for (i = 0; i < table->sz; i++) {
753 		struct ib_gid_table_entry *entry = table->data_vec[i];
754 
755 		if (!is_gid_entry_valid(entry))
756 			continue;
757 
758 		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
759 			continue;
760 
761 		if (filter(gid, &entry->attr, context)) {
762 			get_gid_entry(entry);
763 			res = &entry->attr;
764 			break;
765 		}
766 	}
767 	read_unlock_irqrestore(&table->rwlock, flags);
768 	return res;
769 }
770 
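/*
 * Filter sketch (hypothetical): match only entries of a given GID type,
 * with the type passed through @context. The filter runs under the table's
 * read lock, so it must not sleep; a successful lookup still has to be
 * released with rdma_put_gid_attr().
 *
 *	static bool match_gid_type(const union ib_gid *gid,
 *				   const struct ib_gid_attr *attr,
 *				   void *context)
 *	{
 *		return attr->gid_type == *(enum ib_gid_type *)context;
 *	}
 *
 *	enum ib_gid_type t = IB_GID_TYPE_ROCE_UDP_ENCAP;
 *	const struct ib_gid_attr *attr =
 *		rdma_find_gid_by_filter(ib_dev, &gid, 1, match_gid_type, &t);
 */
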
771 static struct ib_gid_table *alloc_gid_table(int sz)
772 {
773 	struct ib_gid_table *table = kzalloc_flex(*table, data_vec, sz);
774 
775 	if (!table)
776 		return NULL;
777 
778 	table->sz = sz;
779 
780 	mutex_init(&table->lock);
781 	rwlock_init(&table->rwlock);
782 	return table;
783 }
784 
785 static void release_gid_table(struct ib_device *device,
786 			      struct ib_gid_table *table)
787 {
788 	int i;
789 
790 	if (!table)
791 		return;
792 
793 	for (i = 0; i < table->sz; i++) {
794 		if (is_gid_entry_free(table->data_vec[i]))
795 			continue;
796 
797 		WARN_ONCE(true,
798 			  "GID entry ref leak for dev %s index %d ref=%u\n",
799 			  dev_name(&device->dev), i,
800 			  kref_read(&table->data_vec[i]->kref));
801 	}
802 
803 	mutex_destroy(&table->lock);
804 	kfree(table);
805 }
806 
807 static void cleanup_gid_table_port(struct ib_device *ib_dev, u32 port,
808 				   struct ib_gid_table *table)
809 {
810 	int i;
811 
812 	if (!table)
813 		return;
814 
815 	mutex_lock(&table->lock);
816 	for (i = 0; i < table->sz; ++i) {
817 		if (is_gid_entry_valid(table->data_vec[i]))
818 			del_gid(ib_dev, port, table, i);
819 	}
820 	mutex_unlock(&table->lock);
821 }
822 
823 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u32 port,
824 				  struct net_device *ndev,
825 				  unsigned long gid_type_mask,
826 				  enum ib_cache_gid_default_mode mode)
827 {
828 	union ib_gid gid = { };
829 	struct ib_gid_attr gid_attr;
830 	unsigned int gid_type;
831 	unsigned long mask;
832 
833 	mask = GID_ATTR_FIND_MASK_GID_TYPE |
834 	       GID_ATTR_FIND_MASK_DEFAULT |
835 	       GID_ATTR_FIND_MASK_NETDEV;
836 	memset(&gid_attr, 0, sizeof(gid_attr));
837 	gid_attr.ndev = ndev;
838 
839 	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
840 		if (1UL << gid_type & ~gid_type_mask)
841 			continue;
842 
843 		gid_attr.gid_type = gid_type;
844 
845 		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
846 			make_default_gid(ndev, &gid);
847 			__ib_cache_gid_add(ib_dev, port, &gid,
848 					   &gid_attr, mask, true);
849 		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
850 			_ib_cache_gid_del(ib_dev, port, &gid,
851 					  &gid_attr, mask, true);
852 		}
853 	}
854 }
855 
856 static void gid_table_reserve_default(struct ib_device *ib_dev, u32 port,
857 				      struct ib_gid_table *table)
858 {
859 	unsigned int i;
860 	unsigned long roce_gid_type_mask;
861 	unsigned int num_default_gids;
862 
863 	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
864 	num_default_gids = hweight_long(roce_gid_type_mask);
865 	/* Reserve starting indices for default GIDs */
866 	for (i = 0; i < num_default_gids && i < table->sz; i++)
867 		table->default_gid_indices |= BIT(i);
868 }
869 
870 
871 static void gid_table_release_one(struct ib_device *ib_dev)
872 {
873 	u32 p;
874 
875 	rdma_for_each_port (ib_dev, p) {
876 		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
877 		ib_dev->port_data[p].cache.gid = NULL;
878 	}
879 }
880 
881 static int _gid_table_setup_one(struct ib_device *ib_dev)
882 {
883 	struct ib_gid_table *table;
884 	u32 rdma_port;
885 
886 	rdma_for_each_port (ib_dev, rdma_port) {
887 		table = alloc_gid_table(
888 			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
889 		if (!table)
890 			goto rollback_table_setup;
891 
892 		gid_table_reserve_default(ib_dev, rdma_port, table);
893 		ib_dev->port_data[rdma_port].cache.gid = table;
894 	}
895 	return 0;
896 
897 rollback_table_setup:
898 	gid_table_release_one(ib_dev);
899 	return -ENOMEM;
900 }
901 
902 static void gid_table_cleanup_one(struct ib_device *ib_dev)
903 {
904 	u32 p;
905 
906 	rdma_for_each_port (ib_dev, p)
907 		cleanup_gid_table_port(ib_dev, p,
908 				       ib_dev->port_data[p].cache.gid);
909 }
910 
911 static int gid_table_setup_one(struct ib_device *ib_dev)
912 {
913 	int err;
914 
915 	err = _gid_table_setup_one(ib_dev);
916 
917 	if (err)
918 		return err;
919 
920 	/*
921 	 * Mark the device as ready for GID cache updates. This allows netdev
922 	 * event handlers to update the GID cache even before the device is
923 	 * fully registered.
924 	 */
925 	ib_device_enable_gid_updates(ib_dev);
926 
927 	rdma_roce_rescan_device(ib_dev);
928 
929 	return err;
930 }
931 
932 /**
933  * rdma_query_gid - Read the GID content from the GID software cache
934  * @device:		Device to query the GID
935  * @port_num:		Port number of the device
936  * @index:		Index of the GID table entry to read
937  * @gid:		Pointer to GID where to store the entry's GID
938  *
939  * rdma_query_gid() only reads the GID entry content for the requested device,
940  * port and index. It works for IB, RoCE and iWARP link layers. It doesn't
941  * hold any reference to the GID table entry in the HCA or software cache.
942  *
943  * Returns 0 on success or appropriate error code.
944  *
945  */
946 int rdma_query_gid(struct ib_device *device, u32 port_num,
947 		   int index, union ib_gid *gid)
948 {
949 	struct ib_gid_table *table;
950 	unsigned long flags;
951 	int res;
952 
953 	if (!rdma_is_port_valid(device, port_num))
954 		return -EINVAL;
955 
956 	table = rdma_gid_table(device, port_num);
957 	read_lock_irqsave(&table->rwlock, flags);
958 
959 	if (index < 0 || index >= table->sz) {
960 		res = -EINVAL;
961 		goto done;
962 	}
963 
964 	if (!is_gid_entry_valid(table->data_vec[index])) {
965 		res = -ENOENT;
966 		goto done;
967 	}
968 
969 	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
970 	res = 0;
971 
972 done:
973 	read_unlock_irqrestore(&table->rwlock, flags);
974 	return res;
975 }
976 EXPORT_SYMBOL(rdma_query_gid);
977 
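/*
 * Usage sketch: reading the first GID of port 1 by value only. No reference
 * is held on the underlying entry, so the copy may become stale as soon as
 * the lock is dropped.
 *
 *	union ib_gid gid;
 *	int ret = rdma_query_gid(device, 1, 0, &gid);
 *
 *	if (!ret && !rdma_is_zero_gid(&gid))
 *		pr_debug("port 1 gid0: %pI6\n", gid.raw);
 */
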
978 /**
979  * rdma_read_gid_hw_context - Read the HW GID context from GID attribute
980  * @attr:		Pointer to the GID attribute
981  *
982  * rdma_read_gid_hw_context() reads the driver's GID HW context corresponding
983  * to the SGID attr. Callers are required to already be holding the reference
984  * to an existing GID entry.
985  *
986  * Returns the HW GID context
987  *
988  */
989 void *rdma_read_gid_hw_context(const struct ib_gid_attr *attr)
990 {
991 	return container_of(attr, struct ib_gid_table_entry, attr)->context;
992 }
993 EXPORT_SYMBOL(rdma_read_gid_hw_context);
994 
995 /**
996  * rdma_find_gid - Returns SGID attributes if the matching GID is found.
997  * @device: The device to query.
998  * @gid: The GID value to search for.
999  * @gid_type: The GID type to search for.
1000  * @ndev: In RoCE, the net device of the device. NULL means ignore.
1001  *
1002  * rdma_find_gid() searches for the specified GID value in the software cache.
1003  *
1004  * Returns GID attributes if a valid GID is found, or returns ERR_PTR on
1005  * error. The caller must invoke rdma_put_gid_attr() to release the reference.
1006  *
1007  */
1008 const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
1009 					const union ib_gid *gid,
1010 					enum ib_gid_type gid_type,
1011 					struct net_device *ndev)
1012 {
1013 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
1014 			     GID_ATTR_FIND_MASK_GID_TYPE;
1015 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
1016 	u32 p;
1017 
1018 	if (ndev)
1019 		mask |= GID_ATTR_FIND_MASK_NETDEV;
1020 
1021 	rdma_for_each_port(device, p) {
1022 		struct ib_gid_table *table;
1023 		unsigned long flags;
1024 		int index;
1025 
1026 		table = device->port_data[p].cache.gid;
1027 		read_lock_irqsave(&table->rwlock, flags);
1028 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
1029 		if (index >= 0) {
1030 			const struct ib_gid_attr *attr;
1031 
1032 			get_gid_entry(table->data_vec[index]);
1033 			attr = &table->data_vec[index]->attr;
1034 			read_unlock_irqrestore(&table->rwlock, flags);
1035 			return attr;
1036 		}
1037 		read_unlock_irqrestore(&table->rwlock, flags);
1038 	}
1039 
1040 	return ERR_PTR(-ENOENT);
1041 }
1042 EXPORT_SYMBOL(rdma_find_gid);
1043 
1044 int ib_get_cached_pkey(struct ib_device *device,
1045 		       u32               port_num,
1046 		       int               index,
1047 		       u16              *pkey)
1048 {
1049 	struct ib_pkey_cache *cache;
1050 	unsigned long flags;
1051 	int ret = 0;
1052 
1053 	if (!rdma_is_port_valid(device, port_num))
1054 		return -EINVAL;
1055 
1056 	read_lock_irqsave(&device->cache_lock, flags);
1057 
1058 	cache = device->port_data[port_num].cache.pkey;
1059 
1060 	if (!cache || index < 0 || index >= cache->table_len)
1061 		ret = -EINVAL;
1062 	else
1063 		*pkey = cache->table[index];
1064 
1065 	read_unlock_irqrestore(&device->cache_lock, flags);
1066 
1067 	return ret;
1068 }
1069 EXPORT_SYMBOL(ib_get_cached_pkey);
1070 
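/*
 * Usage sketch: index 0 conventionally holds the default pkey (0xffff on
 * most IB fabrics); the index here is illustrative.
 *
 *	u16 pkey;
 *	int ret = ib_get_cached_pkey(device, port_num, 0, &pkey);
 *
 *	if (ret)
 *		return ret;
 */
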
1071 void ib_get_cached_subnet_prefix(struct ib_device *device, u32 port_num,
1072 				u64 *sn_pfx)
1073 {
1074 	unsigned long flags;
1075 
1076 	read_lock_irqsave(&device->cache_lock, flags);
1077 	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
1078 	read_unlock_irqrestore(&device->cache_lock, flags);
1079 }
1080 EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
1081 
1082 int ib_find_cached_pkey(struct ib_device *device, u32 port_num,
1083 			u16 pkey, u16 *index)
1084 {
1085 	struct ib_pkey_cache *cache;
1086 	unsigned long flags;
1087 	int i;
1088 	int ret = -ENOENT;
1089 	int partial_ix = -1;
1090 
1091 	if (!rdma_is_port_valid(device, port_num))
1092 		return -EINVAL;
1093 
1094 	read_lock_irqsave(&device->cache_lock, flags);
1095 
1096 	cache = device->port_data[port_num].cache.pkey;
1097 	if (!cache) {
1098 		ret = -EINVAL;
1099 		goto err;
1100 	}
1101 
1102 	*index = -1;
1103 
1104 	for (i = 0; i < cache->table_len; ++i)
1105 		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
1106 			if (cache->table[i] & 0x8000) {
1107 				*index = i;
1108 				ret = 0;
1109 				break;
1110 			} else {
1111 				partial_ix = i;
1112 			}
1113 		}
1114 
1115 	if (ret && partial_ix >= 0) {
1116 		*index = partial_ix;
1117 		ret = 0;
1118 	}
1119 
1120 err:
1121 	read_unlock_irqrestore(&device->cache_lock, flags);
1122 
1123 	return ret;
1124 }
1125 EXPORT_SYMBOL(ib_find_cached_pkey);
1126 
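/*
 * Note on the masks above: bit 15 of a pkey is the full-membership bit and
 * the low 15 bits are the partition number, so the loop prefers a
 * full-membership match (0x8000 set) and falls back to a limited-membership
 * entry with the same partition bits. A hypothetical lookup of partition 1:
 *
 *	u16 index;
 *
 *	if (!ib_find_cached_pkey(device, port_num, 0x8001, &index))
 *		... index refers to a full member if present, else limited ...
 */
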
1127 int ib_get_cached_lmc(struct ib_device *device, u32 port_num, u8 *lmc)
1128 {
1129 	unsigned long flags;
1130 	int ret = 0;
1131 
1132 	if (!rdma_is_port_valid(device, port_num))
1133 		return -EINVAL;
1134 
1135 	read_lock_irqsave(&device->cache_lock, flags);
1136 	*lmc = device->port_data[port_num].cache.lmc;
1137 	read_unlock_irqrestore(&device->cache_lock, flags);
1138 
1139 	return ret;
1140 }
1141 EXPORT_SYMBOL(ib_get_cached_lmc);
1142 
1143 int ib_get_cached_port_state(struct ib_device *device, u32 port_num,
1144 			     enum ib_port_state *port_state)
1145 {
1146 	unsigned long flags;
1147 	int ret = 0;
1148 
1149 	if (!rdma_is_port_valid(device, port_num))
1150 		return -EINVAL;
1151 
1152 	read_lock_irqsave(&device->cache_lock, flags);
1153 	*port_state = device->port_data[port_num].cache.port_state;
1154 	read_unlock_irqrestore(&device->cache_lock, flags);
1155 
1156 	return ret;
1157 }
1158 EXPORT_SYMBOL(ib_get_cached_port_state);
1159 
1160 /**
1161  * rdma_get_gid_attr - Returns GID attributes for a port of a device
1162  * at a requested gid_index, if a valid GID entry exists.
1163  * @device:		The device to query.
1164  * @port_num:		The port number on the device where the GID value
1165  *			is to be queried.
1166  * @index:		Index of the GID table entry whose attributes are to
1167  *                      be queried.
1168  *
1169  * rdma_get_gid_attr() acquires reference count of gid attributes from the
1170  * cached GID table. Caller must invoke rdma_put_gid_attr() to release
1171  * reference to gid attribute regardless of link layer.
1172  *
1173  * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
1174  * code.
1175  */
1176 const struct ib_gid_attr *
1177 rdma_get_gid_attr(struct ib_device *device, u32 port_num, int index)
1178 {
1179 	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
1180 	struct ib_gid_table *table;
1181 	unsigned long flags;
1182 
1183 	if (!rdma_is_port_valid(device, port_num))
1184 		return ERR_PTR(-EINVAL);
1185 
1186 	table = rdma_gid_table(device, port_num);
1187 	if (index < 0 || index >= table->sz)
1188 		return ERR_PTR(-EINVAL);
1189 
1190 	read_lock_irqsave(&table->rwlock, flags);
1191 	if (!is_gid_entry_valid(table->data_vec[index]))
1192 		goto done;
1193 
1194 	get_gid_entry(table->data_vec[index]);
1195 	attr = &table->data_vec[index]->attr;
1196 done:
1197 	read_unlock_irqrestore(&table->rwlock, flags);
1198 	return attr;
1199 }
1200 EXPORT_SYMBOL(rdma_get_gid_attr);
1201 
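/*
 * Usage sketch: pairing with rdma_put_gid_attr(); the port and index below
 * are hypothetical.
 *
 *	const struct ib_gid_attr *attr = rdma_get_gid_attr(device, 1, 0);
 *
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... inspect attr->gid, attr->gid_type ...
 *	rdma_put_gid_attr(attr);
 */
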
1202 /**
1203  * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
1204  * @device: The device to query.
1205  * @entries: Entries where GID entries are returned.
1206  * @max_entries: Maximum number of entries that can be returned.
1207  * The entries array must be allocated to hold max_entries entries.
1208  *
1209  * Returns number of entries on success or appropriate error code.
1210  */
1211 ssize_t rdma_query_gid_table(struct ib_device *device,
1212 			     struct ib_uverbs_gid_entry *entries,
1213 			     size_t max_entries)
1214 {
1215 	const struct ib_gid_attr *gid_attr;
1216 	ssize_t num_entries = 0, ret;
1217 	struct ib_gid_table *table;
1218 	u32 port_num, i;
1219 	struct net_device *ndev;
1220 	unsigned long flags;
1221 
1222 	rdma_for_each_port(device, port_num) {
1223 		table = rdma_gid_table(device, port_num);
1224 		read_lock_irqsave(&table->rwlock, flags);
1225 		for (i = 0; i < table->sz; i++) {
1226 			if (!is_gid_entry_valid(table->data_vec[i]))
1227 				continue;
1228 			if (num_entries >= max_entries) {
1229 				ret = -EINVAL;
1230 				goto err;
1231 			}
1232 
1233 			gid_attr = &table->data_vec[i]->attr;
1234 
1235 			memcpy(&entries->gid, &gid_attr->gid,
1236 			       sizeof(gid_attr->gid));
1237 			entries->gid_index = gid_attr->index;
1238 			entries->port_num = gid_attr->port_num;
1239 			entries->gid_type = gid_attr->gid_type;
1240 			ndev = rcu_dereference_protected(
1241 				gid_attr->ndev,
1242 				lockdep_is_held(&table->rwlock));
1243 			if (ndev)
1244 				entries->netdev_ifindex = ndev->ifindex;
1245 
1246 			num_entries++;
1247 			entries++;
1248 		}
1249 		read_unlock_irqrestore(&table->rwlock, flags);
1250 	}
1251 
1252 	return num_entries;
1253 err:
1254 	read_unlock_irqrestore(&table->rwlock, flags);
1255 	return ret;
1256 }
1257 EXPORT_SYMBOL(rdma_query_gid_table);
1258 
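/*
 * Usage sketch (hypothetical sizing): the caller supplies a flat array
 * large enough for all valid entries across all ports; a negative return
 * is an errno.
 *
 *	struct ib_uverbs_gid_entry *entries;
 *	ssize_t n;
 *
 *	entries = kcalloc(max_entries, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 *	n = rdma_query_gid_table(device, entries, max_entries);
 */
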
1259 /**
1260  * rdma_put_gid_attr - Release reference to the GID attribute
1261  * @attr:		Pointer to the GID attribute whose reference
1262  *			needs to be released.
1263  *
1264  * rdma_put_gid_attr() must be used to release a reference that was
1265  * acquired using rdma_get_gid_attr() or any API
1266  * which returns a pointer to the ib_gid_attr, regardless of whether the
1267  * link layer is IB or RoCE.
1268  *
1269  */
1270 void rdma_put_gid_attr(const struct ib_gid_attr *attr)
1271 {
1272 	struct ib_gid_table_entry *entry =
1273 		container_of(attr, struct ib_gid_table_entry, attr);
1274 
1275 	put_gid_entry(entry);
1276 }
1277 EXPORT_SYMBOL(rdma_put_gid_attr);
1278 
1279 /**
1280  * rdma_hold_gid_attr - Get reference to existing GID attribute
1281  *
1282  * @attr:		Pointer to the GID attribute whose reference
1283  *			needs to be taken.
1284  *
1285  * Increase the reference count to a GID attribute to keep it from being
1286  * freed. Callers are required to already be holding a reference to attribute.
1287  *
1288  */
1289 void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
1290 {
1291 	struct ib_gid_table_entry *entry =
1292 		container_of(attr, struct ib_gid_table_entry, attr);
1293 
1294 	get_gid_entry(entry);
1295 }
1296 EXPORT_SYMBOL(rdma_hold_gid_attr);
1297 
1298 /**
1299  * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
1300  * which must be in UP state.
1301  *
1302  * @attr:	Pointer to the GID attribute
1303  *
1304  * Returns a pointer to the netdevice if a netdevice was attached to the GID
1305  * and the netdevice is in the UP state. The caller must hold the RCU read
1306  * lock, as this API reads netdev flags which can change while the netdevice
1307  * migrates to a different net namespace. Returns ERR_PTR with an error code otherwise.
1308  *
1309  */
1310 struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
1311 {
1312 	struct ib_gid_table_entry *entry =
1313 			container_of(attr, struct ib_gid_table_entry, attr);
1314 	struct ib_device *device = entry->attr.device;
1315 	struct net_device *ndev = ERR_PTR(-EINVAL);
1316 	u32 port_num = entry->attr.port_num;
1317 	struct ib_gid_table *table;
1318 	unsigned long flags;
1319 	bool valid;
1320 
1321 	table = rdma_gid_table(device, port_num);
1322 
1323 	read_lock_irqsave(&table->rwlock, flags);
1324 	valid = is_gid_entry_valid(table->data_vec[attr->index]);
1325 	if (valid) {
1326 		ndev = rcu_dereference(attr->ndev);
1327 		if (!ndev)
1328 			ndev = ERR_PTR(-ENODEV);
1329 	}
1330 	read_unlock_irqrestore(&table->rwlock, flags);
1331 	return ndev;
1332 }
1333 EXPORT_SYMBOL(rdma_read_gid_attr_ndev_rcu);
1334 
1335 static int get_lower_dev_vlan(struct net_device *lower_dev,
1336 			      struct netdev_nested_priv *priv)
1337 {
1338 	u16 *vlan_id = (u16 *)priv->data;
1339 
1340 	if (is_vlan_dev(lower_dev))
1341 		*vlan_id = vlan_dev_vlan_id(lower_dev);
1342 
1343 	/* We are interested only in the first-level vlan device, so
1344 	 * always return 1 to stop iterating over next-level devices.
1345 	 */
1346 	return 1;
1347 }
1348 
1349 /**
1350  * rdma_read_gid_l2_fields - Read the vlan ID and source MAC address
1351  *			     of a GID entry.
1352  *
1353  * @attr:	GID attribute pointer whose L2 fields to be read
1354  * @vlan_id:	Pointer to vlan id to fill up if the GID entry has
1355  *		vlan id. It is optional.
1356  * @smac:	Pointer to smac to fill up for a GID entry. It is optional.
1357  *
1358  * rdma_read_gid_l2_fields() returns 0 on success, filling in the vlan id
1359  * (if the gid entry has one) and the source MAC, or returns an error.
1360  */
1361 int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
1362 			    u16 *vlan_id, u8 *smac)
1363 {
1364 	struct netdev_nested_priv priv = {
1365 		.data = (void *)vlan_id,
1366 	};
1367 	struct net_device *ndev;
1368 
1369 	rcu_read_lock();
1370 	ndev = rcu_dereference(attr->ndev);
1371 	if (!ndev) {
1372 		rcu_read_unlock();
1373 		return -ENODEV;
1374 	}
1375 	if (smac)
1376 		ether_addr_copy(smac, ndev->dev_addr);
1377 	if (vlan_id) {
1378 		*vlan_id = 0xffff;
1379 		if (is_vlan_dev(ndev)) {
1380 			*vlan_id = vlan_dev_vlan_id(ndev);
1381 		} else {
1382 			/* If the netdev is an upper device and its lower
1383 			 * device is a vlan device, consider the vlan id of
1384 			 * the lower vlan device for this gid entry.
1385 			 */
1386 			netdev_walk_all_lower_dev_rcu(attr->ndev,
1387 					get_lower_dev_vlan, &priv);
1388 		}
1389 	}
1390 	rcu_read_unlock();
1391 	return 0;
1392 }
1393 EXPORT_SYMBOL(rdma_read_gid_l2_fields);
1394 
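/*
 * Usage sketch: resolving L2 addressing for a RoCE GID entry; a vlan_id of
 * 0xffff means the GID entry has no vlan.
 *
 *	u16 vlan_id;
 *	u8 smac[ETH_ALEN];
 *	int ret = rdma_read_gid_l2_fields(attr, &vlan_id, smac);
 *
 *	if (!ret && vlan_id != 0xffff)
 *		... build a vlan-tagged L2 header ...
 */
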
1395 static int config_non_roce_gid_cache(struct ib_device *device,
1396 				     u32 port, struct ib_port_attr *tprops)
1397 {
1398 	struct ib_gid_attr gid_attr = {};
1399 	struct ib_gid_table *table;
1400 	int ret = 0;
1401 	int i;
1402 
1403 	gid_attr.device = device;
1404 	gid_attr.port_num = port;
1405 	table = rdma_gid_table(device, port);
1406 
1407 	mutex_lock(&table->lock);
1408 	for (i = 0; i < tprops->gid_tbl_len; ++i) {
1409 		if (!device->ops.query_gid)
1410 			continue;
1411 		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
1412 		if (ret) {
1413 			dev_warn(&device->dev,
1414 				 "query_gid failed (%d) for index %d\n", ret,
1415 				 i);
1416 			goto err;
1417 		}
1418 
1419 		if (rdma_protocol_iwarp(device, port)) {
1420 			struct net_device *ndev;
1421 
1422 			ndev = ib_device_get_netdev(device, port);
1423 			if (!ndev)
1424 				continue;
1425 			RCU_INIT_POINTER(gid_attr.ndev, ndev);
1426 			dev_put(ndev);
1427 		}
1428 
1429 		gid_attr.index = i;
1430 		tprops->subnet_prefix =
1431 			be64_to_cpu(gid_attr.gid.global.subnet_prefix);
1432 		add_modify_gid(table, &gid_attr);
1433 	}
1434 err:
1435 	mutex_unlock(&table->lock);
1436 	return ret;
1437 }
1438 
1439 static int
1440 ib_cache_update(struct ib_device *device, u32 port, bool update_gids,
1441 		bool update_pkeys, bool enforce_security)
1442 {
1443 	struct ib_port_attr       *tprops = NULL;
1444 	struct ib_pkey_cache      *pkey_cache = NULL;
1445 	struct ib_pkey_cache      *old_pkey_cache = NULL;
1446 	int                        i;
1447 	int                        ret;
1448 
1449 	if (!rdma_is_port_valid(device, port))
1450 		return -EINVAL;
1451 
1452 	tprops = kmalloc_obj(*tprops);
1453 	if (!tprops)
1454 		return -ENOMEM;
1455 
1456 	ret = ib_query_port(device, port, tprops);
1457 	if (ret) {
1458 		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
1459 		goto err;
1460 	}
1461 
1462 	if (!rdma_protocol_roce(device, port) && update_gids) {
1463 		ret = config_non_roce_gid_cache(device, port,
1464 						tprops);
1465 		if (ret)
1466 			goto err;
1467 	}
1468 
1469 	update_pkeys &= !!tprops->pkey_tbl_len;
1470 
1471 	if (update_pkeys) {
1472 		pkey_cache = kmalloc_flex(*pkey_cache, table,
1473 					  tprops->pkey_tbl_len);
1474 		if (!pkey_cache) {
1475 			ret = -ENOMEM;
1476 			goto err;
1477 		}
1478 
1479 		pkey_cache->table_len = tprops->pkey_tbl_len;
1480 
1481 		for (i = 0; i < pkey_cache->table_len; ++i) {
1482 			ret = ib_query_pkey(device, port, i,
1483 					    pkey_cache->table + i);
1484 			if (ret) {
1485 				dev_warn(&device->dev,
1486 					 "ib_query_pkey failed (%d) for index %d\n",
1487 					 ret, i);
1488 				goto err;
1489 			}
1490 		}
1491 	}
1492 
1493 	write_lock_irq(&device->cache_lock);
1494 
1495 	if (update_pkeys) {
1496 		old_pkey_cache = device->port_data[port].cache.pkey;
1497 		device->port_data[port].cache.pkey = pkey_cache;
1498 	}
1499 	device->port_data[port].cache.lmc = tprops->lmc;
1500 
1501 	if (device->port_data[port].cache.port_state != IB_PORT_NOP &&
1502 	    device->port_data[port].cache.port_state != tprops->state)
1503 		ibdev_info(device, "Port: %d Link %s\n", port,
1504 			   ib_port_state_to_str(tprops->state));
1505 
1506 	device->port_data[port].cache.port_state = tprops->state;
1507 
1508 	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
1509 	write_unlock_irq(&device->cache_lock);
1510 
1511 	if (enforce_security)
1512 		ib_security_cache_change(device,
1513 					 port,
1514 					 tprops->subnet_prefix);
1515 
1516 	kfree(old_pkey_cache);
1517 	kfree(tprops);
1518 	return 0;
1519 
1520 err:
1521 	kfree(pkey_cache);
1522 	kfree(tprops);
1523 	return ret;
1524 }
1525 
1526 static void ib_cache_event_task(struct work_struct *_work)
1527 {
1528 	struct ib_update_work *work =
1529 		container_of(_work, struct ib_update_work, work);
1530 	int ret;
1531 
1532 	/* Before distributing the cache update event, first sync
1533 	 * the cache.
1534 	 */
1535 	ret = ib_cache_update(work->event.device, work->event.element.port_num,
1536 			      work->event.event == IB_EVENT_GID_CHANGE ||
1537 			      work->event.event == IB_EVENT_CLIENT_REREGISTER,
1538 			      work->event.event == IB_EVENT_PKEY_CHANGE,
1539 			      work->enforce_security);
1540 
1541 	/* The GID event is already notified for individual GID entries by
1542 	 * dispatch_gid_change_event(). Hence, notify only for the rest of
1543 	 * the events.
1544 	 */
1545 	if (!ret && work->event.event != IB_EVENT_GID_CHANGE)
1546 		ib_dispatch_event_clients(&work->event);
1547 
1548 	kfree(work);
1549 }
1550 
1551 static void ib_generic_event_task(struct work_struct *_work)
1552 {
1553 	struct ib_update_work *work =
1554 		container_of(_work, struct ib_update_work, work);
1555 
1556 	ib_dispatch_event_clients(&work->event);
1557 	kfree(work);
1558 }
1559 
1560 static bool is_cache_update_event(const struct ib_event *event)
1561 {
1562 	return (event->event == IB_EVENT_PORT_ERR    ||
1563 		event->event == IB_EVENT_PORT_ACTIVE ||
1564 		event->event == IB_EVENT_LID_CHANGE  ||
1565 		event->event == IB_EVENT_PKEY_CHANGE ||
1566 		event->event == IB_EVENT_CLIENT_REREGISTER ||
1567 		event->event == IB_EVENT_GID_CHANGE);
1568 }
1569 
1570 /**
1571  * ib_dispatch_event - Dispatch an asynchronous event
1572  * @event:Event to dispatch
1573  *
1574  * Low-level drivers must call ib_dispatch_event() to dispatch the
1575  * event to all registered event handlers when an asynchronous event
1576  * occurs.
1577  */
1578 void ib_dispatch_event(const struct ib_event *event)
1579 {
1580 	struct ib_update_work *work;
1581 
1582 	work = kzalloc_obj(*work, GFP_ATOMIC);
1583 	if (!work)
1584 		return;
1585 
1586 	if (is_cache_update_event(event))
1587 		INIT_WORK(&work->work, ib_cache_event_task);
1588 	else
1589 		INIT_WORK(&work->work, ib_generic_event_task);
1590 
1591 	work->event = *event;
1592 	if (event->event == IB_EVENT_PKEY_CHANGE ||
1593 	    event->event == IB_EVENT_GID_CHANGE)
1594 		work->enforce_security = true;
1595 
1596 	queue_work(ib_wq, &work->work);
1597 }
1598 EXPORT_SYMBOL(ib_dispatch_event);
1599 
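/*
 * Usage sketch: a low-level driver reporting a port state change. Because
 * IB_EVENT_PORT_ACTIVE is a cache-update event, ib_cache_event_task() syncs
 * the cache before clients are notified.
 *
 *	struct ib_event ev = {
 *		.device = ibdev,
 *		.element.port_num = port,
 *		.event = IB_EVENT_PORT_ACTIVE,
 *	};
 *
 *	ib_dispatch_event(&ev);
 */
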
1600 int ib_cache_setup_one(struct ib_device *device)
1601 {
1602 	u32 p;
1603 	int err;
1604 
1605 	err = gid_table_setup_one(device);
1606 	if (err)
1607 		return err;
1608 
1609 	rdma_for_each_port (device, p) {
1610 		err = ib_cache_update(device, p, true, true, true);
1611 		if (err) {
1612 			gid_table_cleanup_one(device);
1613 			return err;
1614 		}
1615 	}
1616 
1617 	return 0;
1618 }
1619 
1620 void ib_cache_release_one(struct ib_device *device)
1621 {
1622 	u32 p;
1623 
1624 	/*
1625 	 * The release function frees all the cache elements.
1626 	 * This function should be called as part of freeing
1627 	 * all the device's resources once the cache can no
1628 	 * longer be accessed.
1629 	 */
1630 	rdma_for_each_port (device, p)
1631 		kfree(device->port_data[p].cache.pkey);
1632 
1633 	gid_table_release_one(device);
1634 }
1635 
1636 void ib_cache_cleanup_one(struct ib_device *device)
1637 {
1638 	/*
1639 	 * Clear the GID updates mark first to prevent event handlers from
1640 	 * accessing the device while it's being torn down.
1641 	 */
1642 	ib_device_disable_gid_updates(device);
1643 
1644 	/* The cleanup function waits for all in-progress workqueue
1645 	 * elements and cleans up the GID cache. This function should be
1646 	 * called after the device was removed from the devices list and
1647 	 * all clients were removed, so the cache exists but is
1648 	 * non-functional and shouldn't be updated anymore.
1649 	 */
1650 	flush_workqueue(ib_wq);
1651 	gid_table_cleanup_one(device);
1652 
1653 	/*
1654 	 * Flush the workqueue a second time for any pending GID delete work.
1655 	 */
1656 	flush_workqueue(ib_wq);
1657 }
1658