xref: /linux/drivers/infiniband/core/cache.c (revision d6e4b3e326d8b44675b9e19534347d97073826aa)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Intel Corporation. All rights reserved.
4  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/module.h>
37 #include <linux/errno.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <linux/netdevice.h>
41 #include <net/addrconf.h>
42 
43 #include <rdma/ib_cache.h>
44 
45 #include "core_priv.h"
46 
47 struct ib_pkey_cache {
48 	int             table_len;
49 	u16             table[];
50 };
51 
52 struct ib_update_work {
53 	struct work_struct work;
54 	struct ib_device  *device;
55 	u8                 port_num;
56 	bool		   enforce_security;
57 };
58 
59 union ib_gid zgid;
60 EXPORT_SYMBOL(zgid);
61 
62 enum gid_attr_find_mask {
63 	GID_ATTR_FIND_MASK_GID          = 1UL << 0,
64 	GID_ATTR_FIND_MASK_NETDEV	= 1UL << 1,
65 	GID_ATTR_FIND_MASK_DEFAULT	= 1UL << 2,
66 	GID_ATTR_FIND_MASK_GID_TYPE	= 1UL << 3,
67 };
68 
69 enum gid_table_entry_state {
70 	GID_TABLE_ENTRY_INVALID		= 1,
71 	GID_TABLE_ENTRY_VALID		= 2,
72 	/*
73 	 * Indicates that the entry is pending removal; there may still
74 	 * be active users of this GID entry.
75 	 * When the last user of the GID entry releases its reference,
76 	 * the GID entry is detached from the table.
77 	 */
78 	GID_TABLE_ENTRY_PENDING_DEL	= 3,
79 };
80 
81 struct ib_gid_table_entry {
82 	struct kref			kref;
83 	struct work_struct		del_work;
84 	struct ib_gid_attr		attr;
85 	void				*context;
86 	enum gid_table_entry_state	state;
87 };
88 
89 struct ib_gid_table {
90 	int				sz;
91 	/* In RoCE, adding a GID to the table requires:
92 	 * (a) Checking whether this GID already exists.
93 	 * (b) Finding a free slot.
94 	 * (c) Writing the new GID.
95 	 *
96 	 * Deletion requires a different set of operations:
97 	 * (a) Find the GID.
98 	 * (b) Delete it.
99 	 *
100 	 */
101 	/* Any writer to data_vec must hold this lock and the write side of
102 	 * rwlock. Readers must hold only rwlock. All writers must be in a
103 	 * sleepable context.
104 	 */
105 	struct mutex			lock;
106 	/* rwlock protects data_vec[ix]->state and entry pointer.
107 	 */
108 	rwlock_t			rwlock;
109 	struct ib_gid_table_entry	**data_vec;
110 	/* bit field, each bit indicates the index of default GID */
111 	u32				default_gid_indices;
112 };
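
/*
 * Locking sketch for the table above (illustrative only; this is how the
 * helpers in this file use the two locks):
 *
 *	Writer (add/del, sleepable):		Reader (lookup, atomic):
 *	mutex_lock(&table->lock);		read_lock_irqsave(&table->rwlock, flags);
 *	write_lock_irq(&table->rwlock);		... inspect data_vec[ix] and its state ...
 *	... update data_vec[ix]/state ...	read_unlock_irqrestore(&table->rwlock, flags);
 *	write_unlock_irq(&table->rwlock);
 *	... sleepable work (HCA add_gid/del_gid) ...
 *	mutex_unlock(&table->lock);
 */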
113 
114 static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
115 {
116 	struct ib_event event;
117 
118 	event.device		= ib_dev;
119 	event.element.port_num	= port;
120 	event.event		= IB_EVENT_GID_CHANGE;
121 
122 	ib_dispatch_event(&event);
123 }
124 
125 static const char * const gid_type_str[] = {
126 	[IB_GID_TYPE_IB]	= "IB/RoCE v1",
127 	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
128 };
129 
130 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
131 {
132 	if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
133 		return gid_type_str[gid_type];
134 
135 	return "Invalid GID type";
136 }
137 EXPORT_SYMBOL(ib_cache_gid_type_str);
138 
139 /** rdma_is_zero_gid - Check if given GID is zero or not.
140  * @gid:	GID to check
141  * Returns true if given GID is zero, returns false otherwise.
142  */
143 bool rdma_is_zero_gid(const union ib_gid *gid)
144 {
145 	return !memcmp(gid, &zgid, sizeof(*gid));
146 }
147 EXPORT_SYMBOL(rdma_is_zero_gid);
148 
149 /** is_gid_index_default - Check if a given index belongs to
150  * reserved default GIDs or not.
151  * @table:	GID table pointer
152  * @index:	Index to check in GID table
153  * Returns true if index is one of the reserved default GID indices,
154  * otherwise returns false.
155  */
156 static bool is_gid_index_default(const struct ib_gid_table *table,
157 				 unsigned int index)
158 {
159 	return index < 32 && (BIT(index) & table->default_gid_indices);
160 }
161 
162 int ib_cache_gid_parse_type_str(const char *buf)
163 {
164 	unsigned int i;
165 	size_t len;
166 	int err = -EINVAL;
167 
168 	len = strlen(buf);
169 	if (len == 0)
170 		return -EINVAL;
171 
172 	if (buf[len - 1] == '\n')
173 		len--;
174 
175 	for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
176 		if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
177 		    len == strlen(gid_type_str[i])) {
178 			err = i;
179 			break;
180 		}
181 
182 	return err;
183 }
184 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
185 
186 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
187 {
188 	return device->cache.ports[port - rdma_start_port(device)].gid;
189 }
190 
191 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
192 {
193 	return !entry;
194 }
195 
196 static bool is_gid_entry_valid(const struct ib_gid_table_entry *entry)
197 {
198 	return entry && entry->state == GID_TABLE_ENTRY_VALID;
199 }
200 
201 static void schedule_free_gid(struct kref *kref)
202 {
203 	struct ib_gid_table_entry *entry =
204 			container_of(kref, struct ib_gid_table_entry, kref);
205 
206 	queue_work(ib_wq, &entry->del_work);
207 }
208 
209 static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
210 {
211 	struct ib_device *device = entry->attr.device;
212 	u8 port_num = entry->attr.port_num;
213 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
214 
215 	dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
216 		port_num, entry->attr.index, entry->attr.gid.raw);
217 
218 	write_lock_irq(&table->rwlock);
219 
220 	/*
221 	 * The only way to avoid overwriting a newer occupant of this
222 	 * slot is to check that the table still points to this entry.
223 	 * If a new entry was added at this index by the time we free here,
224 	 * don't overwrite the table entry.
225 	 */
226 	if (entry == table->data_vec[entry->attr.index])
227 		table->data_vec[entry->attr.index] = NULL;
228 	/* Now this index is ready to be allocated */
229 	write_unlock_irq(&table->rwlock);
230 
231 	if (entry->attr.ndev)
232 		dev_put(entry->attr.ndev);
233 	kfree(entry);
234 }
235 
236 static void free_gid_entry(struct kref *kref)
237 {
238 	struct ib_gid_table_entry *entry =
239 			container_of(kref, struct ib_gid_table_entry, kref);
240 
241 	free_gid_entry_locked(entry);
242 }
243 
244 /**
245  * free_gid_work - Release reference to the GID entry
246  * @work: Work structure referring to the GID entry which needs to be
247  * deleted.
248  *
249  * free_gid_work() frees the entry from the HCA's hardware table
250  * if the provider supports it, and releases the netdevice reference.
251  */
252 static void free_gid_work(struct work_struct *work)
253 {
254 	struct ib_gid_table_entry *entry =
255 		container_of(work, struct ib_gid_table_entry, del_work);
256 	struct ib_device *device = entry->attr.device;
257 	u8 port_num = entry->attr.port_num;
258 	struct ib_gid_table *table = rdma_gid_table(device, port_num);
259 
260 	mutex_lock(&table->lock);
261 	free_gid_entry_locked(entry);
262 	mutex_unlock(&table->lock);
263 }
264 
265 static struct ib_gid_table_entry *
266 alloc_gid_entry(const struct ib_gid_attr *attr)
267 {
268 	struct ib_gid_table_entry *entry;
269 
270 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
271 	if (!entry)
272 		return NULL;
273 	kref_init(&entry->kref);
274 	memcpy(&entry->attr, attr, sizeof(*attr));
275 	if (entry->attr.ndev)
276 		dev_hold(entry->attr.ndev);
277 	INIT_WORK(&entry->del_work, free_gid_work);
278 	entry->state = GID_TABLE_ENTRY_INVALID;
279 	return entry;
280 }
281 
282 static void store_gid_entry(struct ib_gid_table *table,
283 			    struct ib_gid_table_entry *entry)
284 {
285 	entry->state = GID_TABLE_ENTRY_VALID;
286 
287 	dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
288 		__func__, entry->attr.port_num, entry->attr.index,
289 		entry->attr.gid.raw);
290 
291 	lockdep_assert_held(&table->lock);
292 	write_lock_irq(&table->rwlock);
293 	table->data_vec[entry->attr.index] = entry;
294 	write_unlock_irq(&table->rwlock);
295 }
296 
297 static void get_gid_entry(struct ib_gid_table_entry *entry)
298 {
299 	kref_get(&entry->kref);
300 }
301 
302 static void put_gid_entry(struct ib_gid_table_entry *entry)
303 {
304 	kref_put(&entry->kref, schedule_free_gid);
305 }
306 
307 static void put_gid_entry_locked(struct ib_gid_table_entry *entry)
308 {
309 	kref_put(&entry->kref, free_gid_entry);
310 }
311 
312 static int add_roce_gid(struct ib_gid_table_entry *entry)
313 {
314 	const struct ib_gid_attr *attr = &entry->attr;
315 	int ret;
316 
317 	if (!attr->ndev) {
318 		dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
319 			__func__, attr->port_num, attr->index);
320 		return -EINVAL;
321 	}
322 	if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
323 		ret = attr->device->ops.add_gid(attr, &entry->context);
324 		if (ret) {
325 			dev_err(&attr->device->dev,
326 				"%s GID add failed port=%d index=%d\n",
327 				__func__, attr->port_num, attr->index);
328 			return ret;
329 		}
330 	}
331 	return 0;
332 }
333 
334 /**
335  * del_gid - Delete GID table entry
336  *
337  * @ib_dev:	IB device whose GID entry is to be deleted
338  * @port:	Port number of the IB device
339  * @table:	GID table of the IB device for a port
340  * @ix:		GID entry index to delete
341  *
342  */
343 static void del_gid(struct ib_device *ib_dev, u8 port,
344 		    struct ib_gid_table *table, int ix)
345 {
346 	struct ib_gid_table_entry *entry;
347 
348 	lockdep_assert_held(&table->lock);
349 
350 	dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
351 		ix, table->data_vec[ix]->attr.gid.raw);
352 
353 	write_lock_irq(&table->rwlock);
354 	entry = table->data_vec[ix];
355 	entry->state = GID_TABLE_ENTRY_PENDING_DEL;
356 	/*
357 	 * For non-RoCE protocols, the GID entry slot is immediately ready for reuse.
358 	 */
359 	if (!rdma_protocol_roce(ib_dev, port))
360 		table->data_vec[ix] = NULL;
361 	write_unlock_irq(&table->rwlock);
362 
363 	if (rdma_cap_roce_gid_table(ib_dev, port))
364 		ib_dev->ops.del_gid(&entry->attr, &entry->context);
365 
366 	put_gid_entry_locked(entry);
367 }
368 
369 /**
370  * add_modify_gid - Add or modify GID table entry
371  *
372  * @table:	GID table in which GID to be added or modified
373  * @attr:	Attributes of the GID
374  *
375  * Returns 0 on success or an appropriate error code. It accepts zero
376  * GID addition for non-RoCE ports from HCAs that report them as valid
377  * GIDs. However, such zero GIDs are not added to the cache.
378  */
379 static int add_modify_gid(struct ib_gid_table *table,
380 			  const struct ib_gid_attr *attr)
381 {
382 	struct ib_gid_table_entry *entry;
383 	int ret = 0;
384 
385 	/*
386 	 * Invalidate any old entry in the table to make it safe to write to
387 	 * this index.
388 	 */
389 	if (is_gid_entry_valid(table->data_vec[attr->index]))
390 		del_gid(attr->device, attr->port_num, table, attr->index);
391 
392 	/*
393 	 * Some HCAs report multiple GID entries with only one valid GID, and
394 	 * leave other unused entries as the zero GID. Convert zero GIDs to
395 	 * empty table entries instead of storing them.
396 	 */
397 	if (rdma_is_zero_gid(&attr->gid))
398 		return 0;
399 
400 	entry = alloc_gid_entry(attr);
401 	if (!entry)
402 		return -ENOMEM;
403 
404 	if (rdma_protocol_roce(attr->device, attr->port_num)) {
405 		ret = add_roce_gid(entry);
406 		if (ret)
407 			goto done;
408 	}
409 
410 	store_gid_entry(table, entry);
411 	return 0;
412 
413 done:
414 	put_gid_entry(entry);
415 	return ret;
416 }
417 
418 /* The table rwlock should be read-locked, or the table mutex should be held */
419 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
420 		    const struct ib_gid_attr *val, bool default_gid,
421 		    unsigned long mask, int *pempty)
422 {
423 	int i = 0;
424 	int found = -1;
425 	int empty = pempty ? -1 : 0;
426 
427 	while (i < table->sz && (found < 0 || empty < 0)) {
428 		struct ib_gid_table_entry *data = table->data_vec[i];
429 		struct ib_gid_attr *attr;
430 		int curr_index = i;
431 
432 		i++;
433 
434 		/* find_gid() is used during GID addition, where it is expected
435 		 * to return a free entry slot that is not a duplicate.
436 		 * A free slot is requested and returned only when pempty is set,
437 		 * so look up a free slot only if requested.
438 		 */
439 		if (pempty && empty < 0) {
440 			if (is_gid_entry_free(data) &&
441 			    default_gid ==
442 				is_gid_index_default(table, curr_index)) {
443 				/*
444 				 * Found an invalid (free) entry; allocate it.
445 				 * If default GID is requested, then our
446 				 * found slot must be one of the DEFAULT
447 				 * reserved slots or we fail.
448 				 * This ensures that only DEFAULT reserved
449 				 * slots are used for default property GIDs.
450 				 */
451 				empty = curr_index;
452 			}
453 		}
454 
455 		/*
456 		 * Additionally, find_gid() is used to find a valid entry during
457 		 * a lookup operation; so ignore entries which are marked as
458 		 * pending removal and entries which are marked as
459 		 * invalid.
460 		 */
461 		if (!is_gid_entry_valid(data))
462 			continue;
463 
464 		if (found >= 0)
465 			continue;
466 
467 		attr = &data->attr;
468 		if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
469 		    attr->gid_type != val->gid_type)
470 			continue;
471 
472 		if (mask & GID_ATTR_FIND_MASK_GID &&
473 		    memcmp(gid, &data->attr.gid, sizeof(*gid)))
474 			continue;
475 
476 		if (mask & GID_ATTR_FIND_MASK_NETDEV &&
477 		    attr->ndev != val->ndev)
478 			continue;
479 
480 		if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
481 		    is_gid_index_default(table, curr_index) != default_gid)
482 			continue;
483 
484 		found = curr_index;
485 	}
486 
487 	if (pempty)
488 		*pempty = empty;
489 
490 	return found;
491 }
492 
493 static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
494 {
495 	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
496 	addrconf_ifid_eui48(&gid->raw[8], dev);
497 }
498 
499 static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
500 			      union ib_gid *gid, struct ib_gid_attr *attr,
501 			      unsigned long mask, bool default_gid)
502 {
503 	struct ib_gid_table *table;
504 	int ret = 0;
505 	int empty;
506 	int ix;
507 
508 	/* Do not allow adding a zero GID, in accordance with
509 	 * IB spec version 1.3 section 4.1.1 point (6) and
510 	 * sections 12.7.10 and 12.7.20.
511 	 */
512 	if (rdma_is_zero_gid(gid))
513 		return -EINVAL;
514 
515 	table = rdma_gid_table(ib_dev, port);
516 
517 	mutex_lock(&table->lock);
518 
519 	ix = find_gid(table, gid, attr, default_gid, mask, &empty);
520 	if (ix >= 0)
521 		goto out_unlock;
522 
523 	if (empty < 0) {
524 		ret = -ENOSPC;
525 		goto out_unlock;
526 	}
527 	attr->device = ib_dev;
528 	attr->index = empty;
529 	attr->port_num = port;
530 	attr->gid = *gid;
531 	ret = add_modify_gid(table, attr);
532 	if (!ret)
533 		dispatch_gid_change_event(ib_dev, port);
534 
535 out_unlock:
536 	mutex_unlock(&table->lock);
537 	if (ret)
538 		pr_warn("%s: unable to add gid %pI6 error=%d\n",
539 			__func__, gid->raw, ret);
540 	return ret;
541 }
542 
543 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
544 		     union ib_gid *gid, struct ib_gid_attr *attr)
545 {
546 	struct net_device *idev;
547 	unsigned long mask;
548 	int ret;
549 
550 	if (ib_dev->ops.get_netdev) {
551 		idev = ib_dev->ops.get_netdev(ib_dev, port);
552 		if (idev && attr->ndev != idev) {
553 			union ib_gid default_gid;
554 
555 			/* Adding default GIDs is not permitted */
556 			make_default_gid(idev, &default_gid);
557 			if (!memcmp(gid, &default_gid, sizeof(*gid))) {
558 				dev_put(idev);
559 				return -EPERM;
560 			}
561 		}
562 		if (idev)
563 			dev_put(idev);
564 	}
565 
566 	mask = GID_ATTR_FIND_MASK_GID |
567 	       GID_ATTR_FIND_MASK_GID_TYPE |
568 	       GID_ATTR_FIND_MASK_NETDEV;
569 
570 	ret = __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
571 	return ret;
572 }
573 
574 static int
575 _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
576 		  union ib_gid *gid, struct ib_gid_attr *attr,
577 		  unsigned long mask, bool default_gid)
578 {
579 	struct ib_gid_table *table;
580 	int ret = 0;
581 	int ix;
582 
583 	table = rdma_gid_table(ib_dev, port);
584 
585 	mutex_lock(&table->lock);
586 
587 	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
588 	if (ix < 0) {
589 		ret = -EINVAL;
590 		goto out_unlock;
591 	}
592 
593 	del_gid(ib_dev, port, table, ix);
594 	dispatch_gid_change_event(ib_dev, port);
595 
596 out_unlock:
597 	mutex_unlock(&table->lock);
598 	if (ret)
599 		pr_debug("%s: can't delete gid %pI6 error=%d\n",
600 			 __func__, gid->raw, ret);
601 	return ret;
602 }
603 
604 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
605 		     union ib_gid *gid, struct ib_gid_attr *attr)
606 {
607 	unsigned long mask = GID_ATTR_FIND_MASK_GID	  |
608 			     GID_ATTR_FIND_MASK_GID_TYPE |
609 			     GID_ATTR_FIND_MASK_DEFAULT  |
610 			     GID_ATTR_FIND_MASK_NETDEV;
611 
612 	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
613 }
614 
615 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
616 				     struct net_device *ndev)
617 {
618 	struct ib_gid_table *table;
619 	int ix;
620 	bool deleted = false;
621 
622 	table = rdma_gid_table(ib_dev, port);
623 
624 	mutex_lock(&table->lock);
625 
626 	for (ix = 0; ix < table->sz; ix++) {
627 		if (is_gid_entry_valid(table->data_vec[ix]) &&
628 		    table->data_vec[ix]->attr.ndev == ndev) {
629 			del_gid(ib_dev, port, table, ix);
630 			deleted = true;
631 		}
632 	}
633 
634 	mutex_unlock(&table->lock);
635 
636 	if (deleted)
637 		dispatch_gid_change_event(ib_dev, port);
638 
639 	return 0;
640 }
641 
642 /**
643  * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
644  * a valid GID entry for given search parameters. It searches for the specified
645  * GID value in the local software cache.
646  * @ib_dev: The device to query.
647  * @gid: The GID value to search for.
648  * @gid_type: The GID type to search for.
649  * @port: The port number of the device where the GID value should be
650  *   searched.
651  * @ndev: In RoCE, the net device of the device. NULL means ignore.
652  *
653  * Returns sgid attributes if the GID is found with valid reference or
654  * returns ERR_PTR for the error.
655  * The caller must invoke rdma_put_gid_attr() to release the reference.
656  */
657 const struct ib_gid_attr *
658 rdma_find_gid_by_port(struct ib_device *ib_dev,
659 		      const union ib_gid *gid,
660 		      enum ib_gid_type gid_type,
661 		      u8 port, struct net_device *ndev)
662 {
663 	int local_index;
664 	struct ib_gid_table *table;
665 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
666 			     GID_ATTR_FIND_MASK_GID_TYPE;
667 	struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
668 	const struct ib_gid_attr *attr;
669 	unsigned long flags;
670 
671 	if (!rdma_is_port_valid(ib_dev, port))
672 		return ERR_PTR(-ENOENT);
673 
674 	table = rdma_gid_table(ib_dev, port);
675 
676 	if (ndev)
677 		mask |= GID_ATTR_FIND_MASK_NETDEV;
678 
679 	read_lock_irqsave(&table->rwlock, flags);
680 	local_index = find_gid(table, gid, &val, false, mask, NULL);
681 	if (local_index >= 0) {
682 		get_gid_entry(table->data_vec[local_index]);
683 		attr = &table->data_vec[local_index]->attr;
684 		read_unlock_irqrestore(&table->rwlock, flags);
685 		return attr;
686 	}
687 
688 	read_unlock_irqrestore(&table->rwlock, flags);
689 	return ERR_PTR(-ENOENT);
690 }
691 EXPORT_SYMBOL(rdma_find_gid_by_port);
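
/*
 * Usage sketch for rdma_find_gid_by_port() (hypothetical caller; variable
 * names are illustrative only):
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid_by_port(ib_dev, &gid, IB_GID_TYPE_IB, port, NULL);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->index, attr->gid_type, attr->ndev ...
 *	rdma_put_gid_attr(attr);
 */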
692 
693 /**
694  * rdma_find_gid_by_filter - Returns the GID table attribute where a
695  * specified GID value occurs
696  * @ib_dev: The device to query.
697  * @gid: The GID value to search for.
698  * @port: The port number of the device where the GID value could be
699  *   searched.
700  * @filter: The filter function is executed on any matching GID in the table.
701  *   If the filter function returns true, the corresponding entry's attributes
702  *   are returned; otherwise, we continue searching the GID table. It's guaranteed that
702  *   while filter is executed, the ndev field is valid and the structure won't
703  *   while filter is executed, ndev field is valid and the structure won't
704  *   change. filter is executed in an atomic context. filter must not be NULL.
705  *
706  * rdma_find_gid_by_filter() searches the port's GID table for the specified
707  * GID value for which the filter function returns true.
708  *
709  */
710 const struct ib_gid_attr *rdma_find_gid_by_filter(
711 	struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
712 	bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
713 		       void *),
714 	void *context)
715 {
716 	const struct ib_gid_attr *res = ERR_PTR(-ENOENT);
717 	struct ib_gid_table *table;
718 	unsigned long flags;
719 	unsigned int i;
720 
721 	if (!rdma_is_port_valid(ib_dev, port))
722 		return ERR_PTR(-EINVAL);
723 
724 	table = rdma_gid_table(ib_dev, port);
725 
726 	read_lock_irqsave(&table->rwlock, flags);
727 	for (i = 0; i < table->sz; i++) {
728 		struct ib_gid_table_entry *entry = table->data_vec[i];
729 
730 		if (!is_gid_entry_valid(entry))
731 			continue;
732 
733 		if (memcmp(gid, &entry->attr.gid, sizeof(*gid)))
734 			continue;
735 
736 		if (filter(gid, &entry->attr, context)) {
737 			get_gid_entry(entry);
738 			res = &entry->attr;
739 			break;
740 		}
741 	}
742 	read_unlock_irqrestore(&table->rwlock, flags);
743 	return res;
744 }
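
/*
 * Filter sketch for rdma_find_gid_by_filter() (hypothetical caller): match
 * only RoCE v2 entries bound to a given netdevice. The filter runs under the
 * table rwlock in atomic context, so it must not sleep.
 *
 *	static bool match_v2_on_ndev(const union ib_gid *gid,
 *				     const struct ib_gid_attr *attr,
 *				     void *context)
 *	{
 *		return attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
 *		       attr->ndev == context;
 *	}
 *
 *	attr = rdma_find_gid_by_filter(ib_dev, &gid, port, match_v2_on_ndev, ndev);
 *	if (!IS_ERR(attr))
 *		rdma_put_gid_attr(attr);
 */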
745 
746 static struct ib_gid_table *alloc_gid_table(int sz)
747 {
748 	struct ib_gid_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
749 
750 	if (!table)
751 		return NULL;
752 
753 	table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
754 	if (!table->data_vec)
755 		goto err_free_table;
756 
757 	mutex_init(&table->lock);
758 
759 	table->sz = sz;
760 	rwlock_init(&table->rwlock);
761 	return table;
762 
763 err_free_table:
764 	kfree(table);
765 	return NULL;
766 }
767 
768 static void release_gid_table(struct ib_device *device, u8 port,
769 			      struct ib_gid_table *table)
770 {
771 	bool leak = false;
772 	int i;
773 
774 	if (!table)
775 		return;
776 
777 	for (i = 0; i < table->sz; i++) {
778 		if (is_gid_entry_free(table->data_vec[i]))
779 			continue;
780 		if (kref_read(&table->data_vec[i]->kref) > 1) {
781 			dev_err(&device->dev,
782 				"GID entry ref leak for index %d ref=%d\n", i,
783 				kref_read(&table->data_vec[i]->kref));
784 			leak = true;
785 		}
786 	}
787 	if (leak)
788 		return;
789 
790 	kfree(table->data_vec);
791 	kfree(table);
792 }
793 
794 static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
795 				   struct ib_gid_table *table)
796 {
797 	int i;
798 	bool deleted = false;
799 
800 	if (!table)
801 		return;
802 
803 	mutex_lock(&table->lock);
804 	for (i = 0; i < table->sz; ++i) {
805 		if (is_gid_entry_valid(table->data_vec[i])) {
806 			del_gid(ib_dev, port, table, i);
807 			deleted = true;
808 		}
809 	}
810 	mutex_unlock(&table->lock);
811 
812 	if (deleted)
813 		dispatch_gid_change_event(ib_dev, port);
814 }
815 
816 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
817 				  struct net_device *ndev,
818 				  unsigned long gid_type_mask,
819 				  enum ib_cache_gid_default_mode mode)
820 {
821 	union ib_gid gid = { };
822 	struct ib_gid_attr gid_attr;
823 	unsigned int gid_type;
824 	unsigned long mask;
825 
826 	mask = GID_ATTR_FIND_MASK_GID_TYPE |
827 	       GID_ATTR_FIND_MASK_DEFAULT |
828 	       GID_ATTR_FIND_MASK_NETDEV;
829 	memset(&gid_attr, 0, sizeof(gid_attr));
830 	gid_attr.ndev = ndev;
831 
832 	for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
833 		if (1UL << gid_type & ~gid_type_mask)
834 			continue;
835 
836 		gid_attr.gid_type = gid_type;
837 
838 		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
839 			make_default_gid(ndev, &gid);
840 			__ib_cache_gid_add(ib_dev, port, &gid,
841 					   &gid_attr, mask, true);
842 		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
843 			_ib_cache_gid_del(ib_dev, port, &gid,
844 					  &gid_attr, mask, true);
845 		}
846 	}
847 }
848 
849 static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
850 				      struct ib_gid_table *table)
851 {
852 	unsigned int i;
853 	unsigned long roce_gid_type_mask;
854 	unsigned int num_default_gids;
855 
856 	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
857 	num_default_gids = hweight_long(roce_gid_type_mask);
858 	/* Reserve starting indices for default GIDs */
859 	for (i = 0; i < num_default_gids && i < table->sz; i++)
860 		table->default_gid_indices |= BIT(i);
861 }
862 
863 
864 static void gid_table_release_one(struct ib_device *ib_dev)
865 {
866 	struct ib_gid_table *table;
867 	u8 port;
868 
869 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
870 		table = ib_dev->cache.ports[port].gid;
871 		release_gid_table(ib_dev, port, table);
872 		ib_dev->cache.ports[port].gid = NULL;
873 	}
874 }
875 
876 static int _gid_table_setup_one(struct ib_device *ib_dev)
877 {
878 	u8 port;
879 	struct ib_gid_table *table;
880 
881 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
882 		u8 rdma_port = port + rdma_start_port(ib_dev);
883 
884 		table =	alloc_gid_table(
885 				ib_dev->port_immutable[rdma_port].gid_tbl_len);
886 		if (!table)
887 			goto rollback_table_setup;
888 
889 		gid_table_reserve_default(ib_dev, rdma_port, table);
890 		ib_dev->cache.ports[port].gid = table;
891 	}
892 	return 0;
893 
894 rollback_table_setup:
895 	gid_table_release_one(ib_dev);
896 	return -ENOMEM;
897 }
898 
899 static void gid_table_cleanup_one(struct ib_device *ib_dev)
900 {
901 	struct ib_gid_table *table;
902 	u8 port;
903 
904 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
905 		table = ib_dev->cache.ports[port].gid;
906 		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
907 				       table);
908 	}
909 }
910 
911 static int gid_table_setup_one(struct ib_device *ib_dev)
912 {
913 	int err;
914 
915 	err = _gid_table_setup_one(ib_dev);
916 
917 	if (err)
918 		return err;
919 
920 	rdma_roce_rescan_device(ib_dev);
921 
922 	return err;
923 }
924 
925 /**
926  * rdma_query_gid - Read the GID content from the GID software cache
927  * @device:		Device to query the GID
928  * @port_num:		Port number of the device
929  * @index:		Index of the GID table entry to read
930  * @gid:		Pointer to GID where to store the entry's GID
931  *
932  * rdma_query_gid() only reads the GID entry content for the requested device,
933  * port and index. It works for IB, RoCE and iWARP link layers.  It doesn't
934  * hold any reference to the GID table entry in the HCA or software cache.
935  *
936  * Returns 0 on success or appropriate error code.
937  *
938  */
939 int rdma_query_gid(struct ib_device *device, u8 port_num,
940 		   int index, union ib_gid *gid)
941 {
942 	struct ib_gid_table *table;
943 	unsigned long flags;
944 	int res = -EINVAL;
945 
946 	if (!rdma_is_port_valid(device, port_num))
947 		return -EINVAL;
948 
949 	table = rdma_gid_table(device, port_num);
950 	read_lock_irqsave(&table->rwlock, flags);
951 
952 	if (index < 0 || index >= table->sz ||
953 	    !is_gid_entry_valid(table->data_vec[index]))
954 		goto done;
955 
956 	memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
957 	res = 0;
958 
959 done:
960 	read_unlock_irqrestore(&table->rwlock, flags);
961 	return res;
962 }
963 EXPORT_SYMBOL(rdma_query_gid);
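
/*
 * Usage sketch for rdma_query_gid() (hypothetical caller): the GID is copied
 * out and no reference on the table entry is held afterwards.
 *
 *	union ib_gid gid;
 *	int ret;
 *
 *	ret = rdma_query_gid(device, port_num, 0, &gid);
 *	if (ret)
 *		return ret;
 */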
964 
965 /**
966  * rdma_find_gid - Returns SGID attributes if the matching GID is found.
967  * @device: The device to query.
968  * @gid: The GID value to search for.
969  * @gid_type: The GID type to search for.
970  * @ndev: In RoCE, the net device of the device. NULL means ignore.
971  *
972  * rdma_find_gid() searches for the specified GID value in the software cache.
973  *
974  * Returns GID attributes if a valid GID is found or returns ERR_PTR for the
975  * error. The caller must invoke rdma_put_gid_attr() to release the reference.
976  *
977  */
978 const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
979 					const union ib_gid *gid,
980 					enum ib_gid_type gid_type,
981 					struct net_device *ndev)
982 {
983 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
984 			     GID_ATTR_FIND_MASK_GID_TYPE;
985 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
986 	u8 p;
987 
988 	if (ndev)
989 		mask |= GID_ATTR_FIND_MASK_NETDEV;
990 
991 	for (p = 0; p < device->phys_port_cnt; p++) {
992 		struct ib_gid_table *table;
993 		unsigned long flags;
994 		int index;
995 
996 		table = device->cache.ports[p].gid;
997 		read_lock_irqsave(&table->rwlock, flags);
998 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
999 		if (index >= 0) {
1000 			const struct ib_gid_attr *attr;
1001 
1002 			get_gid_entry(table->data_vec[index]);
1003 			attr = &table->data_vec[index]->attr;
1004 			read_unlock_irqrestore(&table->rwlock, flags);
1005 			return attr;
1006 		}
1007 		read_unlock_irqrestore(&table->rwlock, flags);
1008 	}
1009 
1010 	return ERR_PTR(-ENOENT);
1011 }
1012 EXPORT_SYMBOL(rdma_find_gid);
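
/*
 * Usage sketch for rdma_find_gid() (hypothetical caller): discover which
 * port and index hold a GID, then drop the reference.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_find_gid(device, &gid, IB_GID_TYPE_ROCE_UDP_ENCAP, ndev);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... use attr->port_num and attr->index ...
 *	rdma_put_gid_attr(attr);
 */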
1013 
1014 int ib_get_cached_pkey(struct ib_device *device,
1015 		       u8                port_num,
1016 		       int               index,
1017 		       u16              *pkey)
1018 {
1019 	struct ib_pkey_cache *cache;
1020 	unsigned long flags;
1021 	int ret = 0;
1022 
1023 	if (!rdma_is_port_valid(device, port_num))
1024 		return -EINVAL;
1025 
1026 	read_lock_irqsave(&device->cache.lock, flags);
1027 
1028 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1029 
1030 	if (index < 0 || index >= cache->table_len)
1031 		ret = -EINVAL;
1032 	else
1033 		*pkey = cache->table[index];
1034 
1035 	read_unlock_irqrestore(&device->cache.lock, flags);
1036 
1037 	return ret;
1038 }
1039 EXPORT_SYMBOL(ib_get_cached_pkey);
1040 
1041 int ib_get_cached_subnet_prefix(struct ib_device *device,
1042 				u8                port_num,
1043 				u64              *sn_pfx)
1044 {
1045 	unsigned long flags;
1046 	int p;
1047 
1048 	if (!rdma_is_port_valid(device, port_num))
1049 		return -EINVAL;
1050 
1051 	p = port_num - rdma_start_port(device);
1052 	read_lock_irqsave(&device->cache.lock, flags);
1053 	*sn_pfx = device->cache.ports[p].subnet_prefix;
1054 	read_unlock_irqrestore(&device->cache.lock, flags);
1055 
1056 	return 0;
1057 }
1058 EXPORT_SYMBOL(ib_get_cached_subnet_prefix);
1059 
1060 int ib_find_cached_pkey(struct ib_device *device,
1061 			u8                port_num,
1062 			u16               pkey,
1063 			u16              *index)
1064 {
1065 	struct ib_pkey_cache *cache;
1066 	unsigned long flags;
1067 	int i;
1068 	int ret = -ENOENT;
1069 	int partial_ix = -1;
1070 
1071 	if (!rdma_is_port_valid(device, port_num))
1072 		return -EINVAL;
1073 
1074 	read_lock_irqsave(&device->cache.lock, flags);
1075 
1076 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1077 
1078 	*index = -1;
1079 
1080 	for (i = 0; i < cache->table_len; ++i)
1081 		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
1082 			if (cache->table[i] & 0x8000) {
1083 				*index = i;
1084 				ret = 0;
1085 				break;
1086 			} else
1087 				partial_ix = i;
1088 		}
1089 
1090 	if (ret && partial_ix >= 0) {
1091 		*index = partial_ix;
1092 		ret = 0;
1093 	}
1094 
1095 	read_unlock_irqrestore(&device->cache.lock, flags);
1096 
1097 	return ret;
1098 }
1099 EXPORT_SYMBOL(ib_find_cached_pkey);
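
/*
 * Usage sketch for ib_find_cached_pkey() (hypothetical caller): map a pkey
 * value to its table index, preferring a full-membership match over a
 * partial-membership one.
 *
 *	u16 pkey_index;
 *	int ret;
 *
 *	ret = ib_find_cached_pkey(device, port_num, 0xffff, &pkey_index);
 *	if (!ret)
 *		... use pkey_index when building a QP or MAD attribute ...
 */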
1100 
1101 int ib_find_exact_cached_pkey(struct ib_device *device,
1102 			      u8                port_num,
1103 			      u16               pkey,
1104 			      u16              *index)
1105 {
1106 	struct ib_pkey_cache *cache;
1107 	unsigned long flags;
1108 	int i;
1109 	int ret = -ENOENT;
1110 
1111 	if (!rdma_is_port_valid(device, port_num))
1112 		return -EINVAL;
1113 
1114 	read_lock_irqsave(&device->cache.lock, flags);
1115 
1116 	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
1117 
1118 	*index = -1;
1119 
1120 	for (i = 0; i < cache->table_len; ++i)
1121 		if (cache->table[i] == pkey) {
1122 			*index = i;
1123 			ret = 0;
1124 			break;
1125 		}
1126 
1127 	read_unlock_irqrestore(&device->cache.lock, flags);
1128 
1129 	return ret;
1130 }
1131 EXPORT_SYMBOL(ib_find_exact_cached_pkey);
1132 
1133 int ib_get_cached_lmc(struct ib_device *device,
1134 		      u8                port_num,
1135 		      u8                *lmc)
1136 {
1137 	unsigned long flags;
1138 	int ret = 0;
1139 
1140 	if (!rdma_is_port_valid(device, port_num))
1141 		return -EINVAL;
1142 
1143 	read_lock_irqsave(&device->cache.lock, flags);
1144 	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
1145 	read_unlock_irqrestore(&device->cache.lock, flags);
1146 
1147 	return ret;
1148 }
1149 EXPORT_SYMBOL(ib_get_cached_lmc);
1150 
1151 int ib_get_cached_port_state(struct ib_device   *device,
1152 			     u8                  port_num,
1153 			     enum ib_port_state *port_state)
1154 {
1155 	unsigned long flags;
1156 	int ret = 0;
1157 
1158 	if (!rdma_is_port_valid(device, port_num))
1159 		return -EINVAL;
1160 
1161 	read_lock_irqsave(&device->cache.lock, flags);
1162 	*port_state = device->cache.ports[port_num
1163 		- rdma_start_port(device)].port_state;
1164 	read_unlock_irqrestore(&device->cache.lock, flags);
1165 
1166 	return ret;
1167 }
1168 EXPORT_SYMBOL(ib_get_cached_port_state);
1169 
1170 /**
1171  * rdma_get_gid_attr - Returns GID attributes for a port of a device
1172  * at a requested gid_index, if a valid GID entry exists.
1173  * @device:		The device to query.
1174  * @port_num:		The port number on the device where the GID value
1175  *			is to be queried.
1176  * @index:		Index of the GID table entry whose attributes are to
1177  *                      be queried.
1178  *
1179  * rdma_get_gid_attr() acquires a reference on the gid attribute from the
1180  * cached GID table. The caller must invoke rdma_put_gid_attr() to release the
1181  * reference to the gid attribute regardless of link layer.
1182  *
1183  * Returns pointer to valid gid attribute or ERR_PTR for the appropriate error
1184  * code.
1185  */
1186 const struct ib_gid_attr *
1187 rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
1188 {
1189 	const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
1190 	struct ib_gid_table *table;
1191 	unsigned long flags;
1192 
1193 	if (!rdma_is_port_valid(device, port_num))
1194 		return ERR_PTR(-EINVAL);
1195 
1196 	table = rdma_gid_table(device, port_num);
1197 	if (index < 0 || index >= table->sz)
1198 		return ERR_PTR(-EINVAL);
1199 
1200 	read_lock_irqsave(&table->rwlock, flags);
1201 	if (!is_gid_entry_valid(table->data_vec[index]))
1202 		goto done;
1203 
1204 	get_gid_entry(table->data_vec[index]);
1205 	attr = &table->data_vec[index]->attr;
1206 done:
1207 	read_unlock_irqrestore(&table->rwlock, flags);
1208 	return attr;
1209 }
1210 EXPORT_SYMBOL(rdma_get_gid_attr);
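
/*
 * Usage sketch for rdma_get_gid_attr() (hypothetical caller): pin a GID
 * table entry by index, inspect it, and release the reference.
 *
 *	const struct ib_gid_attr *attr;
 *
 *	attr = rdma_get_gid_attr(device, port_num, index);
 *	if (IS_ERR(attr))
 *		return PTR_ERR(attr);
 *	... attr->gid, attr->gid_type and attr->ndev stay valid here ...
 *	rdma_put_gid_attr(attr);
 */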
1211 
1212 /**
1213  * rdma_put_gid_attr - Release reference to the GID attribute
1214  * @attr:		Pointer to the GID attribute whose reference
1215  *			needs to be released.
1216  *
1217  * rdma_put_gid_attr() must be used to release a reference that was
1218  * acquired using rdma_get_gid_attr() or any API
1219  * which returns a pointer to an ib_gid_attr, regardless of whether the
1220  * link layer is IB or RoCE.
1221  *
1222  */
1223 void rdma_put_gid_attr(const struct ib_gid_attr *attr)
1224 {
1225 	struct ib_gid_table_entry *entry =
1226 		container_of(attr, struct ib_gid_table_entry, attr);
1227 
1228 	put_gid_entry(entry);
1229 }
1230 EXPORT_SYMBOL(rdma_put_gid_attr);
1231 
1232 /**
1233  * rdma_hold_gid_attr - Get reference to existing GID attribute
1234  *
1235  * @attr:		Pointer to the GID attribute whose reference
1236  *			needs to be taken.
1237  *
1238  * Increase the reference count to a GID attribute to keep it from being
1239  * freed. Callers are required to already be holding a reference to the attribute.
1240  *
1241  */
1242 void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
1243 {
1244 	struct ib_gid_table_entry *entry =
1245 		container_of(attr, struct ib_gid_table_entry, attr);
1246 
1247 	get_gid_entry(entry);
1248 }
1249 EXPORT_SYMBOL(rdma_hold_gid_attr);
1250 
1251 /**
1252  * rdma_read_gid_attr_ndev_rcu - Read GID attribute netdevice
1253  * which must be in UP state.
1254  *
1255  * @attr:Pointer to the GID attribute
1256  *
1257  * Returns a pointer to the netdevice if a netdevice was attached to the GID
1258  * and the netdevice is in the UP state. The caller must hold the RCU read lock,
1259  * as this API reads the netdev flags, which can change while the netdevice
1260  * migrates to a different net namespace. Returns ERR_PTR with an error code otherwise.
1261  *
1262  */
1263 struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
1264 {
1265 	struct ib_gid_table_entry *entry =
1266 			container_of(attr, struct ib_gid_table_entry, attr);
1267 	struct ib_device *device = entry->attr.device;
1268 	struct net_device *ndev = ERR_PTR(-ENODEV);
1269 	u8 port_num = entry->attr.port_num;
1270 	struct ib_gid_table *table;
1271 	unsigned long flags;
1272 	bool valid;
1273 
1274 	table = rdma_gid_table(device, port_num);
1275 
1276 	read_lock_irqsave(&table->rwlock, flags);
1277 	valid = is_gid_entry_valid(table->data_vec[attr->index]);
1278 	if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
1279 		ndev = attr->ndev;
1280 	read_unlock_irqrestore(&table->rwlock, flags);
1281 	return ndev;
1282 }
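
/*
 * Usage sketch for rdma_read_gid_attr_ndev_rcu() (hypothetical caller): the
 * RCU read lock must be held around both the call and any use of the
 * returned netdevice.
 *
 *	struct net_device *ndev;
 *
 *	rcu_read_lock();
 *	ndev = rdma_read_gid_attr_ndev_rcu(attr);
 *	if (!IS_ERR(ndev))
 *		... read-only use of ndev, e.g. READ_ONCE(ndev->mtu) ...
 *	rcu_read_unlock();
 */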
1283 
1284 static int config_non_roce_gid_cache(struct ib_device *device,
1285 				     u8 port, int gid_tbl_len)
1286 {
1287 	struct ib_gid_attr gid_attr = {};
1288 	struct ib_gid_table *table;
1289 	int ret = 0;
1290 	int i;
1291 
1292 	gid_attr.device = device;
1293 	gid_attr.port_num = port;
1294 	table = rdma_gid_table(device, port);
1295 
1296 	mutex_lock(&table->lock);
1297 	for (i = 0; i < gid_tbl_len; ++i) {
1298 		if (!device->ops.query_gid)
1299 			continue;
1300 		ret = device->ops.query_gid(device, port, i, &gid_attr.gid);
1301 		if (ret) {
1302 			dev_warn(&device->dev,
1303 				 "query_gid failed (%d) for index %d\n", ret,
1304 				 i);
1305 			goto err;
1306 		}
1307 		gid_attr.index = i;
1308 		add_modify_gid(table, &gid_attr);
1309 	}
1310 err:
1311 	mutex_unlock(&table->lock);
1312 	return ret;
1313 }
1314 
1315 static void ib_cache_update(struct ib_device *device,
1316 			    u8                port,
1317 			    bool	      enforce_security)
1318 {
1319 	struct ib_port_attr       *tprops = NULL;
1320 	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
1321 	int                        i;
1322 	int                        ret;
1323 
1324 	if (!rdma_is_port_valid(device, port))
1325 		return;
1326 
1327 	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
1328 	if (!tprops)
1329 		return;
1330 
1331 	ret = ib_query_port(device, port, tprops);
1332 	if (ret) {
1333 		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
1334 		goto err;
1335 	}
1336 
1337 	if (!rdma_protocol_roce(device, port)) {
1338 		ret = config_non_roce_gid_cache(device, port,
1339 						tprops->gid_tbl_len);
1340 		if (ret)
1341 			goto err;
1342 	}
1343 
1344 	pkey_cache = kmalloc(struct_size(pkey_cache, table,
1345 					 tprops->pkey_tbl_len),
1346 			     GFP_KERNEL);
1347 	if (!pkey_cache)
1348 		goto err;
1349 
1350 	pkey_cache->table_len = tprops->pkey_tbl_len;
1351 
1352 	for (i = 0; i < pkey_cache->table_len; ++i) {
1353 		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
1354 		if (ret) {
1355 			dev_warn(&device->dev,
1356 				 "ib_query_pkey failed (%d) for index %d\n",
1357 				 ret, i);
1358 			goto err;
1359 		}
1360 	}
1361 
1362 	write_lock_irq(&device->cache.lock);
1363 
1364 	old_pkey_cache = device->cache.ports[port -
1365 		rdma_start_port(device)].pkey;
1366 
1367 	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
1368 	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
1369 	device->cache.ports[port - rdma_start_port(device)].port_state =
1370 		tprops->state;
1371 
1372 	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
1373 							tprops->subnet_prefix;
1374 	write_unlock_irq(&device->cache.lock);
1375 
1376 	if (enforce_security)
1377 		ib_security_cache_change(device,
1378 					 port,
1379 					 tprops->subnet_prefix);
1380 
1381 	kfree(old_pkey_cache);
1382 	kfree(tprops);
1383 	return;
1384 
1385 err:
1386 	kfree(pkey_cache);
1387 	kfree(tprops);
1388 }
1389 
1390 static void ib_cache_task(struct work_struct *_work)
1391 {
1392 	struct ib_update_work *work =
1393 		container_of(_work, struct ib_update_work, work);
1394 
1395 	ib_cache_update(work->device,
1396 			work->port_num,
1397 			work->enforce_security);
1398 	kfree(work);
1399 }
1400 
1401 static void ib_cache_event(struct ib_event_handler *handler,
1402 			   struct ib_event *event)
1403 {
1404 	struct ib_update_work *work;
1405 
1406 	if (event->event == IB_EVENT_PORT_ERR    ||
1407 	    event->event == IB_EVENT_PORT_ACTIVE ||
1408 	    event->event == IB_EVENT_LID_CHANGE  ||
1409 	    event->event == IB_EVENT_PKEY_CHANGE ||
1410 	    event->event == IB_EVENT_SM_CHANGE   ||
1411 	    event->event == IB_EVENT_CLIENT_REREGISTER ||
1412 	    event->event == IB_EVENT_GID_CHANGE) {
1413 		work = kmalloc(sizeof *work, GFP_ATOMIC);
1414 		if (work) {
1415 			INIT_WORK(&work->work, ib_cache_task);
1416 			work->device   = event->device;
1417 			work->port_num = event->element.port_num;
1418 			if (event->event == IB_EVENT_PKEY_CHANGE ||
1419 			    event->event == IB_EVENT_GID_CHANGE)
1420 				work->enforce_security = true;
1421 			else
1422 				work->enforce_security = false;
1423 
1424 			queue_work(ib_wq, &work->work);
1425 		}
1426 	}
1427 }
1428 
1429 int ib_cache_setup_one(struct ib_device *device)
1430 {
1431 	int p;
1432 	int err;
1433 
1434 	rwlock_init(&device->cache.lock);
1435 
1436 	device->cache.ports =
1437 		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
1438 			sizeof(*device->cache.ports),
1439 			GFP_KERNEL);
1440 	if (!device->cache.ports)
1441 		return -ENOMEM;
1442 
1443 	err = gid_table_setup_one(device);
1444 	if (err) {
1445 		kfree(device->cache.ports);
1446 		device->cache.ports = NULL;
1447 		return err;
1448 	}
1449 
1450 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1451 		ib_cache_update(device, p + rdma_start_port(device), true);
1452 
1453 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
1454 			      device, ib_cache_event);
1455 	ib_register_event_handler(&device->cache.event_handler);
1456 	return 0;
1457 }
1458 
1459 void ib_cache_release_one(struct ib_device *device)
1460 {
1461 	int p;
1462 
1463 	/*
1464 	 * The release function frees all the cache elements.
1465 	 * This function should be called as part of freeing
1466 	 * all the device's resources when the cache could no
1467 	 * longer be accessed.
1468 	 */
1469 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1470 		kfree(device->cache.ports[p].pkey);
1471 
1472 	gid_table_release_one(device);
1473 	kfree(device->cache.ports);
1474 }
1475 
1476 void ib_cache_cleanup_one(struct ib_device *device)
1477 {
1478 	/* The cleanup function unregisters the event handler,
1479 	 * waits for all in-progress workqueue elements and cleans
1480 	 * up the GID cache. This function should be called after
1481 	 * the device was removed from the devices list and all
1482 	 * clients were removed, so the cache exists but is
1483 	 * non-functional and shouldn't be updated anymore.
1484 	 */
1485 	ib_unregister_event_handler(&device->cache.event_handler);
1486 	flush_workqueue(ib_wq);
1487 	gid_table_cleanup_one(device);
1488 
1489 	/*
1490 	 * Flush the wq a second time for any pending GID delete work.
1491 	 */
1492 	flush_workqueue(ib_wq);
1493 }
1494