xref: /freebsd/sys/dev/mlx4/mlx4_ib/mlx4_ib_alias_GUID.c (revision 64a0982bee3db2236df43357e70ce8dddbc21d48)
1 /*
2  * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 /***********************************************************/
33 /* This file supports the handling of the Alias GUID feature. */
34 /***********************************************************/
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_smi.h>
37 #include <rdma/ib_cache.h>
38 #include <rdma/ib_sa.h>
39 #include <rdma/ib_pack.h>
40 #include <dev/mlx4/cmd.h>
41 #include <linux/module.h>
42 #include <linux/errno.h>
43 #include <rdma/ib_user_verbs.h>
44 #include <linux/delay.h>
45 #include "mlx4_ib.h"
46 
47 /*
48 The driver keeps the current state of all GUIDs as they are in the HW.
49 Whenever an SMP MAD carrying a GUIDInfo record is received, the data is cached.
50 */
51 
52 struct mlx4_alias_guid_work_context {
53 	u8 port;
54 	struct mlx4_ib_dev     *dev ;
55 	struct ib_sa_query     *sa_query;
56 	struct completion	done;
57 	int			query_id;
58 	struct list_head	list;
59 	int			block_num;
60 	u8			method;
61 };
62 
63 struct mlx4_next_alias_guid_work {
64 	u8 port;
65 	u8 block_num;
66 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
67 };
68 
69 
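/*
 * Cache the GUIDs carried in a GUIDInfo record: for every index flagged in
 * this block's guid_indexes, copy the GUID into the per-slave guid_cache of
 * the port. Only relevant on the master.
 */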
70 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
71 					 u8 port_num, u8 *p_data)
72 {
73 	int i;
74 	u64 guid_indexes;
75 	int slave_id;
76 	int port_index = port_num - 1;
77 
78 	if (!mlx4_is_master(dev->dev))
79 		return;
80 
81 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
82 				   ports_guid[port_num - 1].
83 				   all_rec_per_port[block_num].guid_indexes);
84 	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
85 	    (unsigned long long)guid_indexes);
86 
87 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
88 		/* The location of the specific index starts at bit 4
89 		 * and runs through bit 11 */
90 		if (test_bit(i + 4, (unsigned long *)&guid_indexes)) {
91 			slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
92 			if (slave_id >= dev->dev->num_slaves) {
93 				pr_debug("The last slave: %d\n", slave_id);
94 				return;
95 			}
96 
97 			/* cache the guid: */
98 			memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
99 			       &p_data[i * GUID_REC_SIZE],
100 			       GUID_REC_SIZE);
101 		} else
102 			pr_debug("Guid number: %d in block: %d"
103 				 " was not updated\n", i, block_num);
104 	}
105 }
106 
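/*
 * Return the cached alias GUID at @index for @port; an out-of-range index
 * yields an all-ones GUID.
 */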
107 static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
108 {
109 	if (index >= NUM_ALIAS_GUID_PER_PORT) {
110 		pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
111 		return (__force __be64) -1;
112 	}
113 	return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
114 }
115 
116 
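/*
 * Map a GUID index inside a GUIDInfo record to its SA component-mask bit:
 * GID0..GID7 occupy component-mask bits 4..11.
 */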
117 ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
118 {
119 	return IB_SA_COMP_MASK(4 + index);
120 }
121 
122 /*
123  * Whenever a new GUID is set/unset (guid table change), create an event and
124  * notify the relevant slave (the master should also be notified).
125  * If the GUID value is not the same as the one in the cache, the slave is not
126  * updated; in that case it waits for smp_snoop or the port management
127  * event to call the function again and update the slave.
128  * block_number - the index of the block (16 blocks available)
129  * port_number - 1 or 2
130  */
131 void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
132 					  int block_num, u8 port_num,
133 					  u8 *p_data)
134 {
135 	int i;
136 	u64 guid_indexes;
137 	int slave_id;
138 	enum slave_port_state new_state;
139 	enum slave_port_state prev_state;
140 	__be64 tmp_cur_ag, form_cache_ag;
141 	enum slave_port_gen_event gen_event;
142 
143 	if (!mlx4_is_master(dev->dev))
144 		return;
145 
146 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
147 				   ports_guid[port_num - 1].
148 				   all_rec_per_port[block_num].guid_indexes);
149 	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
150 	    (unsigned long long)guid_indexes);
151 
152 	/* Determine the affected slaves and notify them */
153 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
154 		/* the location of the specific index runs from bits 4..11 */
155 		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
156 			continue;
157 
158 		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
159 		if (slave_id >= dev->dev->num_slaves)
160 			return;
161 		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
162 		form_cache_ag = get_cached_alias_guid(dev, port_num,
163 					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
164 		/*
165 		 * Check whether the guid is the same as the one in the cache;
166 		 * if it is different, wait for smp_snoop or the port mgmt
167 		 * change event to update the slave on its port state change.
168 		 */
169 		if (tmp_cur_ag != form_cache_ag)
170 			continue;
171 		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
172 
173 		/* Two cases: valid GUID, and invalid GUID */
174 
175 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
176 			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
177 			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
178 								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
179 								  &gen_event);
180 			pr_debug("slave: %d, port: %d prev_port_state: %d,"
181 				 " new_port_state: %d, gen_event: %d\n",
182 				 slave_id, port_num, prev_state, new_state, gen_event);
183 			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
184 				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
185 					 slave_id, port_num);
186 				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
187 							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
188 			}
189 		} else { /* request to invalidate GUID */
190 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
191 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
192 						      &gen_event);
193 			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
194 				 slave_id, port_num);
195 			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
196 						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
197 		}
198 	}
199 }
200 
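/*
 * Callback invoked when the SA answers the GUIDInfo query sent by
 * set_guid_rec(): update the block's status, re-request entries the SM
 * declined (for driver-assigned records), keep the values the SM did set,
 * notify the slaves and re-arm the alias GUID work.
 */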
201 static void aliasguid_query_handler(int status,
202 				    struct ib_sa_guidinfo_rec *guid_rec,
203 				    void *context)
204 {
205 	struct mlx4_ib_dev *dev;
206 	struct mlx4_alias_guid_work_context *cb_ctx = context;
207 	u8 port_index;
208 	int i;
209 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
210 	unsigned long flags, flags1;
211 
212 	if (!context)
213 		return;
214 
215 	dev = cb_ctx->dev;
216 	port_index = cb_ctx->port - 1;
217 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
218 		all_rec_per_port[cb_ctx->block_num];
219 
220 	if (status) {
221 		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
222 		pr_debug("(port: %d) failed: status = %d\n",
223 			 cb_ctx->port, status);
224 		goto out;
225 	}
226 
227 	if (guid_rec->block_num != cb_ctx->block_num) {
228 		pr_err("block num mismatch: %d != %d\n",
229 		       cb_ctx->block_num, guid_rec->block_num);
230 		goto out;
231 	}
232 
233 	pr_debug("lid/port: %d/%d, block_num: %d\n",
234 		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
235 		 guid_rec->block_num);
236 
237 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
238 		all_rec_per_port[guid_rec->block_num];
239 
240 	rec->status = MLX4_GUID_INFO_STATUS_SET;
241 	rec->method = MLX4_GUID_INFO_RECORD_SET;
242 
243 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
244 		__be64 tmp_cur_ag;
245 		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
246 		if ((cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE)
247 		    && (MLX4_NOT_SET_GUID == tmp_cur_ag)) {
248 			pr_debug("%s:Record num %d in block_num:%d "
249 				"was deleted by SM,ownership by %d "
250 				"(0 = driver, 1=sysAdmin, 2=None)\n",
251 				__func__, i, guid_rec->block_num,
252 				rec->ownership);
253 			rec->guid_indexes = rec->guid_indexes &
254 				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
255 			continue;
256 		}
257 
258 		/* Check if the SM didn't assign one of the records.
259 		 * If it didn't, and it was not a sysadmin request,
260 		 * ask the SM to give a new GUID (instead of the driver request).
261 		 */
262 		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
263 			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
264 				     "block_num: %d was declined by SM, "
265 				     "ownership by %d (0 = driver, 1=sysAdmin,"
266 				     " 2=None)\n", __func__, i,
267 				     guid_rec->block_num, rec->ownership);
268 			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
269 				/* if it is driver assigned, ask the SM for a new GUID */
270 				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
271 					MLX4_NOT_SET_GUID;
272 
273 				/* Mark the record as not assigned, and let it
274 				 * be sent again on the next scheduled work run. */
275 				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
276 				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
277 			}
278 		} else {
279 		       /* Properly assigned record. */
280 		       /* Save the GUID we just got from the SM in the
281 			* admin_guid so that it is persistent; the next
282 			* request to the SM will ask for the same GUID. */
283 			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
284 			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
285 				/* the sysadmin assignment failed.*/
286 				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
287 					     " admin guid after SysAdmin "
288 					     "configuration. "
289 					     "Record num %d in block_num:%d "
290 					     "was declined by SM, "
291 					     "new val(0x%llx) was kept\n",
292 					      __func__, i,
293 					     guid_rec->block_num,
294 					     (long long)be64_to_cpu(*(__be64 *) &
295 							 rec->all_recs[i * GUID_REC_SIZE]));
296 			} else {
297 				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
298 				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
299 				       GUID_REC_SIZE);
300 			}
301 		}
302 	}
303 	/*
304 	The function is called here to cover the case where the
305 	SM doesn't send an SMP; the driver then notifies the slave
306 	from the SA response.
307 	*/
308 	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
309 					     cb_ctx->port,
310 					     guid_rec->guid_info_list);
311 out:
312 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
313 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
314 	if (!dev->sriov.is_going_down)
315 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
316 				   &dev->sriov.alias_guid.ports_guid[port_index].
317 				   alias_guid_work, 0);
318 	if (cb_ctx->sa_query) {
319 		list_del(&cb_ctx->list);
320 		kfree(cb_ctx);
321 	} else
322 		complete(&cb_ctx->done);
323 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
324 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
325 }
326 
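/*
 * Reset a GUID record to IDLE and recompute its comp mask so that every
 * entry that still needs to be configured is sent to the SM on the next pass.
 */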
327 static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
328 {
329 	int i;
330 	u64 cur_admin_val;
331 	ib_sa_comp_mask comp_mask = 0;
332 
333 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
334 		= MLX4_GUID_INFO_STATUS_IDLE;
335 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
336 		= MLX4_GUID_INFO_RECORD_SET;
337 
338 	/* calculate the comp_mask for that record.*/
339 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
340 		cur_admin_val =
341 			*(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
342 			all_rec_per_port[index].all_recs[GUID_REC_SIZE * i];
343 		/*
344 		Check the admin value: if it is marked for delete (~0ULL), or
345 		it is the first guid of the first record (the HW GUID), or
346 		the record is not owned by the sysadmin and the SM doesn't
347 		need to assign GUIDs, then don't put it up for assignment.
348 		*/
349 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
350 		    (!index && !i) ||
351 		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
352 		    ports_guid[port - 1].all_rec_per_port[index].ownership)
353 			continue;
354 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
355 	}
356 	dev->sriov.alias_guid.ports_guid[port - 1].
357 		all_rec_per_port[index].guid_indexes = comp_mask;
358 }
359 
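/*
 * Build a GUIDInfo record for one block and send it to the SM through an SA
 * query; if the port is not active or the query cannot be issued, invalidate
 * the record and reschedule the alias GUID work.
 */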
360 static int set_guid_rec(struct ib_device *ibdev,
361 			u8 port, int index,
362 			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
363 {
364 	int err;
365 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
366 	struct ib_sa_guidinfo_rec guid_info_rec;
367 	ib_sa_comp_mask comp_mask;
368 	struct ib_port_attr attr;
369 	struct mlx4_alias_guid_work_context *callback_context;
370 	unsigned long resched_delay, flags, flags1;
371 	struct list_head *head =
372 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
373 
374 	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
375 	if (err) {
376 		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
377 			 err, port);
378 		return err;
379 	}
380 	/* Check that the port was configured by the SM; otherwise there is no need to send */
381 	if (attr.state != IB_PORT_ACTIVE) {
382 		pr_debug("port %d not active...rescheduling\n", port);
383 		resched_delay = 5 * HZ;
384 		err = -EAGAIN;
385 		goto new_schedule;
386 	}
387 
388 	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
389 	if (!callback_context) {
390 		err = -ENOMEM;
391 		resched_delay = HZ * 5;
392 		goto new_schedule;
393 	}
394 	callback_context->port = port;
395 	callback_context->dev = dev;
396 	callback_context->block_num = index;
397 	callback_context->method = rec_det->method;
398 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
399 
400 	guid_info_rec.lid = cpu_to_be16(attr.lid);
401 	guid_info_rec.block_num = index;
402 
403 	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
404 	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
405 	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
406 		rec_det->guid_indexes;
407 
408 	init_completion(&callback_context->done);
409 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
410 	list_add_tail(&callback_context->list, head);
411 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
412 
413 	callback_context->query_id =
414 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
415 					  ibdev, port, &guid_info_rec,
416 					  comp_mask, rec_det->method, 1000,
417 					  GFP_KERNEL, aliasguid_query_handler,
418 					  callback_context,
419 					  &callback_context->sa_query);
420 	if (callback_context->query_id < 0) {
421 		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
422 			 "%d. will reschedule to the next 1 sec.\n",
423 			 callback_context->query_id);
424 		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
425 		list_del(&callback_context->list);
426 		kfree(callback_context);
427 		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
428 		resched_delay = 1 * HZ;
429 		err = -EAGAIN;
430 		goto new_schedule;
431 	}
432 	err = 0;
433 	goto out;
434 
435 new_schedule:
436 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
437 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
438 	invalidate_guid_record(dev, port, index);
439 	if (!dev->sriov.is_going_down) {
440 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
441 				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
442 				   resched_delay);
443 	}
444 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
445 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
446 
447 out:
448 	return err;
449 }
450 
451 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
452 {
453 	int i;
454 	unsigned long flags, flags1;
455 
456 	pr_debug("port %d\n", port);
457 
458 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
459 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
460 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
461 		invalidate_guid_record(dev, port, i);
462 
463 	if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
464 		/*
465 		Make sure no work is waiting in the queue. If the work is already
466 		queued (not on the timer), the cancel will fail; that is not a problem
467 		because we just want the work started.
468 		*/
469 		cancel_delayed_work(&dev->sriov.alias_guid.
470 				      ports_guid[port - 1].alias_guid_work);
471 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
472 				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
473 				   0);
474 	}
475 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
476 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
477 }
478 
479 /* The function returns the next record that was
480  * not configured (or failed to be configured) */
481 static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
482 				     struct mlx4_next_alias_guid_work *rec)
483 {
484 	int j;
485 	unsigned long flags;
486 
487 	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
488 		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
489 		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
490 		    MLX4_GUID_INFO_STATUS_IDLE) {
491 			memcpy(&rec->rec_det,
492 			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
493 			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
494 			rec->port = port;
495 			rec->block_num = j;
496 			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
497 				MLX4_GUID_INFO_STATUS_PENDING;
498 			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
499 			return 0;
500 		}
501 		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
502 	}
503 	return -ENOENT;
504 }
505 
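/*
 * Store an administratively built record (comp mask, GUID values and status)
 * in the per-port table.
 */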
506 static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
507 					     int rec_index,
508 					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
509 {
510 	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
511 		rec_det->guid_indexes;
512 	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
513 	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
514 	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
515 		rec_det->status;
516 }
517 
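/*
 * Zero the admin GUID values of every record on the port and set their comp
 * masks so the SM will assign them; record 0 leaves out GID0 because index 0
 * holds the physical (HW) GUID.
 */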
518 static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
519 {
520 	int j;
521 	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
522 
523 	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
524 		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
525 		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
526 			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
527 			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
528 			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
529 			IB_SA_GUIDINFO_REC_GID7;
530 		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
531 		set_administratively_guid_record(dev, port, j, &rec_det);
532 	}
533 }
534 
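/*
 * Delayed work handler: fetch the next IDLE record of the port, mark it
 * PENDING and push it to the SM via set_guid_rec().
 */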
535 static void alias_guid_work(struct work_struct *work)
536 {
537 	struct delayed_work *delay = to_delayed_work(work);
538 	int ret = 0;
539 	struct mlx4_next_alias_guid_work *rec;
540 	struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
541 		container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
542 			     alias_guid_work);
543 	struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
544 	struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
545 						struct mlx4_ib_sriov,
546 						alias_guid);
547 	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
548 
549 	rec = kzalloc(sizeof *rec, GFP_KERNEL);
550 	if (!rec) {
551 		pr_err("alias_guid_work: No Memory\n");
552 		return;
553 	}
554 
555 	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
556 	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
557 	if (ret) {
558 		pr_debug("No more records to update.\n");
559 		goto out;
560 	}
561 
562 	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
563 		     &rec->rec_det);
564 
565 out:
566 	kfree(rec);
567 }
568 
569 
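/* Queue the alias GUID work for a port (master only), unless the device is going down. */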
570 void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
571 {
572 	unsigned long flags, flags1;
573 
574 	if (!mlx4_is_master(dev->dev))
575 		return;
576 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
577 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
578 	if (!dev->sriov.is_going_down) {
579 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
580 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
581 	}
582 	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
583 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
584 }
585 
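/*
 * Tear down the alias GUID service: cancel the per-port delayed work, cancel
 * outstanding SA queries and wait for their completions, destroy the per-port
 * workqueues and unregister/free the SA client.
 */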
586 void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
587 {
588 	int i;
589 	struct mlx4_ib_sriov *sriov = &dev->sriov;
590 	struct mlx4_alias_guid_work_context *cb_ctx;
591 	struct mlx4_sriov_alias_guid_port_rec_det *det;
592 	struct ib_sa_query *sa_query;
593 	unsigned long flags;
594 
595 	for (i = 0 ; i < dev->num_ports; i++) {
596 		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
597 		det = &sriov->alias_guid.ports_guid[i];
598 		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
599 		while (!list_empty(&det->cb_list)) {
600 			cb_ctx = list_entry(det->cb_list.next,
601 					    struct mlx4_alias_guid_work_context,
602 					    list);
603 			sa_query = cb_ctx->sa_query;
604 			cb_ctx->sa_query = NULL;
605 			list_del(&cb_ctx->list);
606 			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
607 			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
608 			wait_for_completion(&cb_ctx->done);
609 			kfree(cb_ctx);
610 			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
611 		}
612 		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
613 	}
614 	for (i = 0 ; i < dev->num_ports; i++) {
615 		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
616 		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
617 	}
618 	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
619 	kfree(dev->sriov.alias_guid.sa_client);
620 }
621 
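/*
 * Set up the alias GUID service on the master: register an SA client,
 * initialize each port's records and ownership (driver-assigned when
 * mlx4_ib_sm_guid_assign is set, otherwise none-assigned and marked deleted),
 * invalidate the records so they get sent to the SM, and create a
 * single-threaded workqueue per port for the alias GUID work.
 */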
622 int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
623 {
624 	char alias_wq_name[15];
625 	int ret = 0;
626 	int i, j, k;
627 	union ib_gid gid;
628 
629 	if (!mlx4_is_master(dev->dev))
630 		return 0;
631 	dev->sriov.alias_guid.sa_client =
632 		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
633 	if (!dev->sriov.alias_guid.sa_client)
634 		return -ENOMEM;
635 
636 	ib_sa_register_client(dev->sriov.alias_guid.sa_client);
637 
638 	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
639 
640 	for (i = 1; i <= dev->num_ports; ++i) {
641 		if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
642 			ret = -EFAULT;
643 			goto err_unregister;
644 		}
645 	}
646 
647 	for (i = 0 ; i < dev->num_ports; i++) {
648 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
649 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
650 		/* Check if the SM doesn't need to assign the GUIDs */
651 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
652 			if (mlx4_ib_sm_guid_assign) {
653 				dev->sriov.alias_guid.ports_guid[i].
654 					all_rec_per_port[j].
655 					ownership = MLX4_GUID_DRIVER_ASSIGN;
656 				continue;
657 			}
658 			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
659 					ownership = MLX4_GUID_NONE_ASSIGN;
660 			/* Mark each value as deleted until the
661 			   sysadmin gives it a valid value */
662 			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
663 				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
664 					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
665 						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
666 			}
667 		}
668 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
669 		/* Prepare the records; set them up to be assigned by the SM */
670 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
671 			invalidate_guid_record(dev, i + 1, j);
672 
673 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
674 		dev->sriov.alias_guid.ports_guid[i].port  = i;
675 		if (mlx4_ib_sm_guid_assign)
676 			set_all_slaves_guids(dev, i);
677 
678 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
679 		dev->sriov.alias_guid.ports_guid[i].wq =
680 			create_singlethread_workqueue(alias_wq_name);
681 		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
682 			ret = -ENOMEM;
683 			goto err_thread;
684 		}
685 		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
686 			  alias_guid_work);
687 	}
688 	return 0;
689 
690 err_thread:
691 	for (--i; i >= 0; i--) {
692 		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
693 		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
694 	}
695 
696 err_unregister:
697 	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
698 	kfree(dev->sriov.alias_guid.sa_client);
699 	dev->sriov.alias_guid.sa_client = NULL;
700 	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
701 	return ret;
702 }
703