197549c34SHans Petter Selasky /*
297549c34SHans Petter Selasky * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
397549c34SHans Petter Selasky *
497549c34SHans Petter Selasky * This software is available to you under a choice of one of two
597549c34SHans Petter Selasky * licenses. You may choose to be licensed under the terms of the GNU
697549c34SHans Petter Selasky * General Public License (GPL) Version 2, available from the file
797549c34SHans Petter Selasky * COPYING in the main directory of this source tree, or the
897549c34SHans Petter Selasky * OpenIB.org BSD license below:
997549c34SHans Petter Selasky *
1097549c34SHans Petter Selasky * Redistribution and use in source and binary forms, with or
1197549c34SHans Petter Selasky * without modification, are permitted provided that the following
1297549c34SHans Petter Selasky * conditions are met:
1397549c34SHans Petter Selasky *
1497549c34SHans Petter Selasky * - Redistributions of source code must retain the above
1597549c34SHans Petter Selasky * copyright notice, this list of conditions and the following
1697549c34SHans Petter Selasky * disclaimer.
1797549c34SHans Petter Selasky *
1897549c34SHans Petter Selasky * - Redistributions in binary form must reproduce the above
1997549c34SHans Petter Selasky * copyright notice, this list of conditions and the following
2097549c34SHans Petter Selasky * disclaimer in the documentation and/or other materials
2197549c34SHans Petter Selasky * provided with the distribution.
2297549c34SHans Petter Selasky *
2397549c34SHans Petter Selasky * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2497549c34SHans Petter Selasky * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2597549c34SHans Petter Selasky * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2697549c34SHans Petter Selasky * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2797549c34SHans Petter Selasky * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2897549c34SHans Petter Selasky * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2997549c34SHans Petter Selasky * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
3097549c34SHans Petter Selasky * SOFTWARE.
3197549c34SHans Petter Selasky */
3297549c34SHans Petter Selasky /***********************************************************/
3397549c34SHans Petter Selasky /*This file support the handling of the Alias GUID feature. */
3497549c34SHans Petter Selasky /***********************************************************/
3597549c34SHans Petter Selasky #include <rdma/ib_mad.h>
3697549c34SHans Petter Selasky #include <rdma/ib_smi.h>
3797549c34SHans Petter Selasky #include <rdma/ib_cache.h>
3897549c34SHans Petter Selasky #include <rdma/ib_sa.h>
3997549c34SHans Petter Selasky #include <rdma/ib_pack.h>
4097549c34SHans Petter Selasky #include <dev/mlx4/cmd.h>
4197549c34SHans Petter Selasky #include <linux/module.h>
4297549c34SHans Petter Selasky #include <linux/errno.h>
4397549c34SHans Petter Selasky #include <rdma/ib_user_verbs.h>
4497549c34SHans Petter Selasky #include <linux/delay.h>
45*8cc48704SHans Petter Selasky #include <linux/math64.h>
46*8cc48704SHans Petter Selasky #include <linux/ktime.h>
4797549c34SHans Petter Selasky #include "mlx4_ib.h"
4897549c34SHans Petter Selasky
4997549c34SHans Petter Selasky /*
5097549c34SHans Petter Selasky The driver keeps the current state of all guids, as they are in the HW.
5197549c34SHans Petter Selasky Whenever we receive an smp mad GUIDInfo record, the data will be cached.
5297549c34SHans Petter Selasky */
5397549c34SHans Petter Selasky
/*
 * Per-query context for one in-flight SA GuidInfo request.
 * Allocated in set_guid_rec and either freed by the completion handler
 * (when still linked on the port's cb_list) or handed back to a waiter
 * via 'done'.
 */
struct mlx4_alias_guid_work_context {
	u8 port;			/* 1-based port number the query targets */
	struct mlx4_ib_dev *dev ;
	struct ib_sa_query *sa_query;	/* handle of the outstanding SA query */
	struct completion done;		/* signalled by the query callback when sa_query is NULL */
	int query_id;
	struct list_head list;		/* linkage on ports_guid[].cb_list */
	int block_num;			/* GUIDInfo record (block) index being set */
	ib_sa_comp_mask guid_indexes;	/* comp mask of the entries this query carries */
	u8 method;			/* SA method (e.g. MLX4_GUID_INFO_RECORD_DELETE) */
};
6597549c34SHans Petter Selasky
/*
 * Describes the next GUID record to push to the SM; consumed by
 * set_guid_rec.
 */
struct mlx4_next_alias_guid_work {
	u8 port;	/* 0-based port index (set_guid_rec uses port + 1) */
	u8 block_num;	/* GUIDInfo record (block) index */
	u8 method;	/* SA method to use for this record */
	struct mlx4_sriov_alias_guid_info_rec_det rec_det;	/* record payload + comp mask */
};
7297549c34SHans Petter Selasky
73*8cc48704SHans Petter Selasky static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
74*8cc48704SHans Petter Selasky int *resched_delay_sec);
7597549c34SHans Petter Selasky
/*
 * Refresh the per-port alias GUID cache from a received GUIDInfo record.
 * Only the master (PF) keeps this cache; slaves' GUIDs whose index bit is
 * set in the record's comp mask are copied from @p_data into the demux
 * guid_cache. Entries beyond the last active slave terminate the scan.
 */
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
					 u8 port_num, u8 *p_data)
{
	int port_index = port_num - 1;
	u64 guid_indexes;
	int slave_id;
	int i;

	if (!mlx4_is_master(dev->dev))
		return;

	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_index].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
		 (unsigned long long)guid_indexes);

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* Index bits for a record occupy bits 4..11 of the mask. */
		if (!test_bit(i + 4, (unsigned long *)&guid_indexes)) {
			pr_debug("Guid number: %d in block: %d"
				 " was not updated\n", i, block_num);
			continue;
		}

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i;
		if (slave_id >= dev->dev->num_slaves) {
			pr_debug("The last slave: %d\n", slave_id);
			return;
		}

		/* cache the guid: */
		memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
		       &p_data[i * GUID_REC_SIZE],
		       GUID_REC_SIZE);
	}
}
11297549c34SHans Petter Selasky
get_cached_alias_guid(struct mlx4_ib_dev * dev,int port,int index)11397549c34SHans Petter Selasky static __be64 get_cached_alias_guid(struct mlx4_ib_dev *dev, int port, int index)
11497549c34SHans Petter Selasky {
11597549c34SHans Petter Selasky if (index >= NUM_ALIAS_GUID_PER_PORT) {
11697549c34SHans Petter Selasky pr_err("%s: ERROR: asked for index:%d\n", __func__, index);
11797549c34SHans Petter Selasky return (__force __be64) -1;
11897549c34SHans Petter Selasky }
11997549c34SHans Petter Selasky return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
12097549c34SHans Petter Selasky }
12197549c34SHans Petter Selasky
12297549c34SHans Petter Selasky
mlx4_ib_get_aguid_comp_mask_from_ix(int index)12397549c34SHans Petter Selasky ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
12497549c34SHans Petter Selasky {
12597549c34SHans Petter Selasky return IB_SA_COMP_MASK(4 + index);
12697549c34SHans Petter Selasky }
12797549c34SHans Petter Selasky
/*
 * React to a slave init/shutdown event on @port (1-based).
 *
 * On slave shutdown (!slave_init) the slave's current alias GUID entry is
 * replaced with the delete sentinel so the SM releases it; on slave init the
 * admin GUID is (re)installed. The record is then marked IDLE and scheduled
 * to run immediately, and the alias GUID worker is kicked.
 *
 * All record-state updates happen under ag_work_lock; the worker is started
 * only after the lock is dropped.
 */
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init)
{
	__be64 curr_guid, required_guid;
	int record_num = slave / 8;	/* 8 GUID entries per record */
	int index = slave % 8;		/* entry of this slave within the record */
	int port_index = port - 1;
	unsigned long flags;
	int do_work = 0;

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	/* Port-wide re-init pending; the port init path will handle GUIDs. */
	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
	    GUID_STATE_NEED_PORT_INIT)
		goto unlock;
	if (!slave_init) {
		curr_guid = *(__be64 *)&dev->sriov.
			alias_guid.ports_guid[port_index].
			all_rec_per_port[record_num].
			all_recs[GUID_REC_SIZE * index];
		/* Nothing to delete if already deleted or never set. */
		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
		    !curr_guid)
			goto unlock;
		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
	} else {
		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
		/* Admin GUID marked for delete: nothing to install. */
		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			goto unlock;
	}
	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		all_recs[GUID_REC_SIZE * index] = required_guid;
	/* Flag this entry so the worker pushes it to the SM. */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].guid_indexes
		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].status
		= MLX4_GUID_INFO_STATUS_IDLE;
	/* set to run immediately */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].time_to_run = 0;
	/* Reset the entry's retry backoff. */
	dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[record_num].
		guids_retry_schedule[index] = 0;
	do_work = 1;
unlock:
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);

	if (do_work)
		mlx4_ib_init_alias_guid_work(dev, port_index);
}
178*8cc48704SHans Petter Selasky
17997549c34SHans Petter Selasky /*
18097549c34SHans Petter Selasky * Whenever new GUID is set/unset (guid table change) create event and
18197549c34SHans Petter Selasky * notify the relevant slave (master also should be notified).
18297549c34SHans Petter Selasky * If the GUID value is not as we have in the cache the slave will not be
18397549c34SHans Petter Selasky * updated; in this case it waits for the smp_snoop or the port management
18497549c34SHans Petter Selasky * event to call the function and to update the slave.
18597549c34SHans Petter Selasky * block_number - the index of the block (16 blocks available)
18697549c34SHans Petter Selasky * port_number - 1 or 2
18797549c34SHans Petter Selasky */
void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num, u8 port_num,
					  u8 *p_data)
{
	int i;
	u64 guid_indexes;
	int slave_id, slave_port;
	enum slave_port_state new_state;
	enum slave_port_state prev_state;
	__be64 tmp_cur_ag, form_cache_ag;
	enum slave_port_gen_event gen_event;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags;
	__be64 required_value;

	/* Only the master (PF) generates slave notifications. */
	if (!mlx4_is_master(dev->dev))
		return;

	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
			all_rec_per_port[block_num];
	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
				   ports_guid[port_num - 1].
				   all_rec_per_port[block_num].guid_indexes);
	pr_debug("port: %d, guid_indexes: 0x%llx\n", port_num,
		 (unsigned long long)guid_indexes);

	/*calculate the slaves and notify them*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		/* the location of the specific index runs from bits 4..11 */
		if (!(test_bit(i + 4, (unsigned long *)&guid_indexes)))
			continue;

		slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
		/* Past the last VF: no further entries can match a slave. */
		if (slave_id >= dev->dev->persist->num_vfs + 1)
			return;

		slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
		if (slave_port < 0) /* this port isn't available for the VF */
			continue;

		tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
		form_cache_ag = get_cached_alias_guid(dev, port_num,
					(NUM_ALIAS_GUID_IN_REC * block_num) + i);
		/*
		 * Check if guid is not the same as in the cache,
		 * If it is different, wait for the snoop_smp or the port mgmt
		 * change event to update the slave on its port state change
		 */
		if (tmp_cur_ag != form_cache_ag)
			continue;

		/* Compare against the admin-required value under the lock;
		 * the delete sentinel is equivalent to "no GUID" (0). */
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];

		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			required_value = 0;

		if (tmp_cur_ag == required_value) {
			/* Entry now matches what was requested: clear its
			 * pending bit so it is not re-sent to the SM. */
			rec->guid_indexes = rec->guid_indexes &
				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
		} else {
			/* may notify port down if value is 0 */
			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
				spin_unlock_irqrestore(&dev->sriov.
					alias_guid.ag_work_lock, flags);
				continue;
			}
		}
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
				       flags);
		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
		/*2 cases: Valid GUID, and Invalid Guid*/

		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
			prev_state = mlx4_get_slave_port_state(dev->dev, slave_id, port_num);
			new_state = set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
								  MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
								  &gen_event);
			pr_debug("slave: %d, port: %d prev_port_state: %d,"
				 " new_port_state: %d, gen_event: %d\n",
				 slave_id, port_num, prev_state, new_state, gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_UP) {
				pr_debug("sending PORT_UP event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev, slave_id,
							       port_num, MLX4_PORT_CHANGE_SUBTYPE_ACTIVE);
			}
		} else { /* request to invalidate GUID */
			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
						      &gen_event);
			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
					 slave_id, port_num);
				mlx4_gen_port_state_change_eqe(dev->dev,
							       slave_id,
							       port_num,
							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
			}
		}
	}
}
29097549c34SHans Petter Selasky
/*
 * Completion callback for an SA GuidInfo query/set issued by set_guid_rec.
 *
 * On transport failure the record is rescheduled in one second. Otherwise,
 * each entry this query carried (cb_ctx->guid_indexes) is checked against
 * the SM's response under ag_work_lock:
 *  - entries the SM declined, or whose admin value changed while the query
 *    was in flight, are marked for retry with per-entry exponential backoff
 *    (doubling, capped at 60s); the record's rerun time uses the minimum
 *    backoff among its declined entries;
 *  - accepted entries are stored in all_recs, and SM-assigned values
 *    (admin value was 0) are persisted via mlx4_set_admin_guid.
 * Finally the alias-GUID work is requeued (unless the device is going
 * down) and the query context is released.
 */
static void aliasguid_query_handler(int status,
				    struct ib_sa_guidinfo_rec *guid_rec,
				    void *context)
{
	struct mlx4_ib_dev *dev;
	struct mlx4_alias_guid_work_context *cb_ctx = context;
	u8 port_index;
	int i;
	struct mlx4_sriov_alias_guid_info_rec_det *rec;
	unsigned long flags, flags1;
	ib_sa_comp_mask declined_guid_indexes = 0;
	ib_sa_comp_mask applied_guid_indexes = 0;
	unsigned int resched_delay_sec = 0;

	if (!context)
		return;

	dev = cb_ctx->dev;
	port_index = cb_ctx->port - 1;
	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[cb_ctx->block_num];

	if (status) {
		pr_debug("(port: %d) failed: status = %d\n",
			 cb_ctx->port, status);
		/* Transport error: retry this record in one second. */
		rec->time_to_run = ktime_get_ns() + 1 * NSEC_PER_SEC;
		goto out;
	}

	if (guid_rec->block_num != cb_ctx->block_num) {
		pr_err("block num mismatch: %d != %d\n",
		       cb_ctx->block_num, guid_rec->block_num);
		goto out;
	}

	pr_debug("lid/port: %d/%d, block_num: %d\n",
		 be16_to_cpu(guid_rec->lid), cb_ctx->port,
		 guid_rec->block_num);

	rec = &dev->sriov.alias_guid.ports_guid[port_index].
		all_rec_per_port[guid_rec->block_num];

	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
		__be64 sm_response, required_val;

		/* Skip entries this query did not carry. */
		if (!(cb_ctx->guid_indexes &
			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
			continue;
		sm_response = *(__be64 *)&guid_rec->guid_info_list
				[i * GUID_REC_SIZE];
		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
			if (required_val ==
			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
				goto next_entry;

			/* A new value was set till we got the response */
			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
				 (long long)be64_to_cpu(required_val),
				 i, guid_rec->block_num);
			goto entry_declined;
		}

		/* check if the SM didn't assign one of the records.
		 * if it didn't, re-ask for.
		 */
		if (sm_response == MLX4_NOT_SET_GUID) {
			/* Warn only on the first attempt for this entry. */
			if (rec->guids_retry_schedule[i] == 0)
				mlx4_ib_warn(&dev->ib_dev,
					     "%s:Record num %d in block_num: %d was declined by SM\n",
					     __func__, i,
					     guid_rec->block_num);
			goto entry_declined;
		} else {
			/* properly assigned record. */
			/* We save the GUID we just got from the SM in the
			 * admin_guid in order to be persistent, and in the
			 * request from the sm the process will ask for the same GUID */
			if (required_val &&
			    sm_response != required_val) {
				/* Warn only on first retry */
				if (rec->guids_retry_schedule[i] == 0)
					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						     __func__, i,
						     guid_rec->block_num,
						     (long long)be64_to_cpu(required_val),
						     (long long)be64_to_cpu(sm_response));
				goto entry_declined;
			} else {
				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
					sm_response;
				/* SM chose the GUID (admin had none):
				 * persist it as the new admin GUID. */
				if (required_val == 0)
					mlx4_set_admin_guid(dev->dev,
							    sm_response,
							    (guid_rec->block_num
							    * NUM_ALIAS_GUID_IN_REC) + i,
							    cb_ctx->port);
				goto next_entry;
			}
		}
entry_declined:
		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
		/* Exponential backoff per entry, capped at 60 seconds. */
		rec->guids_retry_schedule[i] =
			(rec->guids_retry_schedule[i] == 0) ? 1 :
			min((unsigned int)60,
			    rec->guids_retry_schedule[i] * 2);
		/* using the minimum value among all entries in that record */
		resched_delay_sec = (resched_delay_sec == 0) ?
				rec->guids_retry_schedule[i] :
				min(resched_delay_sec,
				    rec->guids_retry_schedule[i]);
		continue;

next_entry:
		rec->guids_retry_schedule[i] = 0;
	}

	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
	/* Record remains pending if anything was declined, or if entries
	 * outside this query are still flagged in guid_indexes. */
	if (declined_guid_indexes ||
	    rec->guid_indexes & ~(applied_guid_indexes)) {
		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
			 guid_rec->block_num,
			 (long long)be64_to_cpu((__force __be64)rec->guid_indexes),
			 (long long)be64_to_cpu((__force __be64)applied_guid_indexes),
			 (long long)be64_to_cpu((__force __be64)declined_guid_indexes));
		rec->time_to_run = ktime_get_ns() +
			resched_delay_sec * NSEC_PER_SEC;
	} else {
		rec->status = MLX4_GUID_INFO_STATUS_SET;
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	/*
	The func is call here to close the cases when the
	sm doesn't send smp, so in the sa response the driver
	notifies the slave.
	*/
	mlx4_ib_notify_slaves_on_guid_change(dev, guid_rec->block_num,
					     cb_ctx->port,
					     guid_rec->guid_info_list);
out:
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	if (!dev->sriov.is_going_down) {
		get_low_record_time_index(dev, port_index, &resched_delay_sec);
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
				   &dev->sriov.alias_guid.ports_guid[port_index].
				   alias_guid_work,
				   msecs_to_jiffies(resched_delay_sec * 1000));
	}
	/* sa_query still set: the context is on the port's cb_list and is
	 * ours to free; otherwise someone is waiting on 'done' — presumably
	 * a canceller that cleared sa_query (NOTE: review — inferred from
	 * the completion usage). */
	if (cb_ctx->sa_query) {
		list_del(&cb_ctx->list);
		kfree(cb_ctx);
	} else
		complete(&cb_ctx->done);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
45497549c34SHans Petter Selasky
/*
 * Flag every assignable GUID entry of record @index on 1-based @port for
 * (re)assignment by the SM. Entries marked for deletion and the HW GUID
 * (first entry of the first record) are skipped. If any entry ends up
 * flagged, the record is moved back to IDLE so the worker picks it up.
 */
static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
{
	struct mlx4_sriov_alias_guid_info_rec_det *rec =
		&dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index];
	ib_sa_comp_mask comp_mask = 0;
	u64 cur_admin_val;
	int i;

	rec->status = MLX4_GUID_INFO_STATUS_SET;

	/* calculate the comp_mask for that record.*/
	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		cur_admin_val = *(u64 *)&rec->all_recs[GUID_REC_SIZE * i];
		/*
		 * check the admin value: if it's for delete (~00LL) or
		 * it is the first guid of the first record (hw guid) or
		 * the records is not in ownership of the sysadmin and the sm doesn't
		 * need to assign GUIDs, then don't put it up for assignment.
		 */
		if (cur_admin_val == MLX4_GUID_FOR_DELETE_VAL ||
		    (!index && !i))
			continue;
		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
	}

	rec->guid_indexes |= comp_mask;
	if (rec->guid_indexes)
		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
}
48897549c34SHans Petter Selasky
/*
 * set_guid_rec - send an SA GuidInfoRecord SET/DELETE query for one
 * alias-GUID record block described by @rec.
 *
 * On any transient failure (port not active, no memory, query submit
 * failure) the record is invalidated and the port's delayed work is
 * rescheduled so the operation is retried later.
 *
 * Returns 0 on successful submission (completion is asynchronous, via
 * aliasguid_query_handler), -EAGAIN/-ENOMEM/query error otherwise.
 */
static int set_guid_rec(struct ib_device *ibdev,
			struct mlx4_next_alias_guid_work *rec)
{
	int err;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_guidinfo_rec guid_info_rec;
	ib_sa_comp_mask comp_mask;
	struct ib_port_attr attr;
	struct mlx4_alias_guid_work_context *callback_context;
	unsigned long resched_delay, flags, flags1;
	/* rec->port is 0-based; the IB core uses 1-based port numbers */
	u8 port = rec->port + 1;
	int index = rec->block_num;
	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
	struct list_head *head =
		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

	err = __mlx4_ib_query_port(ibdev, port, &attr, 1);
	if (err) {
		pr_debug("mlx4_ib_query_port failed (err: %d), port: %d\n",
			 err, port);
		return err;
	}
	/*check the port was configured by the sm, otherwise no need to send */
	if (attr.state != IB_PORT_ACTIVE) {
		pr_debug("port %d not active...rescheduling\n", port);
		resched_delay = 5 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}

	callback_context = kmalloc(sizeof *callback_context, GFP_KERNEL);
	if (!callback_context) {
		err = -ENOMEM;
		resched_delay = HZ * 5;
		goto new_schedule;
	}
	callback_context->port = port;
	callback_context->dev = dev;
	callback_context->block_num = index;
	callback_context->guid_indexes = rec_det->guid_indexes;
	callback_context->method = rec->method;

	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

	guid_info_rec.lid = cpu_to_be16(attr.lid);
	guid_info_rec.block_num = index;

	memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
	       GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC);
	/* only the GUID indexes marked in the record are sent to the SM */
	comp_mask = IB_SA_GUIDINFO_REC_LID | IB_SA_GUIDINFO_REC_BLOCK_NUM |
		rec_det->guid_indexes;

	init_completion(&callback_context->done);
	/* link the context before submitting, so the (possibly immediate)
	 * callback and the teardown path can always find it on cb_list */
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	list_add_tail(&callback_context->list, head);
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);

	callback_context->query_id =
		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
					  ibdev, port, &guid_info_rec,
					  comp_mask, rec->method, 1000,
					  GFP_KERNEL, aliasguid_query_handler,
					  callback_context,
					  &callback_context->sa_query);
	if (callback_context->query_id < 0) {
		/* submit failed: no callback will fire, so unlink and free
		 * the context ourselves, then retry in one second */
		pr_debug("ib_sa_guid_info_rec_query failed, query_id: "
			 "%d. will reschedule to the next 1 sec.\n",
			 callback_context->query_id);
		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
		list_del(&callback_context->list);
		kfree(callback_context);
		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
		resched_delay = 1 * HZ;
		err = -EAGAIN;
		goto new_schedule;
	}
	err = 0;
	goto out;

new_schedule:
	/* lock order: going_down_lock before ag_work_lock, matching the
	 * other schedulers in this file */
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
	invalidate_guid_record(dev, port, index);
	if (!dev->sriov.is_going_down) {
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   resched_delay);
	}
	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);

out:
	return err;
}
58397549c34SHans Petter Selasky
mlx4_ib_guid_port_init(struct mlx4_ib_dev * dev,int port)584*8cc48704SHans Petter Selasky static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
585*8cc48704SHans Petter Selasky {
586*8cc48704SHans Petter Selasky int j, k, entry;
587*8cc48704SHans Petter Selasky __be64 guid;
588*8cc48704SHans Petter Selasky
589*8cc48704SHans Petter Selasky /*Check if the SM doesn't need to assign the GUIDs*/
590*8cc48704SHans Petter Selasky for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
591*8cc48704SHans Petter Selasky for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
592*8cc48704SHans Petter Selasky entry = j * NUM_ALIAS_GUID_IN_REC + k;
593*8cc48704SHans Petter Selasky /* no request for the 0 entry (hw guid) */
594*8cc48704SHans Petter Selasky if (!entry || entry > dev->dev->persist->num_vfs ||
595*8cc48704SHans Petter Selasky !mlx4_is_slave_active(dev->dev, entry))
596*8cc48704SHans Petter Selasky continue;
597*8cc48704SHans Petter Selasky guid = mlx4_get_admin_guid(dev->dev, entry, port);
598*8cc48704SHans Petter Selasky *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
599*8cc48704SHans Petter Selasky all_rec_per_port[j].all_recs
600*8cc48704SHans Petter Selasky [GUID_REC_SIZE * k] = guid;
601*8cc48704SHans Petter Selasky pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
602*8cc48704SHans Petter Selasky entry,
603*8cc48704SHans Petter Selasky (long long)be64_to_cpu(guid),
604*8cc48704SHans Petter Selasky port);
605*8cc48704SHans Petter Selasky }
606*8cc48704SHans Petter Selasky }
607*8cc48704SHans Petter Selasky }
mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev * dev,int port)60897549c34SHans Petter Selasky void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
60997549c34SHans Petter Selasky {
61097549c34SHans Petter Selasky int i;
61197549c34SHans Petter Selasky unsigned long flags, flags1;
61297549c34SHans Petter Selasky
61397549c34SHans Petter Selasky pr_debug("port %d\n", port);
61497549c34SHans Petter Selasky
61597549c34SHans Petter Selasky spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
61697549c34SHans Petter Selasky spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
617*8cc48704SHans Petter Selasky
618*8cc48704SHans Petter Selasky if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
619*8cc48704SHans Petter Selasky GUID_STATE_NEED_PORT_INIT) {
620*8cc48704SHans Petter Selasky mlx4_ib_guid_port_init(dev, port);
621*8cc48704SHans Petter Selasky dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
622*8cc48704SHans Petter Selasky (~GUID_STATE_NEED_PORT_INIT);
623*8cc48704SHans Petter Selasky }
62497549c34SHans Petter Selasky for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
62597549c34SHans Petter Selasky invalidate_guid_record(dev, port, i);
62697549c34SHans Petter Selasky
62797549c34SHans Petter Selasky if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
62897549c34SHans Petter Selasky /*
62997549c34SHans Petter Selasky make sure no work waits in the queue, if the work is already
63097549c34SHans Petter Selasky queued(not on the timer) the cancel will fail. That is not a problem
63197549c34SHans Petter Selasky because we just want the work started.
63297549c34SHans Petter Selasky */
63397549c34SHans Petter Selasky cancel_delayed_work(&dev->sriov.alias_guid.
63497549c34SHans Petter Selasky ports_guid[port - 1].alias_guid_work);
63597549c34SHans Petter Selasky queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
63697549c34SHans Petter Selasky &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
63797549c34SHans Petter Selasky 0);
63897549c34SHans Petter Selasky }
63997549c34SHans Petter Selasky spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
64097549c34SHans Petter Selasky spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
64197549c34SHans Petter Selasky }
64297549c34SHans Petter Selasky
/*
 * set_required_record - build the next SA work item for one record block.
 *
 * Splits the pending GUID indexes of @record_index into "to delete" vs
 * "to set" masks, picks the entry with the lowest retry-schedule time,
 * and fills @next_rec with the method (SET or DELETE) matching that
 * entry together with the corresponding index mask.
 *
 * NOTE(review): assumes the record has at least one pending index
 * (rec->guid_indexes != 0), as guaranteed by get_low_record_time_index();
 * otherwise the lowest-time entry would stay -1.
 */
static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
				struct mlx4_next_alias_guid_work *next_rec,
				int record_index)
{
	struct mlx4_sriov_alias_guid_info_rec_det *rec =
		&dev->sriov.alias_guid.ports_guid[port].
		all_rec_per_port[record_index];
	ib_sa_comp_mask delete_guid_indexes = 0;
	ib_sa_comp_mask set_guid_indexes = 0;
	int lowest_time_entry = -1;
	int lowest_time = 0;
	int i;

	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
		ib_sa_comp_mask entry_mask =
			mlx4_ib_get_aguid_comp_mask_from_ix(i);

		if (!(rec->guid_indexes & entry_mask))
			continue;

		/* a value of all-ones requests deletion of the GUID */
		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
		    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
			delete_guid_indexes |= entry_mask;
		else
			set_guid_indexes |= entry_mask;

		if (lowest_time_entry == -1 ||
		    rec->guids_retry_schedule[i] <= lowest_time) {
			lowest_time_entry = i;
			lowest_time = rec->guids_retry_schedule[i];
		}
	}

	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
	next_rec->port = port;
	next_rec->block_num = record_index;

	/* the method follows the most urgent (lowest-time) entry */
	if (*(__be64 *)&rec->all_recs[lowest_time_entry * GUID_REC_SIZE] ==
	    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
		next_rec->rec_det.guid_indexes = delete_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
	} else {
		next_rec->rec_det.guid_indexes = set_guid_indexes;
		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
	}
}
689*8cc48704SHans Petter Selasky
690*8cc48704SHans Petter Selasky /* return index of record that should be updated based on lowest
691*8cc48704SHans Petter Selasky * rescheduled time
692*8cc48704SHans Petter Selasky */
get_low_record_time_index(struct mlx4_ib_dev * dev,u8 port,int * resched_delay_sec)693*8cc48704SHans Petter Selasky static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
694*8cc48704SHans Petter Selasky int *resched_delay_sec)
695*8cc48704SHans Petter Selasky {
696*8cc48704SHans Petter Selasky int record_index = -1;
697*8cc48704SHans Petter Selasky u64 low_record_time = 0;
698*8cc48704SHans Petter Selasky struct mlx4_sriov_alias_guid_info_rec_det rec;
699*8cc48704SHans Petter Selasky int j;
700*8cc48704SHans Petter Selasky
701*8cc48704SHans Petter Selasky for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
702*8cc48704SHans Petter Selasky rec = dev->sriov.alias_guid.ports_guid[port].
703*8cc48704SHans Petter Selasky all_rec_per_port[j];
704*8cc48704SHans Petter Selasky if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
705*8cc48704SHans Petter Selasky rec.guid_indexes) {
706*8cc48704SHans Petter Selasky if (record_index == -1 ||
707*8cc48704SHans Petter Selasky rec.time_to_run < low_record_time) {
708*8cc48704SHans Petter Selasky record_index = j;
709*8cc48704SHans Petter Selasky low_record_time = rec.time_to_run;
710*8cc48704SHans Petter Selasky }
711*8cc48704SHans Petter Selasky }
712*8cc48704SHans Petter Selasky }
713*8cc48704SHans Petter Selasky if (resched_delay_sec) {
714*8cc48704SHans Petter Selasky u64 curr_time = ktime_get_ns();
715*8cc48704SHans Petter Selasky
716*8cc48704SHans Petter Selasky *resched_delay_sec = (low_record_time < curr_time) ? 0 :
717*8cc48704SHans Petter Selasky div_u64((low_record_time - curr_time), NSEC_PER_SEC);
718*8cc48704SHans Petter Selasky }
719*8cc48704SHans Petter Selasky
720*8cc48704SHans Petter Selasky return record_index;
721*8cc48704SHans Petter Selasky }
722*8cc48704SHans Petter Selasky
72397549c34SHans Petter Selasky /* The function returns the next record that was
72497549c34SHans Petter Selasky * not configured (or failed to be configured) */
get_next_record_to_update(struct mlx4_ib_dev * dev,u8 port,struct mlx4_next_alias_guid_work * rec)72597549c34SHans Petter Selasky static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
72697549c34SHans Petter Selasky struct mlx4_next_alias_guid_work *rec)
72797549c34SHans Petter Selasky {
72897549c34SHans Petter Selasky unsigned long flags;
729*8cc48704SHans Petter Selasky int record_index;
730*8cc48704SHans Petter Selasky int ret = 0;
73197549c34SHans Petter Selasky
73297549c34SHans Petter Selasky spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
733*8cc48704SHans Petter Selasky record_index = get_low_record_time_index(dev, port, NULL);
734*8cc48704SHans Petter Selasky
735*8cc48704SHans Petter Selasky if (record_index < 0) {
736*8cc48704SHans Petter Selasky ret = -ENOENT;
737*8cc48704SHans Petter Selasky goto out;
738*8cc48704SHans Petter Selasky }
739*8cc48704SHans Petter Selasky
740*8cc48704SHans Petter Selasky set_required_record(dev, port, rec, record_index);
741*8cc48704SHans Petter Selasky out:
74297549c34SHans Petter Selasky spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
743*8cc48704SHans Petter Selasky return ret;
74497549c34SHans Petter Selasky }
74597549c34SHans Petter Selasky
alias_guid_work(struct work_struct * work)74697549c34SHans Petter Selasky static void alias_guid_work(struct work_struct *work)
74797549c34SHans Petter Selasky {
74897549c34SHans Petter Selasky struct delayed_work *delay = to_delayed_work(work);
74997549c34SHans Petter Selasky int ret = 0;
75097549c34SHans Petter Selasky struct mlx4_next_alias_guid_work *rec;
75197549c34SHans Petter Selasky struct mlx4_sriov_alias_guid_port_rec_det *sriov_alias_port =
75297549c34SHans Petter Selasky container_of(delay, struct mlx4_sriov_alias_guid_port_rec_det,
75397549c34SHans Petter Selasky alias_guid_work);
75497549c34SHans Petter Selasky struct mlx4_sriov_alias_guid *sriov_alias_guid = sriov_alias_port->parent;
75597549c34SHans Petter Selasky struct mlx4_ib_sriov *ib_sriov = container_of(sriov_alias_guid,
75697549c34SHans Petter Selasky struct mlx4_ib_sriov,
75797549c34SHans Petter Selasky alias_guid);
75897549c34SHans Petter Selasky struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
75997549c34SHans Petter Selasky
76097549c34SHans Petter Selasky rec = kzalloc(sizeof *rec, GFP_KERNEL);
76197549c34SHans Petter Selasky if (!rec) {
76297549c34SHans Petter Selasky pr_err("alias_guid_work: No Memory\n");
76397549c34SHans Petter Selasky return;
76497549c34SHans Petter Selasky }
76597549c34SHans Petter Selasky
76697549c34SHans Petter Selasky pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
76797549c34SHans Petter Selasky ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
76897549c34SHans Petter Selasky if (ret) {
76997549c34SHans Petter Selasky pr_debug("No more records to update.\n");
77097549c34SHans Petter Selasky goto out;
77197549c34SHans Petter Selasky }
77297549c34SHans Petter Selasky
773*8cc48704SHans Petter Selasky set_guid_rec(&dev->ib_dev, rec);
77497549c34SHans Petter Selasky out:
77597549c34SHans Petter Selasky kfree(rec);
77697549c34SHans Petter Selasky }
77797549c34SHans Petter Selasky
77897549c34SHans Petter Selasky
mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev * dev,int port)77997549c34SHans Petter Selasky void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
78097549c34SHans Petter Selasky {
78197549c34SHans Petter Selasky unsigned long flags, flags1;
78297549c34SHans Petter Selasky
78397549c34SHans Petter Selasky if (!mlx4_is_master(dev->dev))
78497549c34SHans Petter Selasky return;
78597549c34SHans Petter Selasky spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
78697549c34SHans Petter Selasky spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
78797549c34SHans Petter Selasky if (!dev->sriov.is_going_down) {
788*8cc48704SHans Petter Selasky /* If there is pending one should cancell then run, otherwise
789*8cc48704SHans Petter Selasky * won't run till previous one is ended as same work
790*8cc48704SHans Petter Selasky * struct is used.
791*8cc48704SHans Petter Selasky */
792*8cc48704SHans Petter Selasky cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
793*8cc48704SHans Petter Selasky alias_guid_work);
79497549c34SHans Petter Selasky queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
79597549c34SHans Petter Selasky &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
79697549c34SHans Petter Selasky }
79797549c34SHans Petter Selasky spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
79897549c34SHans Petter Selasky spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
79997549c34SHans Petter Selasky }
80097549c34SHans Petter Selasky
/*
 * mlx4_ib_destroy_alias_guid_service - tear down the alias-GUID service.
 *
 * For each port: cancel the delayed work, then drain cb_list by
 * cancelling every outstanding SA query and waiting for its completion.
 * The lock is dropped around ib_sa_cancel_query()/wait_for_completion()
 * because the query callback takes ag_work_lock itself; the entry is
 * unlinked first so no one else can touch it while unlocked.
 * Finally the per-port workqueues and the SA client are released.
 */
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			/* clearing sa_query signals the callback that the
			 * query was cancelled by teardown */
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			/* the callback completes 'done' even when cancelled */
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
83697549c34SHans Petter Selasky
/*
 * mlx4_ib_init_alias_guid_service - set up the alias-GUID service on the
 * master: register an SA client, initialize the per-port record caches
 * (all GUIDs marked as deleted, all records invalidated), and create one
 * ordered workqueue per port for alias_guid_work.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound (err_thread/err_unregister labels).
 * No-op (returns 0) on a non-master function.
 */
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
	char alias_wq_name[15];
	int ret = 0;
	int i, j;
	union ib_gid gid;

	if (!mlx4_is_master(dev->dev))
		return 0;
	dev->sriov.alias_guid.sa_client =
		kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
	if (!dev->sriov.alias_guid.sa_client)
		return -ENOMEM;

	ib_sa_register_client(dev->sriov.alias_guid.sa_client);

	spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);

	/* sanity check: every port must be able to report its HW GID */
	for (i = 1; i <= dev->num_ports; ++i) {
		if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) {
			ret = -EFAULT;
			goto err_unregister;
		}
	}

	for (i = 0 ; i < dev->num_ports; i++) {
		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
		/* defer per-VF GUID population to the first invalidate-all */
		dev->sriov.alias_guid.ports_guid[i].state_flags |=
			GUID_STATE_NEED_PORT_INIT;
		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
			/* mark each val as it was deleted */
			memset(dev->sriov.alias_guid.ports_guid[i].
				all_rec_per_port[j].all_recs, 0xFF,
			       sizeof(dev->sriov.alias_guid.ports_guid[i].
				      all_rec_per_port[j].all_recs));
		}
		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
		/*prepare the records, set them to be allocated by sm*/
		if (mlx4_ib_sm_guid_assign)
			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
			invalidate_guid_record(dev, i + 1, j);

		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
		dev->sriov.alias_guid.ports_guid[i].port  = i;

		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
		/* ordered: record updates for a port must run one at a time */
		dev->sriov.alias_guid.ports_guid[i].wq =
			alloc_ordered_workqueue(alias_wq_name, WQ_MEM_RECLAIM);
		if (!dev->sriov.alias_guid.ports_guid[i].wq) {
			ret = -ENOMEM;
			goto err_thread;
		}
		INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
			  alias_guid_work);
	}
	return 0;

err_thread:
	/* destroy only the workqueues created before the failure */
	for (--i; i >= 0; i--) {
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		dev->sriov.alias_guid.ports_guid[i].wq = NULL;
	}

err_unregister:
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
	dev->sriov.alias_guid.sa_client = NULL;
	pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret);
	return ret;
}
910