xref: /linux/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c (revision 3874d6a8b61966a77aa743b4160ba96bf3081ce5)
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
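/* Common guard for all VF ndo handlers: the PF must be up, SR-IOV must
 * be enabled, and the VF index must be within range.
 */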
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

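/* ndo_set_vf_spoofchk handler: enables or disables the firmware's
 * source MAC address check for a VF via HWRM_FUNC_CFG.
 */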
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

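/* ndo_get_vf_config handler: reports the cached MAC, VLAN, rate and
 * link-state settings for a VF back to the rtnetlink caller.
 */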
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

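/* ndo_set_vf_mac handler: stores the administratively assigned MAC
 * address and programs it as the VF's default MAC via HWRM_FUNC_CFG.
 */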
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* Reject broadcast and multicast MAC addresses.  A zero MAC
	 * address means the VF is allowed to use its own MAC address.
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

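/* ndo_set_vf_vlan handler: programs a default VLAN for the VF.  User
 * priority (QoS) is not supported yet, so a nonzero qos is rejected.
 */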
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority is still needed;
	 * for now, fail the command if a priority is specified.
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

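/* ndo_set_vf_rate handler: validates the requested min/max TX rates
 * against the PF link speed and programs them via HWRM_FUNC_CFG.
 */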
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

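/* ndo_set_vf_link_state handler: records the requested link state in
 * the VF flags.  AUTO tracks the physical link; ENABLE/DISABLE force
 * the state reported to the VF in bnxt_vf_set_link().
 */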
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	/* CHIMP TODO: send msg to VF to update new link state */

	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

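/* Tell the firmware to release the HW resources of each VF in the
 * range [first_vf_id, first_vf_id + num_vfs) via HWRM_FUNC_VF_RESC_FREE.
 */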
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

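/* Allocate the per-VF state array and the DMA-coherent pages used to
 * receive forwarded HWRM requests.  Each VF gets a BNXT_HWRM_REQ_MAX_SIZE
 * slice of a page, so the page count is num_vfs * BNXT_HWRM_REQ_MAX_SIZE
 * rounded up to a whole number of BNXT_PAGE_SIZE pages.
 */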
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

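/* Register the request buffer pages with the firmware so it can DMA
 * forwarded VF commands into them (HWRM_FUNC_BUF_RGTR).
 */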
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so that it does not exceed the number of
	 * HW ring groups.  This WA should be removed once new HWRM provides
	 * HW ring groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

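/* Enable SR-IOV: scale the requested VF count down until the PF can
 * spare at least one RX ring, one TX ring and one RSS context per VF,
 * then allocate VF resources, reserve firmware resources, register the
 * request buffers and finally call pci_enable_sriov().
 */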
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs.  At a
	 * minimum we require 1 RX and 1 TX ring for each VF.  In this
	 * minimum configuration, features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

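/* Tear down SR-IOV.  VFs that are still assigned to VMs cannot be
 * freed, so in that case only the driver-side state is released;
 * otherwise the PCI VFs are disabled and the firmware resources are
 * returned before the PF re-reads its capabilities.
 */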
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

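/* sriov_configure PCI driver hook.  It is normally invoked through the
 * standard sysfs knob, e.g. (the device address below is illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Writing 0 disables SR-IOV; a nonzero value first cleans up any
 * existing VFs and then enables the requested number.
 */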
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the number of enabled VFs is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* If there are pre-existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

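/* The next three helpers answer HWRM commands that a VF driver sent to
 * the PF: bnxt_hwrm_fwd_resp() returns a PF-crafted response,
 * bnxt_hwrm_fwd_err_resp() rejects the command, and
 * bnxt_hwrm_exec_fwd_resp() asks the firmware to execute the VF's
 * original request unmodified.
 */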
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

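/* A VF may only add an L2 filter for its own MAC address: allow the
 * request if no MAC was administratively assigned or if the requested
 * address matches the assigned one, otherwise reject it.
 */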
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

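/* Answer a VF's HWRM_PORT_PHY_QCFG query.  If the VF link state is
 * AUTO, the real PHY status is forwarded; if it is forced, a synthetic
 * response is built from the cached PHY state with the link forced up
 * or down as requested.
 */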
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link ==
			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				if (phy_qcfg_resp.auto_link_speed)
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.auto_link_speed;
				else
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.force_link_speed;
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

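/* Dispatch a forwarded VF command to the appropriate validation
 * handler based on its HWRM request type.  Unknown commands are
 * silently ignored.
 */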
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate whether the VF is allowed to change the
		 * MAC address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

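/* Walk the VF event bitmap and process the pending forwarded command
 * of each VF whose bit is set.
 */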
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

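/* Runs on the VF: query the firmware (HWRM_FUNC_QCAPS on our own fid)
 * for the MAC address assigned by the PF and sync it into the netdev.
 */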
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address
	 *    by default, but the stored zero MAC will allow the VF user
	 *    to change the random MAC address using ndo_set_mac_address()
	 *    if desired.
	 */
	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}
#endif