// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_hal.h"
#include "nitrox_common.h"
#include "nitrox_isr.h"
#include "nitrox_mbx.h"

/**
 * num_vfs_valid - validate the requested VF count
 * @num_vfs: number of VF(s)
 *
 * Return: true if @num_vfs is one of the supported counts (16, 32, 64
 * or 128), false otherwise.
 */
static inline bool num_vfs_valid(int num_vfs)
{
	bool valid = false;

	switch (num_vfs) {
	case 16:
	case 32:
	case 64:
	case 128:
		valid = true;
		break;
	}

	return valid;
}

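/**
 * num_vfs_to_mode - map a VF count to the corresponding device mode
 * @num_vfs: number of VF(s); 0 maps to plain PF mode
 */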
static inline enum vf_mode num_vfs_to_mode(int num_vfs)
{
	enum vf_mode mode = 0;

	switch (num_vfs) {
	case 0:
		mode = __NDEV_MODE_PF;
		break;
	case 16:
		mode = __NDEV_MODE_VF16;
		break;
	case 32:
		mode = __NDEV_MODE_VF32;
		break;
	case 64:
		mode = __NDEV_MODE_VF64;
		break;
	case 128:
		mode = __NDEV_MODE_VF128;
		break;
	}

	return mode;
}

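/**
 * vf_mode_to_nr_queues - queue count for a device mode
 * @mode: device mode
 *
 * Return: all PF queues in PF mode, otherwise the number of queues
 * assigned to each VF in the given SR-IOV mode.
 */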
static inline int vf_mode_to_nr_queues(enum vf_mode mode)
{
	int nr_queues = 0;

	switch (mode) {
	case __NDEV_MODE_PF:
		nr_queues = MAX_PF_QUEUES;
		break;
	case __NDEV_MODE_VF16:
		nr_queues = 8;
		break;
	case __NDEV_MODE_VF32:
		nr_queues = 4;
		break;
	case __NDEV_MODE_VF64:
		nr_queues = 2;
		break;
	case __NDEV_MODE_VF128:
		nr_queues = 1;
		break;
	}

	return nr_queues;
}

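/**
 * nitrox_pf_cleanup - release PF queues and unregister crypto algorithms
 * @ndev: NITROX device
 *
 * Called before enabling SR-IOV; the PF owns no queues once VFs are active.
 */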
static void nitrox_pf_cleanup(struct nitrox_device *ndev)
{
	/* PF has no queues in SR-IOV mode */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* unregister crypto algorithms */
	nitrox_crypto_unregister();

	/* cleanup PF resources */
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled
 * @ndev: NITROX device
 *
 * Return: 0 on success, or a negative errno on failure.
 */
static int nitrox_pf_reinit(struct nitrox_device *ndev)
{
	int err;

	/* allocate resources for PF */
	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err) {
		nitrox_common_sw_cleanup(ndev);
		return err;
	}

	/* configure the AQM queues */
	nitrox_config_aqm_rings(ndev);

	/* configure the packet queues */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* set device to ready state */
	atomic_set(&ndev->state, __NDEV_READY);

	/* register crypto algorithms */
	return nitrox_crypto_register();
}

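/**
 * nitrox_sriov_cleanup - release PF resources used while SR-IOV is enabled
 * @ndev: NITROX device
 */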
static void nitrox_sriov_cleanup(struct nitrox_device *ndev)
{
	/* unregister interrupts for PF in SR-IOV */
	nitrox_sriov_unregister_interrupts(ndev);
	nitrox_mbox_cleanup(ndev);
}

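/**
 * nitrox_sriov_init - set up PF resources used while SR-IOV is enabled
 * @ndev: NITROX device
 *
 * Registers the PF interrupts used in SR-IOV mode and initializes the
 * PF/VF mailbox.
 *
 * Return: 0 on success, or a negative errno on failure.
 */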
static int nitrox_sriov_init(struct nitrox_device *ndev)
{
	int ret;

	/* register interrupts for PF in SR-IOV */
	ret = nitrox_sriov_register_interupts(ndev);
	if (ret)
		return ret;

	ret = nitrox_mbox_init(ndev);
	if (ret)
		goto sriov_init_fail;

	return 0;

sriov_init_fail:
	nitrox_sriov_cleanup(ndev);
	return ret;
}

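/**
 * nitrox_sriov_enable - switch the device from PF mode to SR-IOV mode
 * @pdev: PCI device
 * @num_vfs: number of VFs to enable (16, 32, 64 or 128)
 *
 * Tears down the PF queues, enables the requested VFs and sets up the
 * PF-side mailbox and interrupts used to service them. On failure the
 * device is restored to PF mode.
 *
 * Return: @num_vfs on success, or a negative errno on failure.
 */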
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);
	int err;

	if (!num_vfs_valid(num_vfs)) {
		dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
		return -EINVAL;
	}

	if (pci_num_vf(pdev) == num_vfs)
		return num_vfs;

	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
		return err;
	}
	dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);

	ndev->mode = num_vfs_to_mode(num_vfs);
	ndev->iov.num_vfs = num_vfs;
	ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode);
	/* set bit in flags */
	set_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	/* cleanup PF resources */
	nitrox_pf_cleanup(ndev);

	/* PF SR-IOV mode initialization */
	err = nitrox_sriov_init(ndev);
	if (err)
		goto iov_fail;

	config_nps_core_vfcfg_mode(ndev, ndev->mode);
	return num_vfs;

iov_fail:
	pci_disable_sriov(pdev);
	/* clear bit in flags */
	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
	ndev->iov.num_vfs = 0;
	ndev->mode = __NDEV_MODE_PF;
	/* reset back to working mode in PF */
	nitrox_pf_reinit(ndev);
	return err;
}

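/**
 * nitrox_sriov_disable - switch the device back from SR-IOV mode to PF mode
 * @pdev: PCI device
 *
 * Fails if any VF is still assigned to a guest. On success the PF queues
 * and crypto algorithms are re-registered via nitrox_pf_reinit().
 *
 * Return: 0 on success, or a negative errno on failure.
 */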
static int nitrox_sriov_disable(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
		return 0;

	if (pci_vfs_assigned(pdev)) {
		dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
		return -EPERM;
	}
	pci_disable_sriov(pdev);
	/* clear bit in flags */
	clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);

	ndev->iov.num_vfs = 0;
	ndev->iov.max_vf_queues = 0;
	ndev->mode = __NDEV_MODE_PF;

	/* cleanup PF SR-IOV resources */
	nitrox_sriov_cleanup(ndev);

	config_nps_core_vfcfg_mode(ndev, ndev->mode);

	return nitrox_pf_reinit(ndev);
}

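/**
 * nitrox_sriov_configure - enable or disable SR-IOV on the device
 * @pdev: PCI device
 * @num_vfs: number of VFs requested; 0 disables SR-IOV
 *
 * Intended as the driver's .sriov_configure callback, which the PCI core
 * typically invokes when userspace writes the device's sriov_numvfs sysfs
 * attribute, e.g. "echo 64 > /sys/bus/pci/devices/<BDF>/sriov_numvfs".
 *
 * Return: the number of VFs enabled, 0 when SR-IOV is disabled, or a
 * negative errno on failure.
 */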
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (!num_vfs)
		return nitrox_sriov_disable(pdev);

	return nitrox_sriov_enable(pdev, num_vfs);
}