/* drivers/crypto/cavium/nitrox/nitrox_main.c */
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_debugfs.h"

#define CNN55XX_DEV_ID	0x12
#define UCODE_HLEN 48
#define SE_GROUP 0

#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW	FW_DIR "cnn55xx_se.fw"

static const char nitrox_driver_name[] = "CNN55XX";

static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;

/**
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");

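/*
 * When CONFIG_PCI_IOV is enabled the SR-IOV configuration routine is
 * implemented elsewhere in the driver; otherwise the do-nothing stub
 * below is used.
 */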
#ifdef CONFIG_PCI_IOV
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif

/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version
 * @code_size: code section size
 * @raz: alignment
 * @code: code section
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};

/**
 * write_to_ucd_unit - Write firmware to the NITROX UCD unit
 * @ndev: NITROX device
 * @ucode: microcode image to load
 */
static void write_to_ucd_unit(struct nitrox_device *ndev,
			      struct ucode *ucode)
{
	u32 code_size = be32_to_cpu(ucode->code_size) * 2;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 *  -------------
	 *  |    BLK 7  |
	 *  -------------
	 *  |    BLK 6  |
	 *  -------------
	 *  |    ...    |
	 *  -------------
	 *  |    BLK 0  |
	 *  -------------
	 *  Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, 0);

	code_size = roundup(code_size, 8);
	while (code_size) {
		data = ucode->code[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

	/* put all SE cores in group 0 */
	offset = POM_GRP_EXECMASKX(SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	for (i = 0; i < ndev->hw.se_cores; i++) {
		/*
		 * write block number and firmware length
		 * bit:<2:0> block number
		 * bit 3 set: SE uses 32KB microcode
		 * bit 3 clear: SE uses 64KB microcode
		 */
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, 0x8);
	}
	usleep_range(300, 400);
}

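/**
 * nitrox_load_fw - Load microcode on the SE cores
 * @ndev: NITROX device
 * @fw_name: firmware image name
 *
 * Requests the firmware image, records its version string and writes the
 * code section into the UCD unit.
 *
 * Return: 0 on success, or a negative error code if the firmware could
 *         not be obtained.
 */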
static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
{
	const struct firmware *fw;
	struct ucode *ucode;
	int ret;

	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;
	/* copy the firmware version */
	memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[VERSION_LEN - 1] = '\0';

	write_to_ucd_unit(ndev, ucode);
	release_firmware(fw);

	return 0;
}

/**
 * nitrox_add_to_devlist - add NITROX device to global device list
 * @ndev: NITROX device
 */
static int nitrox_add_to_devlist(struct nitrox_device *ndev)
{
	struct nitrox_device *dev;
	int ret = 0;

	INIT_LIST_HEAD(&ndev->list);
	refcount_set(&ndev->refcnt, 1);

	mutex_lock(&devlist_lock);
	list_for_each_entry(dev, &ndevlist, list) {
		if (dev == ndev) {
			ret = -EEXIST;
			goto unlock;
		}
	}
	ndev->idx = num_devices++;
	list_add_tail(&ndev->list, &ndevlist);
unlock:
	mutex_unlock(&devlist_lock);
	return ret;
}

/**
 * nitrox_remove_from_devlist - remove NITROX device from
 *   global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}

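/**
 * nitrox_get_first_device - Get the first NITROX device in ready state
 *
 * Takes a reference on the device returned; release it with
 * nitrox_put_device().
 *
 * Return: pointer to the device, or NULL if no device is ready.
 */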
struct nitrox_device *nitrox_get_first_device(void)
{
	struct nitrox_device *ndev;

	mutex_lock(&devlist_lock);
	list_for_each_entry(ndev, &ndevlist, list) {
		if (nitrox_ready(ndev))
			break;
	}
	mutex_unlock(&devlist_lock);
	if (&ndev->list == &ndevlist)
		return NULL;

	refcount_inc(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return ndev;
}

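/**
 * nitrox_put_device - Release a device reference taken by
 *   nitrox_get_first_device()
 * @ndev: NITROX device
 */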
void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}

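/**
 * nitrox_device_flr - Perform a Function Level Reset of the device
 * @pdev: PCI device information struct
 *
 * Saves the PCI state, issues the FLR if the device supports it and
 * restores the saved state afterwards.
 *
 * Return: 0 on success, or a negative error code if the PCI state
 *         could not be saved.
 */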
static int nitrox_device_flr(struct pci_dev *pdev)
{
	int pos = 0;

	pos = pci_save_state(pdev);
	if (pos) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		return -ENOMEM;
	}

	/* check flr support */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	pci_restore_state(pdev);

	return 0;
}

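/**
 * nitrox_pf_sw_init - Initialize PF software resources
 * @ndev: NITROX device
 *
 * Sets up the common software state and registers the PF interrupts;
 * the software state is torn down again if interrupt registration fails.
 *
 * Return: 0 on success, or a negative error code on failure.
 */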
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

	return err;
}

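/**
 * nitrox_pf_sw_cleanup - Release PF interrupts and software resources
 * @ndev: NITROX device
 */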
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_bist_check - Check NITROX BIST registers status
 * @ndev: NITROX device
 */
static int nitrox_bist_check(struct nitrox_device *ndev)
{
	u64 value = 0;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
	}
	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
	value += nitrox_read_csr(ndev, POM_BIST_REG);
	value += nitrox_read_csr(ndev, BMI_BIST_REG);
	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
	value += nitrox_read_csr(ndev, BMO_BIST_REG);
	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
	if (value)
		return -EIO;
	return 0;
}

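/**
 * nitrox_pf_hw_init - Initialize the NITROX hardware units
 * @ndev: NITROX device
 *
 * Checks the BIST status, reads the core information, configures the
 * engine, IO and Local Buffer Cache units and loads the SE firmware.
 *
 * Return: 0 on success, or a negative error code on failure.
 */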
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on SE cores */
	err = nitrox_load_fw(ndev, SE_FW);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}

/**
 * nitrox_probe - NITROX initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0 if the driver is bound to the device, or a negative
 *         error code on failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		pci_disable_device(pdev);
		return err;
	}

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			pci_disable_device(pdev);
			return err;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err) {
		pci_disable_device(pdev);
		return err;
	}
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on CPUs, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto bar_unmap;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
bar_unmap:
	iounmap(ndev->bar_addr);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	return err;
}

/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

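/**
 * nitrox_shutdown - Release PCI resources on system shutdown
 * @pdev: PCI device information struct
 */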
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove	= nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};

module_pci_driver(nitrox_driver);

MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);