xref: /linux/drivers/crypto/cavium/nitrox/nitrox_main.c (revision a6f37cee6e4f6fa9d61962efbcb06a032efed1ba)
1 #include <linux/aer.h>
2 #include <linux/delay.h>
3 #include <linux/debugfs.h>
4 #include <linux/firmware.h>
5 #include <linux/list.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/pci.h>
9 #include <linux/pci_ids.h>
10 
11 #include "nitrox_dev.h"
12 #include "nitrox_common.h"
13 #include "nitrox_csr.h"
14 #include "nitrox_hal.h"
15 
#define CNN55XX_DEV_ID	0x12	/* PCI device id of the CNN55XX PF */
#define MAX_PF_QUEUES	64	/* upper bound on PF command queues (one per CPU) */
#define UCODE_HLEN 48		/* microcode header length; not referenced in this file — TODO confirm users */
#define SE_GROUP 0		/* all SE cores are placed in execution group 0 */

#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW	FW_DIR "cnn55xx_se.fw"

static const char nitrox_driver_name[] = "CNN55XX";

/* global list of probed NITROX devices; both the list and the
 * num_devices counter are protected by devlist_lock.
 */
static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;
31 
/*
 * nitrox_pci_tbl - PCI Device ID Table (CNN55XX physical function)
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

/* per-queue command depth; writable via module param (default comes
 * from DEFAULT_CMD_QLEN, described as 2048 in the param help text)
 */
static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
45 
#ifdef CONFIG_PCI_IOV
/* real implementation lives in another translation unit when SR-IOV
 * support is built in — TODO confirm which file provides it
 */
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
/* SR-IOV compiled out: report zero VFs enabled */
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif
54 
55 /**
56  * struct ucode - Firmware Header
57  * @id: microcode ID
58  * @version: firmware version
59  * @code_size: code section size
60  * @raz: alignment
61  * @code: code section
62  */
63 struct ucode {
64 	u8 id;
65 	char version[VERSION_LEN - 1];
66 	__be32 code_size;
67 	u8 raz[12];
68 	u64 code[0];
69 };
70 
/**
 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
 * @ndev: NITROX device
 * @ucode: firmware image header followed by the code section
 *
 * Streams the code section into UCD block 0 (8 bytes per CSR write),
 * puts every SE core in group 0 and points each core at block 0.
 */
static void write_to_ucd_unit(struct nitrox_device *ndev,
			      struct ucode *ucode)
{
	/* code_size is stored big-endian in 2-byte units */
	u32 code_size = be32_to_cpu(ucode->code_size) * 2;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 *  -------------
	 *  |    BLK 7  |
	 *  -------------
	 *  |    BLK 6  |
	 *  -------------
	 *  |    ...    |
	 *  -------------
	 *  |    BLK 0  |
	 *  -------------
	 *  Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, 0);

	/* NOTE(review): rounding up may read as many as 7 bytes past the
	 * declared code section; the caller must ensure the firmware
	 * buffer is large enough — verify against nitrox_load_fw
	 */
	code_size = roundup(code_size, 8);
	while (code_size) {
		data = ucode->code[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

	/* put all SE cores in group 0 */
	offset = POM_GRP_EXECMASKX(SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	for (i = 0; i < ndev->hw.se_cores; i++) {
		/*
		 * write block number and firmware length
		 * bit:<2:0> block number
		 * bit:3 is set SE uses 32KB microcode
		 * bit:3 is clear SE uses 64KB microcode
		 */
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, 0x8);
	}
	/* let the cores settle on the new microcode */
	usleep_range(300, 400);
}
126 
127 static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
128 {
129 	const struct firmware *fw;
130 	struct ucode *ucode;
131 	int ret;
132 
133 	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
134 
135 	ret = request_firmware(&fw, fw_name, DEV(ndev));
136 	if (ret < 0) {
137 		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
138 		return ret;
139 	}
140 
141 	ucode = (struct ucode *)fw->data;
142 	/* copy the firmware version */
143 	memcpy(ndev->hw.fw_name, ucode->version, (VERSION_LEN - 2));
144 	ndev->hw.fw_name[VERSION_LEN - 1] = '\0';
145 
146 	write_to_ucd_unit(ndev, ucode);
147 	release_firmware(fw);
148 
149 	return 0;
150 }
151 
152 /**
153  * nitrox_add_to_devlist - add NITROX device to global device list
154  * @ndev: NITROX device
155  */
156 static int nitrox_add_to_devlist(struct nitrox_device *ndev)
157 {
158 	struct nitrox_device *dev;
159 	int ret = 0;
160 
161 	INIT_LIST_HEAD(&ndev->list);
162 	refcount_set(&ndev->refcnt, 1);
163 
164 	mutex_lock(&devlist_lock);
165 	list_for_each_entry(dev, &ndevlist, list) {
166 		if (dev == ndev) {
167 			ret = -EEXIST;
168 			goto unlock;
169 		}
170 	}
171 	ndev->idx = num_devices++;
172 	list_add_tail(&ndev->list, &ndevlist);
173 unlock:
174 	mutex_unlock(&devlist_lock);
175 	return ret;
176 }
177 
178 /**
179  * nitrox_remove_from_devlist - remove NITROX device from
180  *   global device list
181  * @ndev: NITROX device
182  */
183 static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
184 {
185 	mutex_lock(&devlist_lock);
186 	list_del(&ndev->list);
187 	num_devices--;
188 	mutex_unlock(&devlist_lock);
189 }
190 
191 struct nitrox_device *nitrox_get_first_device(void)
192 {
193 	struct nitrox_device *ndev = NULL;
194 
195 	mutex_lock(&devlist_lock);
196 	list_for_each_entry(ndev, &ndevlist, list) {
197 		if (nitrox_ready(ndev))
198 			break;
199 	}
200 	mutex_unlock(&devlist_lock);
201 	if (!ndev)
202 		return NULL;
203 
204 	refcount_inc(&ndev->refcnt);
205 	/* barrier to sync with other cpus */
206 	smp_mb__after_atomic();
207 	return ndev;
208 }
209 
210 void nitrox_put_device(struct nitrox_device *ndev)
211 {
212 	if (!ndev)
213 		return;
214 
215 	refcount_dec(&ndev->refcnt);
216 	/* barrier to sync with other cpus */
217 	smp_mb__after_atomic();
218 }
219 
220 static int nitrox_reset_device(struct pci_dev *pdev)
221 {
222 	int pos = 0;
223 
224 	pos = pci_save_state(pdev);
225 	if (pos) {
226 		dev_err(&pdev->dev, "Failed to save pci state\n");
227 		return -ENOMEM;
228 	}
229 
230 	pos = pci_pcie_cap(pdev);
231 	if (!pos)
232 		return -ENOTTY;
233 
234 	if (!pci_wait_for_pending_transaction(pdev))
235 		dev_err(&pdev->dev, "waiting for pending transaction\n");
236 
237 	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
238 	msleep(100);
239 	pci_restore_state(pdev);
240 
241 	return 0;
242 }
243 
/**
 * nitrox_pf_sw_init - PF software init: common state plus interrupts
 * @ndev: NITROX device
 *
 * Return: 0 on success; on ISR setup failure the common state is torn
 *         down again and the error is returned.
 */
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int ret;

	ret = nitrox_common_sw_init(ndev);
	if (ret)
		return ret;

	ret = nitrox_pf_init_isr(ndev);
	if (ret) {
		/* unwind the common init on ISR failure */
		nitrox_common_sw_cleanup(ndev);
		return ret;
	}

	return 0;
}
258 
/* PF software teardown: ISRs must be quiesced before the common state
 * they reference is freed, so the order here matters.
 */
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_pf_cleanup_isr(ndev);
	nitrox_common_sw_cleanup(ndev);
}
264 
265 /**
266  * nitrox_bist_check - Check NITORX BIST registers status
267  * @ndev: NITROX device
268  */
269 static int nitrox_bist_check(struct nitrox_device *ndev)
270 {
271 	u64 value = 0;
272 	int i;
273 
274 	for (i = 0; i < NR_CLUSTERS; i++) {
275 		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
276 		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
277 	}
278 	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
279 	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
280 	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
281 	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
282 	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
283 	value += nitrox_read_csr(ndev, POM_BIST_REG);
284 	value += nitrox_read_csr(ndev, BMI_BIST_REG);
285 	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
286 	value += nitrox_read_csr(ndev, BMO_BIST_REG);
287 	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
288 	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
289 	if (value)
290 		return -EIO;
291 	return 0;
292 }
293 
294 static void nitrox_get_hwinfo(struct nitrox_device *ndev)
295 {
296 	union emu_fuse_map emu_fuse;
297 	u64 offset;
298 	int i;
299 
300 	for (i = 0; i < NR_CLUSTERS; i++) {
301 		u8 dead_cores;
302 
303 		offset = EMU_FUSE_MAPX(i);
304 		emu_fuse.value = nitrox_read_csr(ndev, offset);
305 		if (emu_fuse.s.valid) {
306 			dead_cores = hweight32(emu_fuse.s.ae_fuse);
307 			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
308 			dead_cores = hweight16(emu_fuse.s.se_fuse);
309 			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
310 		}
311 	}
312 }
313 
/* PF hardware bring-up. The sequence is order-sensitive: BIST first,
 * then core discovery (write_to_ucd_unit depends on hw.se_cores),
 * unit configuration, firmware load, and finally the EMU unit which
 * releases the cores to run the loaded microcode.
 * Returns 0 on success, negative errno on failure.
 */
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on SE cores */
	err = nitrox_load_fw(ndev, SE_FW);
	if (err)
		return err;

	/* enable the EMU unit last, after the microcode is in place */
	nitrox_config_emu_unit(ndev);

	return 0;
}
345 
346 #if IS_ENABLED(CONFIG_DEBUG_FS)
347 static int registers_show(struct seq_file *s, void *v)
348 {
349 	struct nitrox_device *ndev = s->private;
350 	u64 offset;
351 
352 	/* NPS DMA stats */
353 	offset = NPS_STATS_PKT_DMA_RD_CNT;
354 	seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT  0x%016llx\n",
355 		   nitrox_read_csr(ndev, offset));
356 	offset = NPS_STATS_PKT_DMA_WR_CNT;
357 	seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT  0x%016llx\n",
358 		   nitrox_read_csr(ndev, offset));
359 
360 	/* BMI/BMO stats */
361 	offset = BMI_NPS_PKT_CNT;
362 	seq_printf(s, "BMI_NPS_PKT_CNT  0x%016llx\n",
363 		   nitrox_read_csr(ndev, offset));
364 	offset = BMO_NPS_SLC_PKT_CNT;
365 	seq_printf(s, "BMO_NPS_PKT_CNT  0x%016llx\n",
366 		   nitrox_read_csr(ndev, offset));
367 
368 	return 0;
369 }
370 
/* open hook for the "counters" debugfs file; i_private is the ndev */
static int registers_open(struct inode *inode, struct file *file)
{
	return single_open(file, registers_show, inode->i_private);
}

static const struct file_operations register_fops = {
	.owner = THIS_MODULE,
	.open = registers_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
383 
/* debugfs "firmware" file: reports the version string captured from
 * the firmware header at load time (hw.fw_name)
 */
static int firmware_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
	return 0;
}

/* open hook for the "firmware" debugfs file; i_private is the ndev */
static int firmware_open(struct inode *inode, struct file *file)
{
	return single_open(file, firmware_show, inode->i_private);
}

static const struct file_operations firmware_fops = {
	.owner = THIS_MODULE,
	.open = firmware_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
404 
/* debugfs "nitrox" file: one-shot summary of the device's identity
 * and queue configuration
 */
static int nitrox_show(struct seq_file *s, void *v)
{
	struct nitrox_device *ndev = s->private;

	seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
	seq_printf(s, "  Revision ID: 0x%0x\n", ndev->hw.revision_id);
	seq_printf(s, "  Cores [AE: %u  SE: %u]\n",
		   ndev->hw.ae_cores, ndev->hw.se_cores);
	seq_printf(s, "  Number of Queues: %u\n", ndev->nr_queues);
	seq_printf(s, "  Queue length: %u\n", ndev->qlen);
	seq_printf(s, "  Node: %u\n", ndev->node);

	return 0;
}

/* open hook for the "nitrox" debugfs file; i_private is the ndev */
static int nitrox_open(struct inode *inode, struct file *file)
{
	return single_open(file, nitrox_show, inode->i_private);
}

static const struct file_operations nitrox_fops = {
	.owner = THIS_MODULE,
	.open = nitrox_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
432 
/* remove the whole debugfs tree; safe to call with a NULL dir, and
 * idempotent because the pointer is cleared afterwards
 */
static void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
	ndev->debugfs_dir = NULL;
}
438 
439 static int nitrox_debugfs_init(struct nitrox_device *ndev)
440 {
441 	struct dentry *dir, *f;
442 
443 	dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
444 	if (!dir)
445 		return -ENOMEM;
446 
447 	ndev->debugfs_dir = dir;
448 	f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
449 	if (!f)
450 		goto err;
451 	f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
452 	if (!f)
453 		goto err;
454 	f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
455 	if (!f)
456 		goto err;
457 
458 	return 0;
459 
460 err:
461 	nitrox_debugfs_exit(ndev);
462 	return -ENODEV;
463 }
464 #else
/* debugfs compiled out: both hooks become no-ops */
static int nitrox_debugfs_init(struct nitrox_device *ndev)
{
	return 0;
}

static void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
}
473 #endif
474 
475 /**
476  * nitrox_probe - NITROX Initialization function.
477  * @pdev: PCI device information struct
478  * @id: entry in nitrox_pci_tbl
479  *
480  * Return: 0, if the driver is bound to the device, or
481  *         a negative error if there is failure.
482  */
483 static int nitrox_probe(struct pci_dev *pdev,
484 			const struct pci_device_id *id)
485 {
486 	struct nitrox_device *ndev;
487 	int err;
488 
489 	dev_info_once(&pdev->dev, "%s driver version %s\n",
490 		      nitrox_driver_name, DRIVER_VERSION);
491 
492 	err = pci_enable_device_mem(pdev);
493 	if (err)
494 		return err;
495 
496 	/* do FLR */
497 	err = nitrox_reset_device(pdev);
498 	if (err) {
499 		dev_err(&pdev->dev, "FLR failed\n");
500 		pci_disable_device(pdev);
501 		return err;
502 	}
503 
504 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
505 		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
506 	} else {
507 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
508 		if (err) {
509 			dev_err(&pdev->dev, "DMA configuration failed\n");
510 			pci_disable_device(pdev);
511 			return err;
512 		}
513 	}
514 
515 	err = pci_request_mem_regions(pdev, nitrox_driver_name);
516 	if (err) {
517 		pci_disable_device(pdev);
518 		return err;
519 	}
520 	pci_set_master(pdev);
521 
522 	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
523 	if (!ndev) {
524 		err = -ENOMEM;
525 		goto ndev_fail;
526 	}
527 
528 	pci_set_drvdata(pdev, ndev);
529 	ndev->pdev = pdev;
530 
531 	/* add to device list */
532 	nitrox_add_to_devlist(ndev);
533 
534 	ndev->hw.vendor_id = pdev->vendor;
535 	ndev->hw.device_id = pdev->device;
536 	ndev->hw.revision_id = pdev->revision;
537 	/* command timeout in jiffies */
538 	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
539 	ndev->node = dev_to_node(&pdev->dev);
540 	if (ndev->node == NUMA_NO_NODE)
541 		ndev->node = 0;
542 
543 	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
544 				 pci_resource_len(pdev, 0));
545 	if (!ndev->bar_addr) {
546 		err = -EIO;
547 		goto ioremap_err;
548 	}
549 	/* allocate command queus based on cpus, max queues are 64 */
550 	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
551 	ndev->qlen = qlen;
552 
553 	err = nitrox_pf_sw_init(ndev);
554 	if (err)
555 		goto ioremap_err;
556 
557 	err = nitrox_pf_hw_init(ndev);
558 	if (err)
559 		goto pf_hw_fail;
560 
561 	err = nitrox_debugfs_init(ndev);
562 	if (err)
563 		goto pf_hw_fail;
564 
565 	atomic_set(&ndev->state, __NDEV_READY);
566 	/* barrier to sync with other cpus */
567 	smp_mb__after_atomic();
568 
569 	err = nitrox_crypto_register();
570 	if (err)
571 		goto crypto_fail;
572 
573 	return 0;
574 
575 crypto_fail:
576 	nitrox_debugfs_exit(ndev);
577 	atomic_set(&ndev->state, __NDEV_NOT_READY);
578 	/* barrier to sync with other cpus */
579 	smp_mb__after_atomic();
580 pf_hw_fail:
581 	nitrox_pf_sw_cleanup(ndev);
582 ioremap_err:
583 	nitrox_remove_from_devlist(ndev);
584 	kfree(ndev);
585 	pci_set_drvdata(pdev, NULL);
586 ndev_fail:
587 	pci_release_mem_regions(pdev);
588 	pci_disable_device(pdev);
589 	return err;
590 }
591 
/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 *
 * Drops the list's reference on the device; if other users still hold
 * references the teardown is refused (logged) and the resources stay
 * alive. Otherwise the device is marked not-ready, removed from the
 * global list, SR-IOV is disabled, and software/PCI state is torn
 * down in reverse order of probe.
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	/* the initial reference was taken in nitrox_add_to_devlist() */
	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	/* stop new users from finding the device before teardown */
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
633 
/* system shutdown: release PCI resources only — no software teardown
 * is needed because the machine is going down
 */
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
640 
/* PCI driver glue; sriov_configure is wired only when SR-IOV is on */
static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove	= nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};
651 
module_pci_driver(nitrox_driver);

/* module metadata; MODULE_FIRMWARE lets userspace tools pre-stage the
 * SE microcode image
 */
MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);
659