xref: /linux/drivers/crypto/cavium/cpt/cptvf_main.c (revision 03ab8e6297acd1bc0eedaa050e2a1635c576fd11)
125763b3cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2c694b233SGeorge Cherian /*
3c694b233SGeorge Cherian  * Copyright (C) 2016 Cavium, Inc.
4c694b233SGeorge Cherian  */
5c694b233SGeorge Cherian 
6c694b233SGeorge Cherian #include <linux/interrupt.h>
7c694b233SGeorge Cherian #include <linux/module.h>
8c694b233SGeorge Cherian 
9c694b233SGeorge Cherian #include "cptvf.h"
10c694b233SGeorge Cherian 
11c694b233SGeorge Cherian #define DRV_NAME	"thunder-cptvf"
12c694b233SGeorge Cherian #define DRV_VERSION	"1.0"
13c694b233SGeorge Cherian 
/* Per-queue bottom-half work item: a tasklet plus the VF/queue it serves. */
struct cptvf_wqe {
	struct tasklet_struct twork;	/* scheduled from the DONE interrupt */
	void *cptvf;			/* owning struct cpt_vf */
	u32 qno;			/* VQ index this entry belongs to */
};
19c694b233SGeorge Cherian 
/* All per-VQ work entries for one VF, allocated as a single object. */
struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};
23c694b233SGeorge Cherian 
/*
 * Tasklet body for VQ completion post-processing.
 *
 * @data: the VF's struct cptvf_wqe_info, cast to the tasklet data cookie
 *        at tasklet_init() time.
 *
 * NOTE(review): only vq_wqe[0] is dereferenced here, so every tasklet
 * funnels into queue 0.  That matches the DONE interrupt handler below,
 * which only ever schedules wqe 0, but it means entries with qno > 0 are
 * effectively unused — confirm before relying on multi-queue completion.
 */
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}
31c694b233SGeorge Cherian 
init_worker_threads(struct cpt_vf * cptvf)32c694b233SGeorge Cherian static int init_worker_threads(struct cpt_vf *cptvf)
33c694b233SGeorge Cherian {
34c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
35c694b233SGeorge Cherian 	struct cptvf_wqe_info *cwqe_info;
36c694b233SGeorge Cherian 	int i;
37c694b233SGeorge Cherian 
38c694b233SGeorge Cherian 	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
39c694b233SGeorge Cherian 	if (!cwqe_info)
40c694b233SGeorge Cherian 		return -ENOMEM;
41c694b233SGeorge Cherian 
42c694b233SGeorge Cherian 	if (cptvf->nr_queues) {
43c694b233SGeorge Cherian 		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
44c694b233SGeorge Cherian 			 cptvf->nr_queues);
45c694b233SGeorge Cherian 	}
46c694b233SGeorge Cherian 
47c694b233SGeorge Cherian 	for (i = 0; i < cptvf->nr_queues; i++) {
48c694b233SGeorge Cherian 		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
49c694b233SGeorge Cherian 			     (u64)cwqe_info);
50c694b233SGeorge Cherian 		cwqe_info->vq_wqe[i].qno = i;
51c694b233SGeorge Cherian 		cwqe_info->vq_wqe[i].cptvf = cptvf;
52c694b233SGeorge Cherian 	}
53c694b233SGeorge Cherian 
54c694b233SGeorge Cherian 	cptvf->wqe_info = cwqe_info;
55c694b233SGeorge Cherian 
56c694b233SGeorge Cherian 	return 0;
57c694b233SGeorge Cherian }
58c694b233SGeorge Cherian 
cleanup_worker_threads(struct cpt_vf * cptvf)59c694b233SGeorge Cherian static void cleanup_worker_threads(struct cpt_vf *cptvf)
60c694b233SGeorge Cherian {
61c694b233SGeorge Cherian 	struct cptvf_wqe_info *cwqe_info;
62c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
63c694b233SGeorge Cherian 	int i;
64c694b233SGeorge Cherian 
65c694b233SGeorge Cherian 	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
66c694b233SGeorge Cherian 	if (!cwqe_info)
67c694b233SGeorge Cherian 		return;
68c694b233SGeorge Cherian 
69c694b233SGeorge Cherian 	if (cptvf->nr_queues) {
70c694b233SGeorge Cherian 		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
71c694b233SGeorge Cherian 			 cptvf->nr_queues);
72c694b233SGeorge Cherian 	}
73c694b233SGeorge Cherian 
74c694b233SGeorge Cherian 	for (i = 0; i < cptvf->nr_queues; i++)
75c694b233SGeorge Cherian 		tasklet_kill(&cwqe_info->vq_wqe[i].twork);
76c694b233SGeorge Cherian 
77453431a5SWaiman Long 	kfree_sensitive(cwqe_info);
78c694b233SGeorge Cherian 	cptvf->wqe_info = NULL;
79c694b233SGeorge Cherian }
80c694b233SGeorge Cherian 
free_pending_queues(struct pending_qinfo * pqinfo)81c694b233SGeorge Cherian static void free_pending_queues(struct pending_qinfo *pqinfo)
82c694b233SGeorge Cherian {
83c694b233SGeorge Cherian 	int i;
84c694b233SGeorge Cherian 	struct pending_queue *queue;
85c694b233SGeorge Cherian 
86c694b233SGeorge Cherian 	for_each_pending_queue(pqinfo, queue, i) {
87c694b233SGeorge Cherian 		if (!queue->head)
88c694b233SGeorge Cherian 			continue;
89c694b233SGeorge Cherian 
90c694b233SGeorge Cherian 		/* free single queue */
91453431a5SWaiman Long 		kfree_sensitive((queue->head));
92c694b233SGeorge Cherian 
93c694b233SGeorge Cherian 		queue->front = 0;
94c694b233SGeorge Cherian 		queue->rear = 0;
95c694b233SGeorge Cherian 
96c694b233SGeorge Cherian 		return;
97c694b233SGeorge Cherian 	}
98c694b233SGeorge Cherian 
99c694b233SGeorge Cherian 	pqinfo->qlen = 0;
100c694b233SGeorge Cherian 	pqinfo->nr_queues = 0;
101c694b233SGeorge Cherian }
102c694b233SGeorge Cherian 
/*
 * alloc_pending_queues() - allocate @nr_queues pending queues of @qlen
 * entries each and initialize their indices, counters and locks.
 *
 * Return: 0 on success; -ENOMEM on allocation failure, after releasing
 * anything already allocated via free_pending_queues().
 */
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	struct pending_queue *queue = NULL;
	int ret;
	u32 i;

	pqinfo->nr_queues = nr_queues;
	pqinfo->qlen = qlen;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->front = 0;
		queue->rear = 0;
		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	/* unwind every queue allocated so far */
	free_pending_queues(pqinfo);

	return ret;
}
135c694b233SGeorge Cherian 
/*
 * init_pending_queues() - wrapper that allocates the VF's pending queues,
 * logging on failure.  A zero @nr_queues is treated as success (nothing
 * to set up).
 */
static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int rc;

	if (!nr_queues)
		return 0;

	rc = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (rc)
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);

	return rc;
}
153c694b233SGeorge Cherian 
cleanup_pending_queues(struct cpt_vf * cptvf)154c694b233SGeorge Cherian static void cleanup_pending_queues(struct cpt_vf *cptvf)
155c694b233SGeorge Cherian {
156c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
157c694b233SGeorge Cherian 
158c694b233SGeorge Cherian 	if (!cptvf->nr_queues)
159c694b233SGeorge Cherian 		return;
160c694b233SGeorge Cherian 
161c694b233SGeorge Cherian 	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
162c694b233SGeorge Cherian 		 cptvf->nr_queues);
163c694b233SGeorge Cherian 	free_pending_queues(&cptvf->pqinfo);
164c694b233SGeorge Cherian }
165c694b233SGeorge Cherian 
/*
 * free_command_queues() - release every DMA chunk hanging off each command
 * queue's chunk list and reset the per-queue bookkeeping.
 *
 * Uses hlist_for_each_entry_safe() because each chunk is unlinked and
 * freed while walking the list.
 */
static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	int i;
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			/* return the coherent buffer before freeing its descriptor */
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			/* descriptor may reference instruction data; scrub it */
			kfree_sensitive(chunk);
		}

		queue->nchunks = 0;
		queue->idx = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}
199c694b233SGeorge Cherian 
/*
 * alloc_command_queues() - build each VQ's command queue as a circular
 * chain of DMA-coherent chunks.
 *
 * Each chunk holds up to cqinfo->qchunksize commands of @cmd_size bytes;
 * the 8 bytes past a chunk's payload (CPT_NEXT_CHUNK_PTR_SIZE) store the
 * DMA address of the next chunk, and the last chunk points back to the
 * first so hardware can wrap.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure (everything
 * allocated so far is released via free_command_queues()).
 */
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	int i;
	size_t q_size;
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;	/* bytes still to allocate for this VQ */
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			/* last chunk may be a partial one */
			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			/* extra room for the next-chunk DMA pointer */
			curr->head = dma_alloc_coherent(&pdev->dev,
							c_size + CPT_NEXT_CHUNK_PTR_SIZE,
							&curr->dma_addr,
							GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			/* link the previous chunk's tail pointer to this chunk */
			if (last)
				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}
275c694b233SGeorge Cherian 
/*
 * init_command_queues() - allocate the VF's AE command queues with the
 * fixed CPT instruction size, logging on failure.
 */
static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int rc;

	/* setup AE command queues */
	rc = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE, qlen);
	if (rc)
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);

	return rc;
}
292c694b233SGeorge Cherian 
cleanup_command_queues(struct cpt_vf * cptvf)293c694b233SGeorge Cherian static void cleanup_command_queues(struct cpt_vf *cptvf)
294c694b233SGeorge Cherian {
295c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
296c694b233SGeorge Cherian 
297c694b233SGeorge Cherian 	if (!cptvf->nr_queues)
298c694b233SGeorge Cherian 		return;
299c694b233SGeorge Cherian 
300c694b233SGeorge Cherian 	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
301c694b233SGeorge Cherian 		 cptvf->nr_queues);
302c694b233SGeorge Cherian 	free_command_queues(cptvf, &cptvf->cqinfo);
303c694b233SGeorge Cherian }
304c694b233SGeorge Cherian 
/*
 * cptvf_sw_cleanup() - release all software state created by
 * cptvf_sw_init(): worker tasklets, pending queues, command queues.
 * Tasklets go first so no bottom half touches a queue being freed.
 */
static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
311c694b233SGeorge Cherian 
/*
 * cptvf_sw_init() - set up all per-VF software state: command queues,
 * pending queues, and the completion tasklets.
 *
 * @qlen:      depth of each queue, in entries.
 * @nr_queues: requested queue count; clamped to CPT_NUM_QS_PER_VF.
 *
 * Return: 0 on success; a negative errno with everything already set up
 * unwound via the goto ladder on failure.
 */
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret = 0;
	u32 max_dev_queues = 0;

	max_dev_queues = CPT_NUM_QS_PER_VF;
	/* possible cpus */
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	/* cleanup_worker_threads() is a no-op here (wqe_info is NULL) but
	 * kept for symmetry with cptvf_sw_cleanup()
	 */
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}
355c694b233SGeorge Cherian 
/*
 * cptvf_free_irq_affinity() - drop the affinity hint for MSI-X vector
 * @vec and free the cpumask allocated by cptvf_set_irq_affinity().
 */
static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}
361c694b233SGeorge Cherian 
/* Read-modify-write VQ(0) control: set or clear the queue-enable bit. */
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl ctl;

	ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), ctl.u);
}
370c694b233SGeorge Cherian 
/*
 * cptvf_write_vq_doorbell() - ring the VQ(0) doorbell for @val newly
 * queued instructions.  Hardware counts in 8-byte words, so the count
 * is scaled by 8 words per instruction.
 */
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell dbell;

	dbell.u = cpt_read_csr64(cptvf->reg_base,
				 CPTX_VQX_DOORBELL(0, 0));
	dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0), dbell.u);
}
381c694b233SGeorge Cherian 
/* Program the VQ(0) in-flight instruction limit. */
static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog inprog;

	inprog.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	inprog.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), inprog.u);
}
390c694b233SGeorge Cherian 
/* Set how many completions must accumulate before a DONE interrupt fires. */
static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait dwait;

	dwait.u = cpt_read_csr64(cptvf->reg_base,
				 CPTX_VQX_DONE_WAIT(0, 0));
	dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0), dwait.u);
}
401c694b233SGeorge Cherian 
/* Set the DONE interrupt coalescing time threshold. */
static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait dwait;

	dwait.u = cpt_read_csr64(cptvf->reg_base,
				 CPTX_VQX_DONE_WAIT(0, 0));
	dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0), dwait.u);
}
412c694b233SGeorge Cherian 
/* Enable the software-error MISC interrupt for this VF (W1S register). */
static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Enable swerr interrupts for the requested vf */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}
424c694b233SGeorge Cherian 
/* Enable the PF->VF mailbox MISC interrupt for this VF (W1S register). */
static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Set mbox(0) interrupts for the requested vf */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}
436c694b233SGeorge Cherian 
cptvf_enable_done_interrupts(struct cpt_vf * cptvf)437c694b233SGeorge Cherian static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
438c694b233SGeorge Cherian {
439c694b233SGeorge Cherian 	union cptx_vqx_done_ena_w1s vqx_done_ena;
440c694b233SGeorge Cherian 
441c694b233SGeorge Cherian 	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
442c694b233SGeorge Cherian 					CPTX_VQX_DONE_ENA_W1S(0, 0));
443c694b233SGeorge Cherian 	/* Set DONE interrupt for the requested vf */
444c694b233SGeorge Cherian 	vqx_done_ena.s.done = 1;
445c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
446c694b233SGeorge Cherian 			vqx_done_ena.u);
447c694b233SGeorge Cherian }
448c694b233SGeorge Cherian 
cptvf_clear_dovf_intr(struct cpt_vf * cptvf)449c694b233SGeorge Cherian static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
450c694b233SGeorge Cherian {
451c694b233SGeorge Cherian 	union cptx_vqx_misc_int vqx_misc_int;
452c694b233SGeorge Cherian 
453c694b233SGeorge Cherian 	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
454c694b233SGeorge Cherian 					CPTX_VQX_MISC_INT(0, 0));
455c694b233SGeorge Cherian 	/* W1C for the VF */
456c694b233SGeorge Cherian 	vqx_misc_int.s.dovf = 1;
457c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
458c694b233SGeorge Cherian 			vqx_misc_int.u);
459c694b233SGeorge Cherian }
460c694b233SGeorge Cherian 
cptvf_clear_irde_intr(struct cpt_vf * cptvf)461c694b233SGeorge Cherian static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
462c694b233SGeorge Cherian {
463c694b233SGeorge Cherian 	union cptx_vqx_misc_int vqx_misc_int;
464c694b233SGeorge Cherian 
465c694b233SGeorge Cherian 	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
466c694b233SGeorge Cherian 					CPTX_VQX_MISC_INT(0, 0));
467c694b233SGeorge Cherian 	/* W1C for the VF */
468c694b233SGeorge Cherian 	vqx_misc_int.s.irde = 1;
469c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
470c694b233SGeorge Cherian 			vqx_misc_int.u);
471c694b233SGeorge Cherian }
472c694b233SGeorge Cherian 
cptvf_clear_nwrp_intr(struct cpt_vf * cptvf)473c694b233SGeorge Cherian static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
474c694b233SGeorge Cherian {
475c694b233SGeorge Cherian 	union cptx_vqx_misc_int vqx_misc_int;
476c694b233SGeorge Cherian 
477c694b233SGeorge Cherian 	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
478c694b233SGeorge Cherian 					CPTX_VQX_MISC_INT(0, 0));
479c694b233SGeorge Cherian 	/* W1C for the VF */
480c694b233SGeorge Cherian 	vqx_misc_int.s.nwrp = 1;
481c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base,
482c694b233SGeorge Cherian 			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
483c694b233SGeorge Cherian }
484c694b233SGeorge Cherian 
cptvf_clear_mbox_intr(struct cpt_vf * cptvf)485c694b233SGeorge Cherian static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
486c694b233SGeorge Cherian {
487c694b233SGeorge Cherian 	union cptx_vqx_misc_int vqx_misc_int;
488c694b233SGeorge Cherian 
489c694b233SGeorge Cherian 	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
490c694b233SGeorge Cherian 					CPTX_VQX_MISC_INT(0, 0));
491c694b233SGeorge Cherian 	/* W1C for the VF */
492c694b233SGeorge Cherian 	vqx_misc_int.s.mbox = 1;
493c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
494c694b233SGeorge Cherian 			vqx_misc_int.u);
495c694b233SGeorge Cherian }
496c694b233SGeorge Cherian 
cptvf_clear_swerr_intr(struct cpt_vf * cptvf)497c694b233SGeorge Cherian static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
498c694b233SGeorge Cherian {
499c694b233SGeorge Cherian 	union cptx_vqx_misc_int vqx_misc_int;
500c694b233SGeorge Cherian 
501c694b233SGeorge Cherian 	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
502c694b233SGeorge Cherian 					CPTX_VQX_MISC_INT(0, 0));
503c694b233SGeorge Cherian 	/* W1C for the VF */
504c694b233SGeorge Cherian 	vqx_misc_int.s.swerr = 1;
505c694b233SGeorge Cherian 	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
506c694b233SGeorge Cherian 			vqx_misc_int.u);
507c694b233SGeorge Cherian }
508c694b233SGeorge Cherian 
/* Return the raw MISC interrupt status word for this VF's VQ(0). */
static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}
513c694b233SGeorge Cherian 
/*
 * cptvf_misc_intr_handler() - MSI-X handler for the MISC vector.
 *
 * Reads the MISC status word and dispatches on exactly one cause per
 * invocation, mailbox first (the common case), then the error causes.
 * Each cause is acknowledged via its W1C helper; error causes are only
 * logged.  Always returns IRQ_HANDLED, even for an unrecognized status.
 */
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/*Check for MISC interrupt types*/
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		/* process the PF message before acknowledging the cause */
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/*Clear doorbell count*/
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
552c694b233SGeorge Cherian 
get_cptvf_vq_wqe(struct cpt_vf * cptvf,int qno)553c694b233SGeorge Cherian static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
554c694b233SGeorge Cherian 						 int qno)
555c694b233SGeorge Cherian {
556c694b233SGeorge Cherian 	struct cptvf_wqe_info *nwqe_info;
557c694b233SGeorge Cherian 
558c694b233SGeorge Cherian 	if (unlikely(qno >= cptvf->nr_queues))
559c694b233SGeorge Cherian 		return NULL;
560c694b233SGeorge Cherian 	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
561c694b233SGeorge Cherian 
562c694b233SGeorge Cherian 	return &nwqe_info->vq_wqe[qno];
563c694b233SGeorge Cherian }
564c694b233SGeorge Cherian 
cptvf_read_vq_done_count(struct cpt_vf * cptvf)565c694b233SGeorge Cherian static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
566c694b233SGeorge Cherian {
567c694b233SGeorge Cherian 	union cptx_vqx_done vqx_done;
568c694b233SGeorge Cherian 
569c694b233SGeorge Cherian 	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
570c694b233SGeorge Cherian 	return vqx_done.s.done;
571c694b233SGeorge Cherian }
572c694b233SGeorge Cherian 
/* Acknowledge @ackcnt completions so hardware can decrement its DONE count. */
static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack ack;

	ack.u = cpt_read_csr64(cptvf->reg_base,
			       CPTX_VQX_DONE_ACK(0, 0));
	ack.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0), ack.u);
}
584c694b233SGeorge Cherian 
/*
 * cptvf_done_intr_handler() - MSI-X handler for the DONE vector.
 *
 * Reads the completion count, acknowledges it to hardware, and defers
 * the actual response processing to the queue-0 tasklet.  Returns
 * IRQ_NONE only if the work-queue entry is missing (nothing to run).
 */
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the number of
		 * scheduled completions for processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		/* all completions are processed through queue 0's tasklet */
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}
610c694b233SGeorge Cherian 
cptvf_set_irq_affinity(struct cpt_vf * cptvf,int vec)61115c0b9edSChristoph Hellwig static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
612c694b233SGeorge Cherian {
613c694b233SGeorge Cherian 	struct pci_dev *pdev = cptvf->pdev;
61415c0b9edSChristoph Hellwig 	int cpu;
615c694b233SGeorge Cherian 
616c694b233SGeorge Cherian 	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
617c694b233SGeorge Cherian 				GFP_KERNEL)) {
618c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
619c694b233SGeorge Cherian 			cptvf->vfid);
620c694b233SGeorge Cherian 		return;
621c694b233SGeorge Cherian 	}
622c694b233SGeorge Cherian 
623c694b233SGeorge Cherian 	cpu = cptvf->vfid % num_online_cpus();
624c694b233SGeorge Cherian 	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
625c694b233SGeorge Cherian 			cptvf->affinity_mask[vec]);
62615c0b9edSChristoph Hellwig 	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
62715c0b9edSChristoph Hellwig 			cptvf->affinity_mask[vec]);
628c694b233SGeorge Cherian }
629c694b233SGeorge Cherian 
/* Program the command-queue start address into the VQX_SADDR CSR. */
static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr saddr = { .u = val };

	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), saddr.u);
}
637c694b233SGeorge Cherian 
/*
 * Bring the VF's virtual queue into a known-good, enabled state.
 * Must run after cptvf_sw_init() has allocated the command queue,
 * since the DMA address of the first queue chunk is programmed into
 * VQX_SADDR.  The register write ordering below (disable, reset,
 * program, enable) is deliberate.
 */
static void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}
660c694b233SGeorge Cherian 
cptvf_probe(struct pci_dev * pdev,const struct pci_device_id * ent)661c694b233SGeorge Cherian static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
662c694b233SGeorge Cherian {
663c694b233SGeorge Cherian 	struct device *dev = &pdev->dev;
664c694b233SGeorge Cherian 	struct cpt_vf *cptvf;
665c694b233SGeorge Cherian 	int    err;
666c694b233SGeorge Cherian 
667c694b233SGeorge Cherian 	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
668c694b233SGeorge Cherian 	if (!cptvf)
669c694b233SGeorge Cherian 		return -ENOMEM;
670c694b233SGeorge Cherian 
671c694b233SGeorge Cherian 	pci_set_drvdata(pdev, cptvf);
672c694b233SGeorge Cherian 	cptvf->pdev = pdev;
673c694b233SGeorge Cherian 	err = pci_enable_device(pdev);
674c694b233SGeorge Cherian 	if (err) {
675c694b233SGeorge Cherian 		dev_err(dev, "Failed to enable PCI device\n");
676c694b233SGeorge Cherian 		pci_set_drvdata(pdev, NULL);
677c694b233SGeorge Cherian 		return err;
678c694b233SGeorge Cherian 	}
679c694b233SGeorge Cherian 
680c694b233SGeorge Cherian 	err = pci_request_regions(pdev, DRV_NAME);
681c694b233SGeorge Cherian 	if (err) {
682c694b233SGeorge Cherian 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
683c694b233SGeorge Cherian 		goto cptvf_err_disable_device;
684c694b233SGeorge Cherian 	}
685c694b233SGeorge Cherian 	/* Mark as VF driver */
686c694b233SGeorge Cherian 	cptvf->flags |= CPT_FLAG_VF_DRIVER;
687aeb4d8c0SChristophe JAILLET 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
688c694b233SGeorge Cherian 	if (err) {
689aeb4d8c0SChristophe JAILLET 		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
690c694b233SGeorge Cherian 		goto cptvf_err_release_regions;
691c694b233SGeorge Cherian 	}
692c694b233SGeorge Cherian 
693c694b233SGeorge Cherian 	/* MAP PF's configuration registers */
694c694b233SGeorge Cherian 	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
695c694b233SGeorge Cherian 	if (!cptvf->reg_base) {
696c694b233SGeorge Cherian 		dev_err(dev, "Cannot map config register space, aborting\n");
697c694b233SGeorge Cherian 		err = -ENOMEM;
698c694b233SGeorge Cherian 		goto cptvf_err_release_regions;
699c694b233SGeorge Cherian 	}
700c694b233SGeorge Cherian 
701c694b233SGeorge Cherian 	cptvf->node = dev_to_node(&pdev->dev);
70215c0b9edSChristoph Hellwig 	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
70315c0b9edSChristoph Hellwig 			CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
70415c0b9edSChristoph Hellwig 	if (err < 0) {
70515c0b9edSChristoph Hellwig 		dev_err(dev, "Request for #%d msix vectors failed\n",
70615c0b9edSChristoph Hellwig 			CPT_VF_MSIX_VECTORS);
707c694b233SGeorge Cherian 		goto cptvf_err_release_regions;
708c694b233SGeorge Cherian 	}
709c694b233SGeorge Cherian 
71015c0b9edSChristoph Hellwig 	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
71115c0b9edSChristoph Hellwig 			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
71215c0b9edSChristoph Hellwig 			  cptvf);
71315c0b9edSChristoph Hellwig 	if (err) {
71415c0b9edSChristoph Hellwig 		dev_err(dev, "Request misc irq failed");
71515c0b9edSChristoph Hellwig 		goto cptvf_free_vectors;
71615c0b9edSChristoph Hellwig 	}
71715c0b9edSChristoph Hellwig 
71815c0b9edSChristoph Hellwig 	/* Enable mailbox interrupt */
71915c0b9edSChristoph Hellwig 	cptvf_enable_mbox_interrupts(cptvf);
72015c0b9edSChristoph Hellwig 	cptvf_enable_swerr_interrupts(cptvf);
721c694b233SGeorge Cherian 
722c694b233SGeorge Cherian 	/* Check ready with PF */
723c694b233SGeorge Cherian 	/* Gets chip ID / device Id from PF if ready */
724c694b233SGeorge Cherian 	err = cptvf_check_pf_ready(cptvf);
725c694b233SGeorge Cherian 	if (err) {
726c694b233SGeorge Cherian 		dev_err(dev, "PF not responding to READY msg");
72715c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
728c694b233SGeorge Cherian 	}
729c694b233SGeorge Cherian 
730c694b233SGeorge Cherian 	/* CPT VF software resources initialization */
731c694b233SGeorge Cherian 	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
732c694b233SGeorge Cherian 	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
733c694b233SGeorge Cherian 	if (err) {
734c694b233SGeorge Cherian 		dev_err(dev, "cptvf_sw_init() failed");
73515c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
736c694b233SGeorge Cherian 	}
737c694b233SGeorge Cherian 	/* Convey VQ LEN to PF */
738c694b233SGeorge Cherian 	err = cptvf_send_vq_size_msg(cptvf);
739c694b233SGeorge Cherian 	if (err) {
740c694b233SGeorge Cherian 		dev_err(dev, "PF not responding to QLEN msg");
74115c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
742c694b233SGeorge Cherian 	}
743c694b233SGeorge Cherian 
744c694b233SGeorge Cherian 	/* CPT VF device initialization */
745c694b233SGeorge Cherian 	cptvf_device_init(cptvf);
746c694b233SGeorge Cherian 	/* Send msg to PF to assign currnet Q to required group */
747c694b233SGeorge Cherian 	cptvf->vfgrp = 1;
748c694b233SGeorge Cherian 	err = cptvf_send_vf_to_grp_msg(cptvf);
749c694b233SGeorge Cherian 	if (err) {
750c694b233SGeorge Cherian 		dev_err(dev, "PF not responding to VF_GRP msg");
75115c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
752c694b233SGeorge Cherian 	}
753c694b233SGeorge Cherian 
754c694b233SGeorge Cherian 	cptvf->priority = 1;
755c694b233SGeorge Cherian 	err = cptvf_send_vf_priority_msg(cptvf);
756c694b233SGeorge Cherian 	if (err) {
757c694b233SGeorge Cherian 		dev_err(dev, "PF not responding to VF_PRIO msg");
75815c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
759c694b233SGeorge Cherian 	}
76015c0b9edSChristoph Hellwig 
76115c0b9edSChristoph Hellwig 	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
76215c0b9edSChristoph Hellwig 			  cptvf_done_intr_handler, 0, "CPT VF done intr",
76315c0b9edSChristoph Hellwig 			  cptvf);
76415c0b9edSChristoph Hellwig 	if (err) {
76515c0b9edSChristoph Hellwig 		dev_err(dev, "Request done irq failed\n");
76615c0b9edSChristoph Hellwig 		goto cptvf_free_misc_irq;
76715c0b9edSChristoph Hellwig 	}
76815c0b9edSChristoph Hellwig 
76915c0b9edSChristoph Hellwig 	/* Enable mailbox interrupt */
77015c0b9edSChristoph Hellwig 	cptvf_enable_done_interrupts(cptvf);
771c694b233SGeorge Cherian 
772c694b233SGeorge Cherian 	/* Set irq affinity masks */
77315c0b9edSChristoph Hellwig 	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
77415c0b9edSChristoph Hellwig 	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
77515c0b9edSChristoph Hellwig 
776c694b233SGeorge Cherian 	err = cptvf_send_vf_up(cptvf);
777c694b233SGeorge Cherian 	if (err) {
778c694b233SGeorge Cherian 		dev_err(dev, "PF not responding to UP msg");
77915c0b9edSChristoph Hellwig 		goto cptvf_free_irq_affinity;
780c694b233SGeorge Cherian 	}
781c694b233SGeorge Cherian 	err = cvm_crypto_init(cptvf);
782c694b233SGeorge Cherian 	if (err) {
783c694b233SGeorge Cherian 		dev_err(dev, "Algorithm register failed\n");
78415c0b9edSChristoph Hellwig 		goto cptvf_free_irq_affinity;
785c694b233SGeorge Cherian 	}
786c694b233SGeorge Cherian 	return 0;
787c694b233SGeorge Cherian 
78815c0b9edSChristoph Hellwig cptvf_free_irq_affinity:
78915c0b9edSChristoph Hellwig 	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
79015c0b9edSChristoph Hellwig 	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
79115c0b9edSChristoph Hellwig cptvf_free_misc_irq:
79215c0b9edSChristoph Hellwig 	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
79315c0b9edSChristoph Hellwig cptvf_free_vectors:
79415c0b9edSChristoph Hellwig 	pci_free_irq_vectors(cptvf->pdev);
795c694b233SGeorge Cherian cptvf_err_release_regions:
796c694b233SGeorge Cherian 	pci_release_regions(pdev);
797c694b233SGeorge Cherian cptvf_err_disable_device:
798c694b233SGeorge Cherian 	pci_disable_device(pdev);
799c694b233SGeorge Cherian 	pci_set_drvdata(pdev, NULL);
800c694b233SGeorge Cherian 
801c694b233SGeorge Cherian 	return err;
802c694b233SGeorge Cherian }
803c694b233SGeorge Cherian 
cptvf_remove(struct pci_dev * pdev)804c694b233SGeorge Cherian static void cptvf_remove(struct pci_dev *pdev)
805c694b233SGeorge Cherian {
806c694b233SGeorge Cherian 	struct cpt_vf *cptvf = pci_get_drvdata(pdev);
807c694b233SGeorge Cherian 
8089bd82904SGeorge Cherian 	if (!cptvf) {
809c694b233SGeorge Cherian 		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
8109bd82904SGeorge Cherian 		return;
8119bd82904SGeorge Cherian 	}
812c694b233SGeorge Cherian 
813c694b233SGeorge Cherian 	/* Convey DOWN to PF */
814c694b233SGeorge Cherian 	if (cptvf_send_vf_down(cptvf)) {
815c694b233SGeorge Cherian 		dev_err(&pdev->dev, "PF not responding to DOWN msg");
816c694b233SGeorge Cherian 	} else {
81715c0b9edSChristoph Hellwig 		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
81815c0b9edSChristoph Hellwig 		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
81915c0b9edSChristoph Hellwig 		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
82015c0b9edSChristoph Hellwig 		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
82115c0b9edSChristoph Hellwig 		pci_free_irq_vectors(cptvf->pdev);
822c694b233SGeorge Cherian 		cptvf_sw_cleanup(cptvf);
823c694b233SGeorge Cherian 		pci_set_drvdata(pdev, NULL);
824c694b233SGeorge Cherian 		pci_release_regions(pdev);
825c694b233SGeorge Cherian 		pci_disable_device(pdev);
826c694b233SGeorge Cherian 		cvm_crypto_exit();
827c694b233SGeorge Cherian 	}
828c694b233SGeorge Cherian }
829c694b233SGeorge Cherian 
/* Shutdown mirrors remove: quiesce the VF and release its resources. */
static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}
834c694b233SGeorge Cherian 
/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

/* PCI driver glue: binds the VF device ids to the probe/remove paths */
static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);
856