// SPDX-License-Identifier: GPL-2.0
#include <linux/workqueue.h>

#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_dev.h"
#include "nitrox_mbx.h"

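/*
 * Each VF owns a contiguous block of rings, one per queue, so the
 * owning VF number is the ring index divided by the per-VF queue count.
 */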
#define RING_TO_VFNO(_x, _y)	((_x) / (_y))

/**
 * enum mbx_msg_type - Mailbox message types
 */
enum mbx_msg_type {
	MBX_MSG_TYPE_NOP,
	MBX_MSG_TYPE_REQ,
	MBX_MSG_TYPE_ACK,
	MBX_MSG_TYPE_NACK,
};

/**
 * enum mbx_msg_opcode - Mailbox message opcodes
 */
enum mbx_msg_opcode {
	MSG_OP_VF_MODE = 1,
	MSG_OP_VF_UP,
	MSG_OP_VF_DOWN,
	MSG_OP_CHIPID_VFID,
	MSG_OP_MCODE_INFO = 11,
};

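/**
 * struct pf2vf_work - deferred PF to VF mailbox response context
 * @vfdev: VF device whose request is being answered
 * @ndev: NITROX PF device
 * @pf2vf_resp: work item that runs pf2vf_resp_handler()
 */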
struct pf2vf_work {
	struct nitrox_vfdev *vfdev;
	struct nitrox_device *ndev;
	struct work_struct pf2vf_resp;
};

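/*
 * Per-ring mailbox data registers: the VF posts its request in
 * NPS_PKT_MBOX_VF_PF_PFDATAX(ring) and the PF writes its reply to
 * NPS_PKT_MBOX_PF_VF_PFDATAX(ring).
 */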
static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
	return nitrox_read_csr(ndev, reg_addr);
}

static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
				    int ring)
{
	u64 reg_addr;

	reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
	nitrox_write_csr(ndev, reg_addr, value);
}

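/**
 * pf2vf_send_response - build and send the reply for a VF request
 * @ndev: NITROX PF device
 * @vfdev: VF device whose request is pending in vfdev->msg
 *
 * Fills in the opcode-specific response data, marks the message as an
 * ACK and writes it to the VF's mailbox. Requests with an unknown
 * opcode are dropped without a reply.
 */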
static void pf2vf_send_response(struct nitrox_device *ndev,
				struct nitrox_vfdev *vfdev)
{
	union mbox_msg msg;

	msg.value = vfdev->msg.value;

	switch (vfdev->msg.opcode) {
	case MSG_OP_VF_MODE:
		msg.data = ndev->mode;
		break;
	case MSG_OP_VF_UP:
		vfdev->nr_queues = vfdev->msg.data;
		atomic_set(&vfdev->state, __NDEV_READY);
		break;
	case MSG_OP_CHIPID_VFID:
		msg.id.chipid = ndev->idx;
		msg.id.vfid = vfdev->vfno;
		break;
	case MSG_OP_VF_DOWN:
		vfdev->nr_queues = 0;
		atomic_set(&vfdev->state, __NDEV_NOT_READY);
		break;
	case MSG_OP_MCODE_INFO:
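		/*
		 * Report two microcode groups: SE SSL microcode in the
		 * first slot and AE microcode in the second (the shift
		 * suggests each info slot is 5 bits wide).
		 */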
		msg.data = 0;
		msg.mcode_info.count = 2;
		msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5);
		msg.mcode_info.next_se_grp = 1;
		msg.mcode_info.next_ae_grp = 1;
		break;
	default:
		msg.type = MBX_MSG_TYPE_NOP;
		break;
	}

	if (msg.type == MBX_MSG_TYPE_NOP)
		return;

	/* send ACK to VF */
	msg.type = MBX_MSG_TYPE_ACK;
	pf2vf_write_mbox(ndev, msg.value, vfdev->ring);

	vfdev->msg.value = 0;
	atomic64_inc(&vfdev->mbx_resp);
}

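/**
 * pf2vf_resp_handler - process one VF mailbox message in process context
 * @work: embedded work item of the struct pf2vf_work context
 *
 * Replies to VF requests via pf2vf_send_response(); ACK/NACK messages
 * from the VF need no reply. Frees the work context when done.
 */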
static void pf2vf_resp_handler(struct work_struct *work)
{
	struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
						     pf2vf_resp);
	struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
	struct nitrox_device *ndev = pf2vf_resp->ndev;

	switch (vfdev->msg.type) {
	case MBX_MSG_TYPE_REQ:
		/* process the request from VF */
		pf2vf_send_response(ndev, vfdev);
		break;
	case MBX_MSG_TYPE_ACK:
	case MBX_MSG_TYPE_NACK:
		break;
	}

	kfree(pf2vf_resp);
}

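/**
 * nitrox_pf2vf_mbox_handler - handle pending VF mailbox interrupts
 * @ndev: NITROX PF device
 *
 * Scans both mailbox interrupt summary registers (rings 0-63 and
 * 64-127), latches the message for each pending ring and queues a work
 * item to send the response. Runs from the interrupt path, hence the
 * GFP_ATOMIC allocations.
 */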
void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
{
	struct nitrox_vfdev *vfdev;
	struct pf2vf_work *pfwork;
	u64 value, reg_addr;
	u32 i;
	int vfno;

	/* loop over rings 0..63 */
	reg_addr = NPS_PKT_MBOX_INT_LO;
	value = nitrox_read_csr(ndev, reg_addr);
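	/*
	 * The summary register is a 64-bit mask; scanning it through an
	 * unsigned long pointer assumes BITS_PER_LONG == 64.
	 */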
	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
		/* get the vfno from the ring */
		vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = i;
		/* read the VF's mailbox request */
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
		/* clear the corresponding vf bit */
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}

	/* loop over rings 64..127 */
	reg_addr = NPS_PKT_MBOX_INT_HI;
	value = nitrox_read_csr(ndev, reg_addr);
	for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
		/* get the vfno from the ring */
		vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
		vfdev = ndev->iov.vfdev + vfno;
		vfdev->ring = (i + 64);
		/* read the VF's mailbox request */
		vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);

		pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
		if (!pfwork)
			continue;

		pfwork->vfdev = vfdev;
		pfwork->ndev = ndev;
		INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
		queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
		/* clear the corresponding vf bit */
		nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
	}
}

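/**
 * nitrox_mbox_init - set up PF to VF mailbox support
 * @ndev: NITROX PF device
 *
 * Allocates the per-VF state array and the response workqueue, then
 * enables the PF to VF mailbox interrupts.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */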
int nitrox_mbox_init(struct nitrox_device *ndev)
{
	struct nitrox_vfdev *vfdev;
	int i;

	ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
				  sizeof(struct nitrox_vfdev), GFP_KERNEL);
	if (!ndev->iov.vfdev)
		return -ENOMEM;

	for (i = 0; i < ndev->iov.num_vfs; i++) {
		vfdev = ndev->iov.vfdev + i;
		vfdev->vfno = i;
	}

	/* allocate pf2vf response workqueue */
	ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
	if (!ndev->iov.pf2vf_wq) {
		kfree(ndev->iov.vfdev);
		/* avoid a double free if cleanup runs after a failed init */
		ndev->iov.vfdev = NULL;
		return -ENOMEM;
	}
	/* enable pf2vf mailbox interrupts */
	enable_pf2vf_mbox_interrupts(ndev);

	return 0;
}

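/**
 * nitrox_mbox_cleanup - tear down PF to VF mailbox support
 * @ndev: NITROX PF device
 *
 * Disables the mailbox interrupts, drains and destroys the response
 * workqueue and releases the per-VF state.
 */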
void nitrox_mbox_cleanup(struct nitrox_device *ndev)
{
	/* disable pf2vf mailbox interrupts */
	disable_pf2vf_mbox_interrupts(ndev);
	/* destroy workqueue */
	if (ndev->iov.pf2vf_wq)
		destroy_workqueue(ndev->iov.pf2vf_wq);

	kfree(ndev->iov.vfdev);
	ndev->iov.pf2vf_wq = NULL;
	ndev->iov.vfdev = NULL;
}