1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2021, HiSilicon Ltd.
4 */
5
6 #include <linux/device.h>
7 #include <linux/eventfd.h>
8 #include <linux/file.h>
9 #include <linux/hisi_acc_qm.h>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/vfio.h>
14 #include <linux/vfio_pci_core.h>
15 #include <linux/anon_inodes.h>
16
17 #include "hisi_acc_vfio_pci.h"
18
19 /* Return 0 when the VM's acc device is ready, or -ETIMEDOUT on hardware timeout */
20 static int qm_wait_dev_not_ready(struct hisi_qm *qm)
21 {
22 u32 val;
23
24 return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
25 val, !(val & 0x1), MB_POLL_PERIOD_US,
26 MB_POLL_TIMEOUT_US);
27 }
28
29 /*
30 * Each state register is checked up to 100 times,
31 * with a delay of 100 microseconds after each check
32 */
33 static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
34 {
35 int check_times = 0;
36 u32 state;
37
38 state = readl(qm->io_base + regs);
39 while (state && check_times < ERROR_CHECK_TIMEOUT) {
40 udelay(CHECK_DELAY_TIME);
41 state = readl(qm->io_base + regs);
42 check_times++;
43 }
44
45 return state;
46 }
47
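/*
 * Read 'nums' consecutive 32-bit QM registers starting at 'reg_addr' into
 * 'data'. Consecutive registers are QM_REG_ADDR_OFFSET apart.
 */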
48 static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
49 u32 *data, u8 nums)
50 {
51 int i;
52
53 if (nums < 1 || nums > QM_REGS_MAX_LEN)
54 return -EINVAL;
55
56 for (i = 0; i < nums; i++) {
57 data[i] = readl(qm->io_base + reg_addr);
58 reg_addr += QM_REG_ADDR_OFFSET;
59 }
60
61 return 0;
62 }
63
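/*
 * Write 'nums' consecutive 32-bit values from 'data' to the QM registers
 * starting at 'reg', QM_REG_ADDR_OFFSET apart.
 */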
64 static int qm_write_regs(struct hisi_qm *qm, u32 reg,
65 u32 *data, u8 nums)
66 {
67 int i;
68
69 if (nums < 1 || nums > QM_REGS_MAX_LEN)
70 return -EINVAL;
71
72 for (i = 0; i < nums; i++)
73 writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);
74
75 return 0;
76 }
77
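/*
 * Query the SQC VFT through the mailbox. On success store the queue base
 * in *base and return the number of queue pairs; return a negative errno
 * if the mailbox command fails.
 */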
78 static int qm_get_vft(struct hisi_qm *qm, u32 *base)
79 {
80 u64 sqc_vft;
81 u32 qp_num;
82 int ret;
83
84 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
85 if (ret)
86 return ret;
87
88 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
89 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
90 QM_XQC_ADDR_OFFSET);
91 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
92 qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
93 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
94
95 return qp_num;
96 }
97
98 static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
99 {
100 int ret;
101
102 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
103 if (ret)
104 return ret;
105
106 *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
107 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
108 QM_XQC_ADDR_OFFSET);
109
110 return 0;
111 }
112
113 static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
114 {
115 int ret;
116
117 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
118 if (ret)
119 return ret;
120
121 *addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
122 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
123 QM_XQC_ADDR_OFFSET);
124
125 return 0;
126 }
127
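/* Save the VF QM register state required for migration into vf_data. */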
128 static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
129 {
130 struct device *dev = &qm->pdev->dev;
131 int ret;
132
133 ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
134 if (ret) {
135 dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
136 return ret;
137 }
138
139 ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
140 if (ret) {
141 dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
142 return ret;
143 }
144
145 ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
146 &vf_data->ifc_int_source, 1);
147 if (ret) {
148 dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
149 return ret;
150 }
151
152 ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
153 if (ret) {
154 dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
155 return ret;
156 }
157
158 ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
159 if (ret) {
160 dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
161 return ret;
162 }
163
164 ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
165 if (ret) {
166 dev_err(dev, "failed to read QM_PAGE_SIZE\n");
167 return ret;
168 }
169
170 /* QM_EQC_DW has 7 regs */
171 ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
172 if (ret) {
173 dev_err(dev, "failed to read QM_EQC_DW\n");
174 return ret;
175 }
176
177 /* QM_AEQC_DW has 7 regs */
178 ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
179 if (ret) {
180 dev_err(dev, "failed to read QM_AEQC_DW\n");
181 return ret;
182 }
183
184 return 0;
185 }
186
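/* Restore the VF QM register state from vf_data. */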
187 static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
188 {
189 struct device *dev = &qm->pdev->dev;
190 int ret;
191
192 /* Check VF state */
193 ret = hisi_qm_wait_mb_ready(qm);
194 if (unlikely(ret)) {
195 dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
196 return ret;
197 }
198
199 ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
200 if (ret) {
201 dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
202 return ret;
203 }
204
205 ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
206 if (ret) {
207 dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
208 return ret;
209 }
210
211 ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
212 &vf_data->ifc_int_source, 1);
213 if (ret) {
214 dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
215 return ret;
216 }
217
218 ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
219 if (ret) {
220 dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
221 return ret;
222 }
223
224 ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
225 if (ret) {
226 dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
227 return ret;
228 }
229
230 ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
231 if (ret) {
232 dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
233 return ret;
234 }
235
236 ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
237 if (ret) {
238 dev_err(dev, "failed to write QM_PAGE_SIZE\n");
239 return ret;
240 }
241
242 /* QM_EQC_DW has 7 regs */
243 ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
244 if (ret) {
245 dev_err(dev, "failed to write QM_EQC_DW\n");
246 return ret;
247 }
248
249 /* QM_AEQC_DW has 7 regs */
250 ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
251 if (ret) {
252 dev_err(dev, "failed to write QM_AEQC_DW\n");
253 return ret;
254 }
255
256 return 0;
257 }
258
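/* Ring a QM doorbell for queue 'qn' with the given command, index and priority. */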
259 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
260 u16 index, u8 priority)
261 {
262 u64 doorbell;
263 u64 dbase;
264 u16 randata = 0;
265
266 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
267 dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
268 else
269 dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
270
271 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
272 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
273 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
274 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
275
276 writeq(doorbell, qm->io_base + dbase);
277 }
278
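/*
 * Read the SQC VFT of a VF through the PF's VFT configuration interface.
 * On success store the queue base in *rbase and return the number of
 * queue pairs; return a negative errno on timeout.
 */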
279 static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
280 {
281 unsigned int val;
282 u64 sqc_vft;
283 u32 qp_num;
284 int ret;
285
286 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
287 val & BIT(0), MB_POLL_PERIOD_US,
288 MB_POLL_TIMEOUT_US);
289 if (ret)
290 return ret;
291
292 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
293 /* 0 means SQC VFT */
294 writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
295 writel(vf_id, qm->io_base + QM_VFT_CFG);
296
297 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
298 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
299
300 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
301 val & BIT(0), MB_POLL_PERIOD_US,
302 MB_POLL_TIMEOUT_US);
303 if (ret)
304 return ret;
305
306 sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
307 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
308 QM_XQC_ADDR_OFFSET);
309 *rbase = QM_SQC_VFT_BASE_MASK_V2 &
310 (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
311 qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
312 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
313
314 return qp_num;
315 }
316
317 static void qm_dev_cmd_init(struct hisi_qm *qm)
318 {
319 /* Clear VF communication status registers. */
320 writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);
321
322 /* Enable pf and vf communication. */
323 writel(0x0, qm->io_base + QM_IFC_INT_MASK);
324 }
325
326 static int vf_qm_cache_wb(struct hisi_qm *qm)
327 {
328 unsigned int val;
329 int ret;
330
331 writel(0x1, qm->io_base + QM_CACHE_WB_START);
332 ret = readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
333 val, val & BIT(0), MB_POLL_PERIOD_US,
334 MB_POLL_TIMEOUT_US);
335 if (ret) {
336 dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
337 return ret;
338 }
339
340 return 0;
341 }
342
343 static void vf_qm_fun_reset(struct hisi_qm *qm)
344 {
345 int i;
346
347 for (i = 0; i < qm->qp_num; i++)
348 qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
349 }
350
351 static int vf_qm_func_stop(struct hisi_qm *qm)
352 {
353 return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
354 }
355
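/*
 * Check the magic and driver version of the incoming migration data. For
 * data saved by a V1 driver, rebuild the 64-bit EQE/AEQE dma addresses
 * from the saved EQC/AEQC registers.
 */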
356 static int vf_qm_version_check(struct acc_vf_data *vf_data, struct device *dev)
357 {
358 switch (vf_data->acc_magic) {
359 case ACC_DEV_MAGIC_V2:
360 if (vf_data->major_ver != ACC_DRV_MAJOR_VER) {
361 dev_info(dev, "migration driver version <%u.%u> does not match!\n",
362 vf_data->major_ver, vf_data->minor_ver);
363 return -EINVAL;
364 }
365 break;
366 case ACC_DEV_MAGIC_V1:
367 /* Correct dma address */
368 vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
369 vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
370 vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
371 vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
372 vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
373 vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
374 break;
375 default:
376 return -EINVAL;
377 }
378
379 return 0;
380 }
381
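/*
 * Verify that the received match data (magic/version, device ID, queue
 * pair number and isolation state) is compatible with this VF before the
 * full device state is loaded.
 */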
382 static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
383 struct hisi_acc_vf_migration_file *migf)
384 {
385 struct acc_vf_data *vf_data = &migf->vf_data;
386 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
387 struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
388 struct device *dev = &vf_qm->pdev->dev;
389 u32 que_iso_state;
390 int ret;
391
392 if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev->match_done)
393 return 0;
394
395 ret = vf_qm_version_check(vf_data, dev);
396 if (ret) {
397 dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
398 return ret;
399 }
400
401 if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
402 dev_err(dev, "failed to match VF devices\n");
403 return -EINVAL;
404 }
405
406 /* VF qp num check */
407 ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
408 if (ret <= 0) {
409 dev_err(dev, "failed to get vft qp nums\n");
410 return ret;
411 }
412
413 if (ret != vf_data->qp_num) {
414 dev_err(dev, "failed to match VF qp num\n");
415 return -EINVAL;
416 }
417
418 vf_qm->qp_num = ret;
419
420 /* VF isolation state check */
421 ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
422 if (ret) {
423 dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
424 return ret;
425 }
426
427 if (vf_data->que_iso_cfg != que_iso_state) {
428 dev_err(dev, "failed to match isolation state\n");
429 return -EINVAL;
430 }
431
432 hisi_acc_vdev->match_done = true;
433 return 0;
434 }
435
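/*
 * Collect the match data (magic, driver version, device ID, queue pair
 * info and isolation state) from the PF. This data is transferred first
 * so that the destination can validate compatibility.
 */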
436 static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
437 struct acc_vf_data *vf_data)
438 {
439 struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
440 struct device *dev = &pf_qm->pdev->dev;
441 int vf_id = hisi_acc_vdev->vf_id;
442 int ret;
443
444 vf_data->acc_magic = ACC_DEV_MAGIC_V2;
445 vf_data->major_ver = ACC_DRV_MAJOR_VER;
446 vf_data->minor_ver = ACC_DRV_MINOR_VER;
447 /* Save device id */
448 vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
449
450 /* Save the VF qp num from the PF */
451 ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
452 if (ret <= 0) {
453 dev_err(dev, "failed to get vft qp nums!\n");
454 return -EINVAL;
455 }
456
457 vf_data->qp_num = ret;
458
459 /* Save the VF isolation state from the PF */
460 ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
461 if (ret) {
462 dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
463 return ret;
464 }
465
466 return 0;
467 }
468
469 static void vf_qm_xeqc_save(struct hisi_qm *qm,
470 struct hisi_acc_vf_migration_file *migf)
471 {
472 struct acc_vf_data *vf_data = &migf->vf_data;
473 u16 eq_head, aeq_head;
474
475 eq_head = vf_data->qm_eqc_dw[0] & 0xFFFF;
476 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, eq_head, 0);
477
478 aeq_head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
479 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, aeq_head, 0);
480 }
481
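/*
 * Restore the saved QM state to the VF: the VF state register, the queue
 * configuration registers and the SQC/CQC base addresses. Nothing is done
 * if only the match data was transferred.
 */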
482 static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
483 struct hisi_acc_vf_migration_file *migf)
484 {
485 struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
486 struct device *dev = &qm->pdev->dev;
487 struct acc_vf_data *vf_data = &migf->vf_data;
488 int ret;
489
490 /* Return if only match data was transferred */
491 if (migf->total_length == QM_MATCH_SIZE)
492 return 0;
493
494 if (migf->total_length < sizeof(struct acc_vf_data))
495 return -EINVAL;
496
497 if (!vf_data->eqe_dma || !vf_data->aeqe_dma ||
498 !vf_data->sqc_dma || !vf_data->cqc_dma) {
499 dev_info(dev, "resume dma addr is NULL!\n");
500 hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
501 return 0;
502 }
503
504 ret = qm_write_regs(qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
505 if (ret) {
506 dev_err(dev, "failed to write QM_VF_STATE\n");
507 return ret;
508 }
509 hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
510
511 qm->eqe_dma = vf_data->eqe_dma;
512 qm->aeqe_dma = vf_data->aeqe_dma;
513 qm->sqc_dma = vf_data->sqc_dma;
514 qm->cqc_dma = vf_data->cqc_dma;
515
516 qm->qp_base = vf_data->qp_base;
517 qm->qp_num = vf_data->qp_num;
518
519 ret = qm_set_regs(qm, vf_data);
520 if (ret) {
521 dev_err(dev, "set VF regs failed\n");
522 return ret;
523 }
524
525 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
526 if (ret) {
527 dev_err(dev, "set sqc failed\n");
528 return ret;
529 }
530
531 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
532 if (ret) {
533 dev_err(dev, "set cqc failed\n");
534 return ret;
535 }
536
537 qm_dev_cmd_init(qm);
538 return 0;
539 }
540
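/*
 * Read the complete VF QM state: the register snapshot plus the EQE/AEQE,
 * SQC and CQC dma addresses.
 */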
541 static int vf_qm_read_data(struct hisi_qm *vf_qm, struct acc_vf_data *vf_data)
542 {
543 struct device *dev = &vf_qm->pdev->dev;
544 int ret;
545
546 ret = qm_get_regs(vf_qm, vf_data);
547 if (ret)
548 return ret;
549
550 /* Each register is 32 bits; the dma address is 64 bits. */
551 vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
552 vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
553 vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
554 vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
555 vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
556 vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];
557
558 /* Get the SQC and CQC base addresses via the SQC_BT/CQC_BT mailbox commands */
559 ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
560 if (ret) {
561 dev_err(dev, "failed to read SQC addr!\n");
562 return ret;
563 }
564
565 ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
566 if (ret) {
567 dev_err(dev, "failed to read CQC addr!\n");
568 return ret;
569 }
570
571 return 0;
572 }
573
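/*
 * Save the VF QM state. If the device is not ready, only the match data
 * is reported; otherwise the full state is read and the EQ/AEQ doorbells
 * are updated with the saved head pointers.
 */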
574 static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
575 struct hisi_acc_vf_migration_file *migf)
576 {
577 struct acc_vf_data *vf_data = &migf->vf_data;
578 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
579 int ret;
580
581 if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
582 /* Update state and return with match data */
583 vf_data->vf_qm_state = QM_NOT_READY;
584 hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
585 migf->total_length = QM_MATCH_SIZE;
586 return 0;
587 }
588
589 vf_data->vf_qm_state = QM_READY;
590 hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
591
592 ret = vf_qm_read_data(vf_qm, vf_data);
593 if (ret)
594 return ret;
595
596 migf->total_length = sizeof(struct acc_vf_data);
597 /* Save eqc and aeqc interrupt information */
598 vf_qm_xeqc_save(vf_qm, migf);
599
600 return 0;
601 }
602
603 static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
604 {
605 struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
606
607 return container_of(core_device, struct hisi_acc_vf_core_device,
608 core_device);
609 }
610
611 /* Check the PF's RAS state and Function INT state */
612 static int
613 hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
614 {
615 struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
616 struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
617 struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
618 struct device *dev = &qm->pdev->dev;
619 u32 state;
620
621 /* Check RAS state */
622 state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
623 if (state) {
624 dev_err(dev, "failed to check QM RAS state!\n");
625 return -EBUSY;
626 }
627
628 /* Check Function Communication state between PF and VF */
629 state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
630 if (state) {
631 dev_err(dev, "failed to check QM IFC INT state!\n");
632 return -EBUSY;
633 }
634 state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
635 if (state) {
636 dev_err(dev, "failed to check QM IFC INT SET state!\n");
637 return -EBUSY;
638 }
639
640 /* Check submodule task state */
641 switch (vf_pdev->device) {
642 case PCI_DEVICE_ID_HUAWEI_SEC_VF:
643 state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
644 if (state) {
645 dev_err(dev, "failed to check QM SEC Core INT state!\n");
646 return -EBUSY;
647 }
648 return 0;
649 case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
650 state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
651 if (state) {
652 dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
653 return -EBUSY;
654 }
655 return 0;
656 case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
657 state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
658 if (state) {
659 dev_err(dev, "failed to check QM ZIP Core INT state!\n");
660 return -EBUSY;
661 }
662 return 0;
663 default:
664 dev_err(dev, "failed to detect acc module type!\n");
665 return -EINVAL;
666 }
667 }
668
669 static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
670 {
671 mutex_lock(&migf->lock);
672 migf->disabled = true;
673 migf->total_length = 0;
674 migf->filp->f_pos = 0;
675 mutex_unlock(&migf->lock);
676 }
677
678 static void
679 hisi_acc_debug_migf_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev,
680 struct hisi_acc_vf_migration_file *src_migf)
681 {
682 struct hisi_acc_vf_migration_file *dst_migf = hisi_acc_vdev->debug_migf;
683
684 if (!dst_migf)
685 return;
686
687 dst_migf->total_length = src_migf->total_length;
688 memcpy(&dst_migf->vf_data, &src_migf->vf_data,
689 sizeof(struct acc_vf_data));
690 }
691
692 static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
693 {
694 if (hisi_acc_vdev->resuming_migf) {
695 hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->resuming_migf);
696 hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
697 fput(hisi_acc_vdev->resuming_migf->filp);
698 hisi_acc_vdev->resuming_migf = NULL;
699 }
700
701 if (hisi_acc_vdev->saving_migf) {
702 hisi_acc_debug_migf_copy(hisi_acc_vdev, hisi_acc_vdev->saving_migf);
703 hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
704 fput(hisi_acc_vdev->saving_migf->filp);
705 hisi_acc_vdev->saving_migf = NULL;
706 }
707 }
708
709 static struct hisi_acc_vf_core_device *hisi_acc_get_vf_dev(struct vfio_device *vdev)
710 {
711 return container_of(vdev, struct hisi_acc_vf_core_device,
712 core_device.vdev);
713 }
714
715 static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
716 {
717 hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
718 hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
719 hisi_acc_vf_disable_fds(hisi_acc_vdev);
720 }
721
722 static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
723 {
724 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
725
726 if (hisi_acc_vdev->vf_qm_state != QM_READY)
727 return;
728
729 /* Make sure the device is enabled */
730 qm_dev_cmd_init(vf_qm);
731
732 vf_qm_fun_reset(vf_qm);
733 }
734
735 static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
736 {
737 struct device *dev = &hisi_acc_vdev->vf_dev->dev;
738 struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
739 int ret;
740
741 /* Recover data to VF */
742 ret = vf_qm_load_data(hisi_acc_vdev, migf);
743 if (ret) {
744 dev_err(dev, "failed to recover the VF!\n");
745 return ret;
746 }
747
748 return 0;
749 }
750
751 static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
752 {
753 struct hisi_acc_vf_migration_file *migf = filp->private_data;
754
755 hisi_acc_vf_disable_fd(migf);
756 mutex_destroy(&migf->lock);
757 kfree(migf);
758 return 0;
759 }
760
761 static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
762 size_t len, loff_t *pos)
763 {
764 struct hisi_acc_vf_migration_file *migf = filp->private_data;
765 u8 *vf_data = (u8 *)&migf->vf_data;
766 loff_t requested_length;
767 ssize_t done = 0;
768 int ret;
769
770 if (pos)
771 return -ESPIPE;
772 pos = &filp->f_pos;
773
774 if (*pos < 0 ||
775 check_add_overflow((loff_t)len, *pos, &requested_length))
776 return -EINVAL;
777
778 if (requested_length > sizeof(struct acc_vf_data))
779 return -ENOMEM;
780
781 mutex_lock(&migf->lock);
782 if (migf->disabled) {
783 done = -ENODEV;
784 goto out_unlock;
785 }
786
787 ret = copy_from_user(vf_data + *pos, buf, len);
788 if (ret) {
789 done = -EFAULT;
790 goto out_unlock;
791 }
792 *pos += len;
793 done = len;
794 migf->total_length += len;
795
796 ret = vf_qm_check_match(migf->hisi_acc_vdev, migf);
797 if (ret)
798 done = -EFAULT;
799 out_unlock:
800 mutex_unlock(&migf->lock);
801 return done;
802 }
803
804 static const struct file_operations hisi_acc_vf_resume_fops = {
805 .owner = THIS_MODULE,
806 .write = hisi_acc_vf_resume_write,
807 .release = hisi_acc_vf_release_file,
808 };
809
810 static struct hisi_acc_vf_migration_file *
811 hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
812 {
813 struct hisi_acc_vf_migration_file *migf;
814
815 migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
816 if (!migf)
817 return ERR_PTR(-ENOMEM);
818
819 migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
820 O_WRONLY);
821 if (IS_ERR(migf->filp)) {
822 int err = PTR_ERR(migf->filp);
823
824 kfree(migf);
825 return ERR_PTR(err);
826 }
827
828 stream_open(migf->filp->f_inode, migf->filp);
829 mutex_init(&migf->lock);
830 migf->hisi_acc_vdev = hisi_acc_vdev;
831 return migf;
832 }
833
834 static long hisi_acc_vf_precopy_ioctl(struct file *filp,
835 unsigned int cmd, unsigned long arg)
836 {
837 struct hisi_acc_vf_migration_file *migf = filp->private_data;
838 struct hisi_acc_vf_core_device *hisi_acc_vdev = migf->hisi_acc_vdev;
839 loff_t *pos = &filp->f_pos;
840 struct vfio_precopy_info info;
841 unsigned long minsz;
842 int ret;
843
844 if (cmd != VFIO_MIG_GET_PRECOPY_INFO)
845 return -ENOTTY;
846
847 minsz = offsetofend(struct vfio_precopy_info, dirty_bytes);
848
849 if (copy_from_user(&info, (void __user *)arg, minsz))
850 return -EFAULT;
851 if (info.argsz < minsz)
852 return -EINVAL;
853
854 mutex_lock(&hisi_acc_vdev->state_mutex);
855 if (hisi_acc_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY) {
856 mutex_unlock(&hisi_acc_vdev->state_mutex);
857 return -EINVAL;
858 }
859
860 mutex_lock(&migf->lock);
861
862 if (migf->disabled) {
863 ret = -ENODEV;
864 goto out;
865 }
866
867 if (*pos > migf->total_length) {
868 ret = -EINVAL;
869 goto out;
870 }
871
872 info.dirty_bytes = 0;
873 info.initial_bytes = migf->total_length - *pos;
874 mutex_unlock(&migf->lock);
875 mutex_unlock(&hisi_acc_vdev->state_mutex);
876
877 return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
878 out:
879 mutex_unlock(&migf->lock);
880 mutex_unlock(&hisi_acc_vdev->state_mutex);
881 return ret;
882 }
883
884 static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
885 loff_t *pos)
886 {
887 struct hisi_acc_vf_migration_file *migf = filp->private_data;
888 ssize_t done = 0;
889 int ret;
890
891 if (pos)
892 return -ESPIPE;
893 pos = &filp->f_pos;
894
895 mutex_lock(&migf->lock);
896 if (*pos > migf->total_length) {
897 done = -EINVAL;
898 goto out_unlock;
899 }
900
901 if (migf->disabled) {
902 done = -ENODEV;
903 goto out_unlock;
904 }
905
906 len = min_t(size_t, migf->total_length - *pos, len);
907 if (len) {
908 u8 *vf_data = (u8 *)&migf->vf_data;
909
910 ret = copy_to_user(buf, vf_data + *pos, len);
911 if (ret) {
912 done = -EFAULT;
913 goto out_unlock;
914 }
915 *pos += len;
916 done = len;
917 }
918 out_unlock:
919 mutex_unlock(&migf->lock);
920 return done;
921 }
922
923 static const struct file_operations hisi_acc_vf_save_fops = {
924 .owner = THIS_MODULE,
925 .read = hisi_acc_vf_save_read,
926 .unlocked_ioctl = hisi_acc_vf_precopy_ioctl,
927 .compat_ioctl = compat_ptr_ioctl,
928 .release = hisi_acc_vf_release_file,
929 };
930
931 static struct hisi_acc_vf_migration_file *
932 hisi_acc_open_saving_migf(struct hisi_acc_vf_core_device *hisi_acc_vdev)
933 {
934 struct hisi_acc_vf_migration_file *migf;
935 int ret;
936
937 migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
938 if (!migf)
939 return ERR_PTR(-ENOMEM);
940
941 migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
942 O_RDONLY);
943 if (IS_ERR(migf->filp)) {
944 int err = PTR_ERR(migf->filp);
945
946 kfree(migf);
947 return ERR_PTR(err);
948 }
949
950 stream_open(migf->filp->f_inode, migf->filp);
951 mutex_init(&migf->lock);
952 migf->hisi_acc_vdev = hisi_acc_vdev;
953
954 ret = vf_qm_get_match_data(hisi_acc_vdev, &migf->vf_data);
955 if (ret) {
956 fput(migf->filp);
957 return ERR_PTR(ret);
958 }
959
960 return migf;
961 }
962
963 static struct hisi_acc_vf_migration_file *
964 hisi_acc_vf_pre_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
965 {
966 struct hisi_acc_vf_migration_file *migf;
967
968 migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
969 if (IS_ERR(migf))
970 return migf;
971
972 migf->total_length = QM_MATCH_SIZE;
973 return migf;
974 }
975
976 static struct hisi_acc_vf_migration_file *
977 hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev, bool open)
978 {
979 int ret;
980 struct hisi_acc_vf_migration_file *migf = NULL;
981
982 if (open) {
983 /*
984 * Userspace didn't use PRECOPY support. Hence saving_migf
985 * is not opened yet.
986 */
987 migf = hisi_acc_open_saving_migf(hisi_acc_vdev);
988 if (IS_ERR(migf))
989 return migf;
990 } else {
991 migf = hisi_acc_vdev->saving_migf;
992 }
993
994 ret = vf_qm_state_save(hisi_acc_vdev, migf);
995 if (ret)
996 return ERR_PTR(ret);
997
998 return open ? migf : NULL;
999 }
1000
1001 static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
1002 {
1003 struct device *dev = &hisi_acc_vdev->vf_dev->dev;
1004 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1005 int ret;
1006
1007 ret = vf_qm_func_stop(vf_qm);
1008 if (ret) {
1009 dev_err(dev, "failed to stop QM VF function!\n");
1010 return ret;
1011 }
1012
1013 ret = hisi_acc_check_int_state(hisi_acc_vdev);
1014 if (ret) {
1015 dev_err(dev, "failed to check QM INT state!\n");
1016 return ret;
1017 }
1018
1019 ret = vf_qm_cache_wb(vf_qm);
1020 if (ret) {
1021 dev_err(dev, "failed to writeback QM cache!\n");
1022 return ret;
1023 }
1024
1025 return 0;
1026 }
1027
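/*
 * Handle a single migration state transition arc. Return the migration
 * file for arcs that create one, NULL otherwise, or an ERR_PTR on failure.
 */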
1028 static struct file *
1029 hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
1030 u32 new)
1031 {
1032 u32 cur = hisi_acc_vdev->mig_state;
1033 int ret;
1034
1035 if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) {
1036 struct hisi_acc_vf_migration_file *migf;
1037
1038 migf = hisi_acc_vf_pre_copy(hisi_acc_vdev);
1039 if (IS_ERR(migf))
1040 return ERR_CAST(migf);
1041 get_file(migf->filp);
1042 hisi_acc_vdev->saving_migf = migf;
1043 return migf->filp;
1044 }
1045
1046 if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_STOP_COPY) {
1047 struct hisi_acc_vf_migration_file *migf;
1048
1049 ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
1050 if (ret)
1051 return ERR_PTR(ret);
1052
1053 migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, false);
1054 if (IS_ERR(migf))
1055 return ERR_CAST(migf);
1056
1057 return NULL;
1058 }
1059
1060 if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
1061 ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
1062 if (ret)
1063 return ERR_PTR(ret);
1064 return NULL;
1065 }
1066
1067 if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
1068 struct hisi_acc_vf_migration_file *migf;
1069
1070 migf = hisi_acc_vf_stop_copy(hisi_acc_vdev, true);
1071 if (IS_ERR(migf))
1072 return ERR_CAST(migf);
1073 get_file(migf->filp);
1074 hisi_acc_vdev->saving_migf = migf;
1075 return migf->filp;
1076 }
1077
1078 if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
1079 hisi_acc_vf_disable_fds(hisi_acc_vdev);
1080 return NULL;
1081 }
1082
1083 if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
1084 struct hisi_acc_vf_migration_file *migf;
1085
1086 migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
1087 if (IS_ERR(migf))
1088 return ERR_CAST(migf);
1089 get_file(migf->filp);
1090 hisi_acc_vdev->resuming_migf = migf;
1091 return migf->filp;
1092 }
1093
1094 if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
1095 ret = hisi_acc_vf_load_state(hisi_acc_vdev);
1096 if (ret)
1097 return ERR_PTR(ret);
1098 hisi_acc_vf_disable_fds(hisi_acc_vdev);
1099 return NULL;
1100 }
1101
1102 if (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) {
1103 hisi_acc_vf_disable_fds(hisi_acc_vdev);
1104 return NULL;
1105 }
1106
1107 if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
1108 hisi_acc_vf_start_device(hisi_acc_vdev);
1109 return NULL;
1110 }
1111
1112 /*
1113 * vfio_mig_get_next_state() does not use arcs other than the above
1114 */
1115 WARN_ON(true);
1116 return ERR_PTR(-EINVAL);
1117 }
1118
1119 static struct file *
1120 hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
1121 enum vfio_device_mig_state new_state)
1122 {
1123 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1124 enum vfio_device_mig_state next_state;
1125 struct file *res = NULL;
1126 int ret;
1127
1128 mutex_lock(&hisi_acc_vdev->state_mutex);
1129 while (new_state != hisi_acc_vdev->mig_state) {
1130 ret = vfio_mig_get_next_state(vdev,
1131 hisi_acc_vdev->mig_state,
1132 new_state, &next_state);
1133 if (ret) {
1134 res = ERR_PTR(-EINVAL);
1135 break;
1136 }
1137
1138 res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
1139 if (IS_ERR(res))
1140 break;
1141 hisi_acc_vdev->mig_state = next_state;
1142 if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
1143 fput(res);
1144 res = ERR_PTR(-EINVAL);
1145 break;
1146 }
1147 }
1148 mutex_unlock(&hisi_acc_vdev->state_mutex);
1149 return res;
1150 }
1151
1152 static int
1153 hisi_acc_vfio_pci_get_data_size(struct vfio_device *vdev,
1154 unsigned long *stop_copy_length)
1155 {
1156 *stop_copy_length = sizeof(struct acc_vf_data);
1157 return 0;
1158 }
1159
1160 static int
1161 hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
1162 enum vfio_device_mig_state *curr_state)
1163 {
1164 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1165
1166 mutex_lock(&hisi_acc_vdev->state_mutex);
1167 *curr_state = hisi_acc_vdev->mig_state;
1168 mutex_unlock(&hisi_acc_vdev->state_mutex);
1169 return 0;
1170 }
1171
1172 static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
1173 {
1174 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
1175
1176 if (hisi_acc_vdev->core_device.vdev.migration_flags !=
1177 VFIO_MIGRATION_STOP_COPY)
1178 return;
1179
1180 mutex_lock(&hisi_acc_vdev->state_mutex);
1181 hisi_acc_vf_reset(hisi_acc_vdev);
1182 mutex_unlock(&hisi_acc_vdev->state_mutex);
1183 }
1184
1185 static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
1186 {
1187 struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
1188 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1189 struct pci_dev *vf_dev = vdev->pdev;
1190
1191 /*
1192 * ACC VF dev BAR2 region consists of both functional register space
1193 * and migration control register space. For migration to work, we
1194 * need access to both. Hence, we map the entire BAR2 region here.
1195 * But unnecessarily exposing the migration BAR region to the Guest
1196 * has the potential to prevent/corrupt the Guest migration. Hence,
1197 * we restrict access to the migration control space from the
1198 * Guest (please see the mmap/ioctl/read/write override functions).
1199 *
1200 * Please note that it is OK to expose the entire VF BAR if migration
1201 * is not supported or required as this cannot affect the ACC PF
1202 * configurations.
1203 *
1204 * Also the HiSilicon ACC VF devices supported by this driver on
1205 * HiSilicon hardware platforms are integrated end point devices
1206 * and the platform lacks the capability to perform any PCIe P2P
1207 * between these devices.
1208 */
1209
1210 vf_qm->io_base =
1211 ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
1212 pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
1213 if (!vf_qm->io_base)
1214 return -EIO;
1215
1216 vf_qm->fun_type = QM_HW_VF;
1217 vf_qm->pdev = vf_dev;
1218 mutex_init(&vf_qm->mailbox_lock);
1219
1220 return 0;
1221 }
1222
1223 static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
1224 {
1225 struct hisi_qm *pf_qm;
1226 struct pci_driver *pf_driver;
1227
1228 if (!pdev->is_virtfn)
1229 return NULL;
1230
1231 switch (pdev->device) {
1232 case PCI_DEVICE_ID_HUAWEI_SEC_VF:
1233 pf_driver = hisi_sec_get_pf_driver();
1234 break;
1235 case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
1236 pf_driver = hisi_hpre_get_pf_driver();
1237 break;
1238 case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
1239 pf_driver = hisi_zip_get_pf_driver();
1240 break;
1241 default:
1242 return NULL;
1243 }
1244
1245 if (!pf_driver)
1246 return NULL;
1247
1248 pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);
1249
1250 return !IS_ERR(pf_qm) ? pf_qm : NULL;
1251 }
1252
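/*
 * BAR2 is split in half: the lower half holds the functional registers
 * that are exposed to the Guest, the upper half the migration control
 * registers. Reject accesses that start in the migration half and clamp
 * the count to the end of the functional region.
 */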
1253 static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
1254 size_t count, loff_t *ppos,
1255 size_t *new_count)
1256 {
1257 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1258 struct vfio_pci_core_device *vdev =
1259 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1260
1261 if (index == VFIO_PCI_BAR2_REGION_INDEX) {
1262 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
1263 resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
1264
1265 /* Check if access is for migration control region */
1266 if (pos >= end)
1267 return -EINVAL;
1268
1269 *new_count = min(count, (size_t)(end - pos));
1270 }
1271
1272 return 0;
1273 }
1274
1275 static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
1276 struct vm_area_struct *vma)
1277 {
1278 struct vfio_pci_core_device *vdev =
1279 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1280 unsigned int index;
1281
1282 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1283 if (index == VFIO_PCI_BAR2_REGION_INDEX) {
1284 u64 req_len, pgoff, req_start;
1285 resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
1286
1287 req_len = vma->vm_end - vma->vm_start;
1288 pgoff = vma->vm_pgoff &
1289 ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1290 req_start = pgoff << PAGE_SHIFT;
1291
1292 if (req_start + req_len > end)
1293 return -EINVAL;
1294 }
1295
1296 return vfio_pci_core_mmap(core_vdev, vma);
1297 }
1298
1299 static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
1300 const char __user *buf, size_t count,
1301 loff_t *ppos)
1302 {
1303 size_t new_count = count;
1304 int ret;
1305
1306 ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
1307 if (ret)
1308 return ret;
1309
1310 return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
1311 }
1312
1313 static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
1314 char __user *buf, size_t count,
1315 loff_t *ppos)
1316 {
1317 size_t new_count = count;
1318 int ret;
1319
1320 ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
1321 if (ret)
1322 return ret;
1323
1324 return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
1325 }
1326
1327 static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
1328 unsigned long arg)
1329 {
1330 if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1331 struct vfio_pci_core_device *vdev =
1332 container_of(core_vdev, struct vfio_pci_core_device, vdev);
1333 struct pci_dev *pdev = vdev->pdev;
1334 struct vfio_region_info info;
1335 unsigned long minsz;
1336
1337 minsz = offsetofend(struct vfio_region_info, offset);
1338
1339 if (copy_from_user(&info, (void __user *)arg, minsz))
1340 return -EFAULT;
1341
1342 if (info.argsz < minsz)
1343 return -EINVAL;
1344
1345 if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
1346 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1347
1348 /*
1349 * ACC VF dev BAR2 region consists of both functional
1350 * register space and migration control register space.
1351 * Report only the functional region to Guest.
1352 */
1353 info.size = pci_resource_len(pdev, info.index) / 2;
1354
1355 info.flags = VFIO_REGION_INFO_FLAG_READ |
1356 VFIO_REGION_INFO_FLAG_WRITE |
1357 VFIO_REGION_INFO_FLAG_MMAP;
1358
1359 return copy_to_user((void __user *)arg, &info, minsz) ?
1360 -EFAULT : 0;
1361 }
1362 }
1363 return vfio_pci_core_ioctl(core_vdev, cmd, arg);
1364 }
1365
1366 static int hisi_acc_vf_debug_check(struct seq_file *seq, struct vfio_device *vdev)
1367 {
1368 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1369 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1370 int ret;
1371
1372 lockdep_assert_held(&hisi_acc_vdev->open_mutex);
1373 /*
1374 * When the device is not opened, the io_base is not mapped.
1375 * The driver cannot perform device read and write operations.
1376 */
1377 if (!hisi_acc_vdev->dev_opened) {
1378 seq_puts(seq, "device not opened!\n");
1379 return -EINVAL;
1380 }
1381
1382 ret = qm_wait_dev_not_ready(vf_qm);
1383 if (ret) {
1384 seq_puts(seq, "VF device not ready!\n");
1385 return ret;
1386 }
1387
1388 return 0;
1389 }
1390
1391 static int hisi_acc_vf_debug_cmd(struct seq_file *seq, void *data)
1392 {
1393 struct device *vf_dev = seq->private;
1394 struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
1395 struct vfio_device *vdev = &core_device->vdev;
1396 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1397 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1398 u64 value;
1399 int ret;
1400
1401 mutex_lock(&hisi_acc_vdev->open_mutex);
1402 ret = hisi_acc_vf_debug_check(seq, vdev);
1403 if (ret) {
1404 mutex_unlock(&hisi_acc_vdev->open_mutex);
1405 return ret;
1406 }
1407
1408 value = readl(vf_qm->io_base + QM_MB_CMD_SEND_BASE);
1409 if (value == QM_MB_CMD_NOT_READY) {
1410 mutex_unlock(&hisi_acc_vdev->open_mutex);
1411 seq_puts(seq, "mailbox cmd channel not ready!\n");
1412 return -EINVAL;
1413 }
1414 mutex_unlock(&hisi_acc_vdev->open_mutex);
1415 seq_puts(seq, "mailbox cmd channel ready!\n");
1416
1417 return 0;
1418 }
1419
1420 static int hisi_acc_vf_dev_read(struct seq_file *seq, void *data)
1421 {
1422 struct device *vf_dev = seq->private;
1423 struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
1424 struct vfio_device *vdev = &core_device->vdev;
1425 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1426 size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
1427 struct acc_vf_data *vf_data;
1428 int ret;
1429
1430 mutex_lock(&hisi_acc_vdev->open_mutex);
1431 ret = hisi_acc_vf_debug_check(seq, vdev);
1432 if (ret) {
1433 mutex_unlock(&hisi_acc_vdev->open_mutex);
1434 return ret;
1435 }
1436
1437 mutex_lock(&hisi_acc_vdev->state_mutex);
1438 vf_data = kzalloc(sizeof(*vf_data), GFP_KERNEL);
1439 if (!vf_data) {
1440 ret = -ENOMEM;
1441 goto mutex_release;
1442 }
1443
1444 vf_data->vf_qm_state = hisi_acc_vdev->vf_qm_state;
1445 ret = vf_qm_read_data(&hisi_acc_vdev->vf_qm, vf_data);
1446 if (ret)
1447 goto migf_err;
1448
1449 seq_hex_dump(seq, "Dev Data:", DUMP_PREFIX_OFFSET, 16, 1,
1450 (const void *)vf_data, vf_data_sz, false);
1451
1452 seq_printf(seq,
1453 "guest driver load: %u\n"
1454 "data size: %lu\n",
1455 hisi_acc_vdev->vf_qm_state,
1456 sizeof(struct acc_vf_data));
1457
1458 migf_err:
1459 kfree(vf_data);
1460 mutex_release:
1461 mutex_unlock(&hisi_acc_vdev->state_mutex);
1462 mutex_unlock(&hisi_acc_vdev->open_mutex);
1463
1464 return ret;
1465 }
1466
1467 static int hisi_acc_vf_migf_read(struct seq_file *seq, void *data)
1468 {
1469 struct device *vf_dev = seq->private;
1470 struct vfio_pci_core_device *core_device = dev_get_drvdata(vf_dev);
1471 struct vfio_device *vdev = &core_device->vdev;
1472 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(vdev);
1473 size_t vf_data_sz = offsetofend(struct acc_vf_data, padding);
1474 struct hisi_acc_vf_migration_file *debug_migf = hisi_acc_vdev->debug_migf;
1475
1476 /* Check whether the live migration operation has been performed */
1477 if (debug_migf->total_length < QM_MATCH_SIZE) {
1478 seq_puts(seq, "device not migrated!\n");
1479 return -EAGAIN;
1480 }
1481
1482 seq_hex_dump(seq, "Mig Data:", DUMP_PREFIX_OFFSET, 16, 1,
1483 (const void *)&debug_migf->vf_data, vf_data_sz, false);
1484 seq_printf(seq, "migrate data length: %lu\n", debug_migf->total_length);
1485
1486 return 0;
1487 }
1488
1489 static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
1490 {
1491 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
1492 struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
1493 int ret;
1494
1495 ret = vfio_pci_core_enable(vdev);
1496 if (ret)
1497 return ret;
1498
1499 if (core_vdev->mig_ops) {
1500 mutex_lock(&hisi_acc_vdev->open_mutex);
1501 ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
1502 if (ret) {
1503 mutex_unlock(&hisi_acc_vdev->open_mutex);
1504 vfio_pci_core_disable(vdev);
1505 return ret;
1506 }
1507 hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
1508 hisi_acc_vdev->dev_opened = true;
1509 mutex_unlock(&hisi_acc_vdev->open_mutex);
1510 }
1511
1512 vfio_pci_core_finish_enable(vdev);
1513 return 0;
1514 }
1515
1516 static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
1517 {
1518 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
1519 struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
1520
1521 hisi_acc_vf_disable_fds(hisi_acc_vdev);
1522 mutex_lock(&hisi_acc_vdev->open_mutex);
1523 hisi_acc_vdev->dev_opened = false;
1524 iounmap(vf_qm->io_base);
1525 mutex_unlock(&hisi_acc_vdev->open_mutex);
1526 vfio_pci_core_close_device(core_vdev);
1527 }
1528
1529 static const struct vfio_migration_ops hisi_acc_vfio_pci_migrn_state_ops = {
1530 .migration_set_state = hisi_acc_vfio_pci_set_device_state,
1531 .migration_get_state = hisi_acc_vfio_pci_get_device_state,
1532 .migration_get_data_size = hisi_acc_vfio_pci_get_data_size,
1533 };
1534
1535 static int hisi_acc_vfio_pci_migrn_init_dev(struct vfio_device *core_vdev)
1536 {
1537 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_get_vf_dev(core_vdev);
1538 struct pci_dev *pdev = to_pci_dev(core_vdev->dev);
1539 struct hisi_qm *pf_qm = hisi_acc_get_pf_qm(pdev);
1540
1541 hisi_acc_vdev->vf_id = pci_iov_vf_id(pdev) + 1;
1542 hisi_acc_vdev->pf_qm = pf_qm;
1543 hisi_acc_vdev->vf_dev = pdev;
1544 hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
1545 mutex_init(&hisi_acc_vdev->state_mutex);
1546 mutex_init(&hisi_acc_vdev->open_mutex);
1547
1548 core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY;
1549 core_vdev->mig_ops = &hisi_acc_vfio_pci_migrn_state_ops;
1550
1551 return vfio_pci_core_init_dev(core_vdev);
1552 }
1553
1554 static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
1555 .name = "hisi-acc-vfio-pci-migration",
1556 .init = hisi_acc_vfio_pci_migrn_init_dev,
1557 .release = vfio_pci_core_release_dev,
1558 .open_device = hisi_acc_vfio_pci_open_device,
1559 .close_device = hisi_acc_vfio_pci_close_device,
1560 .ioctl = hisi_acc_vfio_pci_ioctl,
1561 .device_feature = vfio_pci_core_ioctl_feature,
1562 .read = hisi_acc_vfio_pci_read,
1563 .write = hisi_acc_vfio_pci_write,
1564 .mmap = hisi_acc_vfio_pci_mmap,
1565 .request = vfio_pci_core_request,
1566 .match = vfio_pci_core_match,
1567 .bind_iommufd = vfio_iommufd_physical_bind,
1568 .unbind_iommufd = vfio_iommufd_physical_unbind,
1569 .attach_ioas = vfio_iommufd_physical_attach_ioas,
1570 .detach_ioas = vfio_iommufd_physical_detach_ioas,
1571 };
1572
1573 static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
1574 .name = "hisi-acc-vfio-pci",
1575 .init = vfio_pci_core_init_dev,
1576 .release = vfio_pci_core_release_dev,
1577 .open_device = hisi_acc_vfio_pci_open_device,
1578 .close_device = vfio_pci_core_close_device,
1579 .ioctl = vfio_pci_core_ioctl,
1580 .device_feature = vfio_pci_core_ioctl_feature,
1581 .read = vfio_pci_core_read,
1582 .write = vfio_pci_core_write,
1583 .mmap = vfio_pci_core_mmap,
1584 .request = vfio_pci_core_request,
1585 .match = vfio_pci_core_match,
1586 .bind_iommufd = vfio_iommufd_physical_bind,
1587 .unbind_iommufd = vfio_iommufd_physical_unbind,
1588 .attach_ioas = vfio_iommufd_physical_attach_ioas,
1589 .detach_ioas = vfio_iommufd_physical_detach_ioas,
1590 };
1591
1592 static void hisi_acc_vfio_debug_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
1593 {
1594 struct vfio_device *vdev = &hisi_acc_vdev->core_device.vdev;
1595 struct hisi_acc_vf_migration_file *migf;
1596 struct dentry *vfio_dev_migration;
1597 struct dentry *vfio_hisi_acc;
1598 struct device *dev = vdev->dev;
1599
1600 if (!debugfs_initialized() ||
1601 !IS_ENABLED(CONFIG_VFIO_DEBUGFS))
1602 return;
1603
1604 if (vdev->ops != &hisi_acc_vfio_pci_migrn_ops)
1605 return;
1606
1607 vfio_dev_migration = debugfs_lookup("migration", vdev->debug_root);
1608 if (!vfio_dev_migration) {
1609 dev_err(dev, "failed to lookup migration debugfs file!\n");
1610 return;
1611 }
1612
1613 migf = kzalloc(sizeof(*migf), GFP_KERNEL);
1614 if (!migf)
1615 return;
1616 hisi_acc_vdev->debug_migf = migf;
1617
1618 vfio_hisi_acc = debugfs_create_dir("hisi_acc", vfio_dev_migration);
1619 debugfs_create_devm_seqfile(dev, "dev_data", vfio_hisi_acc,
1620 hisi_acc_vf_dev_read);
1621 debugfs_create_devm_seqfile(dev, "migf_data", vfio_hisi_acc,
1622 hisi_acc_vf_migf_read);
1623 debugfs_create_devm_seqfile(dev, "cmd_state", vfio_hisi_acc,
1624 hisi_acc_vf_debug_cmd);
1625 }
1626
1627 static void hisi_acc_vf_debugfs_exit(struct hisi_acc_vf_core_device *hisi_acc_vdev)
1628 {
1629 kfree(hisi_acc_vdev->debug_migf);
1630 hisi_acc_vdev->debug_migf = NULL;
1631 }
1632
1633 static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1634 {
1635 struct hisi_acc_vf_core_device *hisi_acc_vdev;
1636 const struct vfio_device_ops *ops = &hisi_acc_vfio_pci_ops;
1637 struct hisi_qm *pf_qm;
1638 int vf_id;
1639 int ret;
1640
1641 pf_qm = hisi_acc_get_pf_qm(pdev);
1642 if (pf_qm && pf_qm->ver >= QM_HW_V3) {
1643 vf_id = pci_iov_vf_id(pdev);
1644 if (vf_id >= 0)
1645 ops = &hisi_acc_vfio_pci_migrn_ops;
1646 else
1647 pci_warn(pdev, "migration support failed, continue with generic interface\n");
1648 }
1649
1650 hisi_acc_vdev = vfio_alloc_device(hisi_acc_vf_core_device,
1651 core_device.vdev, &pdev->dev, ops);
1652 if (IS_ERR(hisi_acc_vdev))
1653 return PTR_ERR(hisi_acc_vdev);
1654
1655 dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
1656 ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
1657 if (ret)
1658 goto out_put_vdev;
1659
1660 hisi_acc_vfio_debug_init(hisi_acc_vdev);
1661 return 0;
1662
1663 out_put_vdev:
1664 vfio_put_device(&hisi_acc_vdev->core_device.vdev);
1665 return ret;
1666 }
1667
1668 static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
1669 {
1670 struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
1671
1672 vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
1673 hisi_acc_vf_debugfs_exit(hisi_acc_vdev);
1674 vfio_put_device(&hisi_acc_vdev->core_device.vdev);
1675 }
1676
1677 static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
1678 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
1679 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
1680 { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
1681 { }
1682 };
1683
1684 MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
1685
1686 static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
1687 .reset_done = hisi_acc_vf_pci_aer_reset_done,
1688 .error_detected = vfio_pci_core_aer_err_detected,
1689 };
1690
1691 static struct pci_driver hisi_acc_vfio_pci_driver = {
1692 .name = KBUILD_MODNAME,
1693 .id_table = hisi_acc_vfio_pci_table,
1694 .probe = hisi_acc_vfio_pci_probe,
1695 .remove = hisi_acc_vfio_pci_remove,
1696 .err_handler = &hisi_acc_vf_err_handlers,
1697 .driver_managed_dma = true,
1698 };
1699
1700 module_pci_driver(hisi_acc_vfio_pci_driver);
1701
1702 MODULE_LICENSE("GPL v2");
1703 MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
1704 MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
1705 MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");
1706