1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2014 Cisco Systems, Inc. All rights reserved.
3
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/delay.h>
9 #include <linux/if_ether.h>
10 #include <linux/slab.h>
11 #include "vnic_resource.h"
12 #include "vnic_devcmd.h"
13 #include "vnic_dev.h"
14 #include "vnic_stats.h"
15 #include "vnic_wq.h"
16
17 #define VNIC_DVCMD_TMO 10000 /* Devcmd Timeout value */
18 #define VNIC_NOTIFY_INTR_MASK 0x0000ffff00000000ULL
19
/* State for the devcmd2 (posted-command) interface: a work queue whose
 * descriptors carry commands to firmware, plus a results ring that
 * firmware writes completions into.
 */
struct devcmd2_controller {
	struct vnic_wq_ctrl __iomem *wq_ctrl;	/* WQ control registers (BAR mapping) */
	struct vnic_dev_ring results_ring;	/* DMA ring backing 'result' */
	struct vnic_wq wq;			/* command work queue */
	struct vnic_devcmd2 *cmd_ring;		/* WQ descriptors viewed as devcmd2 entries */
	struct devcmd2_result *result;		/* firmware-written completion slots */
	u16 next_result;			/* next completion slot to consume */
	u16 result_size;			/* number of slots in the results ring */
	int color;				/* expected color bit; flips on each ring wrap */
};
30
/* One discovered vNIC resource: its BAR mapping and element count. */
struct vnic_res {
	void __iomem *vaddr;	/* mapped address within the BAR */
	unsigned int count;	/* number of elements of this resource type */
};
35
/* Per-vNIC device state shared by the queue/interrupt/devcmd helpers. */
struct vnic_dev {
	void *priv;				/* caller-private pointer (see svnic_dev_priv) */
	struct pci_dev *pdev;			/* underlying PCI function */
	struct vnic_res res[RES_TYPE_MAX];	/* resources found by vnic_dev_discover_res */
	enum vnic_dev_intr_mode intr_mode;	/* interrupt mode selected by the caller */
	struct vnic_devcmd __iomem *devcmd;	/* legacy devcmd register area */
	struct vnic_devcmd_notify *notify;	/* DMA notify area, firmware-updated */
	struct vnic_devcmd_notify notify_copy;	/* checksummed snapshot of *notify */
	dma_addr_t notify_pa;			/* bus address of 'notify' */
	struct vnic_stats *stats;		/* DMA stats buffer, lazily allocated */
	dma_addr_t stats_pa;			/* bus address of 'stats' */
	struct vnic_devcmd_fw_info *fw_info;	/* cached firmware info, fetched once */
	dma_addr_t fw_info_pa;			/* bus address of 'fw_info' */
	u64 args[VNIC_DEVCMD_NARGS];		/* devcmd argument/result exchange area */
	struct devcmd2_controller *devcmd2;	/* devcmd2 interface state, NULL until init */

	/* devcmd dispatch routine installed by svnic_dev_init_devcmd2() */
	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		int wait);
};
55
56 #define VNIC_MAX_RES_HDR_SIZE \
57 (sizeof(struct vnic_resource_header) + \
58 sizeof(struct vnic_resource) * RES_TYPE_MAX)
59 #define VNIC_RES_STRIDE 128
60
/* Return the caller-private pointer stored at registration time. */
void *svnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
65
/* Walk the resource table in BAR0 and record each resource's mapped
 * address and count in vdev->res[].  Returns 0 on success, -EINVAL on a
 * missing/corrupt resource header or an out-of-bounds resource.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		pr_err("vNIC BAR0 res hdr length error\n");

		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");

		return -EINVAL;
	}

	/* The header must carry the expected magic/version pair. */
	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		pr_err("vNIC BAR0 res magic/version error exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));

		return -EINVAL;
	}

	/* Resource entries follow the header, terminated by RES_TYPE_EOL. */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		/* Skip resources living in BARs we did not map. */
		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				pr_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);

				return -EINVAL;
			}
			break;

		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;

		default:
			continue;
		}

		/* NOTE(review): bounds check and vaddr both use bar[0]
		 * even when bar_num > 0, so this effectively supports
		 * BAR0-hosted resources only — confirm before relying on
		 * multi-BAR layouts.
		 */
		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
148
/* Number of elements discovered for the given resource type; 0 when the
 * resource was not present in the BAR resource table.
 */
unsigned int svnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
154
svnic_dev_get_res(struct vnic_dev * vdev,enum vnic_res_type type,unsigned int index)155 void __iomem *svnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
156 unsigned int index)
157 {
158 if (!vdev->res[type].vaddr)
159 return NULL;
160
161 switch (type) {
162 case RES_TYPE_WQ:
163 case RES_TYPE_RQ:
164 case RES_TYPE_CQ:
165 case RES_TYPE_INTR_CTRL:
166 return (char __iomem *)vdev->res[type].vaddr +
167 index * VNIC_RES_STRIDE;
168
169 default:
170 return (char __iomem *)vdev->res[type].vaddr;
171 }
172 }
173
svnic_dev_desc_ring_size(struct vnic_dev_ring * ring,unsigned int desc_count,unsigned int desc_size)174 unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
175 unsigned int desc_count,
176 unsigned int desc_size)
177 {
178 /* The base address of the desc rings must be 512 byte aligned.
179 * Descriptor count is aligned to groups of 32 descriptors. A
180 * count of 0 means the maximum 4096 descriptors. Descriptor
181 * size is aligned to 16 bytes.
182 */
183
184 unsigned int count_align = 32;
185 unsigned int desc_align = 16;
186
187 ring->base_align = 512;
188
189 if (desc_count == 0)
190 desc_count = 4096;
191
192 ring->desc_count = ALIGN(desc_count, count_align);
193
194 ring->desc_size = ALIGN(desc_size, desc_align);
195
196 ring->size = ring->desc_count * ring->desc_size;
197 ring->size_unaligned = ring->size + ring->base_align;
198
199 return ring->size_unaligned;
200 }
201
/* Zero the (aligned) descriptor area of an allocated ring. */
void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
206
/* Size (via svnic_dev_desc_ring_size) and allocate a coherent DMA ring,
 * align the base address up to ring->base_align, and zero the ring.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	svnic_dev_desc_ring_size(ring, desc_count, desc_size);

	/* Over-allocate by base_align so an aligned base always fits. */
	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
		ring->size_unaligned, &ring->base_addr_unaligned,
		GFP_KERNEL);
	if (!ring->descs_unaligned) {
		pr_err("Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);

		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	/* Offset the CPU pointer by the same amount as the bus address. */
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	svnic_dev_clear_desc_ring(ring);

	/* One slot stays unused so a full ring differs from an empty one. */
	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
233
svnic_dev_free_desc_ring(struct vnic_dev * vdev,struct vnic_dev_ring * ring)234 void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
235 {
236 if (ring->descs) {
237 dma_free_coherent(&vdev->pdev->dev,
238 ring->size_unaligned,
239 ring->descs_unaligned,
240 ring->base_addr_unaligned);
241 ring->descs = NULL;
242 }
243 }
244
/* Issue a devcmd via the devcmd2 posted-command interface: write the
 * command into the next WQ descriptor, post it, then (unless the
 * command is fire-and-forget) poll the results ring for the matching
 * completion.
 *
 * Arguments travel in vdev->args[]; results are copied back there for
 * _CMD_DIR_READ commands.  'wait' is the poll budget in 100us units.
 *
 * Returns 0 on success, a positive firmware error code on command
 * failure, -ENODEV on surprise hardware removal, -EBUSY when the WQ is
 * full, or -ETIMEDOUT when no completion arrives in time.
 */
static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result = NULL;
	unsigned int i;
	int delay;
	int err;
	u32 posted;
	u32 fetch_idx;
	u32 new_posted;
	u8 color;

	fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	posted = ioread32(&dc2c->wq_ctrl->posted_index);

	if (posted == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: return error */
		return -ENODEV;
	}

	/* A full ring would make posted catch up with fetch_index. */
	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	if (new_posted == fetch_idx) {
		pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
			pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);

		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	/* NOWAIT commands complete without a results-ring entry. */
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
	}
	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	/* Snapshot the completion slot and its expected color before
	 * advancing the ring state below.
	 */
	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	/*
	 * Increment next_result, after posting the devcmd, irrespective of
	 * devcmd result, and it should be done only once.
	 */
	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	/* Poll: firmware sets the slot's color to the current generation
	 * once the result is valid.
	 */
	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		if (result->color == color) {
			if (result->error) {
				err = (int) result->error;
				/* ERR_ECMDUNKNOWN on CMD_CAPABILITY is an
				 * expected probe miss; don't log it.
				 */
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));

				return err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = result->results[i];
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}
338
/* Set up the devcmd2 posted-command interface: allocate the controller,
 * the command WQ and the results ring, tell firmware where the results
 * ring lives (CMD_INITIALIZE_DEVCMD2), and install _svnic_dev_cmd2 as
 * the devcmd dispatch routine.  Idempotent.  Returns 0 on success or a
 * negative errno, unwinding all partial allocations on failure.
 */
static int svnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = NULL;
	unsigned int fetch_idx;
	int ret;
	void __iomem *p;

	if (vdev->devcmd2)
		return 0;

	p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!p)
		return -ENODEV;

	dc2c = kzalloc_obj(*dc2c, GFP_ATOMIC);
	if (!dc2c)
		return -ENOMEM;

	vdev->devcmd2 = dc2c;

	/* First pass of the results ring is written with color 1. */
	dc2c->color = 1;
	dc2c->result_size = DEVCMD2_RING_SIZE;

	ret = vnic_wq_devcmd2_alloc(vdev,
				    &dc2c->wq,
				    DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_devcmd2;

	fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
	if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_idx = 0;
	}

	/*
	 * Don't change fetch_index ever and
	 * set posted_index same as fetch_index
	 * when setting up the WQ for devcmd2.
	 */
	vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
	svnic_wq_enable(&dc2c->wq);
	ret = svnic_dev_alloc_desc_ring(vdev,
					&dc2c->results_ring,
					DEVCMD2_RING_SIZE,
					DEVCMD2_DESC_SIZE);
	if (ret)
		goto err_free_wq;

	dc2c->result = (struct devcmd2_result *) dc2c->results_ring.descs;
	dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
	dc2c->wq_ctrl = dc2c->wq.ctrl;

	/* Hand firmware the results ring bus address and size. */
	vdev->args[0] = (u64) dc2c->results_ring.base_addr | VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	ret = _svnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, VNIC_DVCMD_TMO);
	if (ret < 0)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = &_svnic_dev_cmd2;
	pr_info("DEVCMD2 Initialized.\n");

	return ret;

err_free_desc_ring:
	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);

err_free_wq:
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);

err_free_devcmd2:
	kfree(dc2c);
	vdev->devcmd2 = NULL;

	return ret;
} /* end of svnic_dev_init_devcmd2 */
417
/* Tear down the devcmd2 interface: detach it from vdev first so no new
 * commands can be dispatched, then release the results ring, the WQ and
 * the controller itself.
 */
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;

	vdev->devcmd2 = NULL;
	vdev->devcmd_rtn = NULL;

	svnic_dev_free_desc_ring(vdev, &dc2c->results_ring);
	svnic_wq_disable(&dc2c->wq);
	svnic_wq_free(&dc2c->wq);
	kfree(dc2c);
}
430
/* Issue a devcmd with two arguments.  *a0 and *a1 carry the arguments
 * in and the (possibly updated) results back out.  Returns the devcmd
 * routine's status: 0, a positive firmware error, or a negative errno.
 */
int svnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	int ret;

	/* Marshal the caller's two arguments into the shared args array. */
	memset(vdev->args, 0, sizeof(vdev->args));
	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	ret = vdev->devcmd_rtn(vdev, cmd, wait);

	/* Hand back whatever the command wrote into the args array. */
	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return ret;
}
447
svnic_dev_fw_info(struct vnic_dev * vdev,struct vnic_devcmd_fw_info ** fw_info)448 int svnic_dev_fw_info(struct vnic_dev *vdev,
449 struct vnic_devcmd_fw_info **fw_info)
450 {
451 u64 a0, a1 = 0;
452 int wait = VNIC_DVCMD_TMO;
453 int err = 0;
454
455 if (!vdev->fw_info) {
456 vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
457 sizeof(struct vnic_devcmd_fw_info),
458 &vdev->fw_info_pa, GFP_KERNEL);
459 if (!vdev->fw_info)
460 return -ENOMEM;
461
462 a0 = vdev->fw_info_pa;
463
464 /* only get fw_info once and cache it */
465 err = svnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
466 }
467
468 *fw_info = vdev->fw_info;
469
470 return err;
471 }
472
svnic_dev_spec(struct vnic_dev * vdev,unsigned int offset,unsigned int size,void * value)473 int svnic_dev_spec(struct vnic_dev *vdev, unsigned int offset,
474 unsigned int size, void *value)
475 {
476 u64 a0, a1;
477 int wait = VNIC_DVCMD_TMO;
478 int err;
479
480 a0 = offset;
481 a1 = size;
482
483 err = svnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
484
485 switch (size) {
486 case 1:
487 *(u8 *)value = (u8)a0;
488 break;
489 case 2:
490 *(u16 *)value = (u16)a0;
491 break;
492 case 4:
493 *(u32 *)value = (u32)a0;
494 break;
495 case 8:
496 *(u64 *)value = a0;
497 break;
498 default:
499 BUG();
500 break;
501 }
502
503 return err;
504 }
505
svnic_dev_stats_clear(struct vnic_dev * vdev)506 int svnic_dev_stats_clear(struct vnic_dev *vdev)
507 {
508 u64 a0 = 0, a1 = 0;
509 int wait = VNIC_DVCMD_TMO;
510
511 return svnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
512 }
513
svnic_dev_stats_dump(struct vnic_dev * vdev,struct vnic_stats ** stats)514 int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
515 {
516 u64 a0, a1;
517 int wait = VNIC_DVCMD_TMO;
518
519 if (!vdev->stats) {
520 vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
521 sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
522 if (!vdev->stats)
523 return -ENOMEM;
524 }
525
526 *stats = vdev->stats;
527 a0 = vdev->stats_pa;
528 a1 = sizeof(struct vnic_stats);
529
530 return svnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
531 }
532
svnic_dev_close(struct vnic_dev * vdev)533 int svnic_dev_close(struct vnic_dev *vdev)
534 {
535 u64 a0 = 0, a1 = 0;
536 int wait = VNIC_DVCMD_TMO;
537
538 return svnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
539 }
540
svnic_dev_enable_wait(struct vnic_dev * vdev)541 int svnic_dev_enable_wait(struct vnic_dev *vdev)
542 {
543 u64 a0 = 0, a1 = 0;
544 int wait = VNIC_DVCMD_TMO;
545 int err = 0;
546
547 err = svnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
548 if (err == ERR_ECMDUNKNOWN)
549 return svnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
550
551 return err;
552 }
553
svnic_dev_disable(struct vnic_dev * vdev)554 int svnic_dev_disable(struct vnic_dev *vdev)
555 {
556 u64 a0 = 0, a1 = 0;
557 int wait = VNIC_DVCMD_TMO;
558
559 return svnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
560 }
561
svnic_dev_open(struct vnic_dev * vdev,int arg)562 int svnic_dev_open(struct vnic_dev *vdev, int arg)
563 {
564 u64 a0 = (u32)arg, a1 = 0;
565 int wait = VNIC_DVCMD_TMO;
566
567 return svnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
568 }
569
svnic_dev_open_done(struct vnic_dev * vdev,int * done)570 int svnic_dev_open_done(struct vnic_dev *vdev, int *done)
571 {
572 u64 a0 = 0, a1 = 0;
573 int wait = VNIC_DVCMD_TMO;
574 int err;
575
576 *done = 0;
577
578 err = svnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
579 if (err)
580 return err;
581
582 *done = (a0 == 0);
583
584 return 0;
585 }
586
/* Register the DMA notify area with firmware, requesting interrupt
 * 'intr' for notify updates.  The area is allocated on first use and
 * freed by svnic_dev_unregister().
 */
int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 arg0, arg1;

	if (!vdev->notify) {
		vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				&vdev->notify_pa, GFP_KERNEL);
		if (!vdev->notify)
			return -ENOMEM;
	}

	/* a1 carries the interrupt number in its upper word and the
	 * notify structure size in the low bits.
	 */
	arg0 = vdev->notify_pa;
	arg1 = (((u64)intr << 32) & VNIC_NOTIFY_INTR_MASK) +
	       sizeof(struct vnic_devcmd_notify);

	return svnic_dev_cmd(vdev, CMD_NOTIFY, &arg0, &arg1, VNIC_DVCMD_TMO);
}
606
svnic_dev_notify_unset(struct vnic_dev * vdev)607 void svnic_dev_notify_unset(struct vnic_dev *vdev)
608 {
609 u64 a0, a1;
610 int wait = VNIC_DVCMD_TMO;
611
612 a0 = 0; /* paddr = 0 to unset notify buffer */
613 a1 = VNIC_NOTIFY_INTR_MASK; /* intr num = -1 to unreg for intr */
614 a1 += sizeof(struct vnic_devcmd_notify);
615
616 svnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
617 }
618
/* Take a consistent snapshot of the firmware-updated notify area into
 * vdev->notify_copy.  Word 0 holds the sum of the remaining 32-bit
 * words; re-copy until the checksum matches so a half-updated area is
 * never consumed.  Returns 1 when notify_copy is valid, 0 when no
 * notify area has been set up.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
640
svnic_dev_init(struct vnic_dev * vdev,int arg)641 int svnic_dev_init(struct vnic_dev *vdev, int arg)
642 {
643 u64 a0 = (u32)arg, a1 = 0;
644 int wait = VNIC_DVCMD_TMO;
645
646 return svnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
647 }
648
svnic_dev_link_status(struct vnic_dev * vdev)649 int svnic_dev_link_status(struct vnic_dev *vdev)
650 {
651
652 if (!vnic_dev_notify_ready(vdev))
653 return 0;
654
655 return vdev->notify_copy.link_state;
656 }
657
svnic_dev_link_down_cnt(struct vnic_dev * vdev)658 u32 svnic_dev_link_down_cnt(struct vnic_dev *vdev)
659 {
660 if (!vnic_dev_notify_ready(vdev))
661 return 0;
662
663 return vdev->notify_copy.link_down_cnt;
664 }
665
/* Record the interrupt mode chosen by the caller. */
void svnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
671
/* Return the interrupt mode previously set via svnic_dev_set_intr_mode. */
enum vnic_dev_intr_mode svnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
676
/* Release everything svnic_dev_alloc_discover() and later calls set up:
 * the notify, stats and fw_info DMA buffers, the devcmd2 interface, and
 * the vnic_dev itself.  Safe to call with NULL.
 */
void svnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);
		kfree(vdev);
	}
}
698
/* Allocate (unless the caller passes one in) and initialize a vnic_dev,
 * then discover the device's resources from its BARs.  Returns the
 * vnic_dev, or NULL on failure — in which case even a caller-supplied
 * vdev is freed via svnic_dev_unregister().
 */
struct vnic_dev *svnic_dev_alloc_discover(struct vnic_dev *vdev,
					  void *priv,
					  struct pci_dev *pdev,
					  struct vnic_dev_bar *bar,
					  unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc_obj(struct vnic_dev, GFP_ATOMIC);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	svnic_dev_unregister(vdev);

	return NULL;
} /* end of svnic_dev_alloc_discover */
724
725 /*
726 * fallback option is left to keep the interface common for other vnics.
727 */
svnic_dev_cmd_init(struct vnic_dev * vdev,int fallback)728 int svnic_dev_cmd_init(struct vnic_dev *vdev, int fallback)
729 {
730 int err = -ENODEV;
731 void __iomem *p;
732
733 p = svnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
734 if (p)
735 err = svnic_dev_init_devcmd2(vdev);
736 else
737 pr_err("DEVCMD2 resource not found.\n");
738
739 return err;
740 } /* end of svnic_dev_cmd_init */
741