/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
    struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size)
{
	iflib_dma_info_t ifdip;
	int err;

	if ((ifdip = malloc(sizeof(struct iflib_dma_info),
	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
		device_printf(dev_from_vnic_dev(vdev),
		    "Unable to allocate DMA info memory\n");
		return (ENOMEM);
	}

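	/* Back the ring with DMA-able memory via iflib's busdma helpers. */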
	err = iflib_dma_alloc(vdev->softc->ctx, desc_count * desc_size,
	    ifdip, 0);
	if (err) {
		device_printf(dev_from_vnic_dev(vdev),
		    "Unable to allocate DEVCMD2 descriptors\n");
		err = ENOMEM;
		goto err_out_alloc;
	}

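	/*
	 * Record the ring geometry. One descriptor is held back
	 * (desc_avail = desc_count - 1), the usual convention for telling
	 * a full ring apart from an empty one.
	 */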
	ring->base_addr = ifdip->idi_paddr;
	ring->descs = ifdip->idi_vaddr;
	ring->ifdip = ifdip;
	ring->desc_size = desc_size;
	ring->desc_count = desc_count;
	ring->last_count = 0;
	ring->desc_avail = ring->desc_count - 1;

	ring->size = ring->desc_count * ring->desc_size;
	ring->base_align = 512;
	ring->size_unaligned = ring->size + ring->base_align;

	return (err);

err_out_alloc:
	free(ifdip, M_DEVBUF);
	return (err);
}

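/*
 * Undo vnic_dev_alloc_desc_ring(): release the DMA memory and the
 * bookkeeping structure. Safe to call on a ring that was never allocated.
 */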
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring && ring->descs) {
		iflib_dma_free(ring->ifdip);
		free(ring->ifdip, M_DEVBUF);
		ring->descs = NULL;
	}
}

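/* Free a work queue's descriptor ring and detach its control registers. */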
void vnic_wq_free(struct vnic_wq *wq)
{
	vnic_dev_free_desc_ring(wq->vdev, &wq->ring);
	wq->ctrl = NULL;
}

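/*
 * Set up the devcmd2 work queue: locate its control registers, make sure
 * the queue is stopped, then allocate its descriptor ring.
 */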
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
    unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = 0;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (!wq->ctrl)
		return (EINVAL);
	vnic_wq_disable(wq);
	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);

	return (err);
}

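/*
 * Tear down the devcmd2 command path: stop and free its work queue and
 * results ring, then release the devcmd2 state itself.
 */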
void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	if (vdev->devcmd2) {
		vnic_wq_disable(&vdev->devcmd2->wq);
		if (vdev->devcmd2->wq_ctrl)
			vnic_wq_free(&vdev->devcmd2->wq);
		if (vdev->devcmd2->result)
			vnic_dev_free_desc_ring(vdev,
			    &vdev->devcmd2->results_ring);
		free(vdev->devcmd2, M_DEVBUF);
		vdev->devcmd2 = NULL;
	}
}

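/* Ask the firmware to deinitialize the vNIC. */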
int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return (vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait));
}

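/*
 * Program the WQ control registers with the ring address and geometry and
 * prime the fetch/posted indices. Callers supply the starting indices so a
 * queue can be brought up at a position other than zero.
 */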
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
    unsigned int fetch_index, unsigned int posted_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = wq->ring.desc_count;

	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	ENIC_BUS_WRITE_8(wq->ctrl, TX_RING_BASE, paddr);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_RING_SIZE, count);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, fetch_index);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, posted_index);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_CQ_INDEX, cq_index);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_ENABLE, error_interrupt_enable);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_INTR_OFFSET, error_interrupt_offset);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);

	wq->head_idx = fetch_index;
	wq->tail_idx = wq->head_idx;
}

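/* Initialize a WQ starting at index 0 and reset its completion tracking. */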
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
    unsigned int error_interrupt_enable,
    unsigned int error_interrupt_offset)
{
	enic_wq_init_start(wq, cq_index, 0, 0,
	    error_interrupt_enable,
	    error_interrupt_offset);
	wq->cq_pend = 0;
	wq->last_completed_index = 0;
}

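/* Read the WQ's hardware error status; non-zero indicates an error. */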
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return (ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS));
}

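/* Tell the hardware to start fetching descriptors from this WQ. */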
void vnic_wq_enable(struct vnic_wq *wq)
{
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 1);
}

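/*
 * Request that the hardware stop the WQ and poll until it acknowledges,
 * giving up after roughly 10 ms (1000 polls, 10 us apart).
 */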
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int wait;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_ENABLE, 0);

	/* Wait for HW to ACK disable request */
	for (wait = 0; wait < 1000; wait++) {
		if (!(ENIC_BUS_READ_4(wq->ctrl, TX_RUNNING)))
			return (0);
		udelay(10);
	}

	pr_err("Failed to disable WQ[%d]\n", wq->index);

	return (ETIMEDOUT);
}

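/*
 * Drop any descriptors still outstanding, reset the software indices, zero
 * the hardware indices, and clear the ring so the WQ can be reused.
 */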
void vnic_wq_clean(struct vnic_wq *wq)
{
	unsigned int to_clean = wq->tail_idx;

	while (vnic_wq_desc_used(wq) > 0) {
		to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
		wq->ring.desc_avail++;
	}

	wq->head_idx = 0;
	wq->tail_idx = 0;
	wq->last_completed_index = 0;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, 0);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, 0);
	ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);

	vnic_dev_clear_desc_ring(&wq->ring);
}