xref: /linux/drivers/scsi/snic/vnic_wq.c (revision f9bff0e31881d03badf191d3b0005839391f5f2b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2014 Cisco Systems, Inc.  All rights reserved.
3 
4 #include <linux/errno.h>
5 #include <linux/types.h>
6 #include <linux/pci.h>
7 #include <linux/delay.h>
8 #include <linux/slab.h>
9 #include "vnic_dev.h"
10 #include "vnic_wq.h"
11 
12 static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
13 	unsigned int index, enum vnic_res_type res_type)
14 {
15 	wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
16 	if (!wq->ctrl)
17 		return -EINVAL;
18 
19 	return 0;
20 }
21 
22 static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
23 	unsigned int index, unsigned int desc_count, unsigned int desc_size)
24 {
25 	return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
26 					 desc_size);
27 }
28 
29 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
30 {
31 	struct vnic_wq_buf *buf;
32 	unsigned int i, j, count = wq->ring.desc_count;
33 	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
34 
35 	for (i = 0; i < blks; i++) {
36 		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
37 		if (!wq->bufs[i]) {
38 			pr_err("Failed to alloc wq_bufs\n");
39 
40 			return -ENOMEM;
41 		}
42 	}
43 
44 	for (i = 0; i < blks; i++) {
45 		buf = wq->bufs[i];
46 		for (j = 0; j < VNIC_WQ_BUF_DFLT_BLK_ENTRIES; j++) {
47 			buf->index = i * VNIC_WQ_BUF_DFLT_BLK_ENTRIES + j;
48 			buf->desc = (u8 *)wq->ring.descs +
49 				wq->ring.desc_size * buf->index;
50 			if (buf->index + 1 == count) {
51 				buf->next = wq->bufs[0];
52 				break;
53 			} else if (j + 1 == VNIC_WQ_BUF_DFLT_BLK_ENTRIES) {
54 				buf->next = wq->bufs[i + 1];
55 			} else {
56 				buf->next = buf + 1;
57 				buf++;
58 			}
59 		}
60 	}
61 
62 	wq->to_use = wq->to_clean = wq->bufs[0];
63 
64 	return 0;
65 }
66 
67 void svnic_wq_free(struct vnic_wq *wq)
68 {
69 	struct vnic_dev *vdev;
70 	unsigned int i;
71 
72 	vdev = wq->vdev;
73 
74 	svnic_dev_free_desc_ring(vdev, &wq->ring);
75 
76 	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
77 		kfree(wq->bufs[i]);
78 		wq->bufs[i] = NULL;
79 	}
80 
81 	wq->ctrl = NULL;
82 
83 }
84 
85 int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
86 	unsigned int desc_count, unsigned int desc_size)
87 {
88 	int err;
89 
90 	wq->index = 0;
91 	wq->vdev = vdev;
92 
93 	err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
94 	if (err) {
95 		pr_err("Failed to get devcmd2 resource\n");
96 
97 		return err;
98 	}
99 
100 	svnic_wq_disable(wq);
101 
102 	err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
103 	if (err)
104 		return err;
105 
106 	return 0;
107 }
108 
109 int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
110 	unsigned int index, unsigned int desc_count, unsigned int desc_size)
111 {
112 	int err;
113 
114 	wq->index = index;
115 	wq->vdev = vdev;
116 
117 	err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
118 	if (err) {
119 		pr_err("Failed to hook WQ[%d] resource\n", index);
120 
121 		return err;
122 	}
123 
124 	svnic_wq_disable(wq);
125 
126 	err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
127 	if (err)
128 		return err;
129 
130 	err = vnic_wq_alloc_bufs(wq);
131 	if (err) {
132 		svnic_wq_free(wq);
133 
134 		return err;
135 	}
136 
137 	return 0;
138 }
139 
/*
 * Program the WQ hardware registers (ring base/size, fetch/posted
 * indices, CQ binding, error-interrupt settings) and reposition the
 * software to_use/to_clean cursors to match @fetch_index.
 * The error-status register is cleared as part of initialisation.
 */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = wq->ring.desc_count;

	/* Hand the device the DMA address of the descriptor ring. */
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(count, &wq->ctrl->ring_size);
	iowrite32(fetch_index, &wq->ctrl->fetch_index);
	iowrite32(posted_index, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);

	/* Translate fetch_index into (block, entry) within wq->bufs[]. */
	wq->to_use = wq->to_clean =
		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
162 
/*
 * Standard WQ initialisation: program the hardware with both the
 * fetch and posted indices starting at zero.
 */
void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	vnic_wq_init_start(wq, cq_index, 0, 0,
			   error_interrupt_enable,
			   error_interrupt_offset);
}
170 
171 unsigned int svnic_wq_error_status(struct vnic_wq *wq)
172 {
173 	return ioread32(&wq->ctrl->error_status);
174 }
175 
176 void svnic_wq_enable(struct vnic_wq *wq)
177 {
178 	iowrite32(1, &wq->ctrl->enable);
179 }
180 
181 int svnic_wq_disable(struct vnic_wq *wq)
182 {
183 	unsigned int wait;
184 
185 	iowrite32(0, &wq->ctrl->enable);
186 
187 	/* Wait for HW to ACK disable request */
188 	for (wait = 0; wait < 100; wait++) {
189 		if (!(ioread32(&wq->ctrl->running)))
190 			return 0;
191 		udelay(1);
192 	}
193 
194 	pr_err("Failed to disable WQ[%d]\n", wq->index);
195 
196 	return -ETIMEDOUT;
197 }
198 
/*
 * Drain every outstanding buffer on a *disabled* WQ, invoking
 * @buf_clean on each one, then reset the queue to its pristine state:
 * software cursors back to the first buffer, hardware fetch/posted
 * indices and error status zeroed, and the descriptor ring cleared.
 *
 * The queue must already be disabled (enforced by the BUG_ON); the
 * caller is responsible for having called svnic_wq_disable() first.
 */
void svnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	/* Walk the circular buffer list over all in-flight descriptors. */
	while (svnic_wq_desc_used(wq) > 0) {

		(*buf_clean)(wq, buf);

		buf = wq->to_clean = buf->next;
		/* Each cleaned descriptor becomes available again. */
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	/* Resynchronise the hardware indices with the reset software state. */
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	svnic_dev_clear_desc_ring(&wq->ring);
}
224