/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_brvar.h>

/* Amount of space available for write */
#define	VMBUS_BR_WAVAIL(r, w, z)	\
	(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))

/* Increase bufring index */
#define VMBUS_BR_IDXINC(idx, inc, sz)	(((idx) + (inc)) % (sz))
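
/*
 * Worked example (illustrative only): with a 4096-byte data area,
 * rindex = 100 and windex = 300,
 *
 *	VMBUS_BR_WAVAIL(100, 300, 4096) = 4096 - (300 - 100) = 3896
 *
 * i.e. 3896 bytes may still be written while 200 bytes are pending for
 * read.  Advancing the write index by 3900 bytes wraps it around:
 *
 *	VMBUS_BR_IDXINC(300, 3900, 4096) = (300 + 3900) % 4096 = 104
 *
 * Note that VMBUS_BR_WAVAIL() reports the full data size when
 * rindex == windex (empty ring); writers must therefore leave at least
 * one byte unused so a full ring is never mistaken for an empty one
 * (see the NOTE in vmbus_txbr_write()).
 */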

static int			vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS);
static int			vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS);
static void			vmbus_br_setup(struct vmbus_br *, void *, int);

static int
vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS)
{
	const struct vmbus_br *br = arg1;
	uint32_t rindex, windex, imask, ravail, wavail;
	char state[256];

	rindex = br->vbr_rindex;
	windex = br->vbr_windex;
	imask = br->vbr_imask;
	wavail = VMBUS_BR_WAVAIL(rindex, windex, br->vbr_dsize);
	ravail = br->vbr_dsize - wavail;

	snprintf(state, sizeof(state),
	    "rindex:%u windex:%u imask:%u ravail:%u wavail:%u",
	    rindex, windex, imask, ravail, wavail);
	return sysctl_handle_string(oidp, state, sizeof(state), req);
}

/*
 * Binary bufring states.
 */
static int
vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS)
{
#define BR_STATE_RIDX	0
#define BR_STATE_WIDX	1
#define BR_STATE_IMSK	2
#define BR_STATE_RSPC	3
#define BR_STATE_WSPC	4
#define BR_STATE_MAX	5

	const struct vmbus_br *br = arg1;
	uint32_t rindex, windex, wavail, state[BR_STATE_MAX];

	rindex = br->vbr_rindex;
	windex = br->vbr_windex;
	wavail = VMBUS_BR_WAVAIL(rindex, windex, br->vbr_dsize);

	state[BR_STATE_RIDX] = rindex;
	state[BR_STATE_WIDX] = windex;
	state[BR_STATE_IMSK] = br->vbr_imask;
	state[BR_STATE_WSPC] = wavail;
	state[BR_STATE_RSPC] = br->vbr_dsize - wavail;

	return sysctl_handle_opaque(oidp, state, sizeof(state), req);
}

void
vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx, struct sysctl_oid *br_tree,
    struct vmbus_br *br, const char *name)
{
	struct sysctl_oid *tree;
	char desc[64];

	tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(br_tree), OID_AUTO,
	    name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
	if (tree == NULL)
		return;

	snprintf(desc, sizeof(desc), "%s state", name);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    br, 0, vmbus_br_sysctl_state, "A", desc);

	snprintf(desc, sizeof(desc), "%s binary state", name);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state_bin",
	    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    br, 0, vmbus_br_sysctl_state_bin, "IU", desc);
}
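
/*
 * The sysctl nodes created above live under whatever tree the caller
 * passes in as br_tree, so the exact path is driver-dependent.  A
 * hypothetical example of inspecting an RX bufring from userland:
 *
 *	# sysctl dev.hn.0.channel.0.rx.state
 *	dev.hn.0.channel.0.rx.state: rindex:24 windex:24 imask:0 \
 *	    ravail:0 wavail:61440
 *
 * "state_bin" exports the same information as an array of five
 * uint32_t in the order rindex, windex, imask, ravail, wavail (see the
 * BR_STATE_* indices above), which is easier to consume
 * programmatically than the formatted string.
 */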

void
vmbus_rxbr_intr_mask(struct vmbus_rxbr *rbr)
{
	rbr->rxbr_imask = 1;
	mb();
}

static __inline uint32_t
vmbus_rxbr_avail(const struct vmbus_rxbr *rbr)
{
	uint32_t rindex, windex;

	/* Get snapshot */
	rindex = rbr->rxbr_rindex;
	windex = rbr->rxbr_windex;

	return (rbr->rxbr_dsize -
	    VMBUS_BR_WAVAIL(rindex, windex, rbr->rxbr_dsize));
}

uint32_t
vmbus_rxbr_intr_unmask(struct vmbus_rxbr *rbr)
{
	rbr->rxbr_imask = 0;
	mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming channel packets.
	 */
	return vmbus_rxbr_avail(rbr);
}
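
/*
 * Consumer-side sketch (the drain helper is hypothetical, not part of
 * this file) showing why vmbus_rxbr_intr_unmask() returns the amount
 * of data left in the ring: packets may arrive between the final read
 * and the unmask, so the caller must re-check and, if it raced, go
 * back to polling.
 *
 *	for (;;) {
 *		drain_ring(rbr);		// hypothetical helper
 *		if (vmbus_rxbr_intr_unmask(rbr) == 0)
 *			break;		// truly empty; wait for interrupt
 *		vmbus_rxbr_intr_mask(rbr);	// raced; poll again
 *	}
 */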

static void
vmbus_br_setup(struct vmbus_br *br, void *buf, int blen)
{
	br->vbr = buf;
	br->vbr_dsize = blen - sizeof(struct vmbus_bufring);
}
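
/*
 * Sizing note (assuming struct vmbus_bufring pads its header out to
 * one 4KB page before the data area, as its layout in vmbus_reg.h
 * suggests): handing vmbus_br_setup() a 16-page (65536-byte) buffer
 * leaves
 *
 *	vbr_dsize = 65536 - 4096 = 61440
 *
 * bytes of usable ring space.
 */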

void
vmbus_rxbr_init(struct vmbus_rxbr *rbr)
{
	mtx_init(&rbr->rxbr_lock, "vmbus_rxbr", NULL, MTX_SPIN);
}

void
vmbus_rxbr_deinit(struct vmbus_rxbr *rbr)
{
	mtx_destroy(&rbr->rxbr_lock);
}

void
vmbus_rxbr_setup(struct vmbus_rxbr *rbr, void *buf, int blen)
{
	vmbus_br_setup(&rbr->rxbr, buf, blen);
}

void
vmbus_txbr_init(struct vmbus_txbr *tbr)
{
	mtx_init(&tbr->txbr_lock, "vmbus_txbr", NULL, MTX_SPIN);
}

void
vmbus_txbr_deinit(struct vmbus_txbr *tbr)
{
	mtx_destroy(&tbr->txbr_lock);
}

void
vmbus_txbr_setup(struct vmbus_txbr *tbr, void *buf, int blen)
{
	vmbus_br_setup(&tbr->txbr, buf, blen);
}

/*
 * When we write to the ring buffer, check if the host needs to be
 * signaled.
 *
 * The contract:
 * - The host guarantees that while it is draining the TX bufring,
 *   it will set the br_imask to indicate it does not need to be
 *   interrupted when new data are added.
 * - The host guarantees that it will completely drain the TX bufring
 *   before exiting the read loop.  Further, once the TX bufring is
 *   empty, it will clear the br_imask and re-check to see if new
 *   data have arrived.
 */
static __inline boolean_t
vmbus_txbr_need_signal(const struct vmbus_txbr *tbr, uint32_t old_windex)
{
	mb();
	if (tbr->txbr_imask)
		return (FALSE);

	__compiler_membar();

	/*
	 * This is the only case we need to signal: the ring
	 * transitions from being empty to non-empty.
	 */
	if (old_windex == tbr->txbr_rindex)
		return (TRUE);

	return (FALSE);
}
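
/*
 * Illustration of the empty-to-non-empty test above: suppose
 * rindex == windex == 512 (the ring is empty and the host has left its
 * read loop), and a writer then appends a 200-byte packet plus the
 * 8-byte saved offset, moving windex to 720.  old_windex (512) equals
 * txbr_rindex (512), so the host must be signaled.  Had the host still
 * been draining the ring, either txbr_imask would be set or rindex
 * would already have moved past 512, and no signal would be needed:
 * the host is guaranteed to notice the new data before it exits its
 * read loop.
 */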

static __inline uint32_t
vmbus_txbr_avail(const struct vmbus_txbr *tbr)
{
	uint32_t rindex, windex;

	/* Get snapshot */
	rindex = tbr->txbr_rindex;
	windex = tbr->txbr_windex;

	return VMBUS_BR_WAVAIL(rindex, windex, tbr->txbr_dsize);
}

static __inline uint32_t
vmbus_txbr_copyto(const struct vmbus_txbr *tbr, uint32_t windex,
    const void *src0, uint32_t cplen)
{
	const uint8_t *src = src0;
	uint8_t *br_data = tbr->txbr_data;
	uint32_t br_dsize = tbr->txbr_dsize;

	if (cplen > br_dsize - windex) {
		uint32_t fraglen = br_dsize - windex;

		/* Wrap-around detected */
		memcpy(br_data + windex, src, fraglen);
		memcpy(br_data, src + fraglen, cplen - fraglen);
	} else {
		memcpy(br_data + windex, src, cplen);
	}
	return VMBUS_BR_IDXINC(windex, cplen, br_dsize);
}
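
/*
 * Wrap-around example for vmbus_txbr_copyto(): with br_dsize = 4096,
 * windex = 4000 and cplen = 200, the first fraglen = 96 bytes land at
 * offsets 4000..4095 and the remaining 104 bytes at offsets 0..103;
 * the returned index is VMBUS_BR_IDXINC(4000, 200, 4096) = 104.
 */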

/*
 * Write a scattered channel packet to the TX bufring.
 *
 * The offset of this channel packet is written as a 64-bit value
 * immediately after the channel packet.
 */
int
vmbus_txbr_write(struct vmbus_txbr *tbr, const struct iovec iov[], int iovlen,
    boolean_t *need_sig)
{
	uint32_t old_windex, windex, total;
	uint64_t save_windex;
	int i;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);

	mtx_lock_spin(&tbr->txbr_lock);

	/*
	 * NOTE:
	 * If this write would make br_windex equal to br_rindex,
	 * i.e. the available space for write is exactly the write size,
	 * we must not do it, since br_windex == br_rindex means that
	 * the bufring is empty.
	 */
	if (vmbus_txbr_avail(tbr) <= total) {
		mtx_unlock_spin(&tbr->txbr_lock);
		return (EAGAIN);
	}

	/* Save br_windex for later use */
	old_windex = tbr->txbr_windex;

	/*
	 * Copy the scattered channel packet to the TX bufring.
	 */
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
		    iov[i].iov_base, iov[i].iov_len);
	}

	/*
	 * Set the offset of the current channel packet.
	 */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
	    sizeof(save_windex));

	/*
	 * Update the write index _after_ the channel packet
	 * is copied.
	 */
	__compiler_membar();
	tbr->txbr_windex = windex;

	mtx_unlock_spin(&tbr->txbr_lock);

	*need_sig = vmbus_txbr_need_signal(tbr, old_windex);

	return (0);
}
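
/*
 * Producer-side sketch (the packet header type and the signalling
 * helper below are hypothetical stand-ins, not part of this file):
 *
 *	struct hypothetical_pkt_hdr hdr;
 *	struct iovec iov[2];
 *	boolean_t need_sig = FALSE;
 *	int error;
 *
 *	iov[0].iov_base = &hdr;
 *	iov[0].iov_len = sizeof(hdr);
 *	iov[1].iov_base = payload;
 *	iov[1].iov_len = payload_len;
 *
 *	error = vmbus_txbr_write(tbr, iov, 2, &need_sig);
 *	if (error == EAGAIN) {
 *		// Not enough free space; retry after the host drains.
 *	} else if (error == 0 && need_sig) {
 *		signal_host(chan);	// hypothetical; the ring went
 *					// from empty to non-empty
 *	}
 *
 * vmbus_txbr_write() appends the 8-byte saved write offset itself, so
 * callers only supply the packet header and payload.
 */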

static __inline uint32_t
vmbus_rxbr_copyfrom(const struct vmbus_rxbr *rbr, uint32_t rindex,
    void *dst0, int cplen)
{
	uint8_t *dst = dst0;
	const uint8_t *br_data = rbr->rxbr_data;
	uint32_t br_dsize = rbr->rxbr_dsize;

	if (cplen > br_dsize - rindex) {
		uint32_t fraglen = br_dsize - rindex;

		/* Wrap-around detected. */
		memcpy(dst, br_data + rindex, fraglen);
		memcpy(dst + fraglen, br_data, cplen - fraglen);
	} else {
		memcpy(dst, br_data + rindex, cplen);
	}
	return VMBUS_BR_IDXINC(rindex, cplen, br_dsize);
}

int
vmbus_rxbr_peek(struct vmbus_rxbr *rbr, void *data, int dlen)
{
	mtx_lock_spin(&rbr->rxbr_lock);

	/*
	 * At least the requested data and the 64-bit channel packet
	 * offset must be available.
	 */
	if (vmbus_rxbr_avail(rbr) < dlen + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}
	vmbus_rxbr_copyfrom(rbr, rbr->rxbr_rindex, data, dlen);

	mtx_unlock_spin(&rbr->rxbr_lock);

	return (0);
}

/*
 * NOTE:
 * We assume (dlen + skip) == sizeof(channel packet).
 */
int
vmbus_rxbr_read(struct vmbus_rxbr *rbr, void *data, int dlen, uint32_t skip)
{
	uint32_t rindex, br_dsize = rbr->rxbr_dsize;

	KASSERT(dlen + skip > 0, ("invalid dlen %d, offset %u", dlen, skip));

	mtx_lock_spin(&rbr->rxbr_lock);

	if (vmbus_rxbr_avail(rbr) < dlen + skip + sizeof(uint64_t)) {
		mtx_unlock_spin(&rbr->rxbr_lock);
		return (EAGAIN);
	}

	/*
	 * Copy the channel packet from the RX bufring.
	 */
	rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex, skip, br_dsize);
	rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);

	/*
	 * Discard this channel packet's 64-bit offset, which is of no
	 * use to us.
	 */
	rindex = VMBUS_BR_IDXINC(rindex, sizeof(uint64_t), br_dsize);

	/*
	 * Update the read index _after_ the channel packet is fetched.
	 */
	__compiler_membar();
	rbr->rxbr_rindex = rindex;

	mtx_unlock_spin(&rbr->rxbr_lock);

	return (0);
}
405