1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/drivers/char/xillybus/xillybus_core.c
4  *
5  * Copyright 2011 Xillybus Ltd, http://xillybus.com
6  *
7  * Driver for the Xillybus FPGA/host framework.
8  *
9  * This driver interfaces with a special IP core in an FPGA, setting up
10  * a pipe between a hardware FIFO in the programmable logic and a device
11  * file in the host. The number of such pipes and their attributes are
12  * set up on the logic. This driver detects these automatically and
13  * creates the device files accordingly.
14  */
15 
16 #include <linux/list.h>
17 #include <linux/device.h>
18 #include <linux/module.h>
19 #include <linux/io.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/sched.h>
23 #include <linux/fs.h>
24 #include <linux/spinlock.h>
25 #include <linux/mutex.h>
26 #include <linux/crc32.h>
27 #include <linux/poll.h>
28 #include <linux/delay.h>
29 #include <linux/slab.h>
30 #include <linux/workqueue.h>
31 #include "xillybus.h"
32 #include "xillybus_class.h"
33 
34 MODULE_DESCRIPTION("Xillybus core functions");
35 MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
36 MODULE_ALIAS("xillybus_core");
37 MODULE_LICENSE("GPL v2");
38 
39 /* General timeout is 100 ms, rx timeout is 10 ms */
40 #define XILLY_RX_TIMEOUT (10*HZ/1000)
41 #define XILLY_TIMEOUT (100*HZ/1000)
42 
43 #define fpga_msg_ctrl_reg              0x0008
44 #define fpga_dma_control_reg           0x0020
45 #define fpga_dma_bufno_reg             0x0024
46 #define fpga_dma_bufaddr_lowaddr_reg   0x0028
47 #define fpga_dma_bufaddr_highaddr_reg  0x002c
48 #define fpga_buf_ctrl_reg              0x0030
49 #define fpga_buf_offset_reg            0x0034
50 #define fpga_endian_reg                0x0040
51 
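/*
 * Opcodes of the messages the FPGA sends to the host through the message
 * buffer; they are decoded and dispatched in xillybus_isr() below.
 */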
52 #define XILLYMSG_OPCODE_RELEASEBUF 1
53 #define XILLYMSG_OPCODE_QUIESCEACK 2
54 #define XILLYMSG_OPCODE_FIFOEOF 3
55 #define XILLYMSG_OPCODE_FATAL_ERROR 4
56 #define XILLYMSG_OPCODE_NONEMPTY 5
57 
58 static const char xillyname[] = "xillybus";
59 
60 static struct workqueue_struct *xillybus_wq;
61 
62 /*
63  * Locking scheme: Mutexes protect invocations of character device methods.
64  * If both locks are taken, wr_mutex is taken first, rd_mutex second.
65  *
66  * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
67  * buffers' end_offset fields against changes made by IRQ handler (and in
68  * theory, other file request handlers, but the mutex handles that). Nothing
69  * else.
70  * These spinlocks are held only for short, direct memory manipulations.
71  * Needless to say, no mutex may be taken while a spinlock is held.
72  *
73  * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
74  *
75  * register_mutex is endpoint-specific, and is held when non-atomic
76  * register operations are performed. wr_mutex and rd_mutex may be
77  * held when register_mutex is taken, but none of the spinlocks. Note that
78  * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
79  * which are unrelated to buf_offset_reg, since they are harmless.
80  *
81  * Blocking on the wait queues is allowed with mutexes held, but not with
82  * spinlocks.
83  *
84  * Only interruptible blocking is allowed on mutexes and wait queues.
85  *
86  * All in all, the locking order goes (with skips allowed, of course):
87  * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
88  */
89 
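/*
 * Illustrative sketch only (no such helper exists in this driver): a code
 * path that performs a non-atomic register operation and then updates
 * write-side channel state would nest the locks in the documented order:
 *
 *	mutex_lock(&channel->wr_mutex);
 *	mutex_lock(&channel->endpoint->register_mutex);
 *	... iowrite32() calls to offset/control registers ...
 *	mutex_unlock(&channel->endpoint->register_mutex);
 *	spin_lock_irqsave(&channel->wr_spinlock, flags);
 *	... brief updates of wr_* fields, no sleeping ...
 *	spin_unlock_irqrestore(&channel->wr_spinlock, flags);
 *	mutex_unlock(&channel->wr_mutex);
 */
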
90 static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
91 {
92 	int opcode;
93 	int msg_channel, msg_bufno, msg_data, msg_dir;
94 
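	/*
	 * Each message occupies two 32-bit words, decoded below: word 0
	 * holds the opcode (bits 31:24), buffer number (bits 21:12),
	 * channel number (bits 11:1) and direction (bit 0); word 1 holds
	 * the data payload in its lower 28 bits. The ISR additionally uses
	 * bit 22 of word 0 as a "last message" flag and bits 31:28 of
	 * word 1 as a rolling counter.
	 */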
95 	opcode = (buf[0] >> 24) & 0xff;
96 	msg_dir = buf[0] & 1;
97 	msg_channel = (buf[0] >> 1) & 0x7ff;
98 	msg_bufno = (buf[0] >> 12) & 0x3ff;
99 	msg_data = buf[1] & 0xfffffff;
100 
101 	dev_warn(endpoint->dev,
102 		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
103 		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
104 }
105 
106 /*
107  * xillybus_isr assumes the interrupt is allocated exclusively to it,
108  * which is the natural case with MSI and several other hardware-oriented
109  * interrupts. Sharing is not allowed.
110  */
111 
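/*
 * A bus-specific probe function (outside this file) is expected to request
 * the interrupt without IRQF_SHARED. A minimal sketch, assuming "irq" and
 * "endpoint" come from that probe code:
 *
 *	rc = devm_request_irq(dev, irq, xillybus_isr, 0, xillyname, endpoint);
 */
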
112 irqreturn_t xillybus_isr(int irq, void *data)
113 {
114 	struct xilly_endpoint *ep = data;
115 	u32 *buf;
116 	unsigned int buf_size;
117 	int i;
118 	int opcode;
119 	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
120 	struct xilly_channel *channel;
121 
122 	buf = ep->msgbuf_addr;
123 	buf_size = ep->msg_buf_size/sizeof(u32);
124 
125 	dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
126 				ep->msg_buf_size, DMA_FROM_DEVICE);
127 
128 	for (i = 0; i < buf_size; i += 2) {
129 		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
130 			malformed_message(ep, &buf[i]);
131 			dev_warn(ep->dev,
132 				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
133 				 ((buf[i+1] >> 28) & 0xf),
134 				 ep->msg_counter,
135 				 i/2);
136 
137 			if (++ep->failed_messages > 10) {
138 				dev_err(ep->dev,
139 					"Lost sync with interrupt messages. Stopping.\n");
140 			} else {
141 				dma_sync_single_for_device(ep->dev,
142 							   ep->msgbuf_dma_addr,
143 							   ep->msg_buf_size,
144 							   DMA_FROM_DEVICE);
145 
146 				iowrite32(0x01,  /* Message NACK */
147 					  ep->registers + fpga_msg_ctrl_reg);
148 			}
149 			return IRQ_HANDLED;
150 		} else if (buf[i] & (1 << 22)) /* Last message */
151 			break;
152 	}
153 
154 	if (i >= buf_size) {
155 		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
156 		return IRQ_HANDLED;
157 	}
158 
159 	buf_size = i + 2;
160 
161 	for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
162 		opcode = (buf[i] >> 24) & 0xff;
163 
164 		msg_dir = buf[i] & 1;
165 		msg_channel = (buf[i] >> 1) & 0x7ff;
166 		msg_bufno = (buf[i] >> 12) & 0x3ff;
167 		msg_data = buf[i+1] & 0xfffffff;
168 
169 		switch (opcode) {
170 		case XILLYMSG_OPCODE_RELEASEBUF:
171 			if ((msg_channel > ep->num_channels) ||
172 			    (msg_channel == 0)) {
173 				malformed_message(ep, &buf[i]);
174 				break;
175 			}
176 
177 			channel = ep->channels[msg_channel];
178 
179 			if (msg_dir) { /* Write channel */
180 				if (msg_bufno >= channel->num_wr_buffers) {
181 					malformed_message(ep, &buf[i]);
182 					break;
183 				}
184 				spin_lock(&channel->wr_spinlock);
185 				channel->wr_buffers[msg_bufno]->end_offset =
186 					msg_data;
187 				channel->wr_fpga_buf_idx = msg_bufno;
188 				channel->wr_empty = 0;
189 				channel->wr_sleepy = 0;
190 				spin_unlock(&channel->wr_spinlock);
191 
192 				wake_up_interruptible(&channel->wr_wait);
193 
194 			} else {
195 				/* Read channel */
196 
197 				if (msg_bufno >= channel->num_rd_buffers) {
198 					malformed_message(ep, &buf[i]);
199 					break;
200 				}
201 
202 				spin_lock(&channel->rd_spinlock);
203 				channel->rd_fpga_buf_idx = msg_bufno;
204 				channel->rd_full = 0;
205 				spin_unlock(&channel->rd_spinlock);
206 
207 				wake_up_interruptible(&channel->rd_wait);
208 				if (!channel->rd_synchronous)
209 					queue_delayed_work(
210 						xillybus_wq,
211 						&channel->rd_workitem,
212 						XILLY_RX_TIMEOUT);
213 			}
214 
215 			break;
216 		case XILLYMSG_OPCODE_NONEMPTY:
217 			if ((msg_channel > ep->num_channels) ||
218 			    (msg_channel == 0) || (!msg_dir) ||
219 			    !ep->channels[msg_channel]->wr_supports_nonempty) {
220 				malformed_message(ep, &buf[i]);
221 				break;
222 			}
223 
224 			channel = ep->channels[msg_channel];
225 
226 			if (msg_bufno >= channel->num_wr_buffers) {
227 				malformed_message(ep, &buf[i]);
228 				break;
229 			}
230 			spin_lock(&channel->wr_spinlock);
231 			if (msg_bufno == channel->wr_host_buf_idx)
232 				channel->wr_ready = 1;
233 			spin_unlock(&channel->wr_spinlock);
234 
235 			wake_up_interruptible(&channel->wr_ready_wait);
236 
237 			break;
238 		case XILLYMSG_OPCODE_QUIESCEACK:
239 			ep->idtlen = msg_data;
240 			wake_up_interruptible(&ep->ep_wait);
241 
242 			break;
243 		case XILLYMSG_OPCODE_FIFOEOF:
244 			if ((msg_channel > ep->num_channels) ||
245 			    (msg_channel == 0) || (!msg_dir) ||
246 			    !ep->channels[msg_channel]->num_wr_buffers) {
247 				malformed_message(ep, &buf[i]);
248 				break;
249 			}
250 			channel = ep->channels[msg_channel];
251 			spin_lock(&channel->wr_spinlock);
252 			channel->wr_eof = msg_bufno;
253 			channel->wr_sleepy = 0;
254 
255 			channel->wr_hangup = channel->wr_empty &&
256 				(channel->wr_host_buf_idx == msg_bufno);
257 
258 			spin_unlock(&channel->wr_spinlock);
259 
260 			wake_up_interruptible(&channel->wr_wait);
261 
262 			break;
263 		case XILLYMSG_OPCODE_FATAL_ERROR:
264 			ep->fatal_error = 1;
265 			wake_up_interruptible(&ep->ep_wait); /* For select() */
266 			dev_err(ep->dev,
267 				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
268 			break;
269 		default:
270 			malformed_message(ep, &buf[i]);
271 			break;
272 		}
273 	}
274 
275 	dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
276 				   ep->msg_buf_size, DMA_FROM_DEVICE);
277 
278 	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
279 	ep->failed_messages = 0;
280 	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */
281 
282 	return IRQ_HANDLED;
283 }
284 EXPORT_SYMBOL(xillybus_isr);
285 
286 /*
287  * A few trivial memory management functions.
288  * NOTE: These functions are used only on probe and remove, and therefore
289  * no locks are applied!
290  */
291 
292 static void xillybus_autoflush(struct work_struct *work);
293 
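/*
 * DMA buffers are carved out of larger page allocations ("salamis"):
 * salami points at the unused remainder of the current allocation,
 * left_of_salami counts its remaining bytes, nbuffer is the next buffer
 * index to announce to the FPGA, and regdirection carries the direction
 * bit (0x80000000 for FPGA-to-host buffers) written to fpga_dma_bufno_reg.
 */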
294 struct xilly_alloc_state {
295 	void *salami;
296 	int left_of_salami;
297 	int nbuffer;
298 	enum dma_data_direction direction;
299 	u32 regdirection;
300 };
301 
302 static void xilly_unmap(void *ptr)
303 {
304 	struct xilly_mapping *data = ptr;
305 
306 	dma_unmap_single(data->device, data->dma_addr,
307 			 data->size, data->direction);
308 
309 	kfree(ptr);
310 }
311 
312 static int xilly_map_single(struct xilly_endpoint *ep,
313 			    void *ptr,
314 			    size_t size,
315 			    int direction,
316 			    dma_addr_t *ret_dma_handle
317 	)
318 {
319 	dma_addr_t addr;
320 	struct xilly_mapping *this;
321 
322 	this = kzalloc(sizeof(*this), GFP_KERNEL);
323 	if (!this)
324 		return -ENOMEM;
325 
326 	addr = dma_map_single(ep->dev, ptr, size, direction);
327 
328 	if (dma_mapping_error(ep->dev, addr)) {
329 		kfree(this);
330 		return -ENODEV;
331 	}
332 
333 	this->device = ep->dev;
334 	this->dma_addr = addr;
335 	this->size = size;
336 	this->direction = direction;
337 
338 	*ret_dma_handle = addr;
339 
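	/*
	 * Register xilly_unmap() as a devres action, so the DMA mapping is
	 * released automatically when the device is torn down (or right
	 * away if the registration itself fails).
	 */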
340 	return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
341 }
342 
343 static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
344 				 struct xilly_alloc_state *s,
345 				 struct xilly_buffer **buffers,
346 				 int bufnum, int bytebufsize)
347 {
348 	int i, rc;
349 	dma_addr_t dma_addr;
350 	struct device *dev = ep->dev;
351 	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
352 
353 	if (buffers) { /* Not the message buffer */
354 		this_buffer = devm_kcalloc(dev, bufnum,
355 					   sizeof(struct xilly_buffer),
356 					   GFP_KERNEL);
357 		if (!this_buffer)
358 			return -ENOMEM;
359 	}
360 
361 	for (i = 0; i < bufnum; i++) {
362 		/*
363 		 * Buffers are expected in descending size order, so there
364 		 * is either enough space for this buffer or none at all.
365 		 */
366 
367 		if ((s->left_of_salami < bytebufsize) &&
368 		    (s->left_of_salami > 0)) {
369 			dev_err(ep->dev,
370 				"Corrupt buffer allocation in IDT. Aborting.\n");
371 			return -ENODEV;
372 		}
373 
374 		if (s->left_of_salami == 0) {
375 			int allocorder, allocsize;
376 
377 			allocsize = PAGE_SIZE;
378 			allocorder = 0;
379 			while (bytebufsize > allocsize) {
380 				allocsize *= 2;
381 				allocorder++;
382 			}
383 
384 			s->salami = (void *) devm_get_free_pages(
385 				dev,
386 				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
387 				allocorder);
388 			if (!s->salami)
389 				return -ENOMEM;
390 
391 			s->left_of_salami = allocsize;
392 		}
393 
394 		rc = xilly_map_single(ep, s->salami,
395 				      bytebufsize, s->direction,
396 				      &dma_addr);
397 		if (rc)
398 			return rc;
399 
400 		iowrite32((u32) (dma_addr & 0xffffffff),
401 			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
402 		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
403 			  ep->registers + fpga_dma_bufaddr_highaddr_reg);
404 
405 		if (buffers) { /* Not the message buffer */
406 			this_buffer->addr = s->salami;
407 			this_buffer->dma_addr = dma_addr;
408 			buffers[i] = this_buffer++;
409 
410 			iowrite32(s->regdirection | s->nbuffer++,
411 				  ep->registers + fpga_dma_bufno_reg);
412 		} else {
413 			ep->msgbuf_addr = s->salami;
414 			ep->msgbuf_dma_addr = dma_addr;
415 			ep->msg_buf_size = bytebufsize;
416 
417 			iowrite32(s->regdirection,
418 				  ep->registers + fpga_dma_bufno_reg);
419 		}
420 
421 		s->left_of_salami -= bytebufsize;
422 		s->salami += bytebufsize;
423 	}
424 	return 0;
425 }
426 
427 static int xilly_setupchannels(struct xilly_endpoint *ep,
428 			       unsigned char *chandesc,
429 			       int entries)
430 {
431 	struct device *dev = ep->dev;
432 	int i, entry, rc;
433 	struct xilly_channel *channel;
434 	int channelnum, bufnum, bufsize, format, is_writebuf;
435 	int bytebufsize;
436 	int synchronous, allowpartial, exclusive_open, seekable;
437 	int supports_nonempty;
438 	int msg_buf_done = 0;
439 
440 	struct xilly_alloc_state rd_alloc = {
441 		.salami = NULL,
442 		.left_of_salami = 0,
443 		.nbuffer = 1,
444 		.direction = DMA_TO_DEVICE,
445 		.regdirection = 0,
446 	};
447 
448 	struct xilly_alloc_state wr_alloc = {
449 		.salami = NULL,
450 		.left_of_salami = 0,
451 		.nbuffer = 1,
452 		.direction = DMA_FROM_DEVICE,
453 		.regdirection = 0x80000000,
454 	};
455 
456 	channel = devm_kcalloc(dev, ep->num_channels,
457 			       sizeof(struct xilly_channel), GFP_KERNEL);
458 	if (!channel)
459 		return -ENOMEM;
460 
461 	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
462 				    sizeof(struct xilly_channel *),
463 				    GFP_KERNEL);
464 	if (!ep->channels)
465 		return -ENOMEM;
466 
467 	ep->channels[0] = NULL; /* Channel 0 is message buf. */
468 
469 	/* Initialize all channels with defaults */
470 
471 	for (i = 1; i <= ep->num_channels; i++) {
472 		channel->wr_buffers = NULL;
473 		channel->rd_buffers = NULL;
474 		channel->num_wr_buffers = 0;
475 		channel->num_rd_buffers = 0;
476 		channel->wr_fpga_buf_idx = -1;
477 		channel->wr_host_buf_idx = 0;
478 		channel->wr_host_buf_pos = 0;
479 		channel->wr_empty = 1;
480 		channel->wr_ready = 0;
481 		channel->wr_sleepy = 1;
482 		channel->rd_fpga_buf_idx = 0;
483 		channel->rd_host_buf_idx = 0;
484 		channel->rd_host_buf_pos = 0;
485 		channel->rd_full = 0;
486 		channel->wr_ref_count = 0;
487 		channel->rd_ref_count = 0;
488 
489 		spin_lock_init(&channel->wr_spinlock);
490 		spin_lock_init(&channel->rd_spinlock);
491 		mutex_init(&channel->wr_mutex);
492 		mutex_init(&channel->rd_mutex);
493 		init_waitqueue_head(&channel->rd_wait);
494 		init_waitqueue_head(&channel->wr_wait);
495 		init_waitqueue_head(&channel->wr_ready_wait);
496 
497 		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);
498 
499 		channel->endpoint = ep;
500 		channel->chan_num = i;
501 
502 		channel->log2_element_size = 0;
503 
504 		ep->channels[i] = channel++;
505 	}
506 
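	/*
	 * Each IDT entry is four bytes, decoded below: byte 0 carries the
	 * direction bit and the low channel number bits, byte 1 the high
	 * channel bits plus the format, allowpartial and synchronous flags,
	 * byte 2 the log2 buffer size plus the nonempty/seekable/exclusive-
	 * open flags, and byte 3 the log2 number of buffers.
	 */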
507 	for (entry = 0; entry < entries; entry++, chandesc += 4) {
508 		struct xilly_buffer **buffers = NULL;
509 
510 		is_writebuf = chandesc[0] & 0x01;
511 		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
512 		format = (chandesc[1] >> 4) & 0x03;
513 		allowpartial = (chandesc[1] >> 6) & 0x01;
514 		synchronous = (chandesc[1] >> 7) & 0x01;
515 		bufsize = 1 << (chandesc[2] & 0x1f);
516 		bufnum = 1 << (chandesc[3] & 0x0f);
517 		exclusive_open = (chandesc[2] >> 7) & 0x01;
518 		seekable = (chandesc[2] >> 6) & 0x01;
519 		supports_nonempty = (chandesc[2] >> 5) & 0x01;
520 
521 		if ((channelnum > ep->num_channels) ||
522 		    ((channelnum == 0) && !is_writebuf)) {
523 			dev_err(ep->dev,
524 				"IDT requests channel out of range. Aborting.\n");
525 			return -ENODEV;
526 		}
527 
528 		channel = ep->channels[channelnum]; /* NULL for msg channel */
529 
530 		if (!is_writebuf || channelnum > 0) {
531 			channel->log2_element_size = ((format > 2) ?
532 						      2 : format);
533 
534 			bytebufsize = bufsize *
535 				(1 << channel->log2_element_size);
536 
537 			buffers = devm_kcalloc(dev, bufnum,
538 					       sizeof(struct xilly_buffer *),
539 					       GFP_KERNEL);
540 			if (!buffers)
541 				return -ENOMEM;
542 		} else {
543 			bytebufsize = bufsize << 2;
544 		}
545 
546 		if (!is_writebuf) {
547 			channel->num_rd_buffers = bufnum;
548 			channel->rd_buf_size = bytebufsize;
549 			channel->rd_allow_partial = allowpartial;
550 			channel->rd_synchronous = synchronous;
551 			channel->rd_exclusive_open = exclusive_open;
552 			channel->seekable = seekable;
553 
554 			channel->rd_buffers = buffers;
555 			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
556 						   bufnum, bytebufsize);
557 		} else if (channelnum > 0) {
558 			channel->num_wr_buffers = bufnum;
559 			channel->wr_buf_size = bytebufsize;
560 
561 			channel->seekable = seekable;
562 			channel->wr_supports_nonempty = supports_nonempty;
563 
564 			channel->wr_allow_partial = allowpartial;
565 			channel->wr_synchronous = synchronous;
566 			channel->wr_exclusive_open = exclusive_open;
567 
568 			channel->wr_buffers = buffers;
569 			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
570 						   bufnum, bytebufsize);
571 		} else {
572 			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
573 						   bufnum, bytebufsize);
574 			msg_buf_done++;
575 		}
576 
577 		if (rc)
578 			return -ENOMEM;
579 	}
580 
581 	if (!msg_buf_done) {
582 		dev_err(ep->dev,
583 			"Corrupt IDT: No message buffer. Aborting.\n");
584 		return -ENODEV;
585 	}
586 	return 0;
587 }
588 
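/*
 * The IDT, as parsed below, starts with a version byte, followed by a list
 * of NUL-terminated device names ending with an empty string, followed by
 * the four-byte channel descriptor entries handed to xilly_setupchannels().
 */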
589 static int xilly_scan_idt(struct xilly_endpoint *endpoint,
590 			  struct xilly_idt_handle *idt_handle)
591 {
592 	int count = 0;
593 	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
594 	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
595 	unsigned char *scan;
596 	int len;
597 
598 	scan = idt + 1;
599 	idt_handle->names = scan;
600 
601 	while ((scan <= end_of_idt) && *scan) {
602 		while ((scan <= end_of_idt) && *scan++)
603 			/* Do nothing, just scan thru string */;
604 		count++;
605 	}
606 
607 	idt_handle->names_len = scan - idt_handle->names;
608 
609 	scan++;
610 
611 	if (scan > end_of_idt) {
612 		dev_err(endpoint->dev,
613 			"IDT device name list overflow. Aborting.\n");
614 		return -ENODEV;
615 	}
616 	idt_handle->chandesc = scan;
617 
618 	len = endpoint->idtlen - (3 + ((int) (scan - idt)));
619 
620 	if (len & 0x03) {
621 		dev_err(endpoint->dev,
622 			"Corrupt IDT device name list. Aborting.\n");
623 		return -ENODEV;
624 	}
625 
626 	idt_handle->entries = len >> 2;
627 	endpoint->num_channels = count;
628 
629 	return 0;
630 }
631 
632 static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
633 {
634 	struct xilly_channel *channel;
635 	unsigned char *version;
636 	long t;
637 
638 	channel = endpoint->channels[1]; /* This should be generated ad-hoc */
639 
640 	channel->wr_sleepy = 1;
641 
642 	iowrite32(1 |
643 		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
644 		  endpoint->registers + fpga_buf_ctrl_reg);
645 
646 	t = wait_event_interruptible_timeout(channel->wr_wait,
647 					     (!channel->wr_sleepy),
648 					     XILLY_TIMEOUT);
649 
650 	if (t <= 0) {
651 		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");
652 
653 		if (endpoint->fatal_error)
654 			return -EIO;
655 
656 		return -ENODEV;
657 	}
658 
659 	dma_sync_single_for_cpu(channel->endpoint->dev,
660 				channel->wr_buffers[0]->dma_addr,
661 				channel->wr_buf_size,
662 				DMA_FROM_DEVICE);
663 
664 	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
665 		dev_err(endpoint->dev,
666 			"IDT length mismatch (%d != %d). Aborting.\n",
667 			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
668 		return -ENODEV;
669 	}
670 
671 	if (crc32_le(~0, channel->wr_buffers[0]->addr,
672 		     endpoint->idtlen+1) != 0) {
673 		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
674 		return -ENODEV;
675 	}
676 
677 	version = channel->wr_buffers[0]->addr;
678 
679 	/* Check version number. Reject anything above 0x82. */
680 	if (*version > 0x82) {
681 		dev_err(endpoint->dev,
682 			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
683 			*version);
684 		return -ENODEV;
685 	}
686 
687 	return 0;
688 }
689 
690 static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
691 			     size_t count, loff_t *f_pos)
692 {
693 	ssize_t rc;
694 	unsigned long flags;
695 	int bytes_done = 0;
696 	int no_time_left = 0;
697 	long deadline, left_to_sleep;
698 	struct xilly_channel *channel = filp->private_data;
699 
700 	int empty, reached_eof, exhausted, ready;
701 	/* Initializations are there only to silence warnings */
702 
703 	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
704 	int waiting_bufidx;
705 
706 	if (channel->endpoint->fatal_error)
707 		return -EIO;
708 
709 	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
710 
711 	rc = mutex_lock_interruptible(&channel->wr_mutex);
712 	if (rc)
713 		return rc;
714 
715 	while (1) { /* Note that we may drop mutex within this loop */
716 		int bytes_to_do = count - bytes_done;
717 
718 		spin_lock_irqsave(&channel->wr_spinlock, flags);
719 
720 		empty = channel->wr_empty;
721 		ready = !empty || channel->wr_ready;
722 
723 		if (!empty) {
724 			bufidx = channel->wr_host_buf_idx;
725 			bufpos = channel->wr_host_buf_pos;
726 			howmany = ((channel->wr_buffers[bufidx]->end_offset
727 				    + 1) << channel->log2_element_size)
728 				- bufpos;
729 
730 			/* Update wr_host_* to its post-operation state */
731 			if (howmany > bytes_to_do) {
732 				bufferdone = 0;
733 
734 				howmany = bytes_to_do;
735 				channel->wr_host_buf_pos += howmany;
736 			} else {
737 				bufferdone = 1;
738 
739 				channel->wr_host_buf_pos = 0;
740 
741 				if (bufidx == channel->wr_fpga_buf_idx) {
742 					channel->wr_empty = 1;
743 					channel->wr_sleepy = 1;
744 					channel->wr_ready = 0;
745 				}
746 
747 				if (bufidx >= (channel->num_wr_buffers - 1))
748 					channel->wr_host_buf_idx = 0;
749 				else
750 					channel->wr_host_buf_idx++;
751 			}
752 		}
753 
754 		/*
755 		 * Marking our situation after the possible changes above,
756 		 * for use after releasing the spinlock.
757 		 *
758 		 * empty = empty before change
759 		 * exhausted = empty after possible change
760 		 */
761 
762 		reached_eof = channel->wr_empty &&
763 			(channel->wr_host_buf_idx == channel->wr_eof);
764 		channel->wr_hangup = reached_eof;
765 		exhausted = channel->wr_empty;
766 		waiting_bufidx = channel->wr_host_buf_idx;
767 
768 		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
769 
770 		if (!empty) { /* Go on, now without the spinlock */
771 
772 			if (bufpos == 0) /* Position zero means it's virgin */
773 				dma_sync_single_for_cpu(channel->endpoint->dev,
774 							channel->wr_buffers[bufidx]->dma_addr,
775 							channel->wr_buf_size,
776 							DMA_FROM_DEVICE);
777 
778 			if (copy_to_user(
779 				    userbuf,
780 				    channel->wr_buffers[bufidx]->addr
781 				    + bufpos, howmany))
782 				rc = -EFAULT;
783 
784 			userbuf += howmany;
785 			bytes_done += howmany;
786 
787 			if (bufferdone) {
788 				dma_sync_single_for_device(channel->endpoint->dev,
789 							   channel->wr_buffers[bufidx]->dma_addr,
790 							   channel->wr_buf_size,
791 							   DMA_FROM_DEVICE);
792 
793 				/*
794 				 * Tell FPGA the buffer is done with. It's an
795 				 * atomic operation to the FPGA, so what
796 				 * happens with other channels doesn't matter,
797 				 * and this particular channel is protected by
798 				 * its channel-specific mutex.
799 				 */
800 
801 				iowrite32(1 | (channel->chan_num << 1) |
802 					  (bufidx << 12),
803 					  channel->endpoint->registers +
804 					  fpga_buf_ctrl_reg);
805 			}
806 
807 			if (rc) {
808 				mutex_unlock(&channel->wr_mutex);
809 				return rc;
810 			}
811 		}
812 
813 		/* This includes a zero-count return = EOF */
814 		if ((bytes_done >= count) || reached_eof)
815 			break;
816 
817 		if (!exhausted)
818 			continue; /* More in RAM buffer(s)? Just go on. */
819 
820 		if ((bytes_done > 0) &&
821 		    (no_time_left ||
822 		     (channel->wr_synchronous && channel->wr_allow_partial)))
823 			break;
824 
825 		/*
826 		 * Nonblocking read: The "ready" flag tells us that the FPGA
827 		 * has data to send. In non-blocking mode, if it isn't on,
828 		 * just return. But if there is, we jump directly to the point
829 		 * where we ask the FPGA to send all it has, and wait
830 		 * until that data arrives. So in a sense, we *do* block in
831 		 * nonblocking mode, but only for a very short time.
832 		 */
833 
834 		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
835 			if (bytes_done > 0)
836 				break;
837 
838 			if (ready)
839 				goto desperate;
840 
841 			rc = -EAGAIN;
842 			break;
843 		}
844 
845 		if (!no_time_left || (bytes_done > 0)) {
846 			/*
847 			 * Note that in case of an element-misaligned read
848 			 * request, offsetlimit will include the last element,
849 			 * which will be partially read from.
850 			 */
851 			int offsetlimit = ((count - bytes_done) - 1) >>
852 				channel->log2_element_size;
853 			int buf_elements = channel->wr_buf_size >>
854 				channel->log2_element_size;
855 
856 			/*
857 			 * In synchronous mode, always send an offset limit.
858 			 * Just don't send a value too big.
859 			 */
860 
861 			if (channel->wr_synchronous) {
862 				/* Don't request more than one buffer */
863 				if (channel->wr_allow_partial &&
864 				    (offsetlimit >= buf_elements))
865 					offsetlimit = buf_elements - 1;
866 
867 				/* Don't request more than all buffers */
868 				if (!channel->wr_allow_partial &&
869 				    (offsetlimit >=
870 				     (buf_elements * channel->num_wr_buffers)))
871 					offsetlimit = buf_elements *
872 						channel->num_wr_buffers - 1;
873 			}
874 
875 			/*
876 			 * In asynchronous mode, force early flush of a buffer
877 			 * only if that will allow returning a full count. The
878 			 * "offsetlimit < ( ... )" rather than "<=" excludes
879 			 * requesting a full buffer, which would obviously
880 			 * cause a buffer transmission anyhow
881 			 */
882 
883 			if (channel->wr_synchronous ||
884 			    (offsetlimit < (buf_elements - 1))) {
885 				mutex_lock(&channel->endpoint->register_mutex);
886 
887 				iowrite32(offsetlimit,
888 					  channel->endpoint->registers +
889 					  fpga_buf_offset_reg);
890 
891 				iowrite32(1 | (channel->chan_num << 1) |
892 					  (2 << 24) |  /* 2 = offset limit */
893 					  (waiting_bufidx << 12),
894 					  channel->endpoint->registers +
895 					  fpga_buf_ctrl_reg);
896 
897 				mutex_unlock(&channel->endpoint->
898 					     register_mutex);
899 			}
900 		}
901 
902 		/*
903 		 * If partial completion is disallowed, there is no point in
904 		 * timeout sleeping. Neither if no_time_left is set and
905 		 * there's no data.
906 		 */
907 
908 		if (!channel->wr_allow_partial ||
909 		    (no_time_left && (bytes_done == 0))) {
910 			/*
911 			 * This do-loop will run more than once if another
912 			 * thread reasserted wr_sleepy before we got the mutex
913 			 * back, so we try again.
914 			 */
915 
916 			do {
917 				mutex_unlock(&channel->wr_mutex);
918 
919 				if (wait_event_interruptible(
920 					    channel->wr_wait,
921 					    (!channel->wr_sleepy)))
922 					goto interrupted;
923 
924 				if (mutex_lock_interruptible(
925 					    &channel->wr_mutex))
926 					goto interrupted;
927 			} while (channel->wr_sleepy);
928 
929 			continue;
930 
931 interrupted: /* Mutex is not held if got here */
932 			if (channel->endpoint->fatal_error)
933 				return -EIO;
934 			if (bytes_done)
935 				return bytes_done;
936 			if (filp->f_flags & O_NONBLOCK)
937 				return -EAGAIN; /* Don't admit snoozing */
938 			return -EINTR;
939 		}
940 
941 		left_to_sleep = deadline - ((long) jiffies);
942 
943 		/*
944 		 * If our time is out, skip the waiting. We may miss wr_sleepy
945 		 * being deasserted but hey, almost missing the train is like
946 		 * missing it.
947 		 */
948 
949 		if (left_to_sleep > 0) {
950 			left_to_sleep =
951 				wait_event_interruptible_timeout(
952 					channel->wr_wait,
953 					(!channel->wr_sleepy),
954 					left_to_sleep);
955 
956 			if (left_to_sleep > 0) /* wr_sleepy deasserted */
957 				continue;
958 
959 			if (left_to_sleep < 0) { /* Interrupt */
960 				mutex_unlock(&channel->wr_mutex);
961 				if (channel->endpoint->fatal_error)
962 					return -EIO;
963 				if (bytes_done)
964 					return bytes_done;
965 				return -EINTR;
966 			}
967 		}
968 
969 desperate:
970 		no_time_left = 1; /* We're out of sleeping time. Desperate! */
971 
972 		if (bytes_done == 0) {
973 			/*
974 			 * Reaching here means that we allow partial return,
975 			 * that we've run out of time, and that we have
976 			 * nothing to return.
977 			 * So tell the FPGA to send anything it has or gets.
978 			 */
979 
980 			iowrite32(1 | (channel->chan_num << 1) |
981 				  (3 << 24) |  /* Opcode 3, flush it all! */
982 				  (waiting_bufidx << 12),
983 				  channel->endpoint->registers +
984 				  fpga_buf_ctrl_reg);
985 		}
986 
987 		/*
988 		 * Reaching here means that we *do* have data in the buffer,
989 		 * but the "partial" flag disallows returning less than
990 		 * required. And we don't have as much. So loop again,
991 		 * which is likely to end up blocking indefinitely until
992 		 * enough data has arrived.
993 		 */
994 	}
995 
996 	mutex_unlock(&channel->wr_mutex);
997 
998 	if (channel->endpoint->fatal_error)
999 		return -EIO;
1000 
1001 	if (rc)
1002 		return rc;
1003 
1004 	return bytes_done;
1005 }
1006 
1007 /*
1008  * The timeout argument takes values as follows:
1009  *  >0 : Flush with timeout
1010  * ==0 : Flush, and wait indefinitely for the flush to complete
1011  *  <0 : Autoflush: Flush only if there's a single buffer occupied
1012  */
1013 
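/*
 * For reference, the callers in this file use: HZ (one second) from
 * xillybus_flush(), 0 from xillybus_write() when flushing a synchronous
 * channel, and -1 from the autoflush work item.
 */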
1014 static int xillybus_myflush(struct xilly_channel *channel, long timeout)
1015 {
1016 	int rc;
1017 	unsigned long flags;
1018 
1019 	int end_offset_plus1;
1020 	int bufidx, bufidx_minus1;
1021 	int i;
1022 	int empty;
1023 	int new_rd_host_buf_pos;
1024 
1025 	if (channel->endpoint->fatal_error)
1026 		return -EIO;
1027 	rc = mutex_lock_interruptible(&channel->rd_mutex);
1028 	if (rc)
1029 		return rc;
1030 
1031 	/*
1032 	 * Don't flush a closed channel. This can happen when the queued
1033 	 * autoflush work item fires after the file has closed. This is not
1034 	 * an error, just something to dismiss.
1035 	 */
1036 
1037 	if (!channel->rd_ref_count)
1038 		goto done;
1039 
1040 	bufidx = channel->rd_host_buf_idx;
1041 
1042 	bufidx_minus1 = (bufidx == 0) ?
1043 		channel->num_rd_buffers - 1 :
1044 		bufidx - 1;
1045 
1046 	end_offset_plus1 = channel->rd_host_buf_pos >>
1047 		channel->log2_element_size;
1048 
1049 	new_rd_host_buf_pos = channel->rd_host_buf_pos -
1050 		(end_offset_plus1 << channel->log2_element_size);
1051 
1052 	/* Submit the current buffer if it's nonempty */
1053 	if (end_offset_plus1) {
1054 		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
1055 			(end_offset_plus1 << channel->log2_element_size);
1056 
1057 		/* Copy  unflushed data, so we can put it in next buffer */
1058 		for (i = 0; i < new_rd_host_buf_pos; i++)
1059 			channel->rd_leftovers[i] = *tail++;
1060 
1061 		spin_lock_irqsave(&channel->rd_spinlock, flags);
1062 
1063 		/* Autoflush only if a single buffer is occupied */
1064 
1065 		if ((timeout < 0) &&
1066 		    (channel->rd_full ||
1067 		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
1068 			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1069 			/*
1070 			 * A new work item may be queued by the ISR exactly
1071 			 * now, since the execution of a work item allows the
1072 			 * queuing of a new one while it's running.
1073 			 */
1074 			goto done;
1075 		}
1076 
1077 		/* The 4th element is never needed for data, so it's a flag */
1078 		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
1079 
1080 		/* Set up rd_full to reflect a certain moment's state */
1081 
1082 		if (bufidx == channel->rd_fpga_buf_idx)
1083 			channel->rd_full = 1;
1084 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1085 
1086 		if (bufidx >= (channel->num_rd_buffers - 1))
1087 			channel->rd_host_buf_idx = 0;
1088 		else
1089 			channel->rd_host_buf_idx++;
1090 
1091 		dma_sync_single_for_device(channel->endpoint->dev,
1092 					   channel->rd_buffers[bufidx]->dma_addr,
1093 					   channel->rd_buf_size,
1094 					   DMA_TO_DEVICE);
1095 
1096 		mutex_lock(&channel->endpoint->register_mutex);
1097 
1098 		iowrite32(end_offset_plus1 - 1,
1099 			  channel->endpoint->registers + fpga_buf_offset_reg);
1100 
1101 		iowrite32((channel->chan_num << 1) | /* Channel ID */
1102 			  (2 << 24) |  /* Opcode 2, submit buffer */
1103 			  (bufidx << 12),
1104 			  channel->endpoint->registers + fpga_buf_ctrl_reg);
1105 
1106 		mutex_unlock(&channel->endpoint->register_mutex);
1107 	} else if (bufidx == 0) {
1108 		bufidx = channel->num_rd_buffers - 1;
1109 	} else {
1110 		bufidx--;
1111 	}
1112 
1113 	channel->rd_host_buf_pos = new_rd_host_buf_pos;
1114 
1115 	if (timeout < 0)
1116 		goto done; /* Autoflush */
1117 
1118 	/*
1119 	 * bufidx is now the last buffer written to (or equal to
1120 	 * rd_fpga_buf_idx if buffer was never written to), and
1121 	 * channel->rd_host_buf_idx the one after it.
1122 	 *
1123 	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
1124 	 */
1125 
1126 	while (1) { /* Loop waiting for draining of buffers */
1127 		spin_lock_irqsave(&channel->rd_spinlock, flags);
1128 
1129 		if (bufidx != channel->rd_fpga_buf_idx)
1130 			channel->rd_full = 1; /*
1131 					       * Not really full,
1132 					       * but needs waiting.
1133 					       */
1134 
1135 		empty = !channel->rd_full;
1136 
1137 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1138 
1139 		if (empty)
1140 			break;
1141 
1142 		/*
1143 		 * Indefinite sleep with mutex taken. With data waiting to be
1144 		 * flushed, the user should not be surprised if open() for write
1145 		 * sleeps.
1146 		 */
1147 		if (timeout == 0)
1148 			wait_event_interruptible(channel->rd_wait,
1149 						 (!channel->rd_full));
1150 
1151 		else if (wait_event_interruptible_timeout(
1152 				 channel->rd_wait,
1153 				 (!channel->rd_full),
1154 				 timeout) == 0) {
1155 			dev_warn(channel->endpoint->dev,
1156 				 "Timed out while flushing. Output data may be lost.\n");
1157 
1158 			rc = -ETIMEDOUT;
1159 			break;
1160 		}
1161 
1162 		if (channel->rd_full) {
1163 			rc = -EINTR;
1164 			break;
1165 		}
1166 	}
1167 
1168 done:
1169 	mutex_unlock(&channel->rd_mutex);
1170 
1171 	if (channel->endpoint->fatal_error)
1172 		return -EIO;
1173 
1174 	return rc;
1175 }
1176 
1177 static int xillybus_flush(struct file *filp, fl_owner_t id)
1178 {
1179 	if (!(filp->f_mode & FMODE_WRITE))
1180 		return 0;
1181 
1182 	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
1183 }
1184 
1185 static void xillybus_autoflush(struct work_struct *work)
1186 {
1187 	struct delayed_work *workitem = to_delayed_work(work);
1188 	struct xilly_channel *channel = container_of(
1189 		workitem, struct xilly_channel, rd_workitem);
1190 	int rc;
1191 
1192 	rc = xillybus_myflush(channel, -1);
1193 	if (rc == -EINTR)
1194 		dev_warn(channel->endpoint->dev,
1195 			 "Autoflush failed because work queue thread got a signal.\n");
1196 	else if (rc)
1197 		dev_err(channel->endpoint->dev,
1198 			"Autoflush failed under weird circumstances.\n");
1199 }
1200 
1201 static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
1202 			      size_t count, loff_t *f_pos)
1203 {
1204 	ssize_t rc;
1205 	unsigned long flags;
1206 	int bytes_done = 0;
1207 	struct xilly_channel *channel = filp->private_data;
1208 
1209 	int full, exhausted;
1210 	/* Initializations are there only to silence warnings */
1211 
1212 	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
1213 	int end_offset_plus1 = 0;
1214 
1215 	if (channel->endpoint->fatal_error)
1216 		return -EIO;
1217 
1218 	rc = mutex_lock_interruptible(&channel->rd_mutex);
1219 	if (rc)
1220 		return rc;
1221 
1222 	while (1) {
1223 		int bytes_to_do = count - bytes_done;
1224 
1225 		spin_lock_irqsave(&channel->rd_spinlock, flags);
1226 
1227 		full = channel->rd_full;
1228 
1229 		if (!full) {
1230 			bufidx = channel->rd_host_buf_idx;
1231 			bufpos = channel->rd_host_buf_pos;
1232 			howmany = channel->rd_buf_size - bufpos;
1233 
1234 			/*
1235 			 * Update rd_host_* to its state after this operation.
1236 			 * count=0 means committing the buffer immediately,
1237 			 * which is like flushing, but doesn't necessarily block.
1238 			 */
1239 
1240 			if ((howmany > bytes_to_do) &&
1241 			    (count ||
1242 			     ((bufpos >> channel->log2_element_size) == 0))) {
1243 				bufferdone = 0;
1244 
1245 				howmany = bytes_to_do;
1246 				channel->rd_host_buf_pos += howmany;
1247 			} else {
1248 				bufferdone = 1;
1249 
1250 				if (count) {
1251 					end_offset_plus1 =
1252 						channel->rd_buf_size >>
1253 						channel->log2_element_size;
1254 					channel->rd_host_buf_pos = 0;
1255 				} else {
1256 					unsigned char *tail;
1257 					int i;
1258 
1259 					howmany = 0;
1260 
1261 					end_offset_plus1 = bufpos >>
1262 						channel->log2_element_size;
1263 
1264 					channel->rd_host_buf_pos -=
1265 						end_offset_plus1 <<
1266 						channel->log2_element_size;
1267 
1268 					tail = channel->
1269 						rd_buffers[bufidx]->addr +
1270 						(end_offset_plus1 <<
1271 						 channel->log2_element_size);
1272 
1273 					for (i = 0;
1274 					     i < channel->rd_host_buf_pos;
1275 					     i++)
1276 						channel->rd_leftovers[i] =
1277 							*tail++;
1278 				}
1279 
1280 				if (bufidx == channel->rd_fpga_buf_idx)
1281 					channel->rd_full = 1;
1282 
1283 				if (bufidx >= (channel->num_rd_buffers - 1))
1284 					channel->rd_host_buf_idx = 0;
1285 				else
1286 					channel->rd_host_buf_idx++;
1287 			}
1288 		}
1289 
1290 		/*
1291 		 * Marking our situation after the possible changes above,
1292 		 * for use  after releasing the spinlock.
1293 		 *
1294 		 * full = full before change
1295 		 * exhausted = full after possible change
1296 		 */
1297 
1298 		exhausted = channel->rd_full;
1299 
1300 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1301 
1302 		if (!full) { /* Go on, now without the spinlock */
1303 			unsigned char *head =
1304 				channel->rd_buffers[bufidx]->addr;
1305 			int i;
1306 
1307 			if ((bufpos == 0) || /* Zero means it's virgin */
1308 			    (channel->rd_leftovers[3] != 0)) {
1309 				dma_sync_single_for_cpu(channel->endpoint->dev,
1310 							channel->rd_buffers[bufidx]->dma_addr,
1311 							channel->rd_buf_size,
1312 							DMA_TO_DEVICE);
1313 
1314 				/* Virgin, but leftovers are due */
1315 				for (i = 0; i < bufpos; i++)
1316 					*head++ = channel->rd_leftovers[i];
1317 
1318 				channel->rd_leftovers[3] = 0; /* Clear flag */
1319 			}
1320 
1321 			if (copy_from_user(
1322 				    channel->rd_buffers[bufidx]->addr + bufpos,
1323 				    userbuf, howmany))
1324 				rc = -EFAULT;
1325 
1326 			userbuf += howmany;
1327 			bytes_done += howmany;
1328 
1329 			if (bufferdone) {
1330 				dma_sync_single_for_device(channel->endpoint->dev,
1331 							   channel->rd_buffers[bufidx]->dma_addr,
1332 							   channel->rd_buf_size,
1333 							   DMA_TO_DEVICE);
1334 
1335 				mutex_lock(&channel->endpoint->register_mutex);
1336 
1337 				iowrite32(end_offset_plus1 - 1,
1338 					  channel->endpoint->registers +
1339 					  fpga_buf_offset_reg);
1340 
1341 				iowrite32((channel->chan_num << 1) |
1342 					  (2 << 24) |  /* 2 = submit buffer */
1343 					  (bufidx << 12),
1344 					  channel->endpoint->registers +
1345 					  fpga_buf_ctrl_reg);
1346 
1347 				mutex_unlock(&channel->endpoint->
1348 					     register_mutex);
1349 
1350 				channel->rd_leftovers[3] =
1351 					(channel->rd_host_buf_pos != 0);
1352 			}
1353 
1354 			if (rc) {
1355 				mutex_unlock(&channel->rd_mutex);
1356 
1357 				if (channel->endpoint->fatal_error)
1358 					return -EIO;
1359 
1360 				if (!channel->rd_synchronous)
1361 					queue_delayed_work(
1362 						xillybus_wq,
1363 						&channel->rd_workitem,
1364 						XILLY_RX_TIMEOUT);
1365 
1366 				return rc;
1367 			}
1368 		}
1369 
1370 		if (bytes_done >= count)
1371 			break;
1372 
1373 		if (!exhausted)
1374 			continue; /* If there's more space, just go on */
1375 
1376 		if ((bytes_done > 0) && channel->rd_allow_partial)
1377 			break;
1378 
1379 		/*
1380 		 * Indefinite sleep with mutex taken. With data waiting for
1381 		 * flushing, the user should not be surprised if open() for write
1382 		 * sleeps.
1383 		 */
1384 
1385 		if (filp->f_flags & O_NONBLOCK) {
1386 			rc = -EAGAIN;
1387 			break;
1388 		}
1389 
1390 		if (wait_event_interruptible(channel->rd_wait,
1391 					     (!channel->rd_full))) {
1392 			mutex_unlock(&channel->rd_mutex);
1393 
1394 			if (channel->endpoint->fatal_error)
1395 				return -EIO;
1396 
1397 			if (bytes_done)
1398 				return bytes_done;
1399 			return -EINTR;
1400 		}
1401 	}
1402 
1403 	mutex_unlock(&channel->rd_mutex);
1404 
1405 	if (!channel->rd_synchronous)
1406 		queue_delayed_work(xillybus_wq,
1407 				   &channel->rd_workitem,
1408 				   XILLY_RX_TIMEOUT);
1409 
1410 	if (channel->endpoint->fatal_error)
1411 		return -EIO;
1412 
1413 	if (rc)
1414 		return rc;
1415 
1416 	if ((channel->rd_synchronous) && (bytes_done > 0)) {
1417 		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
1418 
1419 		if (rc && (rc != -EINTR))
1420 			return rc;
1421 	}
1422 
1423 	return bytes_done;
1424 }
1425 
1426 static int xillybus_open(struct inode *inode, struct file *filp)
1427 {
1428 	int rc;
1429 	unsigned long flags;
1430 	struct xilly_endpoint *endpoint;
1431 	struct xilly_channel *channel;
1432 	int index;
1433 
1434 	rc = xillybus_find_inode(inode, (void **)&endpoint, &index);
1435 	if (rc)
1436 		return rc;
1437 
1438 	if (endpoint->fatal_error)
1439 		return -EIO;
1440 
1441 	channel = endpoint->channels[1 + index];
1442 	filp->private_data = channel;
1443 
1444 	/*
1445 	 * It gets complicated because:
1446 	 * 1. We don't want to take a mutex we don't have to
1447 	 * 2. We don't want to open one direction if the other will fail.
1448 	 */
1449 
1450 	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
1451 		return -ENODEV;
1452 
1453 	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
1454 		return -ENODEV;
1455 
1456 	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
1457 	    (channel->wr_synchronous || !channel->wr_allow_partial ||
1458 	     !channel->wr_supports_nonempty)) {
1459 		dev_err(endpoint->dev,
1460 			"open() failed: O_NONBLOCK not allowed for read on this device\n");
1461 		return -ENODEV;
1462 	}
1463 
1464 	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
1465 	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
1466 		dev_err(endpoint->dev,
1467 			"open() failed: O_NONBLOCK not allowed for write on this device\n");
1468 		return -ENODEV;
1469 	}
1470 
1471 	/*
1472 	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
1473 	 * This shouldn't occur normally, since multiple open of the same
1474 	 * file descriptor is almost always prohibited anyhow
1475 	 * (*_exclusive_open is normally set in real-life systems).
1476 	 */
1477 
1478 	if (filp->f_mode & FMODE_READ) {
1479 		rc = mutex_lock_interruptible(&channel->wr_mutex);
1480 		if (rc)
1481 			return rc;
1482 	}
1483 
1484 	if (filp->f_mode & FMODE_WRITE) {
1485 		rc = mutex_lock_interruptible(&channel->rd_mutex);
1486 		if (rc)
1487 			goto unlock_wr;
1488 	}
1489 
1490 	if ((filp->f_mode & FMODE_READ) &&
1491 	    (channel->wr_ref_count != 0) &&
1492 	    (channel->wr_exclusive_open)) {
1493 		rc = -EBUSY;
1494 		goto unlock;
1495 	}
1496 
1497 	if ((filp->f_mode & FMODE_WRITE) &&
1498 	    (channel->rd_ref_count != 0) &&
1499 	    (channel->rd_exclusive_open)) {
1500 		rc = -EBUSY;
1501 		goto unlock;
1502 	}
1503 
1504 	if (filp->f_mode & FMODE_READ) {
1505 		if (channel->wr_ref_count == 0) { /* First open of file */
1506 			/* Move the host to first buffer */
1507 			spin_lock_irqsave(&channel->wr_spinlock, flags);
1508 			channel->wr_host_buf_idx = 0;
1509 			channel->wr_host_buf_pos = 0;
1510 			channel->wr_fpga_buf_idx = -1;
1511 			channel->wr_empty = 1;
1512 			channel->wr_ready = 0;
1513 			channel->wr_sleepy = 1;
1514 			channel->wr_eof = -1;
1515 			channel->wr_hangup = 0;
1516 
1517 			spin_unlock_irqrestore(&channel->wr_spinlock, flags);
1518 
1519 			iowrite32(1 | (channel->chan_num << 1) |
1520 				  (4 << 24) |  /* Opcode 4, open channel */
1521 				  ((channel->wr_synchronous & 1) << 23),
1522 				  channel->endpoint->registers +
1523 				  fpga_buf_ctrl_reg);
1524 		}
1525 
1526 		channel->wr_ref_count++;
1527 	}
1528 
1529 	if (filp->f_mode & FMODE_WRITE) {
1530 		if (channel->rd_ref_count == 0) { /* First open of file */
1531 			/* Move the host to first buffer */
1532 			spin_lock_irqsave(&channel->rd_spinlock, flags);
1533 			channel->rd_host_buf_idx = 0;
1534 			channel->rd_host_buf_pos = 0;
1535 			channel->rd_leftovers[3] = 0; /* No leftovers. */
1536 			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
1537 			channel->rd_full = 0;
1538 
1539 			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1540 
1541 			iowrite32((channel->chan_num << 1) |
1542 				  (4 << 24),   /* Opcode 4, open channel */
1543 				  channel->endpoint->registers +
1544 				  fpga_buf_ctrl_reg);
1545 		}
1546 
1547 		channel->rd_ref_count++;
1548 	}
1549 
1550 unlock:
1551 	if (filp->f_mode & FMODE_WRITE)
1552 		mutex_unlock(&channel->rd_mutex);
1553 unlock_wr:
1554 	if (filp->f_mode & FMODE_READ)
1555 		mutex_unlock(&channel->wr_mutex);
1556 
1557 	if (!rc && (!channel->seekable))
1558 		return nonseekable_open(inode, filp);
1559 
1560 	return rc;
1561 }
1562 
1563 static int xillybus_release(struct inode *inode, struct file *filp)
1564 {
1565 	unsigned long flags;
1566 	struct xilly_channel *channel = filp->private_data;
1567 
1568 	int buf_idx;
1569 	int eof;
1570 
1571 	if (channel->endpoint->fatal_error)
1572 		return -EIO;
1573 
1574 	if (filp->f_mode & FMODE_WRITE) {
1575 		mutex_lock(&channel->rd_mutex);
1576 
1577 		channel->rd_ref_count--;
1578 
1579 		if (channel->rd_ref_count == 0) {
1580 			/*
1581 			 * We rely on the kernel calling flush()
1582 			 * before we get here.
1583 			 */
1584 
1585 			iowrite32((channel->chan_num << 1) | /* Channel ID */
1586 				  (5 << 24),  /* Opcode 5, close channel */
1587 				  channel->endpoint->registers +
1588 				  fpga_buf_ctrl_reg);
1589 		}
1590 		mutex_unlock(&channel->rd_mutex);
1591 	}
1592 
1593 	if (filp->f_mode & FMODE_READ) {
1594 		mutex_lock(&channel->wr_mutex);
1595 
1596 		channel->wr_ref_count--;
1597 
1598 		if (channel->wr_ref_count == 0) {
1599 			iowrite32(1 | (channel->chan_num << 1) |
1600 				  (5 << 24),  /* Opcode 5, close channel */
1601 				  channel->endpoint->registers +
1602 				  fpga_buf_ctrl_reg);
1603 
1604 			/*
1605 			 * This is crazily cautious: We make sure not only
1606 			 * that we got an EOF (be it because we closed
1607 			 * the channel or because of a user's EOF), but also
1608 			 * that it's one beyond the last buffer that arrived,
1609 			 * so there are no leftover buffers pending before
1610 			 * wrapping up (which can only happen in asynchronous
1611 			 * channels, BTW).
1612 			 */
1613 
1614 			while (1) {
1615 				spin_lock_irqsave(&channel->wr_spinlock,
1616 						  flags);
1617 				buf_idx = channel->wr_fpga_buf_idx;
1618 				eof = channel->wr_eof;
1619 				channel->wr_sleepy = 1;
1620 				spin_unlock_irqrestore(&channel->wr_spinlock,
1621 						       flags);
1622 
1623 				/*
1624 				 * Check if eof points at the buffer after
1625 				 * the last one the FPGA submitted. Note that
1626 				 * no EOF is marked by negative eof.
1627 				 */
1628 
1629 				buf_idx++;
1630 				if (buf_idx == channel->num_wr_buffers)
1631 					buf_idx = 0;
1632 
1633 				if (buf_idx == eof)
1634 					break;
1635 
1636 				/*
1637 				 * Steal an extra 100 ms if awakened by an interrupt.
1638 				 * This is a simple workaround for an
1639 				 * interrupt pending when entering, which would
1640 				 * otherwise result in declaring the hardware
1641 				 * non-responsive.
1642 				 */
1643 
1644 				if (wait_event_interruptible(
1645 					    channel->wr_wait,
1646 					    (!channel->wr_sleepy)))
1647 					msleep(100);
1648 
1649 				if (channel->wr_sleepy) {
1650 					mutex_unlock(&channel->wr_mutex);
1651 					dev_warn(channel->endpoint->dev,
1652 						 "Hardware failed to respond to close command, therefore left in messy state.\n");
1653 					return -EINTR;
1654 				}
1655 			}
1656 		}
1657 
1658 		mutex_unlock(&channel->wr_mutex);
1659 	}
1660 
1661 	return 0;
1662 }
1663 
1664 static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
1665 {
1666 	struct xilly_channel *channel = filp->private_data;
1667 	loff_t pos = filp->f_pos;
1668 	int rc = 0;
1669 
1670 	/*
1671 	 * Take both mutexes not allowing interrupts, since it seems like
1672 	 * common applications don't expect an -EINTR here. Besides, multiple
1673 	 * access to a single file descriptor on seekable devices is a mess
1674 	 * anyhow.
1675 	 */
1676 
1677 	if (channel->endpoint->fatal_error)
1678 		return -EIO;
1679 
1680 	mutex_lock(&channel->wr_mutex);
1681 	mutex_lock(&channel->rd_mutex);
1682 
1683 	switch (whence) {
1684 	case SEEK_SET:
1685 		pos = offset;
1686 		break;
1687 	case SEEK_CUR:
1688 		pos += offset;
1689 		break;
1690 	case SEEK_END:
1691 		pos = offset; /* Going to the end => to the beginning */
1692 		break;
1693 	default:
1694 		rc = -EINVAL;
1695 		goto end;
1696 	}
1697 
1698 	/* In any case, we must finish on an element boundary */
1699 	if (pos & ((1 << channel->log2_element_size) - 1)) {
1700 		rc = -EINVAL;
1701 		goto end;
1702 	}
1703 
1704 	mutex_lock(&channel->endpoint->register_mutex);
1705 
1706 	iowrite32(pos >> channel->log2_element_size,
1707 		  channel->endpoint->registers + fpga_buf_offset_reg);
1708 
1709 	iowrite32((channel->chan_num << 1) |
1710 		  (6 << 24),  /* Opcode 6, set address */
1711 		  channel->endpoint->registers + fpga_buf_ctrl_reg);
1712 
1713 	mutex_unlock(&channel->endpoint->register_mutex);
1714 
1715 end:
1716 	mutex_unlock(&channel->rd_mutex);
1717 	mutex_unlock(&channel->wr_mutex);
1718 
1719 	if (rc) /* Return error after releasing mutexes */
1720 		return rc;
1721 
1722 	filp->f_pos = pos;
1723 
1724 	/*
1725 	 * Since seekable devices are allowed only when the channel is
1726 	 * synchronous, we assume that there is no data pending in either
1727 	 * direction (which holds true as long as no concurrent access on the
1728 	 * file descriptor takes place).
1729 	 * The only thing we may need to throw away is leftovers from partial
1730 	 * write() flush.
1731 	 */
1732 
1733 	channel->rd_leftovers[3] = 0;
1734 
1735 	return pos;
1736 }
1737 
1738 static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
1739 {
1740 	struct xilly_channel *channel = filp->private_data;
1741 	__poll_t mask = 0;
1742 	unsigned long flags;
1743 
1744 	poll_wait(filp, &channel->endpoint->ep_wait, wait);
1745 
1746 	/*
1747 	 * poll() isn't supported for read() channels unless they are
1748 	 * asynchronous and support the nonempty message. Allowing it
1749 	 * otherwise would create situations where data has been delivered
1750 	 * at the FPGA while users expect select() to wake up, which it
1751 	 * may not.
1752 	 */
1753 
1754 	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
1755 		poll_wait(filp, &channel->wr_wait, wait);
1756 		poll_wait(filp, &channel->wr_ready_wait, wait);
1757 
1758 		spin_lock_irqsave(&channel->wr_spinlock, flags);
1759 		if (!channel->wr_empty || channel->wr_ready)
1760 			mask |= EPOLLIN | EPOLLRDNORM;
1761 
1762 		if (channel->wr_hangup)
1763 			/*
1764 			 * Not EPOLLHUP, because its behavior is in the
1765 			 * mist, and EPOLLIN does what we want: Wake up
1766 			 * the read file descriptor so it sees EOF.
1767 			 */
1768 			mask |=  EPOLLIN | EPOLLRDNORM;
1769 		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
1770 	}
1771 
1772 	/*
1773 	 * If partial data write is disallowed on a write() channel,
1774 	 * it's pointless to ever signal OK to write, because it could
1775 	 * block despite some space being available.
1776 	 */
1777 
1778 	if (channel->rd_allow_partial) {
1779 		poll_wait(filp, &channel->rd_wait, wait);
1780 
1781 		spin_lock_irqsave(&channel->rd_spinlock, flags);
1782 		if (!channel->rd_full)
1783 			mask |= EPOLLOUT | EPOLLWRNORM;
1784 		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1785 	}
1786 
1787 	if (channel->endpoint->fatal_error)
1788 		mask |= EPOLLERR;
1789 
1790 	return mask;
1791 }
1792 
1793 static const struct file_operations xillybus_fops = {
1794 	.owner      = THIS_MODULE,
1795 	.read       = xillybus_read,
1796 	.write      = xillybus_write,
1797 	.open       = xillybus_open,
1798 	.flush      = xillybus_flush,
1799 	.release    = xillybus_release,
1800 	.llseek     = xillybus_llseek,
1801 	.poll       = xillybus_poll,
1802 };
1803 
1804 struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
1805 {
1806 	struct xilly_endpoint *endpoint;
1807 
1808 	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
1809 	if (!endpoint)
1810 		return NULL;
1811 
1812 	endpoint->dev = dev;
1813 	endpoint->msg_counter = 0x0b;
1814 	endpoint->failed_messages = 0;
1815 	endpoint->fatal_error = 0;
1816 
1817 	init_waitqueue_head(&endpoint->ep_wait);
1818 	mutex_init(&endpoint->register_mutex);
1819 
1820 	return endpoint;
1821 }
1822 EXPORT_SYMBOL(xillybus_init_endpoint);
1823 
1824 static int xilly_quiesce(struct xilly_endpoint *endpoint)
1825 {
1826 	long t;
1827 
1828 	endpoint->idtlen = -1;
1829 
1830 	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
1831 		  endpoint->registers + fpga_dma_control_reg);
1832 
1833 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
1834 					     (endpoint->idtlen >= 0),
1835 					     XILLY_TIMEOUT);
1836 	if (t <= 0) {
1837 		dev_err(endpoint->dev,
1838 			"Failed to quiesce the device on exit.\n");
1839 		return -ENODEV;
1840 	}
1841 	return 0;
1842 }
1843 
1844 int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
1845 {
1846 	int rc;
1847 	long t;
1848 
1849 	void *bootstrap_resources;
1850 	int idtbuffersize = (1 << PAGE_SHIFT);
1851 	struct device *dev = endpoint->dev;
1852 
1853 	/*
1854 	 * The bogus IDT is used during bootstrap for allocating the initial
1855 	 * message buffer, and then the message buffer and space for the IDT
1856 	 * itself. The initial message buffer is of a single page's size, but
1857 	 * it's soon replaced with a more modest one (and memory is freed).
1858 	 */
1859 
1860 	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
1861 				       3, 192, PAGE_SHIFT, 0 };
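	/*
	 * Roughly, in the channel descriptor format parsed by
	 * xilly_setupchannels(): the first bogus_idt entry describes the
	 * message buffer (a single PAGE_SIZE buffer), and the second a
	 * single FPGA-to-host buffer on channel 1, whose size
	 * (1 << bogus_idt[6]) is grown below until the IDT fits in it.
	 */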
1862 	struct xilly_idt_handle idt_handle;
1863 
1864 	/*
1865 	 * Writing the value 0x00000001 to Endianness register signals which
1866 	 * endianness this processor is using, so the FPGA can swap words as
1867 	 * necessary.
1868 	 */
1869 
1870 	iowrite32(1, endpoint->registers + fpga_endian_reg);
1871 
1872 	/* Bootstrap phase I: Allocate temporary message buffer */
1873 
1874 	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
1875 	if (!bootstrap_resources)
1876 		return -ENOMEM;
1877 
1878 	endpoint->num_channels = 0;
1879 
1880 	rc = xilly_setupchannels(endpoint, bogus_idt, 1);
1881 	if (rc)
1882 		return rc;
1883 
1884 	/* Clear the message subsystem (and counter in particular) */
1885 	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);
1886 
1887 	endpoint->idtlen = -1;
1888 
1889 	/*
1890 	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
1891 	 * buffer size.
1892 	 */
1893 	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
1894 		  endpoint->registers + fpga_dma_control_reg);
1895 
1896 	t = wait_event_interruptible_timeout(endpoint->ep_wait,
1897 					     (endpoint->idtlen >= 0),
1898 					     XILLY_TIMEOUT);
1899 	if (t <= 0) {
1900 		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
1901 		return -ENODEV;
1902 	}
1903 
1904 	/* Enable DMA */
1905 	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
1906 		  endpoint->registers + fpga_dma_control_reg);
1907 
1908 	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
1909 	while (endpoint->idtlen >= idtbuffersize) {
1910 		idtbuffersize *= 2;
1911 		bogus_idt[6]++;
1912 	}
1913 
1914 	endpoint->num_channels = 1;
1915 
1916 	rc = xilly_setupchannels(endpoint, bogus_idt, 2);
1917 	if (rc)
1918 		goto failed_idt;
1919 
1920 	rc = xilly_obtain_idt(endpoint);
1921 	if (rc)
1922 		goto failed_idt;
1923 
1924 	rc = xilly_scan_idt(endpoint, &idt_handle);
1925 	if (rc)
1926 		goto failed_idt;
1927 
1928 	devres_close_group(dev, bootstrap_resources);
1929 
1930 	/* Bootstrap phase III: Allocate buffers according to IDT */
1931 
1932 	rc = xilly_setupchannels(endpoint,
1933 				 idt_handle.chandesc,
1934 				 idt_handle.entries);
1935 	if (rc)
1936 		goto failed_idt;
1937 
1938 	rc = xillybus_init_chrdev(dev, &xillybus_fops,
1939 				  endpoint->owner, endpoint,
1940 				  idt_handle.names,
1941 				  idt_handle.names_len,
1942 				  endpoint->num_channels,
1943 				  xillyname, false);
1944 
1945 	if (rc)
1946 		goto failed_idt;
1947 
1948 	devres_release_group(dev, bootstrap_resources);
1949 
1950 	return 0;
1951 
1952 failed_idt:
1953 	xilly_quiesce(endpoint);
1954 	flush_workqueue(xillybus_wq);
1955 
1956 	return rc;
1957 }
1958 EXPORT_SYMBOL(xillybus_endpoint_discovery);
1959 
1960 void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
1961 {
1962 	xillybus_cleanup_chrdev(endpoint, endpoint->dev);
1963 
1964 	xilly_quiesce(endpoint);
1965 
1966 	/*
1967 	 * Flushing is done upon endpoint release to prevent access to memory
1968 	 * just about to be released. This makes the quiesce complete.
1969 	 */
1970 	flush_workqueue(xillybus_wq);
1971 }
1972 EXPORT_SYMBOL(xillybus_endpoint_remove);
1973 
1974 static int __init xillybus_init(void)
1975 {
1976 	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
1977 	if (!xillybus_wq)
1978 		return -ENOMEM;
1979 
1980 	return 0;
1981 }
1982 
1983 static void __exit xillybus_exit(void)
1984 {
1985 	/* flush_workqueue() was called for each endpoint released */
1986 	destroy_workqueue(xillybus_wq);
1987 }
1988 
1989 module_init(xillybus_init);
1990 module_exit(xillybus_exit);
1991