/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"

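/*
 * Per-open-file-descriptor state.  One struct client is allocated in
 * fw_device_op_open() and freed through the kref when the last reference
 * is dropped.  client->lock serializes the event list, the resource idr
 * and the in_shutdown flag; client->wait is the read()/poll() wait queue.
 */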
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};
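/*
 * Everything that userspace can allocate through this character device
 * (address handlers, outbound and inbound transactions, descriptors,
 * isochronous resources) is wrapped in a struct client_resource.  The
 * resource is registered in client->resource_idr, its handle is handed
 * back to userspace, and the release callback undoes the allocation,
 * either on an explicit deallocation ioctl or when the file is closed.
 */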

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void schedule_iso_resource(struct iso_resource *);
static void release_iso_resource(struct client *, struct client_resource *);

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	struct fw_cdev_event_request request;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource resource;
};

static inline void __user *u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	kref_init(&client->kref);

	file->private_data = client;

	mutex_lock(&device->client_list_mutex);
	list_add_tail(&client->link, &device->client_list);
	mutex_unlock(&device->client_list_mutex);

	return 0;
}

static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}
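/*
 * Sketch of how a userspace client might consume the events delivered by
 * dequeue_event() above (illustrative only, error handling omitted; the
 * 16 KiB buffer size is an arbitrary choice of this example):
 *
 *	union fw_cdev_event *event;
 *	char buf[16 * 1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *
 *	event = (union fw_cdev_event *)buf;
 *	switch (event->common.type) {
 *	case FW_CDEV_EVENT_BUS_RESET:
 *	case FW_CDEV_EVENT_RESPONSE:
 *	case FW_CDEV_EVENT_REQUEST:
 *	case FW_CDEV_EVENT_ISO_INTERRUPT:
 *		...
 *	}
 *
 * Each read() returns at most one event; variable-length payloads
 * (response data, iso headers, request payloads) directly follow the
 * fixed-size event structure, as queued via the v[0]/v[1] pairs in
 * queue_event().
 */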

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = 0; /* FIXME: We don't track the BM. */
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	struct client_resource *r = p;

	if (r->release == release_iso_resource)
		schedule_iso_resource(container_of(r,
					struct iso_resource, resource));
	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		fw_notify("Out of memory when allocating bus reset event\n");
		return;
	}

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;
	get_info->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(uptr, client->device->config_rom,
				   min(want, have));
	}
	get_info->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	return 0;
}

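/*
 * Register a resource in client->resource_idr and store the allocated
 * handle in resource->handle.  idr_pre_get() preallocates memory outside
 * of the spinlock; if idr_get_new() nevertheless runs out of preallocated
 * nodes it returns -EAGAIN and we simply retry.  Each registered resource
 * holds one reference on the client.
 */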
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

 retry:
	if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
		return -ENOMEM;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_get_new(&client->resource_idr, resource,
				  &resource->handle);
	if (ret >= 0) {
		client_get(client);
		if (resource->release == release_iso_resource)
			schedule_iso_resource(container_of(resource,
						struct iso_resource, resource));
	}
	spin_unlock_irqrestore(&client->lock, flags);

	if (ret == -EAGAIN)
		goto retry;

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **resource)
{
	struct client_resource *r;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		r = NULL;
	else
		r = idr_find(&client->resource_idr, handle);
	if (r && r->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(r && r->release == release))
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	client_put(client);

	return 0;
}

static void release_transaction(struct client *client,
				struct client_resource *resource)
{
	struct outbound_transaction_resource *r = container_of(resource,
			struct outbound_transaction_resource, resource);

	fw_cancel_transaction(client->device->card, &r->transaction);
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	/*
	 * 1. If called while in shutdown, the idr tree must be left untouched.
	 *    The idr handle will be removed and the client reference will be
	 *    dropped later.
	 * 2. If the call chain was release_client_resource ->
	 *    release_transaction -> complete_transaction (instead of a normal
	 *    conclusion of the transaction), i.e. if this resource was already
	 *    unregistered from the idr, the client reference will be dropped
	 *    by release_client_resource and we must not drop it here.
	 */
	if (!client->in_shutdown &&
	    idr_find(&client->resource_idr, e->r.resource.handle)) {
		idr_remove(&client->resource_idr, e->r.resource.handle);
		/* Drop the idr's reference */
		client_put(client);
	}
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the transaction callback's reference */
	client_put(client);
}

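/*
 * Common helper for the send_request, send_broadcast_request and
 * send_stream_packet ioctls: copy in the payload, register the outbound
 * transaction as a client resource and submit it.  complete_transaction()
 * above turns the result into an FW_CDEV_EVENT_RESPONSE event.
 */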
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, request, client->device->node_id,
			    client->device->max_speed);
}
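/*
 * Minimal userspace sketch of an asynchronous read through this ioctl
 * (illustrative only; my_cookie and current_generation are placeholders
 * for whatever the application uses):
 *
 *	struct fw_cdev_send_request req = {
 *		.tcode      = TCODE_READ_QUADLET_REQUEST,
 *		.length     = 4,
 *		.offset     = 0xfffff0000400ULL,
 *		.closure    = my_cookie,
 *		.generation = current_generation,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_SEND_REQUEST, &req);
 *
 * 0xfffff0000400 is the start of the target's Config ROM.  The matching
 * FW_CDEV_EVENT_RESPONSE event is later obtained via read().
 */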

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	fw_send_response(client->device->card, r->request,
			 RCODE_CONFLICT_ERROR);
	kfree(r);
}

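/*
 * Called by the core for incoming requests to an address range that this
 * client allocated with ioctl_allocate().  The request is parked as an
 * inbound_transaction_resource and reported to userspace as an
 * FW_CDEV_EVENT_REQUEST event; userspace must answer it with the
 * send_response ioctl, otherwise release_request() responds with
 * RCODE_CONFLICT_ERROR when the resource is released.
 */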
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data    = payload;
	r->length  = length;

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type    = FW_CDEV_EVENT_REQUEST;
	e->request.tcode   = tcode;
	e->request.offset  = offset;
	e->request.length  = length;
	e->request.handle  = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, void *buffer)
{
	struct fw_cdev_allocate *request = buffer;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = request->offset;
	region.end = request->offset + request->length;
	r->handler.length = request->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data = r;
	r->closure = request->closure;
	r->client = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	request->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, void *buffer)
{
	struct fw_cdev_send_response *request = buffer;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;

	if (release_client_resource(client, request->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (request->length < r->length)
		r->length = request->length;
	if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
		return -EFAULT;

	fw_send_response(client->device->card, r->request, request->rcode);
	kfree(r);

	return 0;
}

static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
{
	struct fw_cdev_initiate_bus_reset *request = buffer;
	int short_reset;

	short_reset = (request->type == FW_CDEV_SHORT_RESET);

	return fw_core_initiate_bus_reset(client->device->card, short_reset);
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (request->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = request->length;
	r->descriptor.immediate = request->immediate;
	r->descriptor.key       = request->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	request->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}

static int ioctl_remove_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_remove_descriptor *request = buffer;

	return release_client_resource(client, request->handle,
				       release_descriptor, NULL);
}

static void iso_callback(struct fw_iso_context *context, u32 cycle,
			 size_t header_length, void *header, void *data)
{
	struct client *client = data;
	struct iso_interrupt_event *e;

	e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC);
	if (e == NULL)
		return;

	e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
	e->interrupt.closure   = client->iso_closure;
	e->interrupt.cycle     = cycle;
	e->interrupt.header_length = header_length;
	memcpy(e->interrupt.header, header, header_length);
	queue_event(client, &e->event, &e->interrupt,
		    sizeof(e->interrupt) + header_length, NULL, 0);
}

static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;
	struct fw_iso_context *context;

	/* We only support one context at this time. */
	if (client->iso_context != NULL)
		return -EBUSY;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	context =  fw_iso_context_create(client->device->card,
					 request->type,
					 request->channel,
					 request->speed,
					 request->header_size,
					 iso_callback, client);
	if (IS_ERR(context))
		return PTR_ERR(context);

	client->iso_closure = request->closure;
	client->iso_context = context;

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}

/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
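/*
 * The control word is packed by userspace with the FW_CDEV_ISO_* macros
 * from <linux/firewire-cdev.h>.  For example, a transmit packet carrying
 * one 8-byte header, a 480-byte payload and an interrupt on completion
 * would roughly be set up as
 *
 *	packet.control = FW_CDEV_ISO_HEADER_LENGTH(8) |
 *			 FW_CDEV_ISO_PAYLOAD_LENGTH(480) |
 *			 FW_CDEV_ISO_INTERRUPT;
 *
 * which the GET_* macros above take apart again.
 */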

static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we set up the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}

static int ioctl_start_iso(struct client *client, void *buffer)
{
	struct fw_cdev_start_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
		if (request->tags == 0 || request->tags > 15)
			return -EINVAL;

		if (request->sync > 15)
			return -EINVAL;
	}

	return fw_iso_context_start(client->iso_context, request->cycle,
				    request->sync, request->tags);
}

static int ioctl_stop_iso(struct client *client, void *buffer)
{
	struct fw_cdev_stop_iso *request = buffer;

	if (client->iso_context == NULL || request->handle != 0)
		return -EINVAL;

	return fw_iso_context_stop(client->iso_context);
}

static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}

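/*
 * Worker for channel/bandwidth (re)allocation.  r->todo encodes the state
 * machine:  ISO_RES_ALLOC transitions to ISO_RES_REALLOC after the first
 * allocation attempt, so that a successful allocation is reestablished
 * automatically after each bus reset; the *_ONCE variants and
 * ISO_RES_DEALLOC are one-shot and free the resource when done.  Results
 * are reported to userspace as FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
 * FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED events.
 */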
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
			client_get(client);
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE);
	/*
	 * Is this generation outdated already?  As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure?  Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	if (todo == ISO_RES_REALLOC && success)
		goto out;

	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->resource.handle	= r->resource.handle;
	e->resource.channel	= channel;
	e->resource.bandwidth	= bandwidth;

	queue_event(client, &e->event,
		    &e->resource, sizeof(e->resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}

static void schedule_iso_resource(struct iso_resource *r)
{
	client_get(r->client);
	if (!schedule_delayed_work(&r->work, 0))
		client_put(r->client);
}

static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r);
	spin_unlock_irq(&client->lock);
}

static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r  = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client	= client;
	r->todo		= todo;
	r->generation	= -1;
	r->channels	= request->channels;
	r->bandwidth	= request->bandwidth;
	r->e_alloc	= e1;
	r->e_dealloc	= e2;

	e1->resource.closure	= request->closure;
	e1->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->resource.closure	= request->closure;
	e2->resource.type	= FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}

static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
{
	struct fw_cdev_deallocate *request = buffer;

	return release_client_resource(client, request->handle,
				       release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
{
	struct fw_cdev_allocate_iso_resource *request = buffer;

	return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
}
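/*
 * Userspace sketch for the allocate_iso_resource ioctl (illustrative
 * only; my_cookie, wanted_channel and bandwidth_units are placeholders):
 *
 *	struct fw_cdev_allocate_iso_resource res = {
 *		.closure   = my_cookie,
 *		.channels  = 1ULL << wanted_channel,
 *		.bandwidth = bandwidth_units,
 *	};
 *
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE, &res);
 *
 * The outcome arrives asynchronously as an
 * FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event, and the kernel keeps
 * reallocating the resource after bus resets until it is deallocated or
 * the file is closed.
 */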

/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, void *buffer)
{
	return client->device->max_speed;
}

static int ioctl_send_broadcast_request(struct client *client, void *buffer)
{
	struct fw_cdev_send_request *request = buffer;

	switch (request->tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		break;
	default:
		return -EINVAL;
	}

	/* Security policy: Only allow accesses to Units Space. */
	if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
		return -EACCES;

	return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
}

static int ioctl_send_stream_packet(struct client *client, void *buffer)
{
	struct fw_cdev_send_stream_packet *p = buffer;
	struct fw_cdev_send_request request;
	int dest;

	if (p->speed > client->device->card->link_speed ||
	    p->length > 1024 << p->speed)
		return -EIO;

	if (p->tag > 3 || p->channel > 63 || p->sy > 15)
		return -EINVAL;

	dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
	request.tcode		= TCODE_STREAM_DATA;
	request.length		= p->length;
	request.closure		= p->closure;
	request.data		= p->data;
	request.generation	= p->generation;

	return init_request(client, &request, dest, p->speed);
}

static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
};

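/*
 * All ioctls share one marshalling scheme: the command number's _IOC_NR
 * indexes ioctl_handlers[] (so the table order above must match the
 * command definitions in <linux/firewire-cdev.h>), and _IOC_SIZE gives
 * the size of the argument structure, which is copied into a kernel
 * buffer before the handler runs and copied back afterwards, as directed
 * by the _IOC_DIR bits.
 */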
static int dispatch_ioctl(struct client *client,
			  unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int ret;

	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (ret < 0)
		return ret;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return ret;
}

static long fw_device_op_ioctl(struct file *file,
			       unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
				      unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif

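/*
 * mmap() of this character device maps the single isochronous payload
 * buffer.  The mapping must be shared, page-aligned and a whole number of
 * pages; its user virtual start address is remembered in client->vm_start
 * so that ioctl_queue_iso() can translate user pointers into offsets
 * within client->buffer.
 */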
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}

static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);
	client_put(client);

	return 0;
}

static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	client_put(client);

	return 0;
}

static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
	struct client *client = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &client->wait, pt);

	if (fw_device_is_shutdown(client->device))
		mask |= POLLHUP | POLLERR;
	if (!list_empty(&client->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

const struct file_operations fw_device_ops = {
	.owner		= THIS_MODULE,
	.open		= fw_device_op_open,
	.read		= fw_device_op_read,
	.unlocked_ioctl	= fw_device_op_ioctl,
	.poll		= fw_device_op_poll,
	.release	= fw_device_op_release,
	.mmap		= fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};
1459