1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Char device for device raw access
4 *
5 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
6 */
7
8 #include <linux/bug.h>
9 #include <linux/compat.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/firewire.h>
16 #include <linux/firewire-cdev.h>
17 #include <linux/idr.h>
18 #include <linux/irqflags.h>
19 #include <linux/jiffies.h>
20 #include <linux/kernel.h>
21 #include <linux/kref.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/mutex.h>
25 #include <linux/poll.h>
26 #include <linux/sched.h> /* required for linux/wait.h */
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/time.h>
31 #include <linux/uaccess.h>
32 #include <linux/vmalloc.h>
33 #include <linux/wait.h>
34 #include <linux/workqueue.h>
35
36
37 #include "core.h"
38 #include <trace/events/firewire.h>
39
40 /*
41 * ABI version history is documented in linux/firewire-cdev.h.
42 */
43 #define FW_CDEV_KERNEL_VERSION 5
44 #define FW_CDEV_VERSION_EVENT_REQUEST2 4
45 #define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
46 #define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
47 #define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
48
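/*
 * Rough usage sketch (illustrative, not part of this file): a process opens a
 * /dev/fw* character device, negotiates the ABI version with the
 * FW_CDEV_IOC_GET_INFO ioctl, issues further ioctls from the table near the
 * end of this file, read()s the resulting events, and may mmap() an
 * isochronous buffer.
 */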
49 struct client {
50 u32 version;
51 struct fw_device *device;
52
53 spinlock_t lock;
54 bool in_shutdown;
55 struct idr resource_idr;
56 struct list_head event_list;
57 wait_queue_head_t wait;
58 wait_queue_head_t tx_flush_wait;
59 u64 bus_reset_closure;
60
61 struct fw_iso_context *iso_context;
62 u64 iso_closure;
63 struct fw_iso_buffer buffer;
64 unsigned long vm_start;
65 bool buffer_is_mapped;
66
67 struct list_head phy_receiver_link;
68 u64 phy_receiver_closure;
69
70 struct list_head link;
71 struct kref kref;
72 };
73
static inline void client_get(struct client *client)
75 {
76 kref_get(&client->kref);
77 }
78
static void client_release(struct kref *kref)
80 {
81 struct client *client = container_of(kref, struct client, kref);
82
83 fw_device_put(client->device);
84 kfree(client);
85 }
86
static void client_put(struct client *client)
88 {
89 kref_put(&client->kref, client_release);
90 }
91
92 struct client_resource;
93 typedef void (*client_resource_release_fn_t)(struct client *,
94 struct client_resource *);
95 struct client_resource {
96 client_resource_release_fn_t release;
97 int handle;
98 };
99
100 struct address_handler_resource {
101 struct client_resource resource;
102 struct fw_address_handler handler;
103 __u64 closure;
104 struct client *client;
105 };
106
107 struct outbound_transaction_resource {
108 struct client_resource resource;
109 struct fw_transaction transaction;
110 };
111
112 struct inbound_transaction_resource {
113 struct client_resource resource;
114 struct fw_card *card;
115 struct fw_request *request;
116 bool is_fcp;
117 void *data;
118 size_t length;
119 };
120
121 struct descriptor_resource {
122 struct client_resource resource;
123 struct fw_descriptor descriptor;
124 u32 data[];
125 };
126
127 struct iso_resource {
128 struct client_resource resource;
129 struct client *client;
130 /* Schedule work and access todo only with client->lock held. */
131 struct delayed_work work;
132 enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
133 ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
134 int generation;
135 u64 channels;
136 s32 bandwidth;
137 struct iso_resource_event *e_alloc, *e_dealloc;
138 };
139
140 static void release_iso_resource(struct client *, struct client_resource *);
141
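/* Hold a client reference for the queued work; drop it at once if the work was already pending. */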
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
143 {
144 client_get(r->client);
145 if (!queue_delayed_work(fw_workqueue, &r->work, delay))
146 client_put(r->client);
147 }
148
static void schedule_if_iso_resource(struct client_resource *resource)
150 {
151 if (resource->release == release_iso_resource)
152 schedule_iso_resource(container_of(resource,
153 struct iso_resource, resource), 0);
154 }
155
156 /*
157 * dequeue_event() just kfree()'s the event, so the event has to be
158 * the first field in a struct XYZ_event.
159 */
160 struct event {
161 struct { void *data; size_t size; } v[2];
162 struct list_head link;
163 };
164
165 struct bus_reset_event {
166 struct event event;
167 struct fw_cdev_event_bus_reset reset;
168 };
169
170 struct outbound_transaction_event {
171 struct event event;
172 struct client *client;
173 struct outbound_transaction_resource r;
174 union {
175 struct fw_cdev_event_response without_tstamp;
176 struct fw_cdev_event_response2 with_tstamp;
177 } rsp;
178 };
179
180 struct inbound_transaction_event {
181 struct event event;
182 union {
183 struct fw_cdev_event_request request;
184 struct fw_cdev_event_request2 request2;
185 struct fw_cdev_event_request3 with_tstamp;
186 } req;
187 };
188
189 struct iso_interrupt_event {
190 struct event event;
191 struct fw_cdev_event_iso_interrupt interrupt;
192 };
193
194 struct iso_interrupt_mc_event {
195 struct event event;
196 struct fw_cdev_event_iso_interrupt_mc interrupt;
197 };
198
199 struct iso_resource_event {
200 struct event event;
201 struct fw_cdev_event_iso_resource iso_resource;
202 };
203
204 struct outbound_phy_packet_event {
205 struct event event;
206 struct client *client;
207 struct fw_packet p;
208 union {
209 struct fw_cdev_event_phy_packet without_tstamp;
210 struct fw_cdev_event_phy_packet2 with_tstamp;
211 } phy_packet;
212 };
213
214 struct inbound_phy_packet_event {
215 struct event event;
216 union {
217 struct fw_cdev_event_phy_packet without_tstamp;
218 struct fw_cdev_event_phy_packet2 with_tstamp;
219 } phy_packet;
220 };
221
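/*
 * The cdev ABI carries user pointers in u64 fields.  These helpers convert
 * them to and from kernel-visible user pointers, taking 32-bit compat
 * syscalls into account.
 */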
222 #ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
224 {
225 if (in_compat_syscall())
226 return compat_ptr(value);
227 else
228 return (void __user *)(unsigned long)value;
229 }
230
static u64 uptr_to_u64(void __user *ptr)
232 {
233 if (in_compat_syscall())
234 return ptr_to_compat(ptr);
235 else
236 return (u64)(unsigned long)ptr;
237 }
238 #else
static inline void __user *u64_to_uptr(u64 value)
240 {
241 return (void __user *)(unsigned long)value;
242 }
243
static inline u64 uptr_to_u64(void __user *ptr)
245 {
246 return (u64)(unsigned long)ptr;
247 }
248 #endif /* CONFIG_COMPAT */
249
static int fw_device_op_open(struct inode *inode, struct file *file)
251 {
252 struct fw_device *device;
253 struct client *client;
254
255 device = fw_device_get_by_devt(inode->i_rdev);
256 if (device == NULL)
257 return -ENODEV;
258
259 if (fw_device_is_shutdown(device)) {
260 fw_device_put(device);
261 return -ENODEV;
262 }
263
264 client = kzalloc(sizeof(*client), GFP_KERNEL);
265 if (client == NULL) {
266 fw_device_put(device);
267 return -ENOMEM;
268 }
269
270 client->device = device;
271 spin_lock_init(&client->lock);
272 idr_init(&client->resource_idr);
273 INIT_LIST_HEAD(&client->event_list);
274 init_waitqueue_head(&client->wait);
275 init_waitqueue_head(&client->tx_flush_wait);
276 INIT_LIST_HEAD(&client->phy_receiver_link);
277 INIT_LIST_HEAD(&client->link);
278 kref_init(&client->kref);
279
280 file->private_data = client;
281
282 return nonseekable_open(inode, file);
283 }
284
static void queue_event(struct client *client, struct event *event,
286 void *data0, size_t size0, void *data1, size_t size1)
287 {
288 unsigned long flags;
289
290 event->v[0].data = data0;
291 event->v[0].size = size0;
292 event->v[1].data = data1;
293 event->v[1].size = size1;
294
295 spin_lock_irqsave(&client->lock, flags);
296 if (client->in_shutdown)
297 kfree(event);
298 else
299 list_add_tail(&event->link, &client->event_list);
300 spin_unlock_irqrestore(&client->lock, flags);
301
302 wake_up_interruptible(&client->wait);
303 }
304
static int dequeue_event(struct client *client,
306 char __user *buffer, size_t count)
307 {
308 struct event *event;
309 size_t size, total;
310 int i, ret;
311
312 ret = wait_event_interruptible(client->wait,
313 !list_empty(&client->event_list) ||
314 fw_device_is_shutdown(client->device));
315 if (ret < 0)
316 return ret;
317
318 if (list_empty(&client->event_list) &&
319 fw_device_is_shutdown(client->device))
320 return -ENODEV;
321
322 spin_lock_irq(&client->lock);
323 event = list_first_entry(&client->event_list, struct event, link);
324 list_del(&event->link);
325 spin_unlock_irq(&client->lock);
326
327 total = 0;
328 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
329 size = min(event->v[i].size, count - total);
330 if (copy_to_user(buffer + total, event->v[i].data, size)) {
331 ret = -EFAULT;
332 goto out;
333 }
334 total += size;
335 }
336 ret = total;
337
338 out:
339 kfree(event);
340
341 return ret;
342 }
343
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
345 size_t count, loff_t *offset)
346 {
347 struct client *client = file->private_data;
348
349 return dequeue_event(client, buffer, count);
350 }
351
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
353 struct client *client)
354 {
355 struct fw_card *card = client->device->card;
356
357 spin_lock_irq(&card->lock);
358
359 event->closure = client->bus_reset_closure;
360 event->type = FW_CDEV_EVENT_BUS_RESET;
361 event->generation = client->device->generation;
362 event->node_id = client->device->node_id;
363 event->local_node_id = card->local_node->node_id;
364 event->bm_node_id = card->bm_node_id;
365 event->irm_node_id = card->irm_node->node_id;
366 event->root_node_id = card->root_node->node_id;
367
368 spin_unlock_irq(&card->lock);
369 }
370
static void for_each_client(struct fw_device *device,
372 void (*callback)(struct client *client))
373 {
374 struct client *c;
375
376 mutex_lock(&device->client_list_mutex);
377 list_for_each_entry(c, &device->client_list, link)
378 callback(c);
379 mutex_unlock(&device->client_list_mutex);
380 }
381
static int schedule_reallocations(int id, void *p, void *data)
383 {
384 schedule_if_iso_resource(p);
385
386 return 0;
387 }
388
static void queue_bus_reset_event(struct client *client)
390 {
391 struct bus_reset_event *e;
392
393 e = kzalloc(sizeof(*e), GFP_KERNEL);
394 if (e == NULL)
395 return;
396
397 fill_bus_reset_event(&e->reset, client);
398
399 queue_event(client, &e->event,
400 &e->reset, sizeof(e->reset), NULL, 0);
401
402 spin_lock_irq(&client->lock);
403 idr_for_each(&client->resource_idr, schedule_reallocations, client);
404 spin_unlock_irq(&client->lock);
405 }
406
void fw_device_cdev_update(struct fw_device *device)
408 {
409 for_each_client(device, queue_bus_reset_event);
410 }
411
static void wake_up_client(struct client *client)
413 {
414 wake_up_interruptible(&client->wait);
415 }
416
void fw_device_cdev_remove(struct fw_device *device)
418 {
419 for_each_client(device, wake_up_client);
420 }
421
422 union ioctl_arg {
423 struct fw_cdev_get_info get_info;
424 struct fw_cdev_send_request send_request;
425 struct fw_cdev_allocate allocate;
426 struct fw_cdev_deallocate deallocate;
427 struct fw_cdev_send_response send_response;
428 struct fw_cdev_initiate_bus_reset initiate_bus_reset;
429 struct fw_cdev_add_descriptor add_descriptor;
430 struct fw_cdev_remove_descriptor remove_descriptor;
431 struct fw_cdev_create_iso_context create_iso_context;
432 struct fw_cdev_queue_iso queue_iso;
433 struct fw_cdev_start_iso start_iso;
434 struct fw_cdev_stop_iso stop_iso;
435 struct fw_cdev_get_cycle_timer get_cycle_timer;
436 struct fw_cdev_allocate_iso_resource allocate_iso_resource;
437 struct fw_cdev_send_stream_packet send_stream_packet;
438 struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
439 struct fw_cdev_send_phy_packet send_phy_packet;
440 struct fw_cdev_receive_phy_packets receive_phy_packets;
441 struct fw_cdev_set_iso_channels set_iso_channels;
442 struct fw_cdev_flush_iso flush_iso;
443 };
444
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
446 {
447 struct fw_cdev_get_info *a = &arg->get_info;
448 struct fw_cdev_event_bus_reset bus_reset;
449 unsigned long ret = 0;
450
451 client->version = a->version;
452 a->version = FW_CDEV_KERNEL_VERSION;
453 a->card = client->device->card->index;
454
455 down_read(&fw_device_rwsem);
456
457 if (a->rom != 0) {
458 size_t want = a->rom_length;
459 size_t have = client->device->config_rom_length * 4;
460
461 ret = copy_to_user(u64_to_uptr(a->rom),
462 client->device->config_rom, min(want, have));
463 }
464 a->rom_length = client->device->config_rom_length * 4;
465
466 up_read(&fw_device_rwsem);
467
468 if (ret != 0)
469 return -EFAULT;
470
471 mutex_lock(&client->device->client_list_mutex);
472
473 client->bus_reset_closure = a->bus_reset_closure;
474 if (a->bus_reset != 0) {
475 fill_bus_reset_event(&bus_reset, client);
476 /* unaligned size of bus_reset is 36 bytes */
477 ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
478 }
479 if (ret == 0 && list_empty(&client->link))
480 list_add_tail(&client->link, &client->device->client_list);
481
482 mutex_unlock(&client->device->client_list_mutex);
483
484 return ret ? -EFAULT : 0;
485 }
486
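/*
 * Allocate an idr handle for the resource.  If the gfp mask allows blocking,
 * idr memory is preallocated outside the spinlock; the allocation itself runs
 * with GFP_NOWAIT under client->lock.
 */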
static int add_client_resource(struct client *client,
488 struct client_resource *resource, gfp_t gfp_mask)
489 {
490 bool preload = gfpflags_allow_blocking(gfp_mask);
491 unsigned long flags;
492 int ret;
493
494 if (preload)
495 idr_preload(gfp_mask);
496 spin_lock_irqsave(&client->lock, flags);
497
498 if (client->in_shutdown)
499 ret = -ECANCELED;
500 else
501 ret = idr_alloc(&client->resource_idr, resource, 0, 0,
502 GFP_NOWAIT);
503 if (ret >= 0) {
504 resource->handle = ret;
505 client_get(client);
506 schedule_if_iso_resource(resource);
507 }
508
509 spin_unlock_irqrestore(&client->lock, flags);
510 if (preload)
511 idr_preload_end();
512
513 return ret < 0 ? ret : 0;
514 }
515
static int release_client_resource(struct client *client, u32 handle,
517 client_resource_release_fn_t release,
518 struct client_resource **return_resource)
519 {
520 struct client_resource *resource;
521
522 spin_lock_irq(&client->lock);
523 if (client->in_shutdown)
524 resource = NULL;
525 else
526 resource = idr_find(&client->resource_idr, handle);
527 if (resource && resource->release == release)
528 idr_remove(&client->resource_idr, handle);
529 spin_unlock_irq(&client->lock);
530
531 if (!(resource && resource->release == release))
532 return -EINVAL;
533
534 if (return_resource)
535 *return_resource = resource;
536 else
537 resource->release(client, resource);
538
539 client_put(client);
540
541 return 0;
542 }
543
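/*
 * Outbound transaction resources are removed from the idr by
 * complete_transaction(); by the time resources are shut down none of them
 * remain (see fw_device_op_release()), so there is nothing to release here.
 */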
static void release_transaction(struct client *client,
545 struct client_resource *resource)
546 {
547 }
548
static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
550 u32 response_tstamp, void *payload, size_t length, void *data)
551 {
552 struct outbound_transaction_event *e = data;
553 struct client *client = e->client;
554 unsigned long flags;
555
556 spin_lock_irqsave(&client->lock, flags);
557 idr_remove(&client->resource_idr, e->r.resource.handle);
558 if (client->in_shutdown)
559 wake_up(&client->tx_flush_wait);
560 spin_unlock_irqrestore(&client->lock, flags);
561
562 switch (e->rsp.without_tstamp.type) {
563 case FW_CDEV_EVENT_RESPONSE:
564 {
565 struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
566
567 if (length < rsp->length)
568 rsp->length = length;
569 if (rcode == RCODE_COMPLETE)
570 memcpy(rsp->data, payload, rsp->length);
571
572 rsp->rcode = rcode;
573
574 // In the case that sizeof(*rsp) doesn't align with the position of the
575 // data, and the read is short, preserve an extra copy of the data
576 // to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
577 // for short reads and some apps depended on it, this is both safe
578 // and prudent for compatibility.
579 if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
580 queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
581 else
582 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
583
584 break;
585 }
586 case FW_CDEV_EVENT_RESPONSE2:
587 {
588 struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
589
590 if (length < rsp->length)
591 rsp->length = length;
592 if (rcode == RCODE_COMPLETE)
593 memcpy(rsp->data, payload, rsp->length);
594
595 rsp->rcode = rcode;
596 rsp->request_tstamp = request_tstamp;
597 rsp->response_tstamp = response_tstamp;
598
599 queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
600
break;
}
default:
WARN_ON(1);
break;
606 }
607
608 /* Drop the idr's reference */
609 client_put(client);
610 }
611
static int init_request(struct client *client,
613 struct fw_cdev_send_request *request,
614 int destination_id, int speed)
615 {
616 struct outbound_transaction_event *e;
617 void *payload;
618 int ret;
619
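/* An asynchronous packet payload is at most 512 << speed bytes, and never more than 4096 bytes. */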
620 if (request->tcode != TCODE_STREAM_DATA &&
621 (request->length > 4096 || request->length > 512 << speed))
622 return -EIO;
623
624 if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
625 request->length < 4)
626 return -EINVAL;
627
628 e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
629 if (e == NULL)
630 return -ENOMEM;
631 e->client = client;
632
633 if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
634 struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
635
636 rsp->type = FW_CDEV_EVENT_RESPONSE;
637 rsp->length = request->length;
638 rsp->closure = request->closure;
639 payload = rsp->data;
640 } else {
641 struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
642
643 rsp->type = FW_CDEV_EVENT_RESPONSE2;
644 rsp->length = request->length;
645 rsp->closure = request->closure;
646 payload = rsp->data;
647 }
648
649 if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
650 ret = -EFAULT;
651 goto failed;
652 }
653
654 e->r.resource.release = release_transaction;
655 ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
656 if (ret < 0)
657 goto failed;
658
659 fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
660 destination_id, request->generation, speed, request->offset,
661 payload, request->length, complete_transaction, e);
662 return 0;
663
664 failed:
665 kfree(e);
666
667 return ret;
668 }
669
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
671 {
672 switch (arg->send_request.tcode) {
673 case TCODE_WRITE_QUADLET_REQUEST:
674 case TCODE_WRITE_BLOCK_REQUEST:
675 case TCODE_READ_QUADLET_REQUEST:
676 case TCODE_READ_BLOCK_REQUEST:
677 case TCODE_LOCK_MASK_SWAP:
678 case TCODE_LOCK_COMPARE_SWAP:
679 case TCODE_LOCK_FETCH_ADD:
680 case TCODE_LOCK_LITTLE_ADD:
681 case TCODE_LOCK_BOUNDED_ADD:
682 case TCODE_LOCK_WRAP_ADD:
683 case TCODE_LOCK_VENDOR_DEPENDENT:
684 break;
685 default:
686 return -EINVAL;
687 }
688
689 return init_request(client, &arg->send_request, client->device->node_id,
690 client->device->max_speed);
691 }
692
static void release_request(struct client *client,
694 struct client_resource *resource)
695 {
696 struct inbound_transaction_resource *r = container_of(resource,
697 struct inbound_transaction_resource, resource);
698
699 if (r->is_fcp)
700 fw_request_put(r->request);
701 else
702 fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
703
704 fw_card_put(r->card);
705 kfree(r);
706 }
707
static void handle_request(struct fw_card *card, struct fw_request *request,
709 int tcode, int destination, int source,
710 int generation, unsigned long long offset,
711 void *payload, size_t length, void *callback_data)
712 {
713 struct address_handler_resource *handler = callback_data;
714 bool is_fcp = is_in_fcp_region(offset, length);
715 struct inbound_transaction_resource *r;
716 struct inbound_transaction_event *e;
717 size_t event_size0;
718 int ret;
719
720 /* card may be different from handler->client->device->card */
721 fw_card_get(card);
722
// Extend the lifetime of the request so that its payload remains safely accessible
// from the client's process context.
725 if (is_fcp)
726 fw_request_get(request);
727
728 r = kmalloc(sizeof(*r), GFP_ATOMIC);
729 e = kmalloc(sizeof(*e), GFP_ATOMIC);
730 if (r == NULL || e == NULL)
731 goto failed;
732
733 r->card = card;
734 r->request = request;
735 r->is_fcp = is_fcp;
736 r->data = payload;
737 r->length = length;
738
739 r->resource.release = release_request;
740 ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
741 if (ret < 0)
742 goto failed;
743
744 if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
745 struct fw_cdev_event_request *req = &e->req.request;
746
747 if (tcode & 0x10)
748 tcode = TCODE_LOCK_REQUEST;
749
750 req->type = FW_CDEV_EVENT_REQUEST;
751 req->tcode = tcode;
752 req->offset = offset;
753 req->length = length;
754 req->handle = r->resource.handle;
755 req->closure = handler->closure;
756 event_size0 = sizeof(*req);
757 } else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
758 struct fw_cdev_event_request2 *req = &e->req.request2;
759
760 req->type = FW_CDEV_EVENT_REQUEST2;
761 req->tcode = tcode;
762 req->offset = offset;
763 req->source_node_id = source;
764 req->destination_node_id = destination;
765 req->card = card->index;
766 req->generation = generation;
767 req->length = length;
768 req->handle = r->resource.handle;
769 req->closure = handler->closure;
770 event_size0 = sizeof(*req);
771 } else {
772 struct fw_cdev_event_request3 *req = &e->req.with_tstamp;
773
774 req->type = FW_CDEV_EVENT_REQUEST3;
775 req->tcode = tcode;
776 req->offset = offset;
777 req->source_node_id = source;
778 req->destination_node_id = destination;
779 req->card = card->index;
780 req->generation = generation;
781 req->length = length;
782 req->handle = r->resource.handle;
783 req->closure = handler->closure;
784 req->tstamp = fw_request_get_timestamp(request);
785 event_size0 = sizeof(*req);
786 }
787
788 queue_event(handler->client, &e->event,
789 &e->req, event_size0, r->data, length);
790 return;
791
792 failed:
793 kfree(r);
794 kfree(e);
795
796 if (!is_fcp)
797 fw_send_response(card, request, RCODE_CONFLICT_ERROR);
798 else
799 fw_request_put(request);
800
801 fw_card_put(card);
802 }
803
static void release_address_handler(struct client *client,
805 struct client_resource *resource)
806 {
807 struct address_handler_resource *r =
808 container_of(resource, struct address_handler_resource, resource);
809
810 fw_core_remove_address_handler(&r->handler);
811 kfree(r);
812 }
813
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
815 {
816 struct fw_cdev_allocate *a = &arg->allocate;
817 struct address_handler_resource *r;
818 struct fw_address_region region;
819 int ret;
820
821 r = kmalloc(sizeof(*r), GFP_KERNEL);
822 if (r == NULL)
823 return -ENOMEM;
824
825 region.start = a->offset;
826 if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
827 region.end = a->offset + a->length;
828 else
829 region.end = a->region_end;
830
831 r->handler.length = a->length;
832 r->handler.address_callback = handle_request;
833 r->handler.callback_data = r;
834 r->closure = a->closure;
835 r->client = client;
836
ret = fw_core_add_address_handler(&r->handler, &region);
838 if (ret < 0) {
839 kfree(r);
840 return ret;
841 }
842 a->offset = r->handler.offset;
843
844 r->resource.release = release_address_handler;
845 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
846 if (ret < 0) {
847 release_address_handler(client, &r->resource);
848 return ret;
849 }
850 a->handle = r->resource.handle;
851
852 return 0;
853 }
854
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
856 {
857 return release_client_resource(client, arg->deallocate.handle,
858 release_address_handler, NULL);
859 }
860
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
862 {
863 struct fw_cdev_send_response *a = &arg->send_response;
864 struct client_resource *resource;
865 struct inbound_transaction_resource *r;
866 int ret = 0;
867
868 if (release_client_resource(client, a->handle,
869 release_request, &resource) < 0)
870 return -EINVAL;
871
872 r = container_of(resource, struct inbound_transaction_resource,
873 resource);
874 if (r->is_fcp) {
875 fw_request_put(r->request);
876 goto out;
877 }
878
879 if (a->length != fw_get_response_length(r->request)) {
880 ret = -EINVAL;
881 fw_request_put(r->request);
882 goto out;
883 }
884 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
885 ret = -EFAULT;
886 fw_request_put(r->request);
887 goto out;
888 }
889 fw_send_response(r->card, r->request, a->rcode);
890 out:
891 fw_card_put(r->card);
892 kfree(r);
893
894 return ret;
895 }
896
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
898 {
899 fw_schedule_bus_reset(client->device->card, true,
900 arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
901 return 0;
902 }
903
static void release_descriptor(struct client *client,
905 struct client_resource *resource)
906 {
907 struct descriptor_resource *r =
908 container_of(resource, struct descriptor_resource, resource);
909
910 fw_core_remove_descriptor(&r->descriptor);
911 kfree(r);
912 }
913
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
915 {
916 struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
917 struct descriptor_resource *r;
918 int ret;
919
920 /* Access policy: Allow this ioctl only on local nodes' device files. */
921 if (!client->device->is_local)
922 return -ENOSYS;
923
924 if (a->length > 256)
925 return -EINVAL;
926
927 r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
928 if (r == NULL)
929 return -ENOMEM;
930
931 if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
932 ret = -EFAULT;
933 goto failed;
934 }
935
936 r->descriptor.length = a->length;
937 r->descriptor.immediate = a->immediate;
938 r->descriptor.key = a->key;
939 r->descriptor.data = r->data;
940
941 ret = fw_core_add_descriptor(&r->descriptor);
942 if (ret < 0)
943 goto failed;
944
945 r->resource.release = release_descriptor;
946 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
947 if (ret < 0) {
948 fw_core_remove_descriptor(&r->descriptor);
949 goto failed;
950 }
951 a->handle = r->resource.handle;
952
953 return 0;
954 failed:
955 kfree(r);
956
957 return ret;
958 }
959
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
961 {
962 return release_client_resource(client, arg->remove_descriptor.handle,
963 release_descriptor, NULL);
964 }
965
static void iso_callback(struct fw_iso_context *context, u32 cycle,
967 size_t header_length, void *header, void *data)
968 {
969 struct client *client = data;
970 struct iso_interrupt_event *e;
971
972 e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
973 if (e == NULL)
974 return;
975
976 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
977 e->interrupt.closure = client->iso_closure;
978 e->interrupt.cycle = cycle;
979 e->interrupt.header_length = header_length;
980 memcpy(e->interrupt.header, header, header_length);
981 queue_event(client, &e->event, &e->interrupt,
982 sizeof(e->interrupt) + header_length, NULL, 0);
983 }
984
static void iso_mc_callback(struct fw_iso_context *context,
986 dma_addr_t completed, void *data)
987 {
988 struct client *client = data;
989 struct iso_interrupt_mc_event *e;
990
991 e = kmalloc(sizeof(*e), GFP_ATOMIC);
992 if (e == NULL)
993 return;
994
995 e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
996 e->interrupt.closure = client->iso_closure;
997 e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
998 completed);
999 queue_event(client, &e->event, &e->interrupt,
1000 sizeof(e->interrupt), NULL, 0);
1001 }
1002
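/* Transmit contexts DMA from the buffer to the device; receive contexts DMA into it. */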
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
1004 {
1005 if (context->type == FW_ISO_CONTEXT_TRANSMIT)
1006 return DMA_TO_DEVICE;
1007 else
1008 return DMA_FROM_DEVICE;
1009 }
1010
static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
1012 fw_iso_mc_callback_t callback,
1013 void *callback_data)
1014 {
1015 struct fw_iso_context *ctx;
1016
1017 ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
1018 0, 0, 0, NULL, callback_data);
1019 if (!IS_ERR(ctx))
1020 ctx->callback.mc = callback;
1021
1022 return ctx;
1023 }
1024
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
1026 {
1027 struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
1028 struct fw_iso_context *context;
1029 union fw_iso_callback cb;
1030 int ret;
1031
1032 BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
1033 FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
1034 FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
1035 FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
1036
1037 switch (a->type) {
1038 case FW_ISO_CONTEXT_TRANSMIT:
1039 if (a->speed > SCODE_3200 || a->channel > 63)
1040 return -EINVAL;
1041
1042 cb.sc = iso_callback;
1043 break;
1044
1045 case FW_ISO_CONTEXT_RECEIVE:
1046 if (a->header_size < 4 || (a->header_size & 3) ||
1047 a->channel > 63)
1048 return -EINVAL;
1049
1050 cb.sc = iso_callback;
1051 break;
1052
1053 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1054 cb.mc = iso_mc_callback;
1055 break;
1056
1057 default:
1058 return -EINVAL;
1059 }
1060
1061 if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
1062 context = fw_iso_mc_context_create(client->device->card, cb.mc,
1063 client);
1064 else
1065 context = fw_iso_context_create(client->device->card, a->type,
1066 a->channel, a->speed,
1067 a->header_size, cb.sc, client);
1068 if (IS_ERR(context))
1069 return PTR_ERR(context);
1070 if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
1071 context->drop_overflow_headers = true;
1072
1073 /* We only support one context at this time. */
1074 spin_lock_irq(&client->lock);
1075 if (client->iso_context != NULL) {
1076 spin_unlock_irq(&client->lock);
1077 fw_iso_context_destroy(context);
1078
1079 return -EBUSY;
1080 }
1081 if (!client->buffer_is_mapped) {
1082 ret = fw_iso_buffer_map_dma(&client->buffer,
1083 client->device->card,
1084 iso_dma_direction(context));
1085 if (ret < 0) {
1086 spin_unlock_irq(&client->lock);
1087 fw_iso_context_destroy(context);
1088
1089 return ret;
1090 }
1091 client->buffer_is_mapped = true;
1092 }
1093 client->iso_closure = a->closure;
1094 client->iso_context = context;
1095 spin_unlock_irq(&client->lock);
1096
1097 a->handle = 0;
1098
1099 return 0;
1100 }
1101
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
1103 {
1104 struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
1105 struct fw_iso_context *ctx = client->iso_context;
1106
1107 if (ctx == NULL || a->handle != 0)
1108 return -EINVAL;
1109
1110 return fw_iso_context_set_channels(ctx, &a->channels);
1111 }
1112
1113 /* Macros for decoding the iso packet control header. */
1114 #define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
1115 #define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
1116 #define GET_SKIP(v) (((v) >> 17) & 0x01)
1117 #define GET_TAG(v) (((v) >> 18) & 0x03)
1118 #define GET_SY(v) (((v) >> 20) & 0x0f)
1119 #define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
1120
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
1122 {
1123 struct fw_cdev_queue_iso *a = &arg->queue_iso;
1124 struct fw_cdev_iso_packet __user *p, *end, *next;
1125 struct fw_iso_context *ctx = client->iso_context;
1126 unsigned long payload, buffer_end, transmit_header_bytes = 0;
1127 u32 control;
1128 int count;
1129 struct {
1130 struct fw_iso_packet packet;
1131 u8 header[256];
1132 } u;
1133
1134 if (ctx == NULL || a->handle != 0)
1135 return -EINVAL;
1136
1137 /*
1138 * If the user passes a non-NULL data pointer, has mmap()'ed
1139 * the iso buffer, and the pointer points inside the buffer,
1140 * we setup the payload pointers accordingly. Otherwise we
1141 * set them both to 0, which will still let packets with
1142 * payload_length == 0 through. In other words, if no packets
1143 * use the indirect payload, the iso buffer need not be mapped
1144 * and the a->data pointer is ignored.
1145 */
1146 payload = (unsigned long)a->data - client->vm_start;
1147 buffer_end = client->buffer.page_count << PAGE_SHIFT;
1148 if (a->data == 0 || client->buffer.pages == NULL ||
1149 payload >= buffer_end) {
1150 payload = 0;
1151 buffer_end = 0;
1152 }
1153
1154 if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
1155 return -EINVAL;
1156
1157 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
1158
1159 end = (void __user *)p + a->size;
1160 count = 0;
1161 while (p < end) {
1162 if (get_user(control, &p->control))
1163 return -EFAULT;
1164 u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
1165 u.packet.interrupt = GET_INTERRUPT(control);
1166 u.packet.skip = GET_SKIP(control);
1167 u.packet.tag = GET_TAG(control);
1168 u.packet.sy = GET_SY(control);
1169 u.packet.header_length = GET_HEADER_LENGTH(control);
1170
1171 switch (ctx->type) {
1172 case FW_ISO_CONTEXT_TRANSMIT:
1173 if (u.packet.header_length & 3)
1174 return -EINVAL;
1175 transmit_header_bytes = u.packet.header_length;
1176 break;
1177
1178 case FW_ISO_CONTEXT_RECEIVE:
1179 if (u.packet.header_length == 0 ||
1180 u.packet.header_length % ctx->header_size != 0)
1181 return -EINVAL;
1182 break;
1183
1184 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
1185 if (u.packet.payload_length == 0 ||
1186 u.packet.payload_length & 3)
1187 return -EINVAL;
1188 break;
1189 }
1190
1191 next = (struct fw_cdev_iso_packet __user *)
1192 &p->header[transmit_header_bytes / 4];
1193 if (next > end)
1194 return -EINVAL;
1195 if (copy_from_user
1196 (u.packet.header, p->header, transmit_header_bytes))
1197 return -EFAULT;
1198 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
1199 u.packet.header_length + u.packet.payload_length > 0)
1200 return -EINVAL;
1201 if (payload + u.packet.payload_length > buffer_end)
1202 return -EINVAL;
1203
1204 if (fw_iso_context_queue(ctx, &u.packet,
1205 &client->buffer, payload))
1206 break;
1207
1208 p = next;
1209 payload += u.packet.payload_length;
1210 count++;
1211 }
1212 fw_iso_context_queue_flush(ctx);
1213
1214 a->size -= uptr_to_u64(p) - a->packets;
1215 a->packets = uptr_to_u64(p);
1216 a->data = client->vm_start + payload;
1217
1218 return count;
1219 }
1220
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
1222 {
1223 struct fw_cdev_start_iso *a = &arg->start_iso;
1224
1225 BUILD_BUG_ON(
1226 FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
1227 FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
1228 FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
1229 FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
1230 FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
1231
1232 if (client->iso_context == NULL || a->handle != 0)
1233 return -EINVAL;
1234
1235 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
1236 (a->tags == 0 || a->tags > 15 || a->sync > 15))
1237 return -EINVAL;
1238
1239 return fw_iso_context_start(client->iso_context,
1240 a->cycle, a->sync, a->tags);
1241 }
1242
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
1244 {
1245 struct fw_cdev_stop_iso *a = &arg->stop_iso;
1246
1247 if (client->iso_context == NULL || a->handle != 0)
1248 return -EINVAL;
1249
1250 return fw_iso_context_stop(client->iso_context);
1251 }
1252
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
1254 {
1255 struct fw_cdev_flush_iso *a = &arg->flush_iso;
1256
1257 if (client->iso_context == NULL || a->handle != 0)
1258 return -EINVAL;
1259
1260 return fw_iso_context_flush_completions(client->iso_context);
1261 }
1262
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
1264 {
1265 struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
1266 struct fw_card *card = client->device->card;
1267 struct timespec64 ts = {0, 0};
1268 u32 cycle_time = 0;
1269 int ret = 0;
1270
1271 local_irq_disable();
1272
1273 ret = fw_card_read_cycle_time(card, &cycle_time);
1274 if (ret < 0)
1275 goto end;
1276
1277 switch (a->clk_id) {
1278 case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
1279 case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
1280 case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
1281 default:
1282 ret = -EINVAL;
1283 }
1284 end:
1285 local_irq_enable();
1286
1287 a->tv_sec = ts.tv_sec;
1288 a->tv_nsec = ts.tv_nsec;
1289 a->cycle_timer = cycle_time;
1290
1291 return ret;
1292 }
1293
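/* The older ioctl is implemented on top of ioctl_get_cycle_timer2() using CLOCK_REALTIME. */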
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
1295 {
1296 struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
1297 struct fw_cdev_get_cycle_timer2 ct2;
1298
1299 ct2.clk_id = CLOCK_REALTIME;
1300 ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
1301
1302 a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
1303 a->cycle_timer = ct2.cycle_timer;
1304
1305 return 0;
1306 }
1307
static void iso_resource_work(struct work_struct *work)
1309 {
1310 struct iso_resource_event *e;
1311 struct iso_resource *r =
1312 container_of(work, struct iso_resource, work.work);
1313 struct client *client = r->client;
1314 int generation, channel, bandwidth, todo;
1315 bool skip, free, success;
1316
1317 spin_lock_irq(&client->lock);
1318 generation = client->device->generation;
1319 todo = r->todo;
1320 /* Allow 1000ms grace period for other reallocations. */
1321 if (todo == ISO_RES_ALLOC &&
1322 time_before64(get_jiffies_64(),
1323 client->device->card->reset_jiffies + HZ)) {
1324 schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
1325 skip = true;
1326 } else {
1327 /* We could be called twice within the same generation. */
1328 skip = todo == ISO_RES_REALLOC &&
1329 r->generation == generation;
1330 }
1331 free = todo == ISO_RES_DEALLOC ||
1332 todo == ISO_RES_ALLOC_ONCE ||
1333 todo == ISO_RES_DEALLOC_ONCE;
1334 r->generation = generation;
1335 spin_unlock_irq(&client->lock);
1336
1337 if (skip)
1338 goto out;
1339
1340 bandwidth = r->bandwidth;
1341
1342 fw_iso_resource_manage(client->device->card, generation,
1343 r->channels, &channel, &bandwidth,
1344 todo == ISO_RES_ALLOC ||
1345 todo == ISO_RES_REALLOC ||
1346 todo == ISO_RES_ALLOC_ONCE);
1347 /*
1348 * Is this generation outdated already? As long as this resource sticks
1349 * in the idr, it will be scheduled again for a newer generation or at
1350 * shutdown.
1351 */
1352 if (channel == -EAGAIN &&
1353 (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
1354 goto out;
1355
1356 success = channel >= 0 || bandwidth > 0;
1357
1358 spin_lock_irq(&client->lock);
1359 /*
1360 * Transit from allocation to reallocation, except if the client
1361 * requested deallocation in the meantime.
1362 */
1363 if (r->todo == ISO_RES_ALLOC)
1364 r->todo = ISO_RES_REALLOC;
1365 /*
1366 * Allocation or reallocation failure? Pull this resource out of the
1367 * idr and prepare for deletion, unless the client is shutting down.
1368 */
1369 if (r->todo == ISO_RES_REALLOC && !success &&
1370 !client->in_shutdown &&
1371 idr_remove(&client->resource_idr, r->resource.handle)) {
1372 client_put(client);
1373 free = true;
1374 }
1375 spin_unlock_irq(&client->lock);
1376
1377 if (todo == ISO_RES_ALLOC && channel >= 0)
1378 r->channels = 1ULL << channel;
1379
1380 if (todo == ISO_RES_REALLOC && success)
1381 goto out;
1382
1383 if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
1384 e = r->e_alloc;
1385 r->e_alloc = NULL;
1386 } else {
1387 e = r->e_dealloc;
1388 r->e_dealloc = NULL;
1389 }
1390 e->iso_resource.handle = r->resource.handle;
1391 e->iso_resource.channel = channel;
1392 e->iso_resource.bandwidth = bandwidth;
1393
1394 queue_event(client, &e->event,
1395 &e->iso_resource, sizeof(e->iso_resource), NULL, 0);
1396
1397 if (free) {
1398 cancel_delayed_work(&r->work);
1399 kfree(r->e_alloc);
1400 kfree(r->e_dealloc);
1401 kfree(r);
1402 }
1403 out:
1404 client_put(client);
1405 }
1406
static void release_iso_resource(struct client *client,
1408 struct client_resource *resource)
1409 {
1410 struct iso_resource *r =
1411 container_of(resource, struct iso_resource, resource);
1412
1413 spin_lock_irq(&client->lock);
1414 r->todo = ISO_RES_DEALLOC;
1415 schedule_iso_resource(r, 0);
1416 spin_unlock_irq(&client->lock);
1417 }
1418
static int init_iso_resource(struct client *client,
1420 struct fw_cdev_allocate_iso_resource *request, int todo)
1421 {
1422 struct iso_resource_event *e1, *e2;
1423 struct iso_resource *r;
1424 int ret;
1425
1426 if ((request->channels == 0 && request->bandwidth == 0) ||
1427 request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
1428 return -EINVAL;
1429
1430 r = kmalloc(sizeof(*r), GFP_KERNEL);
1431 e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
1432 e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
1433 if (r == NULL || e1 == NULL || e2 == NULL) {
1434 ret = -ENOMEM;
1435 goto fail;
1436 }
1437
1438 INIT_DELAYED_WORK(&r->work, iso_resource_work);
1439 r->client = client;
1440 r->todo = todo;
1441 r->generation = -1;
1442 r->channels = request->channels;
1443 r->bandwidth = request->bandwidth;
1444 r->e_alloc = e1;
1445 r->e_dealloc = e2;
1446
1447 e1->iso_resource.closure = request->closure;
1448 e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
1449 e2->iso_resource.closure = request->closure;
1450 e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
1451
1452 if (todo == ISO_RES_ALLOC) {
1453 r->resource.release = release_iso_resource;
1454 ret = add_client_resource(client, &r->resource, GFP_KERNEL);
1455 if (ret < 0)
1456 goto fail;
1457 } else {
1458 r->resource.release = NULL;
1459 r->resource.handle = -1;
1460 schedule_iso_resource(r, 0);
1461 }
1462 request->handle = r->resource.handle;
1463
1464 return 0;
1465 fail:
1466 kfree(r);
1467 kfree(e1);
1468 kfree(e2);
1469
1470 return ret;
1471 }
1472
static int ioctl_allocate_iso_resource(struct client *client,
1474 union ioctl_arg *arg)
1475 {
1476 return init_iso_resource(client,
1477 &arg->allocate_iso_resource, ISO_RES_ALLOC);
1478 }
1479
static int ioctl_deallocate_iso_resource(struct client *client,
1481 union ioctl_arg *arg)
1482 {
1483 return release_client_resource(client,
1484 arg->deallocate.handle, release_iso_resource, NULL);
1485 }
1486
static int ioctl_allocate_iso_resource_once(struct client *client,
1488 union ioctl_arg *arg)
1489 {
1490 return init_iso_resource(client,
1491 &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
1492 }
1493
static int ioctl_deallocate_iso_resource_once(struct client *client,
1495 union ioctl_arg *arg)
1496 {
1497 return init_iso_resource(client,
1498 &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
1499 }
1500
1501 /*
1502 * Returns a speed code: Maximum speed to or from this device,
1503 * limited by the device's link speed, the local node's link speed,
1504 * and all PHY port speeds between the two links.
1505 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
1507 {
1508 return client->device->max_speed;
1509 }
1510
static int ioctl_send_broadcast_request(struct client *client,
1512 union ioctl_arg *arg)
1513 {
1514 struct fw_cdev_send_request *a = &arg->send_request;
1515
1516 switch (a->tcode) {
1517 case TCODE_WRITE_QUADLET_REQUEST:
1518 case TCODE_WRITE_BLOCK_REQUEST:
1519 break;
1520 default:
1521 return -EINVAL;
1522 }
1523
1524 /* Security policy: Only allow accesses to Units Space. */
1525 if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
1526 return -EACCES;
1527
1528 return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
1529 }
1530
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
1532 {
1533 struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
1534 struct fw_cdev_send_request request;
1535 int dest;
1536
1537 if (a->speed > client->device->card->link_speed ||
1538 a->length > 1024 << a->speed)
1539 return -EIO;
1540
1541 if (a->tag > 3 || a->channel > 63 || a->sy > 15)
1542 return -EINVAL;
1543
1544 dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
1545 request.tcode = TCODE_STREAM_DATA;
1546 request.length = a->length;
1547 request.closure = a->closure;
1548 request.data = a->data;
1549 request.generation = a->generation;
1550
1551 return init_request(client, &request, dest, a->speed);
1552 }
1553
static void outbound_phy_packet_callback(struct fw_packet *packet,
1555 struct fw_card *card, int status)
1556 {
1557 struct outbound_phy_packet_event *e =
1558 container_of(packet, struct outbound_phy_packet_event, p);
1559 struct client *e_client = e->client;
1560 u32 rcode;
1561
1562 trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation,
1563 packet->timestamp);
1564
1565 switch (status) {
1566 // expected:
1567 case ACK_COMPLETE:
1568 rcode = RCODE_COMPLETE;
1569 break;
1570 // should never happen with PHY packets:
1571 case ACK_PENDING:
1572 rcode = RCODE_COMPLETE;
1573 break;
1574 case ACK_BUSY_X:
1575 case ACK_BUSY_A:
1576 case ACK_BUSY_B:
1577 rcode = RCODE_BUSY;
1578 break;
1579 case ACK_DATA_ERROR:
1580 rcode = RCODE_DATA_ERROR;
1581 break;
1582 case ACK_TYPE_ERROR:
1583 rcode = RCODE_TYPE_ERROR;
1584 break;
1585 // stale generation; cancelled; on certain controllers: no ack
1586 default:
1587 rcode = status;
1588 break;
1589 }
1590
1591 switch (e->phy_packet.without_tstamp.type) {
1592 case FW_CDEV_EVENT_PHY_PACKET_SENT:
1593 {
1594 struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1595
1596 pp->rcode = rcode;
1597 pp->data[0] = packet->timestamp;
1598 queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
1599 NULL, 0);
1600 break;
1601 }
1602 case FW_CDEV_EVENT_PHY_PACKET_SENT2:
1603 {
1604 struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1605
1606 pp->rcode = rcode;
1607 pp->tstamp = packet->timestamp;
1608 queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
1609 NULL, 0);
1610 break;
1611 }
1612 default:
1613 WARN_ON(1);
1614 break;
1615 }
1616
1617 client_put(e_client);
1618 }
1619
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
1621 {
1622 struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
1623 struct fw_card *card = client->device->card;
1624 struct outbound_phy_packet_event *e;
1625
1626 /* Access policy: Allow this ioctl only on local nodes' device files. */
1627 if (!client->device->is_local)
1628 return -ENOSYS;
1629
1630 e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
1631 if (e == NULL)
1632 return -ENOMEM;
1633
1634 client_get(client);
1635 e->client = client;
1636 e->p.speed = SCODE_100;
1637 e->p.generation = a->generation;
1638 e->p.header[0] = TCODE_LINK_INTERNAL << 4;
1639 e->p.header[1] = a->data[0];
1640 e->p.header[2] = a->data[1];
1641 e->p.header_length = 12;
1642 e->p.callback = outbound_phy_packet_callback;
1643
1644 if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
1645 struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1646
1647 pp->closure = a->closure;
1648 pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
1649 if (is_ping_packet(a->data))
1650 pp->length = 4;
1651 } else {
1652 struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1653
1654 pp->closure = a->closure;
1655 pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
// Keep the data field so that the application can match the response event to the
// request.
1658 pp->length = sizeof(a->data);
1659 memcpy(pp->data, a->data, sizeof(a->data));
1660 }
1661
1662 trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1],
1663 e->p.header[2]);
1664
1665 card->driver->send_request(card, &e->p);
1666
1667 return 0;
1668 }
1669
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
1671 {
1672 struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
1673 struct fw_card *card = client->device->card;
1674
1675 /* Access policy: Allow this ioctl only on local nodes' device files. */
1676 if (!client->device->is_local)
1677 return -ENOSYS;
1678
1679 spin_lock_irq(&card->lock);
1680
1681 list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
1682 client->phy_receiver_closure = a->closure;
1683
1684 spin_unlock_irq(&card->lock);
1685
1686 return 0;
1687 }
1688
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
1690 {
1691 struct client *client;
1692 struct inbound_phy_packet_event *e;
1693 unsigned long flags;
1694
1695 spin_lock_irqsave(&card->lock, flags);
1696
1697 list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
1698 e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
1699 if (e == NULL)
1700 break;
1701
1702 if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
1703 struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
1704
1705 pp->closure = client->phy_receiver_closure;
1706 pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
1707 pp->rcode = RCODE_COMPLETE;
1708 pp->length = 8;
1709 pp->data[0] = p->header[1];
1710 pp->data[1] = p->header[2];
1711 queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1712 } else {
1713 struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
1714
1716 pp->closure = client->phy_receiver_closure;
1717 pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
1718 pp->rcode = RCODE_COMPLETE;
1719 pp->length = 8;
1720 pp->tstamp = p->timestamp;
1721 pp->data[0] = p->header[1];
1722 pp->data[1] = p->header[2];
1723 queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
1724 }
1725 }
1726
1727 spin_unlock_irqrestore(&card->lock, flags);
1728 }
1729
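/*
 * The table index equals _IOC_NR() of the ioctl command.  The command numbers
 * themselves are defined in linux/firewire-cdev.h, for example
 * FW_CDEV_IOC_GET_INFO as _IOWR('#', 0x00, struct fw_cdev_get_info).
 */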
1730 static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
1731 [0x00] = ioctl_get_info,
1732 [0x01] = ioctl_send_request,
1733 [0x02] = ioctl_allocate,
1734 [0x03] = ioctl_deallocate,
1735 [0x04] = ioctl_send_response,
1736 [0x05] = ioctl_initiate_bus_reset,
1737 [0x06] = ioctl_add_descriptor,
1738 [0x07] = ioctl_remove_descriptor,
1739 [0x08] = ioctl_create_iso_context,
1740 [0x09] = ioctl_queue_iso,
1741 [0x0a] = ioctl_start_iso,
1742 [0x0b] = ioctl_stop_iso,
1743 [0x0c] = ioctl_get_cycle_timer,
1744 [0x0d] = ioctl_allocate_iso_resource,
1745 [0x0e] = ioctl_deallocate_iso_resource,
1746 [0x0f] = ioctl_allocate_iso_resource_once,
1747 [0x10] = ioctl_deallocate_iso_resource_once,
1748 [0x11] = ioctl_get_speed,
1749 [0x12] = ioctl_send_broadcast_request,
1750 [0x13] = ioctl_send_stream_packet,
1751 [0x14] = ioctl_get_cycle_timer2,
1752 [0x15] = ioctl_send_phy_packet,
1753 [0x16] = ioctl_receive_phy_packets,
1754 [0x17] = ioctl_set_iso_channels,
1755 [0x18] = ioctl_flush_iso,
1756 };
1757
static int dispatch_ioctl(struct client *client,
1759 unsigned int cmd, void __user *arg)
1760 {
1761 union ioctl_arg buffer;
1762 int ret;
1763
1764 if (fw_device_is_shutdown(client->device))
1765 return -ENODEV;
1766
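/*
 * All firewire-cdev commands use ioctl type '#'; _IOC_NR() selects the
 * handler, while _IOC_SIZE() and _IOC_DIR() determine how much of the
 * argument is copied in and out.
 */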
1767 if (_IOC_TYPE(cmd) != '#' ||
1768 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
1769 _IOC_SIZE(cmd) > sizeof(buffer))
1770 return -ENOTTY;
1771
1772 memset(&buffer, 0, sizeof(buffer));
1773
1774 if (_IOC_DIR(cmd) & _IOC_WRITE)
1775 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
1776 return -EFAULT;
1777
1778 ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
1779 if (ret < 0)
1780 return ret;
1781
1782 if (_IOC_DIR(cmd) & _IOC_READ)
1783 if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
1784 return -EFAULT;
1785
1786 return ret;
1787 }
1788
static long fw_device_op_ioctl(struct file *file,
1790 unsigned int cmd, unsigned long arg)
1791 {
1792 return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
1793 }
1794
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
1796 {
1797 struct client *client = file->private_data;
1798 unsigned long size;
1799 int page_count, ret;
1800
1801 if (fw_device_is_shutdown(client->device))
1802 return -ENODEV;
1803
1804 /* FIXME: We could support multiple buffers, but we don't. */
1805 if (client->buffer.pages != NULL)
1806 return -EBUSY;
1807
1808 if (!(vma->vm_flags & VM_SHARED))
1809 return -EINVAL;
1810
1811 if (vma->vm_start & ~PAGE_MASK)
1812 return -EINVAL;
1813
1814 client->vm_start = vma->vm_start;
1815 size = vma->vm_end - vma->vm_start;
1816 page_count = size >> PAGE_SHIFT;
1817 if (size & ~PAGE_MASK)
1818 return -EINVAL;
1819
1820 ret = fw_iso_buffer_alloc(&client->buffer, page_count);
1821 if (ret < 0)
1822 return ret;
1823
1824 spin_lock_irq(&client->lock);
1825 if (client->iso_context) {
1826 ret = fw_iso_buffer_map_dma(&client->buffer,
1827 client->device->card,
1828 iso_dma_direction(client->iso_context));
1829 client->buffer_is_mapped = (ret == 0);
1830 }
1831 spin_unlock_irq(&client->lock);
1832 if (ret < 0)
1833 goto fail;
1834
1835 ret = vm_map_pages_zero(vma, client->buffer.pages,
1836 client->buffer.page_count);
1837 if (ret < 0)
1838 goto fail;
1839
1840 return 0;
1841 fail:
1842 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1843 return ret;
1844 }
1845
static int is_outbound_transaction_resource(int id, void *p, void *data)
1847 {
1848 struct client_resource *resource = p;
1849
1850 return resource->release == release_transaction;
1851 }
1852
static int has_outbound_transactions(struct client *client)
1854 {
1855 int ret;
1856
1857 spin_lock_irq(&client->lock);
1858 ret = idr_for_each(&client->resource_idr,
1859 is_outbound_transaction_resource, NULL);
1860 spin_unlock_irq(&client->lock);
1861
1862 return ret;
1863 }
1864
static int shutdown_resource(int id, void *p, void *data)
1866 {
1867 struct client_resource *resource = p;
1868 struct client *client = data;
1869
1870 resource->release(client, resource);
1871 client_put(client);
1872
1873 return 0;
1874 }
1875
static int fw_device_op_release(struct inode *inode, struct file *file)
1877 {
1878 struct client *client = file->private_data;
1879 struct event *event, *next_event;
1880
1881 spin_lock_irq(&client->device->card->lock);
1882 list_del(&client->phy_receiver_link);
1883 spin_unlock_irq(&client->device->card->lock);
1884
1885 mutex_lock(&client->device->client_list_mutex);
1886 list_del(&client->link);
1887 mutex_unlock(&client->device->client_list_mutex);
1888
1889 if (client->iso_context)
1890 fw_iso_context_destroy(client->iso_context);
1891
1892 if (client->buffer.pages)
1893 fw_iso_buffer_destroy(&client->buffer, client->device->card);
1894
1895 /* Freeze client->resource_idr and client->event_list */
1896 spin_lock_irq(&client->lock);
1897 client->in_shutdown = true;
1898 spin_unlock_irq(&client->lock);
1899
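/*
 * Wait until all outstanding outbound transactions have completed;
 * complete_transaction() removes them from the idr and wakes tx_flush_wait
 * once in_shutdown is set.
 */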
1900 wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
1901
1902 idr_for_each(&client->resource_idr, shutdown_resource, client);
1903 idr_destroy(&client->resource_idr);
1904
1905 list_for_each_entry_safe(event, next_event, &client->event_list, link)
1906 kfree(event);
1907
1908 client_put(client);
1909
1910 return 0;
1911 }
1912
static __poll_t fw_device_op_poll(struct file *file, poll_table *pt)
1914 {
1915 struct client *client = file->private_data;
1916 __poll_t mask = 0;
1917
1918 poll_wait(file, &client->wait, pt);
1919
1920 if (fw_device_is_shutdown(client->device))
1921 mask |= EPOLLHUP | EPOLLERR;
1922 if (!list_empty(&client->event_list))
1923 mask |= EPOLLIN | EPOLLRDNORM;
1924
1925 return mask;
1926 }
1927
1928 const struct file_operations fw_device_ops = {
1929 .owner = THIS_MODULE,
1930 .llseek = no_llseek,
1931 .open = fw_device_op_open,
1932 .read = fw_device_op_read,
1933 .unlocked_ioctl = fw_device_op_ioctl,
1934 .mmap = fw_device_op_mmap,
1935 .release = fw_device_op_release,
1936 .poll = fw_device_op_poll,
1937 .compat_ioctl = compat_ptr_ioctl,
1938 };
1939