// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rio_cm - RapidIO Channelized Messaging Driver
 *
 * Copyright 2013-2016 Integrated Device Technology, Inc.
 * Copyright (c) 2015, Prodrive Technologies
 * Copyright (c) 2015, RapidIO Trade Association
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/reboot.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/rio_cm_cdev.h>

#define DRV_NAME	"rio_cm"
#define DRV_VERSION	"1.0.0"
#define DRV_AUTHOR	"Alexandre Bounine <alexandre.bounine@idt.com>"
#define DRV_DESC	"RapidIO Channelized Messaging Driver"
#define DEV_NAME	"rio_cm"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_CHOP	= BIT(4), /* channel operations */
	DBG_WAIT	= BIT(5), /* waiting for events */
	DBG_TX		= BIT(6), /* message TX */
	DBG_TX_EVENT	= BIT(7), /* message TX event */
	DBG_RX_DATA	= BIT(8), /* inbound data messages */
	DBG_RX_CMD	= BIT(9), /* inbound REQ/ACK/NACK messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define riocm_debug(level, fmt, arg...) \
	do { \
		if (DBG_##level & dbg_level) \
			pr_debug(DRV_NAME ": %s " fmt "\n", \
				__func__, ##arg); \
	} while (0)
#else
#define riocm_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
#endif

#define riocm_warn(fmt, arg...) \
	pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)

#define riocm_error(fmt, arg...) \
	pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)


static int cmbox = 1;
module_param(cmbox, int, S_IRUGO);
MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");

static int chstart = 256;
module_param(chstart, int, S_IRUGO);
MODULE_PARM_DESC(chstart,
		 "Start channel number for dynamic allocation (default 256)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
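
/*
 * When built with DEBUG, the trace mask can be set at module load time
 * (e.g. "modprobe rio_cm dbg_level=0x3ff") or changed at run time via
 * /sys/module/rio_cm/parameters/dbg_level, using the DBG_* bits defined
 * above.
 */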

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define RIOCM_TX_RING_SIZE	128
#define RIOCM_RX_RING_SIZE	128
#define RIOCM_CONNECT_TO	3 /* connect response timeout (in seconds) */

#define RIOCM_MAX_CHNUM		0xffff /* Use full range of u16 field */
#define RIOCM_CHNUM_AUTO	0
#define RIOCM_MAX_EP_COUNT	0x10000 /* Max number of endpoints */

enum rio_cm_state {
	RIO_CM_IDLE,
	RIO_CM_CONNECT,
	RIO_CM_CONNECTED,
	RIO_CM_DISCONNECT,
	RIO_CM_CHAN_BOUND,
	RIO_CM_LISTEN,
	RIO_CM_DESTROYING,
};

enum rio_cm_pkt_type {
	RIO_CM_SYS	= 0xaa,
	RIO_CM_CHAN	= 0x55,
};

enum rio_cm_chop {
	CM_CONN_REQ,
	CM_CONN_ACK,
	CM_CONN_CLOSE,
	CM_DATA_MSG,
};

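/*
 * Wire format of the CM packet headers below: multi-byte fields are
 * carried in network byte order (note the htonl()/htons() conversions at
 * the TX and RX sites), and both structures are packed to match the
 * on-the-wire layout exactly.
 */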
struct rio_ch_base_bhdr {
	u32 src_id;
	u32 dst_id;
#define RIO_HDR_LETTER_MASK 0xffff0000
#define RIO_HDR_MBOX_MASK   0x0000ffff
	u8  src_mbox;
	u8  dst_mbox;
	u8  type;
} __attribute__((__packed__));

struct rio_ch_chan_hdr {
	struct rio_ch_base_bhdr bhdr;
	u8 ch_op;
	u16 dst_ch;
	u16 src_ch;
	u16 msg_len;
	u16 rsrvd;
} __attribute__((__packed__));

struct tx_req {
	struct list_head node;
	struct rio_dev   *rdev;
	void		 *buffer;
	size_t		 len;
};

struct cm_dev {
	struct list_head	list;
	struct rio_mport	*mport;
	void			*rx_buf[RIOCM_RX_RING_SIZE];
	int			rx_slots;
	struct mutex		rx_lock;

	void			*tx_buf[RIOCM_TX_RING_SIZE];
	int			tx_slot;
	int			tx_cnt;
	int			tx_ack_slot;
	struct list_head	tx_reqs;
	spinlock_t		tx_lock;

	struct list_head	peers;
	u32			npeers;
	struct workqueue_struct *rx_wq;
	struct work_struct	rx_work;
};

struct chan_rx_ring {
	void	*buf[RIOCM_RX_RING_SIZE];
	int	head;
	int	tail;
	int	count;

	/* Tracking RX buffers reported to upper level */
	void	*inuse[RIOCM_RX_RING_SIZE];
	int	inuse_cnt;
};

struct rio_channel {
	u16			id;	/* local channel ID */
	struct kref		ref;	/* channel refcount */
	struct file		*filp;
	struct cm_dev		*cmdev;	/* associated CM device object */
	struct rio_dev		*rdev;	/* remote RapidIO device */
	enum rio_cm_state	state;
	int			error;
	spinlock_t		lock;
	void			*context;
	u32			loc_destid;	/* local destID */
	u32			rem_destid;	/* remote destID */
	u16			rem_channel;	/* remote channel ID */
	struct list_head	accept_queue;
	struct list_head	ch_node;
	struct completion	comp;
	struct completion	comp_close;
	struct chan_rx_ring	rx_ring;
};

struct cm_peer {
	struct list_head node;
	struct rio_dev *rdev;
};

struct conn_req {
	struct list_head node;
	u32 destid;	/* requester destID */
	u16 chan;	/* requester channel ID */
	struct cm_dev *cmdev;
};

/*
 * A channel_dev structure represents a CM_CDEV
 * @cdev	Character device
 * @dev		Associated device object
 */
struct channel_dev {
	struct cdev	cdev;
	struct device	*dev;
};

static struct rio_channel *riocm_ch_alloc(u16 ch_num);
static void riocm_ch_free(struct kref *ref);
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len);
static int riocm_ch_close(struct rio_channel *ch);

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(ch_idr);

static LIST_HEAD(cm_dev_list);
static DECLARE_RWSEM(rdev_sem);

static const struct class dev_class = {
	.name = DRV_NAME,
};
static unsigned int dev_major;
static unsigned int dev_minor_base;
static dev_t dev_number;
static struct channel_dev riocm_cdev;

#define is_msg_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG))
#define dev_cm_capable(dev) \
	is_msg_capable(dev->src_ops, dev->dst_ops)

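/*
 * Channel state helpers: riocm_cmp() tests the current state,
 * riocm_cmp_exch() performs a compare-and-exchange state transition and
 * riocm_exch() unconditionally sets a new state, returning the old one.
 * All three sample or update ch->state under ch->lock, so each result is
 * a consistent snapshot that may change right after the lock is dropped.
 */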
static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
{
	int ret;

	spin_lock_bh(&ch->lock);
	ret = (ch->state == cmp);
	spin_unlock_bh(&ch->lock);
	return ret;
}

static int riocm_cmp_exch(struct rio_channel *ch,
			  enum rio_cm_state cmp, enum rio_cm_state exch)
{
	int ret;

	spin_lock_bh(&ch->lock);
	ret = (ch->state == cmp);
	if (ret)
		ch->state = exch;
	spin_unlock_bh(&ch->lock);
	return ret;
}

static enum rio_cm_state riocm_exch(struct rio_channel *ch,
				    enum rio_cm_state exch)
{
	enum rio_cm_state old;

	spin_lock_bh(&ch->lock);
	old = ch->state;
	ch->state = exch;
	spin_unlock_bh(&ch->lock);
	return old;
}

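/*
 * riocm_get_channel - looks up a channel by ID and takes a reference
 *
 * Returns the channel object with its refcount incremented, or NULL if
 * no channel with the given ID exists. The reference must be dropped
 * with riocm_put_channel() when no longer needed.
 */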
static struct rio_channel *riocm_get_channel(u16 nr)
{
	struct rio_channel *ch;

	spin_lock_bh(&idr_lock);
	ch = idr_find(&ch_idr, nr);
	if (ch)
		kref_get(&ch->ref);
	spin_unlock_bh(&idr_lock);
	return ch;
}

static void riocm_put_channel(struct rio_channel *ch)
{
	kref_put(&ch->ref, riocm_ch_free);
}

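/*
 * riocm_rx_get_msg - fetches one inbound message from the mport RX queue
 *
 * A successfully fetched buffer is also removed from the cm_dev RX ring
 * bookkeeping, freeing one slot for riocm_rx_fill().
 */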
static void *riocm_rx_get_msg(struct cm_dev *cm)
{
	void *msg;
	int i;

	msg = rio_get_inb_message(cm->mport, cmbox);
	if (msg) {
		for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
			if (cm->rx_buf[i] == msg) {
				cm->rx_buf[i] = NULL;
				cm->rx_slots++;
				break;
			}
		}

		if (i == RIOCM_RX_RING_SIZE)
			riocm_warn("no record for buffer 0x%p", msg);
	}

	return msg;
}

/*
 * riocm_rx_fill - fills a ring of receive buffers for given cm device
 * @cm: cm_dev object
 * @nent: max number of entries to fill
 *
 * Returns: none
 */
static void riocm_rx_fill(struct cm_dev *cm, int nent)
{
	int i;

	if (cm->rx_slots == 0)
		return;

	for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
		if (cm->rx_buf[i] == NULL) {
			cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
			if (cm->rx_buf[i] == NULL)
				break;
			rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
			cm->rx_slots--;
			nent--;
		}
	}
}

/*
 * riocm_rx_free - frees all receive buffers associated with given cm device
 * @cm: cm_dev object
 *
 * Returns: none
 */
static void riocm_rx_free(struct cm_dev *cm)
{
	int i;

	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
		if (cm->rx_buf[i] != NULL) {
			kfree(cm->rx_buf[i]);
			cm->rx_buf[i] = NULL;
		}
	}
}

/*
 * riocm_req_handler - connection request handler
 * @cm: cm_dev object
 * @req_data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -ENOMEM if unable to allocate memory to store the request
 */
static int riocm_req_handler(struct cm_dev *cm, void *req_data)
{
	struct rio_channel *ch;
	struct conn_req *req;
	struct rio_ch_chan_hdr *hh = req_data;
	u16 chnum;

	chnum = ntohs(hh->dst_ch);

	ch = riocm_get_channel(chnum);

	if (!ch)
		return -ENODEV;

	if (ch->state != RIO_CM_LISTEN) {
		riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
		riocm_put_channel(ch);
		return -EINVAL;
	}

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req) {
		riocm_put_channel(ch);
		return -ENOMEM;
	}

	req->destid = ntohl(hh->bhdr.src_id);
	req->chan = ntohs(hh->src_ch);
	req->cmdev = cm;

	spin_lock_bh(&ch->lock);
	list_add_tail(&req->node, &ch->accept_queue);
	spin_unlock_bh(&ch->lock);
	complete(&ch->comp);
	riocm_put_channel(ch);

	return 0;
}

/*
 * riocm_resp_handler - response to connection request handler
 * @resp_data: pointer to the response packet
 *
 * Returns: 0 if success, or
 *          -EINVAL if channel is not in correct state,
 *          -ENODEV if cannot find a channel with specified ID,
 */
static int riocm_resp_handler(void *resp_data)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hh = resp_data;
	u16 chnum;

	chnum = ntohs(hh->dst_ch);
	ch = riocm_get_channel(chnum);
	if (!ch)
		return -ENODEV;

	if (ch->state != RIO_CM_CONNECT) {
		riocm_put_channel(ch);
		return -EINVAL;
	}

	riocm_exch(ch, RIO_CM_CONNECTED);
	ch->rem_channel = ntohs(hh->src_ch);
	complete(&ch->comp);
	riocm_put_channel(ch);

	return 0;
}

/*
 * riocm_close_handler - channel close request handler
 * @data: pointer to the request packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID.
 * Errors from riocm_ch_close() are logged but not propagated.
 */
static int riocm_close_handler(void *data)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hh = data;
	int ret;

	riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));

	spin_lock_bh(&idr_lock);
	ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
	if (!ch) {
		spin_unlock_bh(&idr_lock);
		return -ENODEV;
	}
	idr_remove(&ch_idr, ch->id);
	spin_unlock_bh(&idr_lock);

	riocm_exch(ch, RIO_CM_DISCONNECT);

	ret = riocm_ch_close(ch);
	if (ret)
		riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);

	return 0;
}

/*
 * rio_cm_handler - function that services request (non-data) packets
 * @cm: cm_dev object
 * @data: pointer to the packet
 */
static void rio_cm_handler(struct cm_dev *cm, void *data)
{
	struct rio_ch_chan_hdr *hdr;

	if (!rio_mport_is_running(cm->mport))
		goto out;

	hdr = data;

	riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
		    hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));

	switch (hdr->ch_op) {
	case CM_CONN_REQ:
		riocm_req_handler(cm, data);
		break;
	case CM_CONN_ACK:
		riocm_resp_handler(data);
		break;
	case CM_CONN_CLOSE:
		riocm_close_handler(data);
		break;
	default:
		riocm_error("Invalid packet header");
		break;
	}
out:
	kfree(data);
}

/*
 * rio_rx_data_handler - received data packet handler
 * @cm: cm_dev object
 * @buf: data packet
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EIO if channel is not in CONNECTED state,
 *          -ENOMEM if channel RX queue is full (packet discarded)
 */
static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
{
	struct rio_ch_chan_hdr *hdr;
	struct rio_channel *ch;

	hdr = buf;

	riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));

	ch = riocm_get_channel(ntohs(hdr->dst_ch));
	if (!ch) {
		/* Discard data message for non-existing channel */
		kfree(buf);
		return -ENODEV;
	}

	/* Place pointer to the buffer into channel's RX queue */
	spin_lock(&ch->lock);

	if (ch->state != RIO_CM_CONNECTED) {
		/* Channel is not ready to receive data, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
			    ch->id, ch->state);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -EIO;
	}

	if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
		/* If RX ring is full, discard a packet */
		riocm_debug(RX_DATA, "ch=%d is full", ch->id);
		spin_unlock(&ch->lock);
		kfree(buf);
		riocm_put_channel(ch);
		return -ENOMEM;
	}

	ch->rx_ring.buf[ch->rx_ring.head] = buf;
	ch->rx_ring.head++;
	ch->rx_ring.count++;
	ch->rx_ring.head %= RIOCM_RX_RING_SIZE;

	complete(&ch->comp);

	spin_unlock(&ch->lock);
	riocm_put_channel(ch);

	return 0;
}

/*
 * rio_ibmsg_handler - inbound message packet handler
 */
static void rio_ibmsg_handler(struct work_struct *work)
{
	struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
	void *data;
	struct rio_ch_chan_hdr *hdr;

	if (!rio_mport_is_running(cm->mport))
		return;

	while (1) {
		mutex_lock(&cm->rx_lock);
		data = riocm_rx_get_msg(cm);
		if (data)
			riocm_rx_fill(cm, 1);
		mutex_unlock(&cm->rx_lock);

		if (data == NULL)
			break;

		hdr = data;

		if (hdr->bhdr.type != RIO_CM_CHAN) {
			/* For now simply discard packets other than channel */
			riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
				    hdr->bhdr.type);
			kfree(data);
			continue;
		}

		/* Process a channel message */
		if (hdr->ch_op == CM_DATA_MSG)
			rio_rx_data_handler(cm, data);
		else
			rio_cm_handler(cm, data);
	}
}

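/*
 * riocm_inb_msg_event - inbound message arrival callback
 *
 * Registered via rio_request_inb_mbox() and possibly invoked from the
 * mport driver's interrupt context, so the actual packet processing is
 * deferred to rio_ibmsg_handler() through the rx_wq workqueue.
 */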
static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
				int mbox, int slot)
{
	struct cm_dev *cm = dev_id;

	if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
		queue_work(cm->rx_wq, &cm->rx_work);
}

/*
 * rio_txcq_handler - TX completion handler
 * @cm: cm_dev object
 * @slot: TX queue slot
 *
 * TX completion handler also ensures that pending request packets are placed
 * into transmit queue as soon as a free slot becomes available. This is done
 * to give higher priority to request packets during high intensity data flow.
 */
static void rio_txcq_handler(struct cm_dev *cm, int slot)
{
	int ack_slot;

	/* ATTN: Add TX completion notification if/when direct buffer
	 * transfer is implemented. At this moment only correct tracking
	 * of tx_count is important.
	 */
	riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
		    cm->mport->id, slot, cm->tx_cnt);

	spin_lock(&cm->tx_lock);
	ack_slot = cm->tx_ack_slot;

	if (ack_slot == slot)
		riocm_debug(TX_EVENT, "slot == ack_slot");

	while (cm->tx_cnt && ((ack_slot != slot) ||
			      (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {

		cm->tx_buf[ack_slot] = NULL;
		++ack_slot;
		ack_slot &= (RIOCM_TX_RING_SIZE - 1);
		cm->tx_cnt--;
	}

	if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
		riocm_error("tx_cnt %d out of sync", cm->tx_cnt);

	WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));

	cm->tx_ack_slot = ack_slot;

	/*
	 * If there are pending requests, insert them into transmit queue
	 */
	if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
		struct tx_req *req, *_req;
		int rc;

		list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
			list_del(&req->node);
			cm->tx_buf[cm->tx_slot] = req->buffer;
			rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
						  req->buffer, req->len);
			kfree(req->buffer);
			kfree(req);

			++cm->tx_cnt;
			++cm->tx_slot;
			cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
			if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
				break;
		}
	}

	spin_unlock(&cm->tx_lock);
}

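/*
 * riocm_outb_msg_event - outbound message completion callback
 *
 * Registered via rio_request_outb_mbox(); delegates TX ring maintenance
 * for the completed slot to rio_txcq_handler().
 */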
static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
				 int mbox, int slot)
{
	struct cm_dev *cm = dev_id;

	if (cm && rio_mport_is_running(cm->mport))
		rio_txcq_handler(cm, slot);
}

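/*
 * riocm_queue_req - queues a request packet for deferred transmission
 *
 * Used when the TX ring is full (riocm_post_send() returned -EBUSY);
 * the queued request is transmitted by rio_txcq_handler() as soon as
 * a TX slot becomes available.
 */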
static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len)
{
	unsigned long flags;
	struct tx_req *treq;

	treq = kzalloc(sizeof(*treq), GFP_KERNEL);
	if (treq == NULL)
		return -ENOMEM;

	treq->rdev = rdev;
	treq->buffer = buffer;
	treq->len = len;

	spin_lock_irqsave(&cm->tx_lock, flags);
	list_add_tail(&treq->node, &cm->tx_reqs);
	spin_unlock_irqrestore(&cm->tx_lock, flags);
	return 0;
}

/*
 * riocm_post_send - helper function that places packet into msg TX queue
 * @cm: cm_dev object
 * @rdev: target RapidIO device object (required by outbound msg interface)
 * @buffer: pointer to a packet buffer to send
 * @len: length of data to transfer
 *
 * Returns: 0 if success, or error code otherwise.
 */
static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
			   void *buffer, size_t len)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cm->tx_lock, flags);

	if (cm->mport == NULL) {
		rc = -ENODEV;
		goto err_out;
	}

	if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
		riocm_debug(TX, "Tx Queue is full");
		rc = -EBUSY;
		goto err_out;
	}

	cm->tx_buf[cm->tx_slot] = buffer;
	rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);

	riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
		    buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);

	++cm->tx_cnt;
	++cm->tx_slot;
	cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);

err_out:
	spin_unlock_irqrestore(&cm->tx_lock, flags);
	return rc;
}

/*
 * riocm_ch_send - sends a data packet to a remote device
 * @ch_id: local channel ID
 * @buf: pointer to a data buffer to send (including CM header)
 * @len: length of data to transfer (including CM header)
 *
 * ATTN: ASSUMES THAT THE HEADER SPACE IS RESERVED PART OF THE DATA PACKET
 *
 * Returns: 0 if success, or
 *          -EINVAL if one or more input parameters is/are not valid,
 *          -ENODEV if cannot find a channel with specified ID,
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          + error codes returned by HW send routine.
 */
static int riocm_ch_send(u16 ch_id, void *buf, int len)
{
	struct rio_channel *ch;
	struct rio_ch_chan_hdr *hdr;
	int ret;

	if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	if (len < sizeof(struct rio_ch_chan_hdr))
		return -EINVAL;		/* insufficient data from user */

	ch = riocm_get_channel(ch_id);
	if (!ch) {
		riocm_error("%s(%d) ch_%d not found", current->comm,
			    task_pid_nr(current), ch_id);
		return -ENODEV;
	}

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto err_out;
	}

	/*
	 * Fill buffer header section with corresponding channel data
	 */
	hdr = buf;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_DATA_MSG;
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->msg_len = htons((u16)len);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its own
	 * internal transfer buffer (true for all RIONET compatible mport
	 * drivers). Must be reviewed if mport driver uses the buffer directly.
	 */

	ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
	if (ret)
		riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
err_out:
	riocm_put_channel(ch);
	return ret;
}

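/*
 * riocm_ch_free_rxbuf - returns an RX buffer previously handed to the
 * upper layer by riocm_ch_receive()
 *
 * Returns: 0 if the buffer was found in the channel's in-use table and
 * freed, or -EINVAL otherwise.
 */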
static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
{
	int i, ret = -EINVAL;

	spin_lock_bh(&ch->lock);

	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
		if (ch->rx_ring.inuse[i] == buf) {
			ch->rx_ring.inuse[i] = NULL;
			ch->rx_ring.inuse_cnt--;
			ret = 0;
			break;
		}
	}

	spin_unlock_bh(&ch->lock);

	if (!ret)
		kfree(buf);

	return ret;
}

/*
 * riocm_ch_receive - fetch a data packet received for the specified channel
 * @ch: local channel ID
 * @buf: pointer to a packet buffer
 * @timeout: timeout to wait for incoming packet (in jiffies)
 *
 * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
 *          -EAGAIN if a channel is not in CONNECTED state,
 *          -ENOMEM if in-use tracking queue is full,
 *          -ETIME if wait timeout expired,
 *          -EINTR if wait was interrupted.
 */
static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
{
	void *rxmsg = NULL;
	int i, ret = 0;
	long wret;

	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
		/* If we do not have entries to track buffers given to upper
		 * layer, reject request.
		 */
		ret = -ENOMEM;
		goto out;
	}

	wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);

	riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;

	if (ret)
		goto out;

	spin_lock_bh(&ch->lock);

	rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
	ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
	ch->rx_ring.count--;
	ch->rx_ring.tail++;
	ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
	ret = -ENOMEM;

	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
		if (ch->rx_ring.inuse[i] == NULL) {
			ch->rx_ring.inuse[i] = rxmsg;
			ch->rx_ring.inuse_cnt++;
			ret = 0;
			break;
		}
	}

	if (ret) {
		/* We have no entry to store pending message: drop it */
		kfree(rxmsg);
		rxmsg = NULL;
	}

	spin_unlock_bh(&ch->lock);
out:
	*buf = rxmsg;
	return ret;
}

/*
 * riocm_ch_connect - sends a connect request to a remote device
 * @loc_ch: local channel ID
 * @cm: CM device to send connect request
 * @peer: target RapidIO device
 * @rem_ch: remote channel ID
 *
 * Returns: 0 if success, or
 *          -EINVAL if the channel is not in IDLE state,
 *          -EAGAIN if no connection request available immediately,
 *          -ETIME if ACK response timeout expired,
 *          -EINTR if wait for response was interrupted.
 */
static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
			    struct cm_peer *peer, u16 rem_ch)
{
	struct rio_channel *ch = NULL;
	struct rio_ch_chan_hdr *hdr;
	int ret;
	long wret;

	ch = riocm_get_channel(loc_ch);
	if (!ch)
		return -ENODEV;

	if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
		ret = -EINVAL;
		goto conn_done;
	}

	ch->cmdev = cm;
	ch->rdev = peer->rdev;
	ch->context = NULL;
	ch->loc_destid = cm->mport->host_deviceid;
	ch->rem_channel = rem_ch;

	/*
	 * Send connect request to the remote RapidIO device
	 */

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL) {
		ret = -ENOMEM;
		goto conn_done;
	}

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(peer->rdev->destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_REQ;
	hdr->dst_ch = htons(rem_ch);
	hdr->src_ch = htons(loc_ch);

	/* ATTN: the function call below relies on the fact that underlying
	 * HW-specific add_outb_message() routine copies TX data into its
	 * internal transfer buffer. Must be reviewed if mport driver uses
	 * this buffer directly.
	 */
	ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));

	if (ret != -EBUSY) {
		kfree(hdr);
	} else {
		ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
		if (ret)
			kfree(hdr);
	}

	if (ret) {
		riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
		goto conn_done;
	}

	/* Wait for connect response from the remote device */
	wret = wait_for_completion_interruptible_timeout(&ch->comp,
							 RIOCM_CONNECT_TO * HZ);
	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

	if (!wret)
		ret = -ETIME;
	else if (wret == -ERESTARTSYS)
		ret = -EINTR;
	else
		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;

conn_done:
	riocm_put_channel(ch);
	return ret;
}

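/*
 * riocm_send_ack - sends a CM_CONN_ACK packet to the connecting peer
 *
 * Called from riocm_ch_accept() after a new channel object has been set
 * up for the accepted connection. If the TX ring is busy, the packet is
 * queued for deferred transmission.
 */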
static int riocm_send_ack(struct rio_channel *ch)
{
	struct rio_ch_chan_hdr *hdr;
	int ret;

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_ACK;

	/* ATTN: the function call below relies on the fact that underlying
	 * add_outb_message() routine copies TX data into its internal transfer
	 * buffer. Review if switching to direct buffer version.
	 */
	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
					      ch->rdev, hdr, sizeof(*hdr)))
		return 0;
	kfree(hdr);

	if (ret)
		riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
			    ch->id, rio_name(ch->rdev), ret);
	return ret;
}

/*
 * riocm_ch_accept - accept incoming connection request
 * @ch_id: channel ID
 * @new_ch_id: pointer used to return the ID of the newly created channel
 * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
 *           request is not available).
 *
 * Returns: pointer to new channel struct if success, or error-valued pointer:
 *          -ENODEV - a device object for the requesting peer was not found,
 *          -EINVAL - the channel does not exist or is not in LISTEN state,
 *          -EAGAIN - no connection request available immediately (timeout=0),
 *          -ECANCELED - the channel left LISTEN state while waiting,
 *          -EIO - the wait completed but no request was queued,
 *          -ENOMEM - unable to allocate new channel,
 *          -ETIME - wait timeout expired,
 *          -EINTR - wait was interrupted.
 */
static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
					   long timeout)
{
	struct rio_channel *ch;
	struct rio_channel *new_ch;
	struct conn_req *req;
	struct cm_peer *peer;
	int found = 0;
	int err = 0;
	long wret;

	ch = riocm_get_channel(ch_id);
	if (!ch)
		return ERR_PTR(-EINVAL);

	if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
		err = -EINVAL;
		goto err_put;
	}

	/* Don't sleep if this is a non-blocking call */
	if (!timeout) {
		if (!try_wait_for_completion(&ch->comp)) {
			err = -EAGAIN;
			goto err_put;
		}
	} else {
		riocm_debug(WAIT, "on %d", ch->id);

		wret = wait_for_completion_interruptible_timeout(&ch->comp,
								 timeout);
		if (!wret) {
			err = -ETIME;
			goto err_put;
		} else if (wret == -ERESTARTSYS) {
			err = -EINTR;
			goto err_put;
		}
	}

	spin_lock_bh(&ch->lock);

	if (ch->state != RIO_CM_LISTEN) {
		err = -ECANCELED;
	} else if (list_empty(&ch->accept_queue)) {
		riocm_debug(WAIT, "on %d accept_queue is empty on completion",
			    ch->id);
		err = -EIO;
	}

	spin_unlock_bh(&ch->lock);

	if (err) {
		riocm_debug(WAIT, "on %d returns %d", ch->id, err);
		goto err_put;
	}

	/* Create new channel for this connection */
	new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);

	if (IS_ERR(new_ch)) {
		riocm_error("failed to get channel for new req (%ld)",
			    PTR_ERR(new_ch));
		err = -ENOMEM;
		goto err_put;
	}

	spin_lock_bh(&ch->lock);

	req = list_first_entry(&ch->accept_queue, struct conn_req, node);
	list_del(&req->node);
	new_ch->cmdev = ch->cmdev;
	new_ch->loc_destid = ch->loc_destid;
	new_ch->rem_destid = req->destid;
	new_ch->rem_channel = req->chan;

	spin_unlock_bh(&ch->lock);
	riocm_put_channel(ch);
	ch = NULL;
	kfree(req);

	down_read(&rdev_sem);
	/* Find requester's device object */
	list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
		if (peer->rdev->destid == new_ch->rem_destid) {
			riocm_debug(RX_CMD, "found matching device(%s)",
				    rio_name(peer->rdev));
			found = 1;
			break;
		}
	}
	up_read(&rdev_sem);

	if (!found) {
		/* If peer device object not found, simply ignore the request */
		err = -ENODEV;
		goto err_put_new_ch;
	}

	new_ch->rdev = peer->rdev;
	new_ch->state = RIO_CM_CONNECTED;
	spin_lock_init(&new_ch->lock);

	/* Acknowledge the connection request. */
	riocm_send_ack(new_ch);

	*new_ch_id = new_ch->id;
	return new_ch;

err_put_new_ch:
	spin_lock_bh(&idr_lock);
	idr_remove(&ch_idr, new_ch->id);
	spin_unlock_bh(&idr_lock);
	riocm_put_channel(new_ch);

err_put:
	if (ch)
		riocm_put_channel(ch);
	*new_ch_id = 0;
	return ERR_PTR(err);
}

/*
 * riocm_ch_listen - puts a channel into LISTEN state
 * @ch_id: channel ID
 *
 * Returns: 0 if success, or
 *          -EINVAL if the specified channel does not exist or
 *                  is not in CHAN_BOUND state.
 */
static int riocm_ch_listen(u16 ch_id)
{
	struct rio_channel *ch = NULL;
	int ret = 0;

	riocm_debug(CHOP, "(ch_%d)", ch_id);

	ch = riocm_get_channel(ch_id);
	if (!ch)
		return -EINVAL;
	if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
		ret = -EINVAL;
	riocm_put_channel(ch);
	return ret;
}

/*
 * riocm_ch_bind - associate a channel object and an mport device
 * @ch_id: channel ID
 * @mport_id: local mport device ID
 * @context: pointer to the additional caller's context
 *
 * Returns: 0 if success, or
 *          -ENODEV if cannot find specified mport,
 *          -EINVAL if the specified channel does not exist or
 *                  is not in IDLE state.
 */
static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
{
	struct rio_channel *ch = NULL;
	struct cm_dev *cm;
	int rc = -ENODEV;

	riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);

	/* Find matching cm_dev object */
	down_read(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if ((cm->mport->id == mport_id) &&
		    rio_mport_is_running(cm->mport)) {
			rc = 0;
			break;
		}
	}

	if (rc)
		goto exit;

	ch = riocm_get_channel(ch_id);
	if (!ch) {
		rc = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ch->lock);
	if (ch->state != RIO_CM_IDLE) {
		spin_unlock_bh(&ch->lock);
		rc = -EINVAL;
		goto err_put;
	}

	ch->cmdev = cm;
	ch->loc_destid = cm->mport->host_deviceid;
	ch->context = context;
	ch->state = RIO_CM_CHAN_BOUND;
	spin_unlock_bh(&ch->lock);
err_put:
	riocm_put_channel(ch);
exit:
	up_read(&rdev_sem);
	return rc;
}

/*
 * riocm_ch_alloc - channel object allocation helper routine
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Return value: pointer to newly created channel object,
 *               or error-valued pointer
 */
static struct rio_channel *riocm_ch_alloc(u16 ch_num)
{
	int id;
	int start, end;
	struct rio_channel *ch;

	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return ERR_PTR(-ENOMEM);

	if (ch_num) {
		/* If requested, try to obtain the specified channel ID */
		start = ch_num;
		end = ch_num + 1;
	} else {
		/* Obtain channel ID from the dynamic allocation range */
		start = chstart;
		end = RIOCM_MAX_CHNUM + 1;
	}

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&idr_lock);
	id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
	spin_unlock_bh(&idr_lock);
	idr_preload_end();

	if (id < 0) {
		kfree(ch);
		return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
	}

	ch->id = (u16)id;
	ch->state = RIO_CM_IDLE;
	spin_lock_init(&ch->lock);
	INIT_LIST_HEAD(&ch->accept_queue);
	INIT_LIST_HEAD(&ch->ch_node);
	init_completion(&ch->comp);
	init_completion(&ch->comp_close);
	kref_init(&ch->ref);
	ch->rx_ring.head = 0;
	ch->rx_ring.tail = 0;
	ch->rx_ring.count = 0;
	ch->rx_ring.inuse_cnt = 0;

	return ch;
}

/*
 * riocm_ch_create - creates a new channel object and allocates ID for it
 * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
 *
 * Allocates and initializes a new channel object. If the parameter ch_num > 0
 * and is within the valid range, riocm_ch_create tries to allocate the
 * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
 * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
 * Module parameter 'chstart' defines start of an ID range available for
 * dynamic allocation. Range below 'chstart' is reserved for pre-defined
 * ID numbers. Available channel numbers are limited by 16-bit size of channel
 * numbers used in the packet header.
 *
 * Return value: PTR to rio_channel structure if successful (with channel number
 *               updated via pointer) or error-valued pointer if error.
 */
static struct rio_channel *riocm_ch_create(u16 *ch_num)
{
	struct rio_channel *ch = NULL;

	ch = riocm_ch_alloc(*ch_num);

	if (IS_ERR(ch))
		riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
			    *ch_num, PTR_ERR(ch));
	else
		*ch_num = ch->id;

	return ch;
}

/*
 * riocm_ch_free - channel object release routine
 * @ref: pointer to a channel's kref structure
 */
static void riocm_ch_free(struct kref *ref)
{
	struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
	int i;

	riocm_debug(CHOP, "(ch_%d)", ch->id);

	if (ch->rx_ring.inuse_cnt) {
		for (i = 0;
		     i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
			if (ch->rx_ring.inuse[i] != NULL) {
				kfree(ch->rx_ring.inuse[i]);
				ch->rx_ring.inuse_cnt--;
			}
		}
	}

	if (ch->rx_ring.count)
		for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
			if (ch->rx_ring.buf[i] != NULL) {
				kfree(ch->rx_ring.buf[i]);
				ch->rx_ring.count--;
			}
		}

	complete(&ch->comp_close);
}

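/*
 * riocm_send_close - sends a CM_CONN_CLOSE notification to the remote
 * end of the channel
 *
 * If the TX ring is busy, the packet is queued for deferred transmission
 * rather than dropped.
 */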
static int riocm_send_close(struct rio_channel *ch)
{
	struct rio_ch_chan_hdr *hdr;
	int ret;

	/*
	 * Send CH_CLOSE notification to the remote RapidIO device
	 */

	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
	if (hdr == NULL)
		return -ENOMEM;

	hdr->bhdr.src_id = htonl(ch->loc_destid);
	hdr->bhdr.dst_id = htonl(ch->rem_destid);
	hdr->bhdr.src_mbox = cmbox;
	hdr->bhdr.dst_mbox = cmbox;
	hdr->bhdr.type = RIO_CM_CHAN;
	hdr->ch_op = CM_CONN_CLOSE;
	hdr->dst_ch = htons(ch->rem_channel);
	hdr->src_ch = htons(ch->id);

	/* ATTN: the function call below relies on the fact that underlying
	 * add_outb_message() routine copies TX data into its internal transfer
	 * buffer. Needs to be reviewed if switched to direct buffer mode.
	 */
	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));

	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
					      hdr, sizeof(*hdr)))
		return 0;
	kfree(hdr);

	if (ret)
		riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);

	return ret;
}

/*
 * riocm_ch_close - closes a channel object with specified ID (by local request)
 * @ch: channel to be closed
 */
static int riocm_ch_close(struct rio_channel *ch)
{
	unsigned long tmo = msecs_to_jiffies(3000);
	enum rio_cm_state state;
	long wret;
	int ret = 0;

	riocm_debug(CHOP, "ch_%d by %s(%d)",
		    ch->id, current->comm, task_pid_nr(current));

	state = riocm_exch(ch, RIO_CM_DESTROYING);
	if (state == RIO_CM_CONNECTED)
		riocm_send_close(ch);

	complete_all(&ch->comp);

	riocm_put_channel(ch);
	wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);

	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);

	if (wret == 0) {
		/* Timeout on wait occurred */
		riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
			    current->comm, task_pid_nr(current), ch->id);
		ret = -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal */
		riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
			    current->comm, task_pid_nr(current), ch->id);
		ret = -EINTR;
	}

	if (!ret) {
		riocm_debug(CHOP, "ch_%d resources released", ch->id);
		kfree(ch);
	} else {
		riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
	}

	return ret;
}

/*
 * riocm_cdev_open() - Open character device
 */
static int riocm_cdev_open(struct inode *inode, struct file *filp)
{
	riocm_debug(INIT, "by %s(%d) filp=%p ",
		    current->comm, task_pid_nr(current), filp);

	if (list_empty(&cm_dev_list))
		return -ENODEV;

	return 0;
}

/*
 * riocm_cdev_release() - Release character device
 */
static int riocm_cdev_release(struct inode *inode, struct file *filp)
{
	struct rio_channel *ch, *_c;
	unsigned int i;
	LIST_HEAD(list);

	riocm_debug(EXIT, "by %s(%d) filp=%p",
		    current->comm, task_pid_nr(current), filp);

	/* Check if there are channels associated with this file descriptor */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch && ch->filp == filp) {
			riocm_debug(EXIT, "ch_%d not released by %s(%d)",
				    ch->id, current->comm,
				    task_pid_nr(current));
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}

	return 0;
}

/*
 * cm_ep_get_list_size() - Reports number of endpoints in the network
 */
static int cm_ep_get_list_size(void __user *arg)
{
	u32 __user *p = arg;
	u32 mport_id;
	u32 count = 0;
	struct cm_dev *cm;

	if (get_user(mport_id, p))
		return -EFAULT;
	if (mport_id >= RIO_MAX_MPORTS)
		return -EINVAL;

	/* Find a matching cm_dev object */
	down_read(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport->id == mport_id) {
			count = cm->npeers;
			up_read(&rdev_sem);
			if (copy_to_user(arg, &count, sizeof(u32)))
				return -EFAULT;
			return 0;
		}
	}
	up_read(&rdev_sem);

	return -ENODEV;
}

/*
 * cm_ep_get_list() - Returns list of attached endpoints
 */
static int cm_ep_get_list(void __user *arg)
{
	struct cm_dev *cm;
	struct cm_peer *peer;
	u32 info[2];
	void *buf;
	u32 nent;
	u32 *entry_ptr;
	u32 i = 0;
	int ret = 0;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
		return -EINVAL;

	/* Find a matching cm_dev object */
	down_read(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list)
		if (cm->mport->id == (u8)info[1])
			goto found;

	up_read(&rdev_sem);
	return -ENODEV;

found:
	nent = min(info[0], cm->npeers);
	buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
	if (!buf) {
		up_read(&rdev_sem);
		return -ENOMEM;
	}

	entry_ptr = (u32 *)((uintptr_t)buf + 2 * sizeof(u32));

	list_for_each_entry(peer, &cm->peers, node) {
		*entry_ptr = (u32)peer->rdev->destid;
		entry_ptr++;
		if (++i == nent)
			break;
	}
	up_read(&rdev_sem);

	((u32 *)buf)[0] = i; /* report an updated number of entries */
	((u32 *)buf)[1] = info[1]; /* put back an mport ID */
	if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
		ret = -EFAULT;

	kfree(buf);
	return ret;
}

/*
 * cm_mport_get_list() - Returns list of available local mport devices
 */
static int cm_mport_get_list(void __user *arg)
{
	int ret = 0;
	u32 entries;
	void *buf;
	struct cm_dev *cm;
	u32 *entry_ptr;
	int count = 0;

	if (copy_from_user(&entries, arg, sizeof(entries)))
		return -EFAULT;
	if (entries == 0 || entries > RIO_MAX_MPORTS)
		return -EINVAL;
	buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Scan all registered cm_dev objects */
	entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
	down_read(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (count++ < entries) {
			*entry_ptr = (cm->mport->id << 16) |
				      cm->mport->host_deviceid;
			entry_ptr++;
		}
	}
	up_read(&rdev_sem);

	*((u32 *)buf) = count; /* report a real number of entries */
	if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
		ret = -EFAULT;

	kfree(buf);
	return ret;
}

/*
 * cm_chan_create() - Create a message exchange channel
 */
static int cm_chan_create(struct file *filp, void __user *arg)
{
	u16 __user *p = arg;
	u16 ch_num;
	struct rio_channel *ch;

	if (get_user(ch_num, p))
		return -EFAULT;

	riocm_debug(CHOP, "ch_%d requested by %s(%d)",
		    ch_num, current->comm, task_pid_nr(current));
	ch = riocm_ch_create(&ch_num);
	if (IS_ERR(ch))
		return PTR_ERR(ch);

	ch->filp = filp;
	riocm_debug(CHOP, "ch_%d created by %s(%d)",
		    ch_num, current->comm, task_pid_nr(current));
	return put_user(ch_num, p);
}

/*
 * cm_chan_close() - Close channel
 * @filp: Pointer to file object
 * @arg: Channel to close
 */
static int cm_chan_close(struct file *filp, void __user *arg)
{
	u16 __user *p = arg;
	u16 ch_num;
	struct rio_channel *ch;

	if (get_user(ch_num, p))
		return -EFAULT;

	riocm_debug(CHOP, "ch_%d by %s(%d)",
		    ch_num, current->comm, task_pid_nr(current));

	spin_lock_bh(&idr_lock);
	ch = idr_find(&ch_idr, ch_num);
	if (!ch) {
		spin_unlock_bh(&idr_lock);
		return 0;
	}
	if (ch->filp != filp) {
		spin_unlock_bh(&idr_lock);
		return -EINVAL;
	}
	idr_remove(&ch_idr, ch->id);
	spin_unlock_bh(&idr_lock);

	return riocm_ch_close(ch);
}

/*
 * cm_chan_bind() - Bind channel
 * @arg: Channel number
 */
static int cm_chan_bind(void __user *arg)
{
	struct rio_cm_channel chan;

	if (copy_from_user(&chan, arg, sizeof(chan)))
		return -EFAULT;
	if (chan.mport_id >= RIO_MAX_MPORTS)
		return -EINVAL;

	return riocm_ch_bind(chan.id, chan.mport_id, NULL);
}

/*
 * cm_chan_listen() - Listen on channel
 * @arg: Channel number
 */
static int cm_chan_listen(void __user *arg)
{
	u16 __user *p = arg;
	u16 ch_num;

	if (get_user(ch_num, p))
		return -EFAULT;

	return riocm_ch_listen(ch_num);
}

/*
 * cm_chan_accept() - Accept incoming connection
 * @filp: Pointer to file object
 * @arg: Channel number
 */
static int cm_chan_accept(struct file *filp, void __user *arg)
{
	struct rio_cm_accept param;
	long accept_to;
	struct rio_channel *ch;

	if (copy_from_user(&param, arg, sizeof(param)))
		return -EFAULT;

	riocm_debug(CHOP, "on ch_%d by %s(%d)",
		    param.ch_num, current->comm, task_pid_nr(current));

	accept_to = param.wait_to ?
			msecs_to_jiffies(param.wait_to) : 0;

	ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
	if (IS_ERR(ch))
		return PTR_ERR(ch);
	ch->filp = filp;

	riocm_debug(CHOP, "new ch_%d for %s(%d)",
		    ch->id, current->comm, task_pid_nr(current));

	if (copy_to_user(arg, &param, sizeof(param)))
		return -EFAULT;
	return 0;
}

/*
 * cm_chan_connect() - Connect on channel
 * @arg: Channel information
 */
static int cm_chan_connect(void __user *arg)
{
	struct rio_cm_channel chan;
	struct cm_dev *cm;
	struct cm_peer *peer;
	int ret = -ENODEV;

	if (copy_from_user(&chan, arg, sizeof(chan)))
		return -EFAULT;
	if (chan.mport_id >= RIO_MAX_MPORTS)
		return -EINVAL;

	down_read(&rdev_sem);

	/* Find matching cm_dev object */
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport->id == chan.mport_id) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Find corresponding RapidIO endpoint device object */
	ret = -ENODEV;

	list_for_each_entry(peer, &cm->peers, node) {
		if (peer->rdev->destid == chan.remote_destid) {
			ret = 0;
			break;
		}
	}

	if (ret)
		goto err_out;

	up_read(&rdev_sem);

	return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
err_out:
	up_read(&rdev_sem);
	return ret;
}

/*
 * cm_chan_msg_send() - Send a message through channel
 * @arg: Outbound message information
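 *
 * The caller-supplied message buffer must reserve space for the CM header
 * at its start: riocm_ch_send() overwrites the first
 * sizeof(struct rio_ch_chan_hdr) bytes of the buffer (see the ATTN note
 * in riocm_ch_send()).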
 */
static int cm_chan_msg_send(void __user *arg)
{
	struct rio_cm_msg msg;
	void *buf;
	int ret;

	if (copy_from_user(&msg, arg, sizeof(msg)))
		return -EFAULT;
	if (msg.size > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = riocm_ch_send(msg.ch_num, buf, msg.size);

	kfree(buf);
	return ret;
}

/*
 * cm_chan_msg_rcv() - Receive a message through channel
 * @arg: Inbound message information
 */
static int cm_chan_msg_rcv(void __user *arg)
{
	struct rio_cm_msg msg;
	struct rio_channel *ch;
	void *buf;
	long rxto;
	int ret = 0, msg_size;

	if (copy_from_user(&msg, arg, sizeof(msg)))
		return -EFAULT;

	if (msg.ch_num == 0 || msg.size == 0)
		return -EINVAL;

	ch = riocm_get_channel(msg.ch_num);
	if (!ch)
		return -ENODEV;

	rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;

	ret = riocm_ch_receive(ch, &buf, rxto);
	if (ret)
		goto out;

	msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));

	if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
		ret = -EFAULT;

	riocm_ch_free_rxbuf(ch, buf);
out:
	riocm_put_channel(ch);
	return ret;
}

/*
 * riocm_cdev_ioctl() - IOCTL requests handler
 */
static long
riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case RIO_CM_EP_GET_LIST_SIZE:
		return cm_ep_get_list_size((void __user *)arg);
	case RIO_CM_EP_GET_LIST:
		return cm_ep_get_list((void __user *)arg);
	case RIO_CM_CHAN_CREATE:
		return cm_chan_create(filp, (void __user *)arg);
	case RIO_CM_CHAN_CLOSE:
		return cm_chan_close(filp, (void __user *)arg);
	case RIO_CM_CHAN_BIND:
		return cm_chan_bind((void __user *)arg);
	case RIO_CM_CHAN_LISTEN:
		return cm_chan_listen((void __user *)arg);
	case RIO_CM_CHAN_ACCEPT:
		return cm_chan_accept(filp, (void __user *)arg);
	case RIO_CM_CHAN_CONNECT:
		return cm_chan_connect((void __user *)arg);
	case RIO_CM_CHAN_SEND:
		return cm_chan_msg_send((void __user *)arg);
	case RIO_CM_CHAN_RECEIVE:
		return cm_chan_msg_rcv((void __user *)arg);
	case RIO_CM_MPORT_GET_LIST:
		return cm_mport_get_list((void __user *)arg);
	default:
		break;
	}

	return -EINVAL;
}

static const struct file_operations riocm_cdev_fops = {
	.owner		= THIS_MODULE,
	.open		= riocm_cdev_open,
	.release	= riocm_cdev_release,
	.unlocked_ioctl = riocm_cdev_ioctl,
};
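
/*
 * Illustrative user-space usage of the ioctl interface above. This is a
 * sketch only, not part of the driver; field names follow struct
 * rio_cm_channel and struct rio_cm_msg as used in this file, and the
 * authoritative definitions live in <linux/rio_cm_cdev.h>. Error checking
 * is omitted; "dest", "rem_ch", "buf" and "len" are placeholders:
 *
 *	int fd = open("/dev/rio_cm", O_RDWR);
 *
 *	uint16_t ch_num = 0;			// 0 = allocate ID dynamically
 *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num);
 *
 *	struct rio_cm_channel chan = {
 *		.id = ch_num,
 *		.mport_id = 0,			// local mport device
 *		.remote_destid = dest,		// target endpoint destID
 *		.remote_channel = rem_ch,	// channel listening remotely
 *	};
 *	ioctl(fd, RIO_CM_CHAN_CONNECT, &chan);
 *
 *	struct rio_cm_msg msg = {
 *		.ch_num = ch_num,
 *		.size = len,			// includes CM header space
 *		.msg = (uintptr_t)buf,
 *	};
 *	ioctl(fd, RIO_CM_CHAN_SEND, &msg);
 *
 *	ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
 *	close(fd);
 */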
1934
1935 /*
1936 * riocm_add_dev - add new remote RapidIO device into channel management core
1937 * @dev: device object associated with RapidIO device
1938 * @sif: subsystem interface
1939 *
1940 * Adds the specified RapidIO device (if applicable) into peers list of
1941 * the corresponding channel management device (cm_dev).
1942 */
riocm_add_dev(struct device * dev,struct subsys_interface * sif)1943 static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
1944 {
1945 struct cm_peer *peer;
1946 struct rio_dev *rdev = to_rio_dev(dev);
1947 struct cm_dev *cm;
1948
1949 /* Check if the remote device has capabilities required to support CM */
1950 if (!dev_cm_capable(rdev))
1951 return 0;
1952
1953 riocm_debug(RDEV, "(%s)", rio_name(rdev));
1954
1955 peer = kmalloc(sizeof(*peer), GFP_KERNEL);
1956 if (!peer)
1957 return -ENOMEM;
1958
1959 /* Find a corresponding cm_dev object */
1960 down_write(&rdev_sem);
1961 list_for_each_entry(cm, &cm_dev_list, list) {
1962 if (cm->mport == rdev->net->hport)
1963 goto found;
1964 }
1965
1966 up_write(&rdev_sem);
1967 kfree(peer);
1968 return -ENODEV;
1969
1970 found:
1971 peer->rdev = rdev;
1972 list_add_tail(&peer->node, &cm->peers);
1973 cm->npeers++;
1974
1975 up_write(&rdev_sem);
1976 return 0;
1977 }
1978
1979 /*
1980 * riocm_remove_dev - remove remote RapidIO device from channel management core
1981 * @dev: device object associated with RapidIO device
1982 * @sif: subsystem interface
1983 *
1984 * Removes the specified RapidIO device (if applicable) from peers list of
1985 * the corresponding channel management device (cm_dev).
1986 */
riocm_remove_dev(struct device * dev,struct subsys_interface * sif)1987 static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
1988 {
1989 struct rio_dev *rdev = to_rio_dev(dev);
1990 struct cm_dev *cm;
1991 struct cm_peer *peer;
1992 struct rio_channel *ch, *_c;
1993 unsigned int i;
1994 bool found = false;
1995 LIST_HEAD(list);
1996
1997 /* Check if the remote device has capabilities required to support CM */
1998 if (!dev_cm_capable(rdev))
1999 return;
2000
2001 riocm_debug(RDEV, "(%s)", rio_name(rdev));
2002
2003 /* Find matching cm_dev object */
2004 down_write(&rdev_sem);
2005 list_for_each_entry(cm, &cm_dev_list, list) {
2006 if (cm->mport == rdev->net->hport) {
2007 found = true;
2008 break;
2009 }
2010 }
2011
2012 if (!found) {
2013 up_write(&rdev_sem);
2014 return;
2015 }
2016
2017 /* Remove remote device from the list of peers */
2018 found = false;
2019 list_for_each_entry(peer, &cm->peers, node) {
2020 if (peer->rdev == rdev) {
2021 riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
2022 found = true;
2023 list_del(&peer->node);
2024 cm->npeers--;
2025 kfree(peer);
2026 break;
2027 }
2028 }
2029
2030 up_write(&rdev_sem);
2031
2032 if (!found)
2033 return;
2034
2035 /*
2036 * Release channels associated with this peer
2037 */
2038
2039 spin_lock_bh(&idr_lock);
2040 idr_for_each_entry(&ch_idr, ch, i) {
2041 if (ch && ch->rdev == rdev) {
2042 if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
2043 riocm_exch(ch, RIO_CM_DISCONNECT);
2044 idr_remove(&ch_idr, ch->id);
2045 list_add(&ch->ch_node, &list);
2046 }
2047 }
2048 spin_unlock_bh(&idr_lock);
2049
2050 if (!list_empty(&list)) {
2051 list_for_each_entry_safe(ch, _c, &list, ch_node) {
2052 list_del(&ch->ch_node);
2053 riocm_ch_close(ch);
2054 }
2055 }
2056 }

/*
 * riocm_cdev_add() - Create rio_cm char device
 * @devno: device number assigned to device (MAJ + MIN)
 */
static int riocm_cdev_add(dev_t devno)
{
	int ret;

	cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
	riocm_cdev.cdev.owner = THIS_MODULE;
	ret = cdev_add(&riocm_cdev.cdev, devno, 1);
	if (ret < 0) {
		riocm_error("Cannot register a device with error %d", ret);
		return ret;
	}

	riocm_cdev.dev = device_create(&dev_class, NULL, devno, NULL, DEV_NAME);
	if (IS_ERR(riocm_cdev.dev)) {
		cdev_del(&riocm_cdev.cdev);
		return PTR_ERR(riocm_cdev.dev);
	}

	riocm_debug(MPORT, "Added %s cdev(%d:%d)",
		    DEV_NAME, MAJOR(devno), MINOR(devno));

	return 0;
}
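
/*
 * A rough user-space sketch of driving this cdev (ioctl definitions live in
 * <linux/rio_cm_cdev.h>, which is the authoritative reference; error
 * handling omitted):
 *
 *	int fd = open("/dev/rio_cm", O_RDWR);
 *	__u16 ch = 0;			(0 = RIOCM_CHNUM_AUTO)
 *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch);
 *	... bind/listen or connect, then send/receive on the channel ...
 *	ioctl(fd, RIO_CM_CHAN_CLOSE, &ch);
 *	close(fd);
 */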

/*
 * riocm_add_mport - add new local mport device into channel management core
 * @dev: device object associated with mport
 *
 * When a new mport device is added, CM immediately reserves inbound and
 * outbound RapidIO mailboxes that will be used.
 */
static int riocm_add_mport(struct device *dev)
{
	int rc;
	int i;
	struct cm_dev *cm;
	struct rio_mport *mport = to_rio_mport(dev);

	riocm_debug(MPORT, "add mport %s", mport->name);

	cm = kzalloc(sizeof(*cm), GFP_KERNEL);
	if (!cm)
		return -ENOMEM;

	cm->mport = mport;

	rc = rio_request_outb_mbox(mport, cm, cmbox,
				   RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
	if (rc) {
		riocm_error("failed to allocate OBMBOX_%d on %s",
			    cmbox, mport->name);
		kfree(cm);
		return -ENODEV;
	}

	rc = rio_request_inb_mbox(mport, cm, cmbox,
				  RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
	if (rc) {
		riocm_error("failed to allocate IBMBOX_%d on %s",
			    cmbox, mport->name);
		rio_release_outb_mbox(mport, cmbox);
		kfree(cm);
		return -ENODEV;
	}

	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
	if (!cm->rx_wq) {
		rio_release_inb_mbox(mport, cmbox);
		rio_release_outb_mbox(mport, cmbox);
		kfree(cm);
		return -ENOMEM;
	}

	/*
	 * Allocate and register inbound messaging buffers to be ready
	 * to receive channel and system management requests
	 */
	for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
		cm->rx_buf[i] = NULL;

	cm->rx_slots = RIOCM_RX_RING_SIZE;
	mutex_init(&cm->rx_lock);
	riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
	INIT_WORK(&cm->rx_work, rio_ibmsg_handler);

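	/* Initialize outbound (TX) ring bookkeeping */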
	cm->tx_slot = 0;
	cm->tx_cnt = 0;
	cm->tx_ack_slot = 0;
	spin_lock_init(&cm->tx_lock);

	INIT_LIST_HEAD(&cm->peers);
	cm->npeers = 0;
	INIT_LIST_HEAD(&cm->tx_reqs);

	down_write(&rdev_sem);
	list_add_tail(&cm->list, &cm_dev_list);
	up_write(&rdev_sem);

	return 0;
}

/*
 * riocm_remove_mport - remove local mport device from channel management core
 * @dev: device object associated with mport
 *
 * Removes a local mport device from the list of registered devices that
 * provide channel management services, then releases the channels, peers and
 * mailboxes associated with it. Returns silently if the specified mport is
 * not registered with the CM core.
 */
static void riocm_remove_mport(struct device *dev)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct cm_dev *cm;
	struct cm_peer *peer, *temp;
	struct rio_channel *ch, *_c;
	unsigned int i;
	bool found = false;
	LIST_HEAD(list);

	riocm_debug(MPORT, "%s", mport->name);

	/* Find a matching cm_dev object */
	down_write(&rdev_sem);
	list_for_each_entry(cm, &cm_dev_list, list) {
		if (cm->mport == mport) {
			list_del(&cm->list);
			found = true;
			break;
		}
	}
	up_write(&rdev_sem);
	if (!found)
		return;

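	/* Stop inbound message processing for this mport */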
	flush_workqueue(cm->rx_wq);
	destroy_workqueue(cm->rx_wq);

	/* Release channels bound to this mport */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->cmdev == cm) {
			riocm_debug(RDEV, "%s drop ch_%d",
				    mport->name, ch->id);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	if (!list_empty(&list)) {
		list_for_each_entry_safe(ch, _c, &list, ch_node) {
			list_del(&ch->ch_node);
			riocm_ch_close(ch);
		}
	}

	rio_release_inb_mbox(mport, cmbox);
	rio_release_outb_mbox(mport, cmbox);

	/* Remove and free peer entries */
	if (!list_empty(&cm->peers))
		riocm_debug(RDEV, "ATTN: peer list not empty");
	list_for_each_entry_safe(peer, temp, &cm->peers, node) {
		riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
		list_del(&peer->node);
		kfree(peer);
	}

	riocm_rx_free(cm);
	kfree(cm);
	riocm_debug(MPORT, "%s done", mport->name);
}

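/*
 * rio_cm_shutdown - reboot notifier callback
 *
 * Sends a close notification to the connection partner of every channel
 * that is still in the connected state before the system goes down.
 */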
static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rio_channel *ch;
	unsigned int i;
	LIST_HEAD(list);

	riocm_debug(EXIT, ".");

	/*
	 * If any channels are left in the connected state, send a close
	 * notification to the connection partner. First build a list of
	 * channels that require a closing notification, because
	 * riocm_send_close() must be called outside of spinlock-protected
	 * code.
	 */
	spin_lock_bh(&idr_lock);
	idr_for_each_entry(&ch_idr, ch, i) {
		if (ch->state == RIO_CM_CONNECTED) {
			riocm_debug(EXIT, "close ch %d", ch->id);
			idr_remove(&ch_idr, ch->id);
			list_add(&ch->ch_node, &list);
		}
	}
	spin_unlock_bh(&idr_lock);

	list_for_each_entry(ch, &list, ch_node)
		riocm_send_close(ch);

	return NOTIFY_DONE;
}

/*
 * riocm_interface handles addition/removal of remote RapidIO devices
 */
static struct subsys_interface riocm_interface = {
	.name		= "rio_cm",
	.subsys		= &rio_bus_type,
	.add_dev	= riocm_add_dev,
	.remove_dev	= riocm_remove_dev,
};

/*
 * rio_mport_interface handles addition/removal of local mport devices
 */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = riocm_add_mport,
	.remove_dev = riocm_remove_mport,
};

static struct notifier_block rio_cm_notifier = {
	.notifier_call = rio_cm_shutdown,
};

static int __init riocm_init(void)
{
	int ret;

	/* Create device class needed by udev */
	ret = class_register(&dev_class);
	if (ret) {
		riocm_error("Cannot create " DRV_NAME " class");
		return ret;
	}

	ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
	if (ret) {
		class_unregister(&dev_class);
		return ret;
	}

	dev_major = MAJOR(dev_number);
	dev_minor_base = MINOR(dev_number);
	riocm_debug(INIT, "Registered class with %d major", dev_major);

	/*
	 * Register as rapidio_port class interface to get notifications about
	 * mport additions and removals.
	 */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		riocm_error("class_interface_register error: %d", ret);
		goto err_reg;
	}

	/*
	 * Register as RapidIO bus interface to get notifications about
	 * addition/removal of remote RapidIO devices.
	 */
	ret = subsys_interface_register(&riocm_interface);
	if (ret) {
		riocm_error("subsys_interface_register error: %d", ret);
		goto err_cl;
	}

	ret = register_reboot_notifier(&rio_cm_notifier);
	if (ret) {
		riocm_error("failed to register reboot notifier (err=%d)", ret);
		goto err_sif;
	}

	ret = riocm_cdev_add(dev_number);
	if (ret) {
		unregister_reboot_notifier(&rio_cm_notifier);
		ret = -ENODEV;
		goto err_sif;
	}

	return 0;
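	/* Unwind in reverse order of registration */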
err_sif:
	subsys_interface_unregister(&riocm_interface);
err_cl:
	class_interface_unregister(&rio_mport_interface);
err_reg:
	unregister_chrdev_region(dev_number, 1);
	class_unregister(&dev_class);
	return ret;
}

static void __exit riocm_exit(void)
{
	riocm_debug(EXIT, "enter");
	unregister_reboot_notifier(&rio_cm_notifier);
	subsys_interface_unregister(&riocm_interface);
	class_interface_unregister(&rio_mport_interface);
	idr_destroy(&ch_idr);

	device_unregister(riocm_cdev.dev);
	cdev_del(&(riocm_cdev.cdev));

	class_unregister(&dev_class);
	unregister_chrdev_region(dev_number, 1);
}

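/*
 * late_initcall is used here (rather than module_init), presumably so that,
 * when built in, the RapidIO core and mport drivers initialize first.
 */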
late_initcall(riocm_init);
module_exit(riocm_exit);