// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

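/*
 * Register a commac (MAC instance) with this MAL and claim its TX/RX
 * channels; fails with -EBUSY if any requested channel is already owned.
 * NAPI is enabled when the first commac is added.
 */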
int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "reg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	/* Don't let multiple commacs claim the same channel(s) */
	if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
	    (mal->rx_chan_mask & commac->rx_chan_mask)) {
		spin_unlock_irqrestore(&mal->lock, flags);
		printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
		       mal->index);
		return -EBUSY;
	}

	if (list_empty(&mal->list))
		napi_enable(&mal->napi);
	mal->tx_chan_mask |= commac->tx_chan_mask;
	mal->rx_chan_mask |= commac->rx_chan_mask;
	list_add(&commac->list, &mal->list);

	spin_unlock_irqrestore(&mal->lock, flags);

	return 0;
}

void mal_unregister_commac(struct mal_instance *mal,
			   struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "unreg(%08x, %08x)" NL,
		commac->tx_chan_mask, commac->rx_chan_mask);

	mal->tx_chan_mask &= ~commac->tx_chan_mask;
	mal->rx_chan_mask &= ~commac->rx_chan_mask;
	list_del_init(&commac->list);
	if (list_empty(&mal->list))
		napi_disable(&mal->napi);

	spin_unlock_irqrestore(&mal->lock, flags);
}

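/*
 * Program the RX channel buffer size. The hardware stores the size in
 * 16-byte units, so it must be a multiple of 16 (and at most
 * MAL_MAX_RX_SIZE).
 */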
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
	       size > MAL_MAX_RX_SIZE);

	MAL_DBG(mal, "set_rcbs(%d, %lu)" NL, channel, size);

	if (size & 0xf) {
		printk(KERN_WARNING
		       "mal%d: incorrect RX size %lu for the channel %d\n",
		       mal->index, size, channel);
		return -EINVAL;
	}

	set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
	return 0;
}

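/*
 * Buffer descriptor rings for all channels live in one contiguous DMA
 * block: all TX rings first, then all RX rings. These helpers return a
 * channel's ring offset in units of descriptors.
 */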
int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
	BUG_ON(channel < 0 || channel >= mal->num_rx_chans);
	return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}

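/*
 * Channel enable/disable. Enabling sets a bit in the MAL_TXCASR/MAL_RXCASR
 * active set registers (read-modify-write under the lock); disabling
 * writes the bit to the MAL_TXCARR/MAL_RXCARR reset registers, which
 * needs no read-modify-write and hence no lock.
 */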
void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_tx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
	set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
	unsigned long flags;

	/*
	 * On some PPC 4xx SoCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but MAL_RXCASR expects the value divided by 8 for
	 * the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "enable_rx(%d)" NL, channel);

	set_mal_dcrn(mal, MAL_RXCASR,
		     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
	/*
	 * On some PPC 4xx SoCs (e.g. 460EX/GT), the RX channel number is a
	 * multiple of 8, but MAL_RXCARR expects the value divided by 8 for
	 * the bitmask
	 */
	if (!(channel % 8))
		channel >>= 3;

	set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

	MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_add(%p)" NL, commac);

	/* starts disabled */
	set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	list_add_tail(&commac->poll_list, &mal->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
	unsigned long flags;

	spin_lock_irqsave(&mal->lock, flags);

	MAL_DBG(mal, "poll_del(%p)" NL, commac);

	list_del(&commac->poll_list);

	spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
	MAL_DBG2(mal, "enable_irq" NL);

	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
	// XXX might want to cache MAL_CFG as the DCR read can be slooooow
	set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

	MAL_DBG2(mal, "disable_irq" NL);
}

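/*
 * System error interrupt: latch and clear MAL_ESR, then report PLB or
 * OPB bus errors. Descriptor errors are left to the TXDE/RXDE handlers.
 */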
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Clear the error status register */
	set_mal_dcrn(mal, MAL_ESR, esr);

	MAL_DBG(mal, "SERR %08x" NL, esr);

	if (esr & MAL_ESR_EVB) {
		if (esr & MAL_ESR_DE) {
			/* We ignore descriptor errors here; a TXDE or RXDE
			 * interrupt will be generated anyway.
			 */
			return IRQ_HANDLED;
		}

		if (esr & MAL_ESR_PEIN) {
			/* PLB error, it's probably buggy hardware or an
			 * incorrect physical address in a BD (i.e. a bug)
			 */
			if (net_ratelimit())
				printk(KERN_ERR
				       "mal%d: system error, "
				       "PLB (ESR = 0x%08x)\n",
				       mal->index, esr);
			return IRQ_HANDLED;
		}

		/* OPB error, it's probably buggy hardware or an incorrect
		 * EBC setup
		 */
		if (net_ratelimit())
			printk(KERN_ERR
			       "mal%d: system error, OPB (ESR = 0x%08x)\n",
			       mal->index, esr);
	}
	return IRQ_HANDLED;
}

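/*
 * Kick NAPI: if a poll isn't already pending, mask the EOB interrupt and
 * schedule mal_poll(). The interrupt is re-enabled from mal_poll() when
 * polling completes.
 */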
static inline void mal_schedule_poll(struct mal_instance *mal)
{
	if (likely(napi_schedule_prep(&mal->napi))) {
		MAL_DBG2(mal, "schedule_poll" NL);
		spin_lock(&mal->lock);
		mal_disable_eob_irq(mal);
		spin_unlock(&mal->lock);
		__napi_schedule(&mal->napi);
	} else
		MAL_DBG2(mal, "already in poll" NL);
}

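/*
 * TX/RX end-of-buffer interrupts: schedule the NAPI poll and acknowledge
 * the status bits we just read. On 405EZ (MAL_FTR_CLEAR_ICINTSTAT) the
 * SDR0 interrupt controller status must be cleared by hand as well.
 */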
static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

	MAL_DBG2(mal, "txeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

	MAL_DBG2(mal, "rxeob %08x" NL, r);

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
	if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
		mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
		       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

	return IRQ_HANDLED;
}

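/*
 * Descriptor error interrupts. A TX descriptor error is only logged; an
 * RX descriptor error stops the affected channels, so the owning commacs
 * are notified and a poll is scheduled to restart them.
 */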
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;

	u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
	set_mal_dcrn(mal, MAL_TXDEIR, deir);

	MAL_DBG(mal, "txde %08x" NL, deir);

	if (net_ratelimit())
		printk(KERN_ERR
		       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
		       mal->index, deir);

	return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	struct list_head *l;

	u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

	MAL_DBG(mal, "rxde %08x" NL, deir);

	list_for_each(l, &mal->list) {
		struct mal_commac *mc = list_entry(l, struct mal_commac, list);
		if (deir & mc->rx_chan_mask) {
			set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
			mc->ops->rxde(mc->dev);
		}
	}

	mal_schedule_poll(mal);
	set_mal_dcrn(mal, MAL_RXDEIR, deir);

	return IRQ_HANDLED;
}

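/*
 * Combined error interrupt, used when MAL_FTR_COMMON_ERR_INT is set
 * (405EZ): demultiplex MAL_ESR into the SERR, TXDE and RXDE handlers.
 */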
static irqreturn_t mal_int(int irq, void *dev_instance)
{
	struct mal_instance *mal = dev_instance;
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	if (esr & MAL_ESR_EVB) {
		/* descriptor error */
		if (esr & MAL_ESR_DE) {
			if (esr & MAL_ESR_CIDT)
				return mal_rxde(irq, dev_instance);
			else
				return mal_txde(irq, dev_instance);
		} else { /* SERR */
			return mal_serr(irq, dev_instance);
		}
	}
	return IRQ_HANDLED;
}

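/*
 * Exclude a commac from NAPI polling. The flag bit acts as a simple
 * mutex between callers; napi_synchronize() then waits out any poll
 * that is already running.
 */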
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
	/* Spinlock-type semantics: only one caller may disable poll at a time */
	while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
		msleep(1);

	/* Synchronize with the MAL NAPI poller */
	napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
	smp_wmb();
	clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

	/* Feels better to trigger a poll here to catch up with events that
	 * may have happened on this channel while disabled. It will most
	 * probably be delayed until the next interrupt but that's mostly a
	 * non-issue in the context where this is called.
	 */
	napi_schedule(&mal->napi);
}

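/*
 * NAPI poll callback shared by all channels on this MAL: reap completed
 * TX descriptors, receive up to @budget packets, then re-enable the EOB
 * interrupt, re-arming the poll if packets slipped in meanwhile.
 */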
static int mal_poll(struct napi_struct *napi, int budget)
{
	struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
	struct list_head *l;
	int received = 0;
	unsigned long flags;

	MAL_DBG2(mal, "poll(%d)" NL, budget);

	/* Process TX skbs */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		mc->ops->poll_tx(mc->dev);
	}

	/* Process RX skbs.
	 *
	 * We _might_ need something smarter here to enforce polling
	 * fairness.
	 */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget - received);
		if (n) {
			received += n;
			if (received >= budget)
				return budget;
		}
	}

	if (napi_complete_done(napi, received)) {
		/* We need to disable IRQs to protect from RXDE IRQ here */
		spin_lock_irqsave(&mal->lock, flags);
		mal_enable_eob_irq(mal);
		spin_unlock_irqrestore(&mal->lock, flags);
	}

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
		struct mal_commac *mc =
			list_entry(l, struct mal_commac, poll_list);
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		if (unlikely(mc->ops->peek_rx(mc->dev) ||
			     test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
			MAL_DBG2(mal, "rotting packet" NL);
			if (!napi_schedule(napi))
				goto more_work;

			spin_lock_irqsave(&mal->lock, flags);
			mal_disable_eob_irq(mal);
			spin_unlock_irqrestore(&mal->lock, flags);
		}
		mc->ops->poll_tx(mc->dev);
	}

 more_work:
	MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
	return received;
}

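/*
 * Soft-reset the MAL. The self-clearing MAL_CFG_SR bit should drop after
 * one system clock; spin briefly and warn if it never does.
 */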
static void mal_reset(struct mal_instance *mal)
{
	int n = 10;

	MAL_DBG(mal, "reset" NL);

	set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

	/* Wait for reset to complete (1 system clock) */
	while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
		--n;

	if (unlikely(!n))
		printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
	return sizeof(struct emac_ethtool_regs_subhdr) +
		sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;
	struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
	int i;

	hdr->version = mal->version;
	hdr->index = mal->index;

	regs->tx_count = mal->num_tx_chans;
	regs->rx_count = mal->num_rx_chans;

	regs->cfg = get_mal_dcrn(mal, MAL_CFG);
	regs->esr = get_mal_dcrn(mal, MAL_ESR);
	regs->ier = get_mal_dcrn(mal, MAL_IER);
	regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
	regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
	regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
	regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
	regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
	regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
	regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
	regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

	for (i = 0; i < regs->tx_count; ++i)
		regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

	for (i = 0; i < regs->rx_count; ++i) {
		regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
		regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
	}
	return regs + 1;
}

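/*
 * Probe: read the channel counts from the device tree, map the DCRs,
 * reset and configure the MAL, allocate one coherent DMA block for all
 * BD rings, then wire up the five interrupts (three distinct lines when
 * the error sources share one) before enabling the EOB interrupt.
 */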
static int mal_probe(struct platform_device *ofdev)
{
	struct mal_instance *mal;
	int err = 0, i, bd_size;
	int index = mal_count++;
	unsigned int dcr_base;
	const u32 *prop;
	u32 cfg;
	unsigned long irqflags;
	irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

	mal = devm_kzalloc(&ofdev->dev, sizeof(struct mal_instance),
			   GFP_KERNEL);
	if (!mal)
		return -ENOMEM;

	mal->index = index;
	mal->ofdev = ofdev;
	mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

	MAL_DBG(mal, "probe" NL);

	prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-tx-chans property!\n",
		       index);
		return -ENODEV;
	}
	mal->num_tx_chans = prop[0];

	prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
	if (prop == NULL) {
		printk(KERN_ERR
		       "mal%d: can't find MAL num-rx-chans property!\n",
		       index);
		return -ENODEV;
	}
	mal->num_rx_chans = prop[0];

	dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
	if (dcr_base == 0) {
		printk(KERN_ERR
		       "mal%d: can't find DCR resource!\n", index);
		return -ENODEV;
	}
	mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
	if (!DCR_MAP_OK(mal->dcr_host)) {
		printk(KERN_ERR
		       "mal%d: failed to map DCRs !\n", index);
		return -ENODEV;
	}

	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
		defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
		mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
				  MAL_FTR_COMMON_ERR_INT);
#else
		printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n",
		       ofdev->dev.of_node);
		err = -ENODEV;
		goto fail_unmap;
#endif
	}

	INIT_LIST_HEAD(&mal->poll_list);
	INIT_LIST_HEAD(&mal->list);
	spin_lock_init(&mal->lock);

	mal->dummy_dev = alloc_netdev_dummy(0);
	if (!mal->dummy_dev) {
		err = -ENOMEM;
		goto fail_unmap;
	}

	netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll,
			      CONFIG_IBM_EMAC_POLL_WEIGHT);

	/* Load power-on reset defaults */
	mal_reset(mal);

	/* Set the MAL configuration register */
	cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
	cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

	/* Current Axon is not happy with priority being non-0, it can
	 * deadlock, so fix it up here
	 */
	if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
		cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

	/* Apply configuration */
	set_mal_dcrn(mal, MAL_CFG, cfg);

	/* Allocate space for BD rings */
	BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
	BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

	bd_size = sizeof(struct mal_descriptor) *
		(NUM_TX_BUFF * mal->num_tx_chans +
		 NUM_RX_BUFF * mal->num_rx_chans);
	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
					  GFP_KERNEL);
	if (mal->bd_virt == NULL) {
		err = -ENOMEM;
		goto fail_dummy;
	}

	for (i = 0; i < mal->num_tx_chans; ++i)
		set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_tx_bd_offset(mal, i));

	for (i = 0; i < mal->num_rx_chans; ++i)
		set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
			     sizeof(struct mal_descriptor) *
			     mal_rx_bd_offset(mal, i));

	mal->txeob_irq = platform_get_irq(ofdev, 0);
	mal->rxeob_irq = platform_get_irq(ofdev, 1);
	mal->serr_irq = platform_get_irq(ofdev, 2);

	if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
		mal->txde_irq = mal->rxde_irq = mal->serr_irq;
		irqflags = IRQF_SHARED;
		hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
	} else {
		mal->txde_irq = platform_get_irq(ofdev, 3);
		mal->rxde_irq = platform_get_irq(ofdev, 4);
		irqflags = 0;
		hdlr_serr = mal_serr;
		hdlr_txde = mal_txde;
		hdlr_rxde = mal_rxde;
	}

	err = devm_request_irq(&ofdev->dev, mal->serr_irq, hdlr_serr, irqflags,
			       "MAL SERR", mal);
	if (err)
		goto fail2;
	err = devm_request_irq(&ofdev->dev, mal->txde_irq, hdlr_txde, irqflags,
			       "MAL TX DE", mal);
	if (err)
		goto fail2;
	err = devm_request_irq(&ofdev->dev, mal->txeob_irq, mal_txeob, 0,
			       "MAL TX EOB", mal);
	if (err)
		goto fail2;
	err = devm_request_irq(&ofdev->dev, mal->rxde_irq, hdlr_rxde, irqflags,
			       "MAL RX DE", mal);
	if (err)
		goto fail2;
	err = devm_request_irq(&ofdev->dev, mal->rxeob_irq, mal_rxeob, 0,
			       "MAL RX EOB", mal);
	if (err)
		goto fail2;

	/* Enable all MAL SERR interrupt sources */
	set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);

	/* Enable EOB interrupt */
	mal_enable_eob_irq(mal);

	printk(KERN_INFO
	       "MAL v%d %pOF, %d TX channels, %d RX channels\n",
	       mal->version, ofdev->dev.of_node,
	       mal->num_tx_chans, mal->num_rx_chans);

	/* Advertise this instance to the rest of the world */
	wmb();
	platform_set_drvdata(ofdev, mal);

	return 0;

 fail2:
	dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_dummy:
	free_netdev(mal->dummy_dev);
 fail_unmap:
	dcr_unmap(mal->dcr_host, 0x100);
	return err;
}

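/*
 * Remove: quiesce NAPI, warn if any commac is still registered, reset
 * the hardware and release the BD rings and the DCR mapping. The IRQs
 * and the mal instance itself are devm-managed.
 */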
static void mal_remove(struct platform_device *ofdev)
{
	struct mal_instance *mal = platform_get_drvdata(ofdev);

	MAL_DBG(mal, "remove" NL);

	/* Synchronize with scheduled polling */
	napi_disable(&mal->napi);

	if (!list_empty(&mal->list))
		/* This is *very* bad */
		WARN(1, KERN_EMERG
		     "mal%d: commac list is not empty on remove!\n",
		     mal->index);

	mal_reset(mal);

	free_netdev(mal->dummy_dev);

	dcr_unmap(mal->dcr_host, 0x100);

	dma_free_coherent(&ofdev->dev,
			  sizeof(struct mal_descriptor) *
			  (NUM_TX_BUFF * mal->num_tx_chans +
			   NUM_RX_BUFF * mal->num_rx_chans),
			  mal->bd_virt, mal->bd_dma);
}

static const struct of_device_id mal_platform_match[] =
{
	{
		.compatible = "ibm,mcmal",
	},
	{
		.compatible = "ibm,mcmal2",
	},
	/* Backward compat */
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal",
	},
	{
		.type = "mcmal-dma",
		.compatible = "ibm,mcmal2",
	},
	{},
};

static struct platform_driver mal_of_driver = {
	.driver = {
		.name = "mcmal",
		.of_match_table = mal_platform_match,
	},
	.probe = mal_probe,
	.remove = mal_remove,
};

int __init mal_init(void)
{
	return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
	platform_driver_unregister(&mal_of_driver);
}