/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>

struct sh_cmt_priv {
        void __iomem *mapbase;
        struct clk *clk;
        unsigned long width; /* 16 or 32 bit version of hardware block */
        unsigned long overflow_bit;
        unsigned long clear_bits;
        struct irqaction irqaction;
        struct platform_device *pdev;

        unsigned long flags;
        unsigned long match_value;
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
        spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
};

static DEFINE_SPINLOCK(sh_cmt_lock);

#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == CMSTR) {
                offs = 0;
                base -= cfg->channel_offset;
        } else
                offs = reg_nr;

        if (p->width == 16)
                offs <<= 1;
        else {
                offs <<= 2;
                if ((reg_nr == CMCNT) || (reg_nr == CMCOR))
                        return ioread32(base + offs);
        }

        return ioread16(base + offs);
}

static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
                                unsigned long value)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        void __iomem *base = p->mapbase;
        unsigned long offs;

        if (reg_nr == CMSTR) {
                offs = 0;
                base -= cfg->channel_offset;
        } else
                offs = reg_nr;

        if (p->width == 16)
                offs <<= 1;
        else {
                offs <<= 2;
                if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) {
                        iowrite32(value, base + offs);
                        return;
                }
        }

        iowrite16(value, base + offs);
}
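/*
 * Worked example of the offset math above (channel_offset value is
 * hypothetical, not from any particular SoC): on a 32-bit channel with
 * cfg->channel_offset = 0x10, sh_cmt_read(p, CMSTR) is a 16-bit read at
 * mapbase - 0x10, CMCSR is a 16-bit access at mapbase + 0, while CMCNT and
 * CMCOR are 32-bit accesses at mapbase + 4 and mapbase + 8 (offs <<= 2).
 * On a 16-bit channel every access is 16 bits wide and the registers sit
 * at mapbase + 0, + 2 and + 4 (offs <<= 1).
 */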
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
                                        int *has_wrapped)
{
        unsigned long v1, v2, v3;
        int o1, o2;

        o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;

        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
                o2 = o1;
                v1 = sh_cmt_read(p, CMCNT);
                v2 = sh_cmt_read(p, CMCNT);
                v3 = sh_cmt_read(p, CMCNT);
                o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
        } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
                          || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

        *has_wrapped = o1;
        return v2;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        unsigned long flags, value;

        /* start/stop register shared by multiple timer channels */
        spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read(p, CMSTR);

        if (start)
                value |= 1 << cfg->timer_bit;
        else
                value &= ~(1 << cfg->timer_bit);

        sh_cmt_write(p, CMSTR, value);
        spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
        int k, ret;

        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
                goto err0;
        }

        /* make sure channel is disabled */
        sh_cmt_start_stop_ch(p, 0);

        /* configure channel, periodic mode and maximum timeout */
        if (p->width == 16) {
                *rate = clk_get_rate(p->clk) / 512;
                sh_cmt_write(p, CMCSR, 0x43);
        } else {
                *rate = clk_get_rate(p->clk) / 8;
                sh_cmt_write(p, CMCSR, 0x01a4);
        }

        sh_cmt_write(p, CMCOR, 0xffffffff);
        sh_cmt_write(p, CMCNT, 0);

        /*
         * According to the sh73a0 user's manual, as CMCNT can be operated
         * only by the RCLK (Pseudo 32 kHz), there's one restriction on
         * modifying the CMCNT register; two RCLK cycles are necessary before
         * this register is either read or any modification of the value
         * it holds is reflected in the LSI's actual operation.
         *
         * While at it, we're supposed to clear out the CMCNT as of this
         * moment, so make sure it's processed properly here.  This will
         * take RCLKx2 at maximum.
         */
        for (k = 0; k < 100; k++) {
                if (!sh_cmt_read(p, CMCNT))
                        break;
                udelay(1);
        }

        if (sh_cmt_read(p, CMCNT)) {
                dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
                ret = -ETIMEDOUT;
                goto err1;
        }

        /* enable channel */
        sh_cmt_start_stop_ch(p, 1);
        return 0;

err1:
        /* stop clock */
        clk_disable(p->clk);

err0:
        return ret;
}

static void sh_cmt_disable(struct sh_cmt_priv *p)
{
        /* disable channel */
        sh_cmt_start_stop_ch(p, 0);

        /* disable interrupts in CMT block */
        sh_cmt_write(p, CMCSR, 0);

        /* stop clock */
        clk_disable(p->clk);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
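/*
 * How the flags interact (a summary of the code below, not new behavior):
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE act as a usage count in
 * sh_cmt_start()/sh_cmt_stop(), so the channel is only enabled for the
 * first user and disabled with the last. FLAG_REPROGRAM asks the interrupt
 * handler to reprogram the match register, FLAG_SKIPEVENT suppresses one
 * event_handler() callback when the pending match interrupt belongs to a
 * stale match value, and FLAG_IRQCONTEXT lets set_next_event() merely
 * store the new delta and leave the hardware reprogramming to the
 * interrupt handler's epilogue.
 */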
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
                                              int absolute)
{
        unsigned long new_match;
        unsigned long value = p->next_match_value;
        unsigned long delay = 0;
        unsigned long now = 0;
        int has_wrapped;

        now = sh_cmt_get_counter(p, &has_wrapped);
        p->flags |= FLAG_REPROGRAM; /* force reprogram */

        if (has_wrapped) {
                /* we're competing with the interrupt handler.
                 * -> let the interrupt handler reprogram the timer.
                 * -> interrupt number two handles the event.
                 */
                p->flags |= FLAG_SKIPEVENT;
                return;
        }

        if (absolute)
                now = 0;

        do {
                /* reprogram the timer hardware,
                 * but don't save the new match value yet.
                 */
                new_match = now + value + delay;
                if (new_match > p->max_match_value)
                        new_match = p->max_match_value;

                sh_cmt_write(p, CMCOR, new_match);

                now = sh_cmt_get_counter(p, &has_wrapped);
                if (has_wrapped && (new_match > p->match_value)) {
                        /* we are changing to a greater match value,
                         * so this wrap must be caused by the counter
                         * matching the old value.
                         * -> first interrupt reprograms the timer.
                         * -> interrupt number two handles the event.
                         */
                        p->flags |= FLAG_SKIPEVENT;
                        break;
                }

                if (has_wrapped) {
                        /* we are changing to a smaller match value,
                         * so the wrap must be caused by the counter
                         * matching the new value.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* be safe: verify hardware settings */
                if (now < new_match) {
                        /* timer value is below match value, all good.
                         * this makes sure we won't miss any match events.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* the counter has reached a value greater than our new
                 * match value, and since the has_wrapped flag isn't set
                 * we must have programmed an event that is too close.
                 * -> increase delay and retry.
                 */
                if (delay)
                        delay <<= 1;
                else
                        delay = 1;

                if (!delay)
                        dev_warn(&p->pdev->dev, "too long delay\n");

        } while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        if (delta > p->max_match_value)
                dev_warn(&p->pdev->dev, "delta out of range\n");

        p->next_match_value = delta;
        sh_cmt_clock_event_program_verify(p, 0);
}

static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __sh_cmt_set_next(p, delta);
        spin_unlock_irqrestore(&p->lock, flags);
}
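/*
 * Illustration of the retry loop above with hypothetical numbers: asked
 * for a delta of 2 with the counter at 1000, we program CMCOR = 1002. If
 * the counter has already passed 1002 when we read it back (and no wrap
 * is flagged), the event was programmed too close; delay then doubles
 * through 1, 2, 4, ... and we retry with CMCOR = now + 2 + delay until
 * the match lands in the future. Should delay ever shift out to zero,
 * the loop gives up with the "too long delay" warning.
 */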
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
        struct sh_cmt_priv *p = dev_id;

        /* clear flags */
        sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);

        /* update the clock source counter to begin with, if enabled;
         * the wrap flag should be cleared by the timer-specific isr
         * before we end up here.
         */
        if (p->flags & FLAG_CLOCKSOURCE)
                p->total_cycles += p->match_value + 1;

        if (!(p->flags & FLAG_REPROGRAM))
                p->next_match_value = p->max_match_value;

        p->flags |= FLAG_IRQCONTEXT;

        if (p->flags & FLAG_CLOCKEVENT) {
                if (!(p->flags & FLAG_SKIPEVENT)) {
                        if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
                                p->next_match_value = p->max_match_value;
                                p->flags |= FLAG_REPROGRAM;
                        }

                        p->ced.event_handler(&p->ced);
                }
        }

        p->flags &= ~FLAG_SKIPEVENT;

        if (p->flags & FLAG_REPROGRAM) {
                p->flags &= ~FLAG_REPROGRAM;
                sh_cmt_clock_event_program_verify(p, 1);

                if (p->flags & FLAG_CLOCKEVENT)
                        if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
                            || (p->match_value == p->next_match_value))
                                p->flags &= ~FLAG_REPROGRAM;
        }

        p->flags &= ~FLAG_IRQCONTEXT;

        return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);

        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);

        if (ret)
                goto out;
        p->flags |= flag;

        /* setup timeout if no clockevent */
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(p, p->max_match_value);
out:
        spin_unlock_irqrestore(&p->lock, flags);

        return ret;
}

static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
        unsigned long flags;
        unsigned long f;

        spin_lock_irqsave(&p->lock, flags);

        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;

        if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                sh_cmt_disable(p);

        /* adjust the timeout to maximum if only clocksource left */
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(p, p->max_match_value);

        spin_unlock_irqrestore(&p->lock, flags);
}

static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
        return container_of(cs, struct sh_cmt_priv, cs);
}

static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
        unsigned long flags, raw;
        unsigned long value;
        int has_wrapped;

        spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);

        if (unlikely(has_wrapped))
                raw += p->match_value + 1;
        spin_unlock_irqrestore(&p->lock, flags);

        return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
        int ret;
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        p->total_cycles = 0;

        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
        if (!ret)
                __clocksource_updatefreq_hz(cs, p->rate);
        return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
        sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
        sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
}
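/*
 * Worked example of the clocksource accounting (hypothetical 16-bit
 * channel): with match_value = 0xffff the counter runs 0..0xffff, so each
 * serviced match interrupt adds match_value + 1 = 0x10000 cycles to
 * total_cycles. A read returns total_cycles plus the raw counter, plus one
 * more full period if the overflow flag shows a wrap that the interrupt
 * handler has not yet accounted for. This software accumulator is what
 * lets a 16- or 32-bit counter back a full unsigned-long-wide mask.
 */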
static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clocksource *cs = &p->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_disable;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&p->pdev->dev, "used as clock source\n");

        /* Register with dummy 1 Hz value, gets updated in ->enable() */
        clocksource_register_hz(cs, 1);
        return 0;
}

static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_cmt_priv, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
        struct clock_event_device *ced = &p->ced;

        sh_cmt_start(p, FLAG_CLOCKEVENT);

        /* TODO: calculate good shift from rate and counter bit width */

        ced->shift = 32;
        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

        if (periodic)
                sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
        else
                sh_cmt_set_next(p, p->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_cmt_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_cmt_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }
}

static int sh_cmt_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
        if (likely(p->flags & FLAG_IRQCONTEXT))
                p->next_match_value = delta - 1;
        else
                sh_cmt_set_next(p, delta - 1);

        return 0;
}

static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clock_event_device *ced = &p->ced;

        memset(ced, 0, sizeof(*ced));

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;

        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
}

static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                           unsigned long clockevent_rating,
                           unsigned long clocksource_rating)
{
        if (p->width == (sizeof(p->max_match_value) * 8))
                p->max_match_value = ~0;
        else
                p->max_match_value = (1 << p->width) - 1;

        p->match_value = p->max_match_value;
        spin_lock_init(&p->lock);

        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);

        if (clocksource_rating)
                sh_cmt_register_clocksource(p, name, clocksource_rating);

        return 0;
}
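/*
 * Sketch of the platform data consumed by sh_cmt_setup() below, with
 * made-up values for a hypothetical board (the field layout is assumed
 * from <linux/sh_timer.h> of this era; the numbers would come from the
 * SoC's memory map):
 *
 *      static struct sh_timer_config cmt10_platform_data = {
 *              .name = "CMT10",
 *              .channel_offset = 0x10, // distance from channel back to CMSTR
 *              .timer_bit = 0,         // this channel's bit in shared CMSTR
 *              .clockevent_rating = 125,
 *              .clocksource_rating = 125,
 *      };
 */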
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res;
        int irq, ret;
        ret = -ENXIO;

        memset(p, 0, sizeof(*p));
        p->pdev = pdev;

        if (!cfg) {
                dev_err(&p->pdev->dev, "missing platform data\n");
                goto err0;
        }

        platform_set_drvdata(pdev, p);

        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
                goto err0;
        }

        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
                dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }

        /* request irq using setup_irq() (too early for request_irq()) */
        p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
                             IRQF_IRQPOLL | IRQF_NOBALANCING;

        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "cmt_fck");
        if (IS_ERR(p->clk)) {
                dev_err(&p->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
                goto err1;
        }

        if (resource_size(res) == 6) {
                p->width = 16;
                p->overflow_bit = 0x80;
                p->clear_bits = ~0x80;
        } else {
                p->width = 32;
                p->overflow_bit = 0x8000;
                p->clear_bits = ~0xc000;
        }

        ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
                              cfg->clockevent_rating,
                              cfg->clocksource_rating);
        if (ret) {
                dev_err(&p->pdev->dev, "registration failed\n");
                goto err1;
        }

        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
                goto err1;
        }

        return 0;

err1:
        iounmap(p->mapbase);
err0:
        return ret;
}

static int __devinit sh_cmt_probe(struct platform_device *pdev)
{
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
        int ret;

        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                return 0;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_cmt_setup(p, pdev);
        if (ret) {
                kfree(p);
                platform_set_drvdata(pdev, NULL);
        }
        return ret;
}

static int __devexit sh_cmt_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
        .probe          = sh_cmt_probe,
        .remove         = __devexit_p(sh_cmt_remove),
        .driver         = {
                .name   = "sh_cmt",
        }
};

static int __init sh_cmt_init(void)
{
        return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
        platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");
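/*
 * Example (hypothetical) board-side resources, illustrating what
 * sh_cmt_setup() expects: the first mem resource spans just this channel's
 * registers (a size of 6 bytes selects the 16-bit layout, anything else
 * the 32-bit one), the first IRQ resource is the channel's match
 * interrupt, and a clock named "cmt_fck" must resolve via clk_get().
 * Addresses and IRQ number are illustrative only:
 *
 *      static struct resource cmt10_resources[] = {
 *              [0] = {
 *                      .start  = 0xe6138010,   // hypothetical channel base
 *                      .end    = 0xe613801b,   // 12 bytes -> 32-bit channel
 *                      .flags  = IORESOURCE_MEM,
 *              },
 *              [1] = {
 *                      .start  = 72,           // hypothetical IRQ number
 *                      .flags  = IORESOURCE_IRQ,
 *              },
 *      };
 */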