/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctsrc.c
 *
 * @Brief
 * This file contains the implementation of the Sample Rate Convertor
 * resource management object.
 *
 * @Author	Liu Chun
 * @Date	May 13 2008
 *
 */

#include "ctsrc.h"
#include "cthardware.h"
#include <linux/slab.h>

#define SRC_RESOURCE_NUM	256
#define SRCIMP_RESOURCE_NUM	256

static unsigned int conj_mask;

static int src_default_config_memrd(struct src *src);
static int src_default_config_memwr(struct src *src);
static int src_default_config_arcrw(struct src *src);

static int (*src_default_config[3])(struct src *) = {
	[MEMRD] = src_default_config_memrd,
	[MEMWR] = src_default_config_memwr,
	[ARCRW] = src_default_config_arcrw
};

static int src_set_state(struct src *src, unsigned int state)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_state(src->rsc.ctrl_blk, state);

	return 0;
}

static int src_set_bm(struct src *src, unsigned int bm)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_bm(src->rsc.ctrl_blk, bm);

	return 0;
}

static int src_set_sf(struct src *src, unsigned int sf)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_sf(src->rsc.ctrl_blk, sf);

	return 0;
}

static int src_set_pm(struct src *src, unsigned int pm)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_pm(src->rsc.ctrl_blk, pm);

	return 0;
}

static int src_set_rom(struct src *src, unsigned int rom)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_rom(src->rsc.ctrl_blk, rom);

	return 0;
}

static int src_set_vo(struct src *src, unsigned int vo)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_vo(src->rsc.ctrl_blk, vo);

	return 0;
}

static int src_set_st(struct src *src, unsigned int st)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_st(src->rsc.ctrl_blk, st);

	return 0;
}

static int src_set_bp(struct src *src, unsigned int bp)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_bp(src->rsc.ctrl_blk, bp);

	return 0;
}

static int src_set_cisz(struct src *src, unsigned int cisz)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_cisz(src->rsc.ctrl_blk, cisz);

	return 0;
}

static int src_set_ca(struct src *src, unsigned int ca)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_ca(src->rsc.ctrl_blk, ca);

	return 0;
}

static int src_set_sa(struct src *src, unsigned int sa)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_sa(src->rsc.ctrl_blk, sa);

	return 0;
}

static int src_set_la(struct src *src, unsigned int la)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_la(src->rsc.ctrl_blk, la);

	return 0;
}

static int src_set_pitch(struct src *src, unsigned int pitch)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_pitch(src->rsc.ctrl_blk, pitch);

	return 0;
}

static int src_set_clear_zbufs(struct src *src)
{
	struct hw *hw;

	hw = src->rsc.hw;
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	return 0;
}

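/*
 * Flush the cached register image of this SRC to hardware.  When the
 * resource spans multiple conjugate SRCs (msr > 1), the master's dirty
 * flags (filtered by conj_mask) are replayed on each conjugate so that
 * all of them are programmed consistently.
 */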
static int src_commit_write(struct src *src)
{
	struct hw *hw;
	int i;
	unsigned int dirty = 0;

	hw = src->rsc.hw;
	src->rsc.ops->master(&src->rsc);
	if (src->rsc.msr > 1) {
		/* Save dirty flags for conjugate resource programming */
		dirty = hw->src_get_dirty(src->rsc.ctrl_blk) & conj_mask;
	}
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
						src->rsc.ctrl_blk);

	/* Program conjugate parameter mixer resources */
	if (MEMWR == src->mode)
		return 0;

	for (i = 1; i < src->rsc.msr; i++) {
		src->rsc.ops->next_conj(&src->rsc);
		hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
							src->rsc.ctrl_blk);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_get_ca(struct src *src)
{
	struct hw *hw;

	hw = src->rsc.hw;
	return hw->src_get_ca(hw, src->rsc.ops->index(&src->rsc),
						src->rsc.ctrl_blk);
}

static int src_init(struct src *src)
{
	src_default_config[src->mode](src);

	return 0;
}

static struct src *src_next_interleave(struct src *src)
{
	return src->intlv;
}

static int src_default_config_memrd(struct src *src)
{
	struct hw *hw = src->rsc.hw;
	unsigned int rsr, msr;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 1);
	/* rsr = floor(log2(msr)) */
	for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
		rsr++;

	hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
	hw->src_set_wr(src->rsc.ctrl_blk, 0);
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, src->multi - 1);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	src->rsc.ops->master(&src->rsc);
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
						src->rsc.ctrl_blk);

	for (msr = 1; msr < src->rsc.msr; msr++) {
		src->rsc.ops->next_conj(&src->rsc);
		hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
							src->rsc.ctrl_blk);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_default_config_memwr(struct src *src)
{
	struct hw *hw = src->rsc.hw;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 1);
	hw->src_set_rsr(src->rsc.ctrl_blk, 0);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_S16);
	hw->src_set_wr(src->rsc.ctrl_blk, 1);
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	src->rsc.ops->master(&src->rsc);
	hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
						src->rsc.ctrl_blk);

	return 0;
}

static int src_default_config_arcrw(struct src *src)
{
	struct hw *hw = src->rsc.hw;
	unsigned int rsr, msr;
	unsigned int dirty;

	hw->src_set_state(src->rsc.ctrl_blk, SRC_STATE_OFF);
	hw->src_set_bm(src->rsc.ctrl_blk, 0);
	/* rsr = floor(log2(msr)) */
	for (rsr = 0, msr = src->rsc.msr; msr > 1; msr >>= 1)
		rsr++;

	hw->src_set_rsr(src->rsc.ctrl_blk, rsr);
	hw->src_set_sf(src->rsc.ctrl_blk, SRC_SF_F32);
	hw->src_set_wr(src->rsc.ctrl_blk, 0);
	hw->src_set_pm(src->rsc.ctrl_blk, 0);
	hw->src_set_rom(src->rsc.ctrl_blk, 0);
	hw->src_set_vo(src->rsc.ctrl_blk, 0);
	hw->src_set_st(src->rsc.ctrl_blk, 0);
	hw->src_set_ilsz(src->rsc.ctrl_blk, 0);
	hw->src_set_cisz(src->rsc.ctrl_blk, 0x80);
	hw->src_set_sa(src->rsc.ctrl_blk, 0x0);
	/*hw->src_set_sa(src->rsc.ctrl_blk, 0x100);*/
	hw->src_set_la(src->rsc.ctrl_blk, 0x1000);
	/*hw->src_set_la(src->rsc.ctrl_blk, 0x03ffffe0);*/
	hw->src_set_ca(src->rsc.ctrl_blk, 0x80);
	hw->src_set_pitch(src->rsc.ctrl_blk, 0x1000000);
	hw->src_set_clear_zbufs(src->rsc.ctrl_blk, 1);

	dirty = hw->src_get_dirty(src->rsc.ctrl_blk);
	src->rsc.ops->master(&src->rsc);
	for (msr = 0; msr < src->rsc.msr; msr++) {
		hw->src_set_dirty(src->rsc.ctrl_blk, dirty);
		hw->src_commit_write(hw, src->rsc.ops->index(&src->rsc),
						src->rsc.ctrl_blk);
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static const struct src_rsc_ops src_rsc_ops = {
	.set_state = src_set_state,
	.set_bm = src_set_bm,
	.set_sf = src_set_sf,
	.set_pm = src_set_pm,
	.set_rom = src_set_rom,
	.set_vo = src_set_vo,
	.set_st = src_set_st,
	.set_bp = src_set_bp,
	.set_cisz = src_set_cisz,
	.set_ca = src_set_ca,
	.set_sa = src_set_sa,
	.set_la = src_set_la,
	.set_pitch = src_set_pitch,
	.set_clr_zbufs = src_set_clear_zbufs,
	.commit_write = src_commit_write,
	.get_ca = src_get_ca,
	.init = src_init,
	.next_interleave = src_next_interleave,
};

static int
src_rsc_init(struct src *src, u32 idx,
	     const struct src_desc *desc, struct src_mgr *mgr)
{
	int err;
	int i, n;
	struct src *p;

	n = (MEMRD == desc->mode) ? desc->multi : 1;
	for (i = 0, p = src; i < n; i++, p++) {
		err = rsc_init(&p->rsc, idx + i, SRC, desc->msr, mgr->mgr.hw);
		if (err)
			goto error1;

		/* Initialize src specific rsc operations */
		p->ops = &src_rsc_ops;
		p->multi = (0 == i) ? desc->multi : 1;
		p->mode = desc->mode;
		src_default_config[desc->mode](p);
		mgr->src_enable(mgr, p);
		p->intlv = p + 1;
	}
	(--p)->intlv = NULL;	/* Set @intlv of the last SRC to NULL */

	mgr->commit_write(mgr);

	return 0;

error1:
	for (i--, p--; i >= 0; i--, p--) {
		mgr->src_disable(mgr, p);
		rsc_uninit(&p->rsc);
	}
	mgr->commit_write(mgr);
	return err;
}

static int src_rsc_uninit(struct src *src, struct src_mgr *mgr)
{
	int i, n;
	struct src *p;

	n = (MEMRD == src->mode) ? src->multi : 1;
	for (i = 0, p = src; i < n; i++, p++) {
		mgr->src_disable(mgr, p);
		rsc_uninit(&p->rsc);
		p->multi = 0;
		p->ops = NULL;
		p->mode = NUM_SRCMODES;
		p->intlv = NULL;
	}
	mgr->commit_write(mgr);

	return 0;
}

static int
get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
{
	unsigned int idx = SRC_RESOURCE_NUM;
	int err;
	struct src *src;
	unsigned long flags;

	*rsrc = NULL;

	/* Check whether there are sufficient src resources to meet request. */
	spin_lock_irqsave(&mgr->mgr_lock, flags);
	if (MEMRD == desc->mode)
		err = mgr_get_resource(&mgr->mgr, desc->multi, &idx);
	else
		err = mgr_get_resource(&mgr->mgr, 1, &idx);

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	if (err) {
		dev_err(mgr->card->dev,
			"Can't meet SRC resource request!\n");
		return err;
	}

	/* Allocate mem for master src resource */
	if (MEMRD == desc->mode)
		src = kcalloc(desc->multi, sizeof(*src), GFP_KERNEL);
	else
		src = kzalloc(sizeof(*src), GFP_KERNEL);

	if (!src) {
		err = -ENOMEM;
		goto error1;
	}

	err = src_rsc_init(src, idx, desc, mgr);
	if (err)
		goto error2;

	*rsrc = src;

	return 0;

error2:
	kfree(src);
error1:
	spin_lock_irqsave(&mgr->mgr_lock, flags);
	if (MEMRD == desc->mode)
		mgr_put_resource(&mgr->mgr, desc->multi, idx);
	else
		mgr_put_resource(&mgr->mgr, 1, idx);

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	return err;
}

static int put_src_rsc(struct src_mgr *mgr, struct src *src)
{
	unsigned long flags;

	spin_lock_irqsave(&mgr->mgr_lock, flags);
	src->rsc.ops->master(&src->rsc);
	if (MEMRD == src->mode)
		mgr_put_resource(&mgr->mgr, src->multi,
				 src->rsc.ops->index(&src->rsc));
	else
		mgr_put_resource(&mgr->mgr, 1, src->rsc.ops->index(&src->rsc));

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	src_rsc_uninit(src, mgr);
	kfree(src);

	return 0;
}

static int src_enable_s(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_enbs_src(mgr->mgr.ctrl_blk,
				     src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_enable(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_enb_src(mgr->mgr.ctrl_blk,
				    src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_disable(struct src_mgr *mgr, struct src *src)
{
	struct hw *hw = mgr->mgr.hw;
	int i;

	src->rsc.ops->master(&src->rsc);
	for (i = 0; i < src->rsc.msr; i++) {
		hw->src_mgr_dsb_src(mgr->mgr.ctrl_blk,
				    src->rsc.ops->index(&src->rsc));
		src->rsc.ops->next_conj(&src->rsc);
	}
	src->rsc.ops->master(&src->rsc);

	return 0;
}

static int src_mgr_commit_write(struct src_mgr *mgr)
{
	struct hw *hw = mgr->mgr.hw;

	hw->src_mgr_commit_write(hw, mgr->mgr.ctrl_blk);

	return 0;
}

int src_mgr_create(struct hw *hw, struct src_mgr **rsrc_mgr)
{
	int err, i;
	struct src_mgr *src_mgr;

	*rsrc_mgr = NULL;
	src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL);
	if (!src_mgr)
		return -ENOMEM;

	err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw);
	if (err)
		goto error1;

	spin_lock_init(&src_mgr->mgr_lock);
	conj_mask = hw->src_dirty_conj_mask();

	src_mgr->get_src = get_src_rsc;
	src_mgr->put_src = put_src_rsc;
	src_mgr->src_enable_s = src_enable_s;
	src_mgr->src_enable = src_enable;
	src_mgr->src_disable = src_disable;
	src_mgr->commit_write = src_mgr_commit_write;
	src_mgr->card = hw->card;

	/* Disable all SRC resources. */
	for (i = 0; i < SRC_RESOURCE_NUM; i++)
		hw->src_mgr_dsb_src(src_mgr->mgr.ctrl_blk, i);

	hw->src_mgr_commit_write(hw, src_mgr->mgr.ctrl_blk);

	*rsrc_mgr = src_mgr;

	return 0;

error1:
	kfree(src_mgr);
	return err;
}

int src_mgr_destroy(struct src_mgr *src_mgr)
{
	rsc_mgr_uninit(&src_mgr->mgr);
	kfree(src_mgr);

	return 0;
}

/* SRCIMP resource manager operations */

static int srcimp_master(struct rsc *rsc)
{
	rsc->conj = 0;
	return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}

static int srcimp_next_conj(struct rsc *rsc)
{
	rsc->conj++;
	return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}

static int srcimp_index(const struct rsc *rsc)
{
	return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}

static const struct rsc_ops srcimp_basic_rsc_ops = {
	.master = srcimp_master,
	.next_conj = srcimp_next_conj,
	.index = srcimp_index,
	.output_slot = NULL,
};

static int srcimp_map(struct srcimp *srcimp, struct src *src, struct rsc *input)
{
	struct imapper *entry;
	int i;

	srcimp->rsc.ops->master(&srcimp->rsc);
	src->rsc.ops->master(&src->rsc);
	input->ops->master(input);

	/* Program master and conjugate resources */
	for (i = 0; i < srcimp->rsc.msr; i++) {
		entry = &srcimp->imappers[i];
		entry->slot = input->ops->output_slot(input);
		entry->user = src->rsc.ops->index(&src->rsc);
		entry->addr = srcimp->rsc.ops->index(&srcimp->rsc);
		srcimp->mgr->imap_add(srcimp->mgr, entry);
		srcimp->mapped |= (0x1 << i);

		srcimp->rsc.ops->next_conj(&srcimp->rsc);
		input->ops->next_conj(input);
	}

	srcimp->rsc.ops->master(&srcimp->rsc);
	input->ops->master(input);

	return 0;
}

static int srcimp_unmap(struct srcimp *srcimp)
{
	int i;

	/* Program master and conjugate resources */
	for (i = 0; i < srcimp->rsc.msr; i++) {
		if (srcimp->mapped & (0x1 << i)) {
			srcimp->mgr->imap_delete(srcimp->mgr,
						 &srcimp->imappers[i]);
			srcimp->mapped &= ~(0x1 << i);
		}
	}

	return 0;
}

static const struct srcimp_rsc_ops srcimp_ops = {
	.map = srcimp_map,
	.unmap = srcimp_unmap
};

static int srcimp_rsc_init(struct srcimp *srcimp,
			   const struct srcimp_desc *desc,
			   struct srcimp_mgr *mgr)
{
	int err;

	err = rsc_init(&srcimp->rsc, srcimp->idx[0],
		       SRCIMP, desc->msr, mgr->mgr.hw);
	if (err)
		return err;

	/* Reserve memory for imapper nodes */
	srcimp->imappers = kcalloc(desc->msr, sizeof(struct imapper),
				   GFP_KERNEL);
	if (!srcimp->imappers) {
		err = -ENOMEM;
		goto error1;
	}

	/* Set srcimp specific operations */
	srcimp->rsc.ops = &srcimp_basic_rsc_ops;
	srcimp->ops = &srcimp_ops;
	srcimp->mgr = mgr;

	srcimp->rsc.ops->master(&srcimp->rsc);

	return 0;

error1:
	rsc_uninit(&srcimp->rsc);
	return err;
}

static int srcimp_rsc_uninit(struct srcimp *srcimp)
{
	kfree(srcimp->imappers);
	srcimp->imappers = NULL;
	srcimp->ops = NULL;
	srcimp->mgr = NULL;
	rsc_uninit(&srcimp->rsc);

	return 0;
}

static int get_srcimp_rsc(struct srcimp_mgr *mgr,
			  const struct srcimp_desc *desc,
			  struct srcimp **rsrcimp)
{
	int err, i;
	unsigned int idx;
	struct srcimp *srcimp;
	unsigned long flags;

	*rsrcimp = NULL;

	/* Allocate mem for SRCIMP resource */
	srcimp = kzalloc(sizeof(*srcimp), GFP_KERNEL);
	if (!srcimp)
		return -ENOMEM;

	/* Check whether there are sufficient SRCIMP resources. */
	err = 0;
	spin_lock_irqsave(&mgr->mgr_lock, flags);
	for (i = 0; i < desc->msr; i++) {
		err = mgr_get_resource(&mgr->mgr, 1, &idx);
		if (err)
			break;

		srcimp->idx[i] = idx;
	}
	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	if (err) {
		dev_err(mgr->card->dev,
			"Can't meet SRCIMP resource request!\n");
		goto error1;
	}

	err = srcimp_rsc_init(srcimp, desc, mgr);
	if (err)
		goto error1;

	*rsrcimp = srcimp;

	return 0;

error1:
	spin_lock_irqsave(&mgr->mgr_lock, flags);
	for (i--; i >= 0; i--)
		mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	kfree(srcimp);
	return err;
}

static int put_srcimp_rsc(struct srcimp_mgr *mgr, struct srcimp *srcimp)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mgr->mgr_lock, flags);
	for (i = 0; i < srcimp->rsc.msr; i++)
		mgr_put_resource(&mgr->mgr, 1, srcimp->idx[i]);

	spin_unlock_irqrestore(&mgr->mgr_lock, flags);
	srcimp_rsc_uninit(srcimp);
	kfree(srcimp);

	return 0;
}

static int srcimp_map_op(void *data, struct imapper *entry)
{
	struct rsc_mgr *mgr = &((struct srcimp_mgr *)data)->mgr;
	struct hw *hw = mgr->hw;

	hw->srcimp_mgr_set_imaparc(mgr->ctrl_blk, entry->slot);
	hw->srcimp_mgr_set_imapuser(mgr->ctrl_blk, entry->user);
	hw->srcimp_mgr_set_imapnxt(mgr->ctrl_blk, entry->next);
	hw->srcimp_mgr_set_imapaddr(mgr->ctrl_blk, entry->addr);
	hw->srcimp_mgr_commit_write(mgr->hw, mgr->ctrl_blk);

	return 0;
}

static int srcimp_imap_add(struct srcimp_mgr *mgr, struct imapper *entry)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&mgr->imap_lock, flags);
	if ((0 == entry->addr) && (mgr->init_imap_added)) {
		input_mapper_delete(&mgr->imappers,
				    mgr->init_imap, srcimp_map_op, mgr);
		mgr->init_imap_added = 0;
	}
	err = input_mapper_add(&mgr->imappers, entry, srcimp_map_op, mgr);
	spin_unlock_irqrestore(&mgr->imap_lock, flags);

	return err;
}

static int srcimp_imap_delete(struct srcimp_mgr *mgr, struct imapper *entry)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&mgr->imap_lock, flags);
	err = input_mapper_delete(&mgr->imappers, entry, srcimp_map_op, mgr);
	if (list_empty(&mgr->imappers)) {
		input_mapper_add(&mgr->imappers, mgr->init_imap,
				 srcimp_map_op, mgr);
		mgr->init_imap_added = 1;
	}
	spin_unlock_irqrestore(&mgr->imap_lock, flags);

	return err;
}

int srcimp_mgr_create(struct hw *hw, struct srcimp_mgr **rsrcimp_mgr)
{
	int err;
	struct srcimp_mgr *srcimp_mgr;
	struct imapper *entry;

	*rsrcimp_mgr = NULL;
	srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL);
	if (!srcimp_mgr)
		return -ENOMEM;

	err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw);
	if (err)
		goto error1;

	spin_lock_init(&srcimp_mgr->mgr_lock);
	spin_lock_init(&srcimp_mgr->imap_lock);
	INIT_LIST_HEAD(&srcimp_mgr->imappers);
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto error2;
	}
	entry->slot = entry->addr = entry->next = entry->user = 0;
	list_add(&entry->list, &srcimp_mgr->imappers);
	srcimp_mgr->init_imap = entry;
	srcimp_mgr->init_imap_added = 1;

	srcimp_mgr->get_srcimp = get_srcimp_rsc;
	srcimp_mgr->put_srcimp = put_srcimp_rsc;
	srcimp_mgr->imap_add = srcimp_imap_add;
	srcimp_mgr->imap_delete = srcimp_imap_delete;
	srcimp_mgr->card = hw->card;

	*rsrcimp_mgr = srcimp_mgr;

	return 0;

error2:
	rsc_mgr_uninit(&srcimp_mgr->mgr);
error1:
	kfree(srcimp_mgr);
	return err;
}

int srcimp_mgr_destroy(struct srcimp_mgr *srcimp_mgr)
{
	unsigned long flags;

	/* free src input mapper list */
	spin_lock_irqsave(&srcimp_mgr->imap_lock, flags);
	free_input_mapper_list(&srcimp_mgr->imappers);
	spin_unlock_irqrestore(&srcimp_mgr->imap_lock, flags);

	rsc_mgr_uninit(&srcimp_mgr->mgr);
	kfree(srcimp_mgr);

	return 0;
}
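
/*
 * Usage sketch (illustrative only, not part of the driver): a client obtains
 * a SRC through the manager's get_src() hook and returns it with put_src().
 * The struct src_desc field names are taken from their usage in this file;
 * the values below are just an example.
 *
 *	struct src_desc desc = { .multi = 2, .msr = 1, .mode = MEMRD };
 *	struct src *src;
 *
 *	if (!src_mgr->get_src(src_mgr, &desc, &src)) {
 *		src->ops->set_pitch(src, 0x1000000); // default pitch, as above
 *		src->ops->commit_write(src);
 *		...
 *		src_mgr->put_src(src_mgr, src);
 *	}
 */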