/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef SYM_HIPD_H
#define SYM_HIPD_H

/*
 * Generic driver options.
 *
 * They may be defined in platform specific headers, if they
 * are useful.
 *
 *    SYM_OPT_HANDLE_DIR_UNKNOWN
 *        When this option is set, the SCRIPTS used by the driver
 *        are able to handle SCSI transfers with direction not
 *        supplied by the user.
 *        (set for Linux-2.0.X)
 *
 *    SYM_OPT_HANDLE_DEVICE_QUEUEING
 *        When this option is set, the driver will use a queue per
 *        device and handle QUEUE FULL status requeuing internally.
 *
 *    SYM_OPT_LIMIT_COMMAND_REORDERING
 *        When this option is set, the driver tries to limit tagged
 *        command reordering to some reasonable value.
 *        (set for Linux)
 */
#if 0
#define SYM_OPT_HANDLE_DIR_UNKNOWN
#define SYM_OPT_HANDLE_DEVICE_QUEUEING
#define SYM_OPT_LIMIT_COMMAND_REORDERING
#endif

/*
 * Active debugging tags and verbosity.
 * Both DEBUG_FLAGS and sym_verbose can be redefined
 * by the platform specific code to something else.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#ifndef DEBUG_FLAGS
#define DEBUG_FLAGS	(0x0000)
#endif

#ifndef sym_verbose
#define sym_verbose	(np->verbose)
#endif

/*
 * These should already have been defined.
 */
#ifndef assert
#define	assert(expression) { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
}
#endif

/*
 * Number of tasks per device we want to handle.
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)
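/*
 * Illustrative note (not from the original header): SIMPLE TAG messages
 * carry a one byte tag value, hence the 256-task ceiling checked above.
 * A tag order of 6, for instance, yields SYM_CONF_MAX_TASK = 64
 * concurrent tasks per logical unit.
 */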

/*
 * Do not use more tags than we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 * This one means 'NO TAG for this job'.
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns). Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC (40)

/*
 * Shortest memory chunk is (1<<SYM_MEM_SHIFT), currently 16.
 * Actual allocations happen as SYM_MEM_CLUSTER_SIZE sized.
 * (1 PAGE at a time is just fine).
 */
#define SYM_MEM_SHIFT	4
#define SYM_MEM_CLUSTER_SIZE	(1UL << SYM_MEM_CLUSTER_SHIFT)
#define SYM_MEM_CLUSTER_MASK	(SYM_MEM_CLUSTER_SIZE-1)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit to 1 PAGE so that the allocation of
 * these queues succeeds. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

#if	SYM_CONF_MAX_QUEUE > SYM_MEM_CLUSTER_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE (SYM_MEM_CLUSTER_SIZE/8)
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/*
 * For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 * Common definitions for both bus space based and legacy IO methods.
 */

#define INB_OFF(np, o)		ioread8(np->s.ioaddr + (o))
#define INW_OFF(np, o)		ioread16(np->s.ioaddr + (o))
#define INL_OFF(np, o)		ioread32(np->s.ioaddr + (o))

#define OUTB_OFF(np, o, val)	iowrite8((val), np->s.ioaddr + (o))
#define OUTW_OFF(np, o, val)	iowrite16((val), np->s.ioaddr + (o))
#define OUTL_OFF(np, o, val)	iowrite32((val), np->s.ioaddr + (o))

#define INB(np, r)	INB_OFF(np, offsetof(struct sym_reg, r))
#define INW(np, r)	INW_OFF(np, offsetof(struct sym_reg, r))
#define INL(np, r)	INL_OFF(np, offsetof(struct sym_reg, r))

#define OUTB(np, r, v)	OUTB_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTW(np, r, v)	OUTW_OFF(np, offsetof(struct sym_reg, r), (v))
#define OUTL(np, r, v)	OUTL_OFF(np, offsetof(struct sym_reg, r), (v))

#define OUTONB(np, r, m)	OUTB(np, r, INB(np, r) | (m))
#define OUTOFFB(np, r, m)	OUTB(np, r, INB(np, r) & ~(m))
#define OUTONW(np, r, m)	OUTW(np, r, INW(np, r) | (m))
#define OUTOFFW(np, r, m)	OUTW(np, r, INW(np, r) & ~(m))
#define OUTONL(np, r, m)	OUTL(np, r, INL(np, r) | (m))
#define OUTOFFL(np, r, m)	OUTL(np, r, INL(np, r) & ~(m))
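/*
 * Example (illustrative only): the read-modify-write helpers above are
 * plain macro expansions, so
 *
 *	OUTONB(np, nc_dcntl, STD);
 *
 * is exactly
 *
 *	OUTB(np, nc_dcntl, INB(np, nc_dcntl) | (STD));
 *
 * i.e. one register read followed by one register write.
 */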
/*
 * We normally want the chip to have a consistent view
 * of driver internal data structures when we restart it.
 * Thus these macros.
 */
#define OUTL_DSP(np, v)				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTL(np, nc_dsp, (v));		\
	} while (0)

#define OUTONB_STD()				\
	do {					\
		MEMORY_WRITE_BARRIER();		\
		OUTONB(np, nc_dcntl, (STD|NOCOM));	\
	} while (0)

/*
 * Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer	*/
#define HS_DISCONNECT	(3)	/* Disconnected by target	*/
#define HS_WAIT		(4)	/* waiting for resource		*/

#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout	*/
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	*/

/*
 * Software Interrupt Codes
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#define	SIR_DATA_OVERRUN	(21)
#define	SIR_BAD_PHASE		(22)
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	SIR_DMAP_DIRTY		(23)
#define	SIR_MAX			(23)
#else
#define	SIR_MAX			(22)
#endif

/*
 * Extended error bit codes.
 * xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 * Negotiation status.
 * nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 * A CCB hashed table is used to retrieve the CCB address
 * from a DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#if 1
#define CCB_HASH_CODE(dsa)	\
	(((dsa) >> (_LGRU16_(sizeof(struct sym_ccb)))) & CCB_HASH_MASK)
#else
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)
#endif

#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
/*
 * We may want to use segment registers for 64 bit DMA.
 * 16 segment registers -> up to 64 GB addressable.
 */
#define SYM_DMAP_SHIFT	(4)
#define SYM_DMAP_SIZE	(1u<<SYM_DMAP_SHIFT)
#define SYM_DMAP_MASK	(SYM_DMAP_SIZE-1)
#endif

/*
 * Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 * Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)

/*
 * Misc.
 */
#define SYM_SNOOP_TIMEOUT (10000000)
#define BUS_8_BIT	0
#define BUS_16_BIT	1

/*
 * Gathered negotiable parameter values.
 */
struct sym_trans {
	u8 period;
	u8 offset;
	unsigned int width:1;
	unsigned int iu:1;
	unsigned int dt:1;
	unsigned int qas:1;
	unsigned int check_nego:1;
};

/*
 * Global TCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the TCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table	*/
	u32	lun0_sa;	/* bus address of LCB #0	*/

	/*
	 * Actual SYNC/WIDE IO register values for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints.
	 */
	/*0*/	u_char	uval;	/* -> SCNTL4 register		*/
	/*1*/	u_char	sval;	/* -> SXFER  io register	*/
	/*2*/	u_char	filler1;
	/*3*/	u_char	wval;	/* -> SCNTL3 io register	*/
};

/*
 * Target Control Block
 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
	/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table	*/

	/*
	 * LUN table used by the C code.
	 */
	struct sym_lcb *lun0p;		/* LCB of LUN #0 (usual case)	*/
#if SYM_CONF_MAX_LUN > 1
	struct sym_lcb **lunmp;		/* Other LCBs [1..MAX_LUN]	*/
#endif

	/*
	 * Bitmap of LUNs that have successfully completed at least
	 * one IO and are therefore assumed to be real devices.
	 * This avoids useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Bitmap of LUNs that do not yet have an LCB allocated
	 * (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

#ifdef	SYM_HAVE_STCB
	/*
	 * O/S specific data structure.
	 */
	struct sym_stcb s;
#endif

	/* Transfer goal */
	struct sym_trans tgoal;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	struct sym_ccb *nego_cp;	/* CCB used for the nego	*/

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
	struct scsi_target *starget;
};
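/*
 * Illustrative only: the per-target bitmaps above use the usual
 * one-bit-per-LUN packing, i.e. LUN 'ln' maps to word ln / 32 and
 * bit ln % 32, so a test looks like:
 *
 *	if (tp->lun_map[ln / 32] & (1u << (ln % 32)))
 *		... this LUN has already completed at least one IO ...
 */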
/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For logical units not yet probed, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
	/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
	 */
	u32	itlq_tbl_sa;
};

/*
 * Logical Unit Control Block
 */
struct sym_lcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
	/*0*/	struct sym_lcbh head;

	/*
	 * Task table read from SCRIPTS that contains pointers to
	 * ITLQ nexuses. The bus address read from SCRIPTS is
	 * inside the header.
	 */
	u32	*itlq_tbl;	/* Kernel virtual address	*/

	/*
	 * Busy CCBs management.
	 */
	u_short	busy_itlq;	/* Number of busy tagged CCBs	*/
	u_short	busy_itl;	/* Number of busy untagged CCBs	*/

	/*
	 * Circular tag allocation buffer.
	 */
	u_short	ia_tag;		/* Tag allocation index		*/
	u_short	if_tag;		/* Tag release index		*/
	u_char	*cb_tags;	/* Circular tags buffer		*/

	/*
	 * O/S specific data structure.
	 */
#ifdef	SYM_HAVE_SLCB
	struct sym_slcb s;
#endif

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Optionally the driver can handle device queueing,
	 * and internally requeue commands that must be redone.
	 */
	SYM_QUEHEAD waiting_ccbq;
	SYM_QUEHEAD started_ccbq;
	int	num_sgood;
	u_short	started_tags;
	u_short	started_no_tag;
	u_short	started_max;
	u_short	started_limit;
#endif

#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	/*
	 * Optionally the driver can try to prevent SCSI
	 * IOs from being reordered too much.
	 */
	u_char	tags_si;	/* Current index to tags sum	*/
	u_short	tags_sum[2];	/* Tags sum counters		*/
	u_short	tags_since;	/* # of tags since last switch	*/
#endif

	/*
	 * Set when we want to clear all tasks.
	 */
	u_char	to_clear;

	/*
	 * Capabilities.
	 */
	u_char	user_flags;
	u_char	curr_flags;
};

/*
 * Action from SCRIPTS on a task.
 * Is part of the CCB, but is also used separately to plug
 * error handling actions to perform from SCRIPTS.
 */
struct sym_actscr {
	u32	start;		/* Jumped by SCRIPTS after selection	*/
	u32	restart;	/* Jumped by SCRIPTS on reselection	*/
};

/*
 * Phase mismatch context.
 *
 * It is part of the CCB and is used as parameters for the
 * DATA pointer. We need two contexts to handle correctly the
 * SAVED DATA POINTER.
 */
struct sym_pmc {
	struct	sym_tblmove sg;	/* Updated interrupted SG block	*/
	u32	ret;		/* SCRIPT return address	*/
};

/*
 * LUN control block lookup.
 * We use a direct pointer for LUN #0, and a table of
 * pointers which is only allocated for devices that support
 * LUN(s) > 0.
 */
#if SYM_CONF_MAX_LUN <= 1
#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
#else
#define sym_lp(tp, lun) \
	(!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
#endif
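/*
 * Illustrative only: a typical lookup from the fast path.  A NULL
 * result simply means the logical unit has no LCB (yet), e.g.
 *
 *	struct sym_lcb *lp = sym_lp(tp, lun);
 *	if (!lp)
 *		... LUN not yet probed or LCB allocation failed ...
 */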
/*
 * Status bytes are used by both the host and the script processor.
 *
 * The last four bytes (status[4]) are copied to the
 * scratchb register (declared as scr0..scr3) just after the
 * select/reselect, and copied back just after disconnecting.
 * Inside the script the XX_REG are used.
 */

/*
 * Last four bytes (script)
 */
#define  HX_REG	scr0
#define  HX_PRT	nc_scr0
#define  HS_REG	scr1
#define  HS_PRT	nc_scr1
#define  SS_REG	scr2
#define  SS_PRT	nc_scr2
#define  HF_REG	scr3
#define  HF_PRT	nc_scr3

/*
 * Last four bytes (host)
 */
#define  host_xflags   phys.head.status[0]
#define  host_status   phys.head.status[1]
#define  ssss_status   phys.head.status[2]
#define  host_flags    phys.head.status[3]

/*
 * Host flags
 */
#define HF_IN_PM0	1u
#define HF_IN_PM1	(1u<<1)
#define HF_ACT_PM	(1u<<2)
#define HF_DP_SAVED	(1u<<3)
#define HF_SENSE	(1u<<4)
#define HF_EXT_ERR	(1u<<5)
#define	HF_DATA_IN	(1u<<6)
#ifdef SYM_CONF_IARB_SUPPORT
#define HF_HINT_IARB	(1u<<7)
#endif

/*
 * More host flags
 */
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
#define	HX_DMAP_DIRTY	(1u<<7)
#endif

/*
 * Global CCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the ccb to a global
 * address after selection (or reselection) and copied back
 * before disconnect.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_ccbh {
	/*
	 * Start and restart SCRIPTS addresses (must be at 0).
	 */
	/*0*/	struct sym_actscr go;

	/*
	 * SCRIPTS jump addresses that deal with data pointers.
	 * 'savep' points to the position in the script responsible
	 * for the actual transfer of data.
	 * It's written on reception of a SAVE_DATA_POINTER message.
	 */
	u32	savep;		/* Jump address to saved data pointer	*/
	u32	lastp;		/* SCRIPTS address at end of data	*/
#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
	u32	wlastp;
#endif

	/*
	 * Status fields.
	 */
	u8	status[4];
};

/*
 * GET/SET the value of the data pointer used by SCRIPTS.
 *
 * We must distinguish between the LOAD/STORE-based SCRIPTS
 * that directly use the header in the CCB, and the NCR-GENERIC
 * SCRIPTS that use the copy of the header in the HCB.
 */
#if	SYM_CONF_GENERIC_SUPPORT
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		if (np->features & FE_LDSTR)			\
			cp->phys.head.lastp = cpu_to_scr(dp);	\
		else						\
			np->ccb_head.lastp = cpu_to_scr(dp);	\
	} while (0)
#define sym_get_script_dp(np, cp)				\
	scr_to_cpu((np->features & FE_LDSTR) ?			\
		cp->phys.head.lastp : np->ccb_head.lastp)
#else
#define sym_set_script_dp(np, cp, dp)				\
	do {							\
		cp->phys.head.lastp = cpu_to_scr(dp);		\
	} while (0)

#define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
#endif
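/*
 * Illustrative only: on a phase mismatch the C code recomputes the
 * current data pointer and hands it back to SCRIPTS through the
 * accessors above, roughly:
 *
 *	dp = sym_get_script_dp(np, cp);
 *	... adjust dp for the interrupted transfer ...
 *	sym_set_script_dp(np, cp, dp);
 */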
/*
 * Data Structure Block
 *
 * During execution of a ccb by the script processor, the
 * DSA (data structure address) register points to this
 * substructure of the ccb.
 */
struct sym_dsb {
	/*
	 * CCB header.
	 * Also assumed at offset 0 of the sym_ccb structure.
	 */
	/*0*/	struct sym_ccbh head;

	/*
	 * Phase mismatch contexts.
	 * We need two to handle correctly the SAVED DATA POINTER.
	 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 * for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 * Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data[SYM_CONF_MAX_SG];
};

/*
 * Our Command Control Block
 */
struct sym_ccb {
	/*
	 * This is the data structure which is pointed to by the DSA
	 * register when the CCB is executed by the script processor.
	 * It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 * Pointer to CAM ccb and related stuff.
	 */
	struct scsi_cmnd *cmd;	/* CAM scsiio ccb		*/
	u8	cdb_buf[16];	/* Copy of CDB			*/
#define	SYM_SNS_BBUF_LEN 32
	u8	sns_bbuf[SYM_SNS_BBUF_LEN]; /* Bounce buffer for sense data */
	int	data_len;	/* Total data length		*/
	int	segments;	/* Number of SG segments	*/

	u8	order;		/* Tag type (if tagged command)	*/
	unsigned char odd_byte_adjustment;	/* odd-sized req on wide bus */

	u_char	nego_status;	/* Negotiation status		*/
	u_char	xerr_status;	/* Extended error flags		*/
	u32	extra_bytes;	/* Extraneous bytes transferred	*/

	/*
	 * Message areas.
	 * We prepare a message to be sent after selection.
	 * We may use a second one if the command is rescheduled
	 * due to CHECK_CONDITION or COMMAND TERMINATED.
	 * Contents are IDENTIFY and SIMPLE_TAG.
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command	*/
	u_char	sv_scsi_status;	/* Saved SCSI status		*/
	u_char	sv_xerr_status;	/* Saved extended status	*/
	int	sv_resid;	/* Saved residual		*/

	/*
	 * Other fields.
	 */
	u32	ccb_ba;		/* BUS address of this CCB	*/
	u_short	tag;		/* Tag for this transfer	*/
				/*       NO_TAG means no tag	*/
	u_char	target;
	u_char	lun;
	struct sym_ccb *link_ccbh;	/* Host adapter CCB hash chain	*/
	SYM_QUEHEAD link_ccbq;	/* Link to free/busy CCB queue	*/
	u32	startp;		/* Initial data pointer		*/
	u32	goalp;		/* Expected last data pointer	*/
#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
	u32	wgoalp;
#endif
	int	ext_sg;		/* Extreme data pointer, used	*/
	int	ext_ofs;	/*  to calculate the residual.	*/
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD link2_ccbq;	/* Link for device queueing	*/
	u_char	started;	/* CCB queued to the squeue	*/
#endif
	u_char	to_abort;	/* Want this IO to be aborted	*/
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
	u_char	tags_si;	/* Lun tags sum index (0,1)	*/
#endif
};

#define CCB_BA(cp,lbl)	cpu_to_scr(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
#define	sym_goalp(cp) ((cp->host_flags & HF_DATA_IN) ? cp->goalp : cp->wgoalp)
#else
#define	sym_goalp(cp) (cp->goalp)
#endif
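/*
 * Illustrative only: CCB_BA() is how the C code builds SCRIPTS-visible
 * pointers into a CCB.  For instance,
 *
 *	CCB_BA(cp, phys.sense)
 *
 * yields the bus address of this CCB's sense move table entry,
 * already byte-swapped for the SCRIPTS processor.
 */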
typedef struct device *m_pool_ident_t;

/*
 * Host Control Block
 */
struct sym_hcb {
	/*
	 * Global headers.
	 * Due to the limited addressing capabilities of the earlier
	 * chips (810, 815, 825), part of the data structures
	 * (CCB, TCB and LCB) is copied into fixed areas.
	 */
#if	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	u32 idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against a target
	 * returning a bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address	*/
	u32	badlun_sa;	/* SCRIPT handler BUS address	*/

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bits 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver. They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data.
	 */
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 * DMA pool handle for this HBA.
	 */
	m_pool_ident_t	bus_dmat;

	/*
	 * O/S specific data structure
	 */
	struct sym_shcb s;

	/*
	 * Physical bus addresses of the chip.
	 */
	u32		mmio_ba;	/* MMIO 32 bit BUS address	*/
	int		mmio_ws;	/* MMIO Window size		*/

	u32		ram_ba;		/* RAM 32 bit BUS address	*/
	int		ram_ws;		/* RAM window size		*/

	/*
	 * SCRIPTS virtual and physical bus addresses.
	 * 'script'  is loaded in the on-chip RAM if present.
	 * 'scripth' stays in main memory for all chips except the
	 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char		*scripta0;	/* Copy of scripts A, B, Z	*/
	u_char		*scriptb0;
	u_char		*scriptz0;
	u32		scripta_ba;	/* Actual scripts A, B, Z	*/
	u32		scriptb_ba;	/*  32 bit bus addresses.	*/
	u32		scriptz_ba;
	u_short		scripta_sz;	/* Actual size of script A, B, Z*/
	u_short		scriptb_sz;
	u_short		scriptz_sz;

	/*
	 * Bus addresses, setup and patch methods for
	 * the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses	*/
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses	*/
	struct sym_fwz_ba fwz_bas;	/* Useful SCRIPTZ bus addresses	*/
	void		(*fw_setup)(struct sym_hcb *np, struct sym_fw *fw);
	void		(*fw_patch)(struct sym_hcb *np);
	char		*fw_name;

	/*
	 * General controller parameters and configuration.
	 */
	u_short	device_id;	/* PCI device id		*/
	u_char	revision_id;	/* PCI device revision id	*/
	u_int	features;	/* Chip features map		*/
	u_char	myaddr;		/* SCSI id of the adapter	*/
	u_char	maxburst;	/* log base 2 of dwords burst	*/
	u_char	maxwide;	/* Maximum transfer width	*/
	u_char	minsync;	/* Min sync period factor (ST)	*/
	u_char	maxsync;	/* Max sync period factor (ST)	*/
	u_char	maxoffs;	/* Max scsi offset        (ST)	*/
	u_char	minsync_dt;	/* Min sync period factor (DT)	*/
	u_char	maxsync_dt;	/* Max sync period factor (DT)	*/
	u_char	maxoffs_dt;	/* Max scsi offset        (DT)	*/
	u_char	multiplier;	/* Clock multiplier (1,2,4)	*/
	u_char	clock_divn;	/* Number of clock divisors	*/
	u32	clock_khz;	/* SCSI clock frequency in KHz	*/
	u32	pciclk_khz;	/* Estimated PCI clock in KHz	*/
	/*
	 * Start queue management.
	 * It is filled up by the host processor and accessed by the
	 * SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations	*/
	u32	*squeue;	/* Start queue virtual address	*/
	u32	squeue_ba;	/* Start queue BUS address	*/
	u_short	squeueput;	/* Next free slot of the queue	*/
	u_short	actccbs;	/* Number of allocated CCBs	*/

	/*
	 * Command completion queue.
	 * It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan	*/
	volatile		/* Prevent code optimizations	*/
	u32	*dqueue;	/* Completion (done) queue	*/
	u32	dqueue_ba;	/* Done queue BUS address	*/

	/*
	 * Miscellaneous buffers accessed by the scripts processor.
	 * They shall be DWORD aligned, because they may be read or
	 * written with a script command.
	 */
	u_char	msgout[8];	/* Buffer for MESSAGE OUT	*/
	u_char	msgin [8];	/* Buffer for MESSAGE IN	*/
	u32	lastmsg;	/* Last SCSI message sent	*/
	u32	scratch;	/* Scratch for SCSI receive	*/
				/* Also used for cache test	*/
	/*
	 * Miscellaneous configuration and status parameters.
	 */
	u_char	usrflags;	/* Miscellaneous user flags	*/
	u_char	scsi_mode;	/* Current SCSI BUS mode	*/
	u_char	verbose;	/* Verbosity for this controller*/

	/*
	 * CCB lists and queue.
	 */
	struct sym_ccb **ccbh;	/* CCBs hashed by DSA value	*/
				/* CCB_HASH_SIZE lists of CCBs	*/
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs	*/
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs		*/

	/*
	 * During error handling and/or recovery,
	 * active CCBs that are to be completed with
	 * error or requeued are moved from the busy_ccbq
	 * to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	SYM_QUEHEAD	dummy_ccbq;
#endif

	/*
	 * IMMEDIATE ARBITRATION (IARB) control.
	 *
	 * We keep track in 'last_cp' of the last CCB that has been
	 * queued to the SCRIPTS processor and clear 'last_cp' when
	 * this CCB completes. If last_cp is not zero at the moment
	 * we queue a new CCB, we set a flag in 'last_cp' that is
	 * used by the SCRIPTS as a hint for setting IARB.
	 * We do not set more than 'iarb_max' consecutive hints for
	 * IARB in order to leave devices a chance to reselect.
	 * By the way, any nonzero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
	u_short		iarb_count;	/* Actual # of these hints	*/
	struct sym_ccb *last_cp;
#endif

	/*
	 * Command abort handling.
	 * We need to synchronize tightly with the SCRIPTS
	 * processor in order to handle things correctly.
	 */
	u_char		abrt_msg[4];	/* Buffer for the message to send */
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it	*/
	struct sym_tblsel  abrt_sel;	/* Sync params for selection	*/
	u_char		istat_sem;	/* Tells the chip to stop (SEM)	*/

	/*
	 * 64 bit DMA handling.
	 */
#if	SYM_CONF_DMA_ADDRESSING_MODE != 0
	u_char	use_dac;		/* Use PCI DAC cycles		*/
#if	SYM_CONF_DMA_ADDRESSING_MODE == 2
	u_char	dmap_dirty;		/* Dma segment registers dirty	*/
	u32	dmap_bah[SYM_DMAP_SIZE];/* Segment registers map	*/
#endif
#endif
};

#define HCB_BA(np, lbl)	(np->hcb_ba + offsetof(struct sym_hcb, lbl))


/*
 * FIRMWARES (sym_fw.c)
 */
struct sym_fw * sym_find_firmware(struct sym_chip *chip);
void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len);

/*
 * Driver methods called from O/S specific code.
 */
char *sym_driver_name(void);
void sym_print_xerr(struct scsi_cmnd *cmd, int x_status);
int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int);
struct sym_chip *sym_lookup_chip_table(u_short device_id, u_char revision);
void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp);
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn);
#endif
void sym_start_up(struct sym_hcb *np, int reason);
void sym_interrupt(struct sym_hcb *np);
int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task);
struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
int sym_reset_scsi_target(struct sym_hcb *np, int target);
void sym_hcb_free(struct sym_hcb *np);
int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram);
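/*
 * Illustrative only: the DSA -> CCB hash declared above (CCB_HASH_CODE,
 * the np->ccbh[] table and the link_ccbh chain) is typically consumed
 * like this when a DSA value is read back from the chip; the real
 * lookup lives in the core C file, not in this header:
 *
 *	struct sym_ccb *cp = np->ccbh[CCB_HASH_CODE(dsa)];
 *	while (cp && cp->ccb_ba != dsa)
 *		cp = cp->link_ccbh;
 */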
/*
 * Build a scatter/gather entry.
 *
 * For 64 bit systems, we use the 8 upper bits of the size field
 * to provide bus address bits 32-39 to the SCRIPTS processor.
 * This allows the 895A, 896, 1010 to address up to 1 TB of memory.
 */

#if   SYM_CONF_DMA_ADDRESSING_MODE == 0
#define sym_build_sge(np, data, badd, len)	\
do {						\
	(data)->addr = cpu_to_scr(badd);	\
	(data)->size = cpu_to_scr(len);		\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 1
#define sym_build_sge(np, data, badd, len)				\
do {									\
	(data)->addr = cpu_to_scr(badd);				\
	(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len);	\
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static __inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
{
	u32 h = (badd>>32);
	int s = (h&SYM_DMAP_MASK);

	if (h != np->dmap_bah[s])
		goto bad;
good:
	(data)->addr = cpu_to_scr(badd);
	(data)->size = cpu_to_scr((s<<24) + len);
	return;
bad:
	s = sym_lookup_dmap(np, h, s);
	goto good;
}
#else
#error "Unsupported DMA addressing mode"
#endif

/*
 * Set up data pointers used by SCRIPTS.
 * Called from O/S specific code.
 */
static inline void sym_setup_data_pointers(struct sym_hcb *np,
		struct sym_ccb *cp, int dir)
{
	u32 lastp, goalp;

	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = DMA_NONE;

	/*
	 * Set the data pointer.
	 */
	switch(dir) {
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
	case DMA_BIDIRECTIONAL:
#endif
	case DMA_TO_DEVICE:
		goalp = SCRIPTA_BA(np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
		cp->wgoalp = cpu_to_scr(goalp);
		if (dir != DMA_BIDIRECTIONAL)
			break;
		cp->phys.head.wlastp = cpu_to_scr(lastp);
		/* fall through */
#else
		break;
#endif
	case DMA_FROM_DEVICE:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA(np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case DMA_NONE:
	default:
#ifdef	SYM_OPT_HANDLE_DIR_UNKNOWN
		cp->host_flags |= HF_DATA_IN;
#endif
		lastp = goalp = SCRIPTB_BA(np, no_data);
		break;
	}

	/*
	 * Set all pointer values needed by SCRIPTS.
	 */
	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
	cp->goalp	    = cpu_to_scr(goalp);

#ifdef SYM_OPT_HANDLE_DIR_UNKNOWN
	/*
	 * If direction is unknown, start at data_io.
	 */
	if (dir == DMA_BIDIRECTIONAL)
		cp->phys.head.savep = cpu_to_scr(SCRIPTB_BA(np, data_io));
#endif
}

/*
 * MEMORY ALLOCATOR.
 */

#define SYM_MEM_PAGE_ORDER 0	/* 1 PAGE maximum */
#define SYM_MEM_CLUSTER_SHIFT	(PAGE_SHIFT+SYM_MEM_PAGE_ORDER)
#define SYM_MEM_FREE_UNUSED	/* Free unused pages immediately */

#define SYM_MEM_WARN	1	/* Warn on failed operations */

#define sym_get_mem_cluster()	\
	(void *) __get_free_pages(GFP_ATOMIC, SYM_MEM_PAGE_ORDER)
#define sym_free_mem_cluster(p)	\
	free_pages((unsigned long)p, SYM_MEM_PAGE_ORDER)

/*
 * Link between free memory chunks of a given size.
 */
typedef struct sym_m_link {
	struct sym_m_link *next;
} *m_link_p;
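/*
 * Illustrative note: the allocator keeps one free list per power-of-two
 * chunk size between (1 << SYM_MEM_SHIFT) bytes and one cluster, which
 * is why struct sym_m_pool below has
 * SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1 list heads.  With 4 KB
 * pages, for instance, that means lists for 16, 32, ... 4096 byte chunks.
 */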
/*
 * Virtual to bus physical translation for a given cluster.
 * Such a structure is only useful with DMA abstraction.
 */
typedef struct sym_m_vtob {	/* Virtual to Bus address translation */
	struct sym_m_vtob *next;
	void *vaddr;		/* Virtual address */
	dma_addr_t baddr;	/* Bus physical address */
} *m_vtob_p;

/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((unsigned long)(m)) >> SYM_MEM_CLUSTER_SHIFT) & VTOB_HASH_MASK)

/*
 * Memory pool of a given kind.
 * Ideally, we want to use:
 * 1) 1 pool for memory we do not need to involve in DMA.
 * 2) The same pool for controllers that require the same DMA
 *    constraints and features.
 * The OS specific m_pool_ident_t and the sym_m_pool_match()
 * method are expected to tell the driver about this.
 */
typedef struct sym_m_pool {
	m_pool_ident_t	dev_dmat;	/* Identifies the pool (see above) */
	void * (*get_mem_cluster)(struct sym_m_pool *);
#ifdef	SYM_MEM_FREE_UNUSED
	void (*free_mem_cluster)(struct sym_m_pool *, void *);
#endif
#define M_GET_MEM_CLUSTER()		mp->get_mem_cluster(mp)
#define M_FREE_MEM_CLUSTER(p)		mp->free_mem_cluster(mp, p)
	int nump;
	m_vtob_p vtob[VTOB_HASH_SIZE];
	struct sym_m_pool *next;
	struct sym_m_link h[SYM_MEM_CLUSTER_SHIFT - SYM_MEM_SHIFT + 1];
} *m_pool_p;

/*
 * Alloc, free and translate addresses to bus physical
 * for DMAable memory.
 */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name);
void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name);
dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m);

/*
 * Verbs used by the driver code for DMAable memory handling.
 * The _uvptv_ macro avoids a nasty warning about a pointer to volatile
 * being discarded.
 */
#define _uvptv_(p) ((void *)((u_long)(p)))

#define _sym_calloc_dma(np, l, n)	__sym_calloc_dma(np->bus_dmat, l, n)
#define _sym_mfree_dma(np, p, l, n)	\
			__sym_mfree_dma(np->bus_dmat, _uvptv_(p), l, n)
#define sym_calloc_dma(l, n)		_sym_calloc_dma(np, l, n)
#define sym_mfree_dma(p, l, n)		_sym_mfree_dma(np, p, l, n)
#define vtobus(p)			__vtobus(np->bus_dmat, _uvptv_(p))

/*
 * We have to provide the driver memory allocator with methods for
 * it to maintain virtual to bus physical address translations.
 */

#define sym_m_pool_match(mp_id1, mp_id2)	(mp_id1 == mp_id2)

static __inline void *sym_m_get_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	void *vaddr = NULL;
	dma_addr_t baddr = 0;

	vaddr = dma_alloc_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, &baddr,
			GFP_ATOMIC);
	if (vaddr) {
		vbp->vaddr = vaddr;
		vbp->baddr = baddr;
	}
	return vaddr;
}

static __inline void sym_m_free_dma_mem_cluster(m_pool_p mp, m_vtob_p vbp)
{
	dma_free_coherent(mp->dev_dmat, SYM_MEM_CLUSTER_SIZE, vbp->vaddr,
			vbp->baddr);
}

#endif /* SYM_HIPD_H */