/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <hpi_rxdma.h>
#include <hxge_common.h>

#define	RXDMA_RESET_TRY_COUNT	5
#define	RXDMA_RESET_DELAY	5

#define	RXDMA_OP_DISABLE	0
#define	RXDMA_OP_ENABLE		1
#define	RXDMA_OP_RESET		2

#define	RCR_TIMEOUT_ENABLE	1
#define	RCR_TIMEOUT_DISABLE	2
#define	RCR_THRESHOLD		4

hpi_status_t
hpi_rxdma_cfg_logical_page_handle(hpi_handle_t handle, uint8_t rdc,
    uint64_t page_handle)
{
	rdc_page_handle_t page_hdl;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_logical_page_handle"
		    " Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	page_hdl.value = 0;
	page_hdl.bits.handle = (uint32_t)page_handle;

	RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_hdl.value);

	return (HPI_SUCCESS);
}
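
/*
 * Usage sketch (illustrative only, not compiled): judging from the
 * page-handle computation in hpi_rxdma_cfg_rdc_ring() below, the
 * handle appears to carry the upper 20 bits (63:44) of the ring DMA
 * address, so a caller programming it directly would typically do
 *
 *	(void) hpi_rxdma_cfg_logical_page_handle(handle, rdc,
 *	    (ring_iova >> 44) & 0xfffff);
 *
 * where ring_iova is a hypothetical 64-bit DMA address.  This reading
 * is inferred from this file only, not from the Hydra documentation.
 */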

/* RX DMA functions */
static hpi_status_t
hpi_rxdma_cfg_rdc_ctl(hpi_handle_t handle, uint8_t rdc, uint8_t op)
{
	rdc_rx_cfg1_t cfg;
	uint32_t count = RXDMA_RESET_TRY_COUNT;
	uint32_t delay_time = RXDMA_RESET_DELAY;
	uint32_t error = HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RESET_ERR, rdc);

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_ctl Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	switch (op) {
	case RXDMA_OP_ENABLE:
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		cfg.bits.enable = 1;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);

		HXGE_DELAY(delay_time);

		break;
	case RXDMA_OP_DISABLE:
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		cfg.bits.enable = 0;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);

		HXGE_DELAY(delay_time);
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);

		/* Poll until the channel reports quiescent (qst) */
		while ((count--) && (cfg.bits.qst == 0)) {
			HXGE_DELAY(delay_time);
			RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		}
		if (cfg.bits.qst == 0) {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " hpi_rxdma_cfg_rdc_ctl"
			    " RXDMA_OP_DISABLE Failed for RDC %d \n",
			    rdc));
			return (error);
		}

		break;
	case RXDMA_OP_RESET:
		cfg.value = 0;
		cfg.bits.reset = 1;
		RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg.value);
		HXGE_DELAY(delay_time);
		RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);

		while ((count--) && (cfg.bits.qst == 0)) {
			HXGE_DELAY(delay_time);
			RXDMA_REG_READ64(handle, RDC_RX_CFG1, rdc, &cfg.value);
		}
		/*
		 * Test qst, as in the disable case above; the retry
		 * counter cannot be tested against zero here because the
		 * post-decrement leaves it wrapped when retries run out.
		 */
		if (cfg.bits.qst == 0) {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " hpi_rxdma_cfg_rdc_ctl"
			    " Reset Failed for RDC %d \n", rdc));
			return (error);
		}

		break;
	default:
		return (HPI_RXDMA_SW_PARAM_ERROR);
	}

	return (HPI_SUCCESS);
}

hpi_status_t
hpi_rxdma_cfg_rdc_enable(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
}

hpi_status_t
hpi_rxdma_cfg_rdc_disable(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
}

hpi_status_t
hpi_rxdma_cfg_rdc_reset(hpi_handle_t handle, uint8_t rdc)
{
	return (hpi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
}

static hpi_status_t
hpi_rxdma_cfg_rdc_rcr_ctl(hpi_handle_t handle, uint8_t rdc,
    uint8_t op, uint16_t param)
{
	rdc_rcr_cfg_b_t rcr_cfgb;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_rcr_ctl Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RCR_CFG_B, rdc, &rcr_cfgb.value);

	switch (op) {
	case RCR_TIMEOUT_ENABLE:
		rcr_cfgb.bits.timeout = (uint8_t)param;
		rcr_cfgb.bits.entout = 1;
		break;

	case RCR_THRESHOLD:
		rcr_cfgb.bits.pthres = param;
		break;

	case RCR_TIMEOUT_DISABLE:
		rcr_cfgb.bits.entout = 0;
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_rcr_ctl Illegal opcode %x \n", op));
		return (HPI_RXDMA_OPCODE_INVALID(rdc));
	}

	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);
	return (HPI_SUCCESS);
}

hpi_status_t
hpi_rxdma_cfg_rdc_rcr_threshold(hpi_handle_t handle, uint8_t rdc,
    uint16_t rcr_threshold)
{
	return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
	    RCR_THRESHOLD, rcr_threshold));
}

hpi_status_t
hpi_rxdma_cfg_rdc_rcr_timeout(hpi_handle_t handle, uint8_t rdc,
    uint8_t rcr_timeout)
{
	return (hpi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
	    RCR_TIMEOUT_ENABLE, rcr_timeout));
}
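
/*
 * Usage sketch (illustrative only, not compiled): the two wrappers
 * above adjust the RCR packet-count threshold and timeout, i.e. the
 * receive interrupt pacing of an already configured channel.  A caller
 * tuning these at runtime might do
 *
 *	(void) hpi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, 8);
 *	(void) hpi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, 16);
 *
 * The values 8 and 16 are placeholders.  Note that neither wrapper
 * validates its argument; the caller is assumed to stay within the
 * ranges checked by RXDMA_RCR_THRESH_VALID()/RXDMA_RCR_TO_VALID() in
 * hpi_rxdma_cfg_rdc_ring() below.
 */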

/*
 * Configure the RDC channel receive buffer ring (RBR) and
 * receive completion ring (RCR).
 */
hpi_status_t
hpi_rxdma_cfg_rdc_ring(hpi_handle_t handle, uint8_t rdc,
    rdc_desc_cfg_t *rdc_desc_cfg)
{
	rdc_rbr_cfg_a_t cfga;
	rdc_rbr_cfg_b_t cfgb;
	rdc_rx_cfg1_t cfg1;
	rdc_rx_cfg2_t cfg2;
	rdc_rcr_cfg_a_t rcr_cfga;
	rdc_rcr_cfg_b_t rcr_cfgb;
	rdc_page_handle_t page_handle;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_ring Illegal RDC number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	cfga.value = 0;
	cfgb.value = 0;
	cfg1.value = 0;
	cfg2.value = 0;
	page_handle.value = 0;

	if (rdc_desc_cfg->mbox_enable == 1) {
		cfg1.bits.mbaddr_h = (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
		cfg2.bits.mbaddr_l = ((rdc_desc_cfg->mbox_addr &
		    RXDMA_CFIG2_MBADDR_L_MASK) >> RXDMA_CFIG2_MBADDR_L_SHIFT);

		/*
		 * Enable the RDC only after all the configurations are
		 * set; otherwise a configuration fatal error will be
		 * returned (especially if the Hypervisor set up the
		 * logical pages with non-zero values).  This HPI function
		 * only sets up the configuration; call the enable
		 * function to enable the RDMC.
		 */
	}

	if (rdc_desc_cfg->full_hdr == 1)
		cfg2.bits.full_hdr = 1;

	if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
		cfg2.bits.offset = rdc_desc_cfg->offset;
	} else {
		cfg2.bits.offset = SW_OFFSET_NO_OFFSET;
	}

	/* rbr config */
	cfga.value = (rdc_desc_cfg->rbr_addr &
	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));

	/* The remaining 20 bits in the DMA address form the handle */
	page_handle.bits.handle = (rdc_desc_cfg->rbr_addr >> 44) & 0xfffff;

	/*
	 * The RBR ring size must be a multiple of 64.
	 */
	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
	    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN) ||
	    (rdc_desc_cfg->rbr_len % 64)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_ring Illegal RBR Queue Length %d \n",
		    rdc_desc_cfg->rbr_len));
		return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RBRSZIE_INVALID, rdc));
	}

	/*
	 * The lower 6 bits are hardcoded to 0 and the higher 10 bits are
	 * stored in len.
	 */
	cfga.bits.len = rdc_desc_cfg->rbr_len >> 6;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    "hpi_rxdma_cfg_rdc_ring CFGA 0x%llx len %d (RBR LEN %d)\n",
	    cfga.value, cfga.bits.len, rdc_desc_cfg->rbr_len));

	/*
	 * bksize is 1 bit.
	 * Buffer Block Size. b0 - 4K; b1 - 8K.
	 */
	if (rdc_desc_cfg->page_size == SIZE_4KB)
		cfgb.bits.bksize = RBR_BKSIZE_4K;
	else if (rdc_desc_cfg->page_size == SIZE_8KB)
		cfgb.bits.bksize = RBR_BKSIZE_8K;
	else {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "rxdma_cfg_rdc_ring blksize: Illegal buffer size %d \n",
		    rdc_desc_cfg->page_size));
		return (HPI_RXDMA_BUFSZIE_INVALID);
	}

	/*
	 * Size 0 of packet buffer. b00 - 256; b01 - 512; b10 - 1K; b11 - resvd.
	 */
	if (rdc_desc_cfg->valid0) {
		if (rdc_desc_cfg->size0 == SIZE_256B)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_256B;
		else if (rdc_desc_cfg->size0 == SIZE_512B)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_512B;
		else if (rdc_desc_cfg->size0 == SIZE_1KB)
			cfgb.bits.bufsz0 = RBR_BUFSZ0_1K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize0: Illegal buffer size %x \n",
			    rdc_desc_cfg->size0));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld0 = 1;
	} else {
		cfgb.bits.vld0 = 0;
	}

	/*
	 * Size 1 of packet buffer. b0 - 1K; b1 - 2K.
	 */
	if (rdc_desc_cfg->valid1) {
		if (rdc_desc_cfg->size1 == SIZE_1KB)
			cfgb.bits.bufsz1 = RBR_BUFSZ1_1K;
		else if (rdc_desc_cfg->size1 == SIZE_2KB)
			cfgb.bits.bufsz1 = RBR_BUFSZ1_2K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize1: Illegal buffer size %x \n",
			    rdc_desc_cfg->size1));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld1 = 1;
	} else {
		cfgb.bits.vld1 = 0;
	}

	/*
	 * Size 2 of packet buffer. b0 - 2K; b1 - 4K.
	 */
	if (rdc_desc_cfg->valid2) {
		if (rdc_desc_cfg->size2 == SIZE_2KB)
			cfgb.bits.bufsz2 = RBR_BUFSZ2_2K;
		else if (rdc_desc_cfg->size2 == SIZE_4KB)
			cfgb.bits.bufsz2 = RBR_BUFSZ2_4K;
		else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " blksize2: Illegal buffer size %x \n",
			    rdc_desc_cfg->size2));
			return (HPI_RXDMA_BUFSZIE_INVALID);
		}
		cfgb.bits.vld2 = 1;
	} else {
		cfgb.bits.vld2 = 0;
	}

	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));

	/*
	 * The RCR ring length must be a multiple of 32.
	 */
	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
	    (rdc_desc_cfg->rcr_len > HXGE_RCR_MAX) ||
	    (rdc_desc_cfg->rcr_len % 32)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_cfg_rdc_ring Illegal RCR Queue Length %d \n",
		    rdc_desc_cfg->rcr_len));
		return (HPI_RXDMA_ERROR_ENCODE(HPI_RXDMA_RCRSZIE_INVALID, rdc));
	}

	/*
	 * Bits 15:5 of the maximum number of 8B entries in RCR.  Bits 4:0
	 * are hard-coded to zero.  The maximum size is 2^16 - 32.
	 */
	rcr_cfga.bits.len = rdc_desc_cfg->rcr_len >> 5;

	rcr_cfgb.value = 0;
	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
		/* check if the rcr timeout value is valid */
		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
			rcr_cfgb.bits.timeout = rdc_desc_cfg->rcr_timeout;
			rcr_cfgb.bits.entout = 1;
		} else {
			HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
			    " rxdma_cfg_rdc_ring"
			    " Illegal RCR Timeout value %d \n",
			    rdc_desc_cfg->rcr_timeout));
			rcr_cfgb.bits.entout = 0;
		}
	} else {
		rcr_cfgb.bits.entout = 0;
	}

	/* check if the rcr threshold value is valid */
	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
		rcr_cfgb.bits.pthres = rdc_desc_cfg->rcr_threshold;
	} else {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_cfg_rdc_ring Illegal RCR Threshold value %d \n",
		    rdc_desc_cfg->rcr_threshold));
		rcr_cfgb.bits.pthres = 1;
	}

	/* now do the actual HW configuration */
	RXDMA_REG_WRITE64(handle, RDC_RX_CFG1, rdc, cfg1.value);
	RXDMA_REG_WRITE64(handle, RDC_RX_CFG2, rdc, cfg2.value);

	RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_A, rdc, cfga.value);
	RXDMA_REG_WRITE64(handle, RDC_RBR_CFG_B, rdc, cfgb.value);

	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_A, rdc, rcr_cfga.value);
	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, rdc, rcr_cfgb.value);

	RXDMA_REG_WRITE64(handle, RDC_PAGE_HANDLE, rdc, page_handle.value);

	return (HPI_SUCCESS);
}
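
/*
 * Configuration sketch (illustrative only, not compiled): the caller
 * fills in an rdc_desc_cfg_t, programs the rings with
 * hpi_rxdma_cfg_rdc_ring(), and only then enables the channel, e.g.
 *
 *	rdc_desc_cfg_t cfg;
 *
 *	bzero(&cfg, sizeof (cfg));
 *	cfg.rbr_addr = rbr_iova;		hypothetical DMA address
 *	cfg.rbr_len = 256;			multiple of 64
 *	cfg.page_size = SIZE_4KB;
 *	cfg.valid0 = 1;
 *	cfg.size0 = SIZE_256B;
 *	cfg.rcr_addr = rcr_iova;		hypothetical DMA address
 *	cfg.rcr_len = 512;			multiple of 32
 *	cfg.rcr_threshold = 8;			placeholder value
 *	cfg.rcr_timeout_enable = 1;
 *	cfg.rcr_timeout = 16;			placeholder value
 *
 *	if (hpi_rxdma_cfg_rdc_ring(handle, rdc, &cfg) == HPI_SUCCESS)
 *		(void) hpi_rxdma_cfg_rdc_enable(handle, rdc);
 *
 * The field names are the ones consumed above; the numeric values are
 * examples only and must satisfy the length, alignment and range
 * checks performed by hpi_rxdma_cfg_rdc_ring().
 */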

hpi_status_t
hpi_rxdma_ring_perr_stat_get(hpi_handle_t handle,
    rdc_pref_par_log_t *pre_log, rdc_pref_par_log_t *sha_log)
{
	/*
	 * Hydra doesn't have details about these errors.
	 * It only provides the addresses of the errors.
	 */
	HXGE_REG_RD64(handle, RDC_PREF_PAR_LOG, &pre_log->value);
	HXGE_REG_RD64(handle, RDC_SHADOW_PAR_LOG, &sha_log->value);

	return (HPI_SUCCESS);
}


/* system wide conf functions */

hpi_status_t
hpi_rxdma_cfg_clock_div_set(hpi_handle_t handle, uint16_t count)
{
	uint64_t offset;
	rdc_clock_div_t clk_div;

	offset = RDC_CLOCK_DIV;

	clk_div.value = 0;
	clk_div.bits.count = count;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    " hpi_rxdma_cfg_clock_div_set: add 0x%llx "
	    "handle 0x%llx value 0x%llx",
	    handle.regp, handle.regh, clk_div.value));

	HXGE_REG_WR64(handle, offset, clk_div.value);

	return (HPI_SUCCESS);
}


hpi_status_t
hpi_rxdma_rdc_rbr_stat_get(hpi_handle_t handle, uint8_t rdc,
    rdc_rbr_qlen_t *rbr_stat)
{
	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_rdc_rbr_stat_get Illegal RDC Number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RBR_QLEN, rdc, &rbr_stat->value);
	return (HPI_SUCCESS);
}


hpi_status_t
hpi_rxdma_rdc_rcr_qlen_get(hpi_handle_t handle, uint8_t rdc,
    uint16_t *rcr_qlen)
{
	rdc_rcr_qlen_t stats;

	if (!RXDMA_CHANNEL_VALID(rdc)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " rxdma_rdc_rcr_qlen_get Illegal RDC Number %d \n", rdc));
		return (HPI_RXDMA_RDC_INVALID);
	}

	RXDMA_REG_READ64(handle, RDC_RCR_QLEN, rdc, &stats.value);
	*rcr_qlen = stats.bits.qlen;
	HPI_DEBUG_MSG((handle.function, HPI_RDC_CTL,
	    " rxdma_rdc_rcr_qlen_get RDC %d qlen %x qlen %x\n",
	    rdc, *rcr_qlen, stats.bits.qlen));
	return (HPI_SUCCESS);
}

hpi_status_t
hpi_rxdma_channel_rbr_empty_clear(hpi_handle_t handle, uint8_t channel)
{
	rdc_stat_t cs;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    " hpi_rxdma_channel_rbr_empty_clear", " channel", channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
	cs.bits.rbr_empty = 1;
	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

	return (HPI_SUCCESS);
}

/*
 * This function is called to operate on the control and status register.
 */
hpi_status_t
hpi_rxdma_control_status(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
    rdc_stat_t *cs_p)
{
	int status = HPI_SUCCESS;
	rdc_stat_t cs;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_control_status", "channel", channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	switch (op_mode) {
	case OP_GET:
		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs_p->value);
		break;

	case OP_SET:
		RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_p->value);
		break;

	case OP_UPDATE:
		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
		RXDMA_REG_WRITE64(handle, RDC_STAT, channel,
		    cs_p->value | cs.value);
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_control_status", "control", op_mode));
		return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
	}

	return (status);
}
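
/*
 * Usage sketch (illustrative only, not compiled): the helper above is
 * typically used read-modify-write style, e.g. to fetch and then
 * acknowledge channel events:
 *
 *	rdc_stat_t cs;
 *
 *	(void) hpi_rxdma_control_status(handle, OP_GET, channel, &cs);
 *	... inspect cs.bits ...
 *	(void) hpi_rxdma_control_status(handle, OP_SET, channel, &cs);
 *
 * Writing the value back to clear events assumes the RDC_STAT bits are
 * write-one-to-clear, as hpi_rxdma_channel_rbr_empty_clear() above
 * suggests; consult the Hydra documentation before relying on this.
 */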

/*
 * This function is called to operate on the event mask
 * register which is used for generating interrupts.
 */
hpi_status_t
hpi_rxdma_event_mask(hpi_handle_t handle, io_op_t op_mode, uint8_t channel,
    rdc_int_mask_t *mask_p)
{
	int status = HPI_SUCCESS;
	rdc_int_mask_t mask;

	if (!RXDMA_CHANNEL_VALID(channel)) {
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_event_mask", "channel", channel));
		return (HPI_FAILURE | HPI_RXDMA_CHANNEL_INVALID(channel));
	}

	switch (op_mode) {
	case OP_GET:
		RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask_p->value);
		break;

	case OP_SET:
		RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel, mask_p->value);
		break;

	case OP_UPDATE:
		RXDMA_REG_READ64(handle, RDC_INT_MASK, channel, &mask.value);
		RXDMA_REG_WRITE64(handle, RDC_INT_MASK, channel,
		    mask_p->value | mask.value);
		break;

	default:
		HPI_ERROR_MSG((handle.function, HPI_ERR_CTL,
		    "hpi_rxdma_event_mask", "eventmask", op_mode));
		return (HPI_FAILURE | HPI_RXDMA_OPCODE_INVALID(channel));
	}

	return (status);
}
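
/*
 * Usage sketch (illustrative only, not compiled): assuming a set bit in
 * RDC_INT_MASK masks the corresponding event (an assumption, not
 * confirmed by this file), a caller could temporarily silence a
 * channel's RX interrupts with
 *
 *	rdc_int_mask_t mask;
 *
 *	mask.value = ~0ULL;
 *	(void) hpi_rxdma_event_mask(handle, OP_SET, channel, &mask);
 *	...
 *	mask.value = 0;
 *	(void) hpi_rxdma_event_mask(handle, OP_SET, channel, &mask);
 *
 * Note that OP_UPDATE above ORs the supplied value into the current
 * register contents, so it can only add mask bits; clearing individual
 * bits requires an OP_GET/modify/OP_SET sequence.
 */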