/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 * hns_rcb_wait_fbd_clean - wait until the rings' FBD counters are clean
 * @qs: array of queue pointers
 * @q_num: number of queues in the array
 * @flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

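	/* Poll each queue until its FBD counter reads zero; i only
	 * advances once queue i has drained.  At 200-300us per poll and
	 * a 10000-iteration cap, the total wait is bounded at roughly
	 * 2-3 seconds across all queues.
	 */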
	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}

/**
 * hns_rcb_reset_ring_hw - reset a ring in hardware
 * @q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - rcb irq enable control
 * @q: hnae queue struct pointer
 * @flag: ring flag, tx or rx
 * @mask: written to the interrupt mask registers; non-zero masks the
 *	  ring interrupts, zero unmasks them
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

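/* The V2 RCB signals each ring direction through a single interrupt
 * status register, so the V2 helpers below only manage the waterline
 * mask and have no separate overtime interrupt to clear.
 */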
void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 * hns_rcb_ring_enable_hw - enable or disable a ring
 * @q: hnae queue struct pointer
 * @val: non-zero enables BD prefetch for the ring, zero disables it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - signal that rcb common init is complete
 * @rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* Make sure all config writes land before the commit */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* Make sure the commit write lands before we go on */
}

/**
 * hns_rcb_ring_init - init rcb ring
 * @ring_pair: ring pair control block
 * @ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

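	/* The descriptor base is programmed in two 32-bit halves.  The
	 * high half is taken as (dma >> 31) >> 1 rather than dma >> 32,
	 * so the shift stays well defined when dma_addr_t is 32 bit.
	 */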
	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}

/**
 * hns_rcb_init_hw - init rcb hardware
 * @ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set rcb port descriptor num
 * @rcb_common: rcb_common device
 * @port_idx: port index
 * @desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

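/* V1 chips have one global timeout register programmed in clock cycles
 * (hence the HNS_RCB_CLK_FREQ_MHZ scaling), while V2 chips take a
 * per-port value in microseconds directly.
 */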
static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}

static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear rcb common exception irqs and enable/disable their masks */
static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_vlue = 0xfffffffful;
	u32 msk_vlue = en ? 0 : 0xfffffffful;

	/* clear the interrupt status */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_vlue);

	/* configure the masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_vlue);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_vlue);

	/* tx bd does not need the cacheline event, so always mask
	 * sf_txring_fbd_intmask (bit 1)
	 */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_vlue | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_vlue);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_vlue);
}

/**
 * hns_rcb_common_init_hw - init rcb common hardware
 * @rcb_common: rcb_common device
 *
 * Return: 0 on success, or a negative errno on failure
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (!(reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}

static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				       HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

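/* Rings are grouped by port: each port in the common block owns
 * max_q_per_vf * max_vfn consecutive ring indexes, so integer division
 * recovers the owning port for a given ring.
 */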
static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return HNS_DEBUG_RING_IRQ_IDX;
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid) \
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
/**
 * hns_rcb_get_cfg - get rcb config
 * @rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
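		/* IRQ layout differs by revision: V1 describes two IRQs
		 * per ring in the device tree (TX, then RX), while V2
		 * uses three platform IRQs per ring with RX first.
		 */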
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
			is_ver1 ?
			irq_of_parse_and_map(np, base_irq_idx + i * 2) :
			platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
			is_ver1 ?
			irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
			platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 * hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: time_out in microseconds
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 * hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @timeout: tx/rx interrupt coalescing timeout, in microseconds
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

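	/* On V1 the timeout register is global (shared by every port in
	 * the common block), so a per-port setting is only honoured on
	 * debug (single-port) instances.
	 */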
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: not support coalesce_usecs setting!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce %dus!\n", timeout);
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 * hns_rcb_set_coalesced_frames - set rcb coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: not support coalesce_frames setting!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}

/**
 * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *			    according to dsaf mode
 * @dsaf_mode: dsaf mode
 * @max_vfn: max vfn number
 * @max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
			    u16 *max_q_per_vf)
{
	switch (dsaf_mode) {
	case DSAF_MODE_DISABLE_6PORT_0VM:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	case DSAF_MODE_DISABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		*max_vfn = 1;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_2PORT_64VM:
		*max_vfn = 64;
		*max_q_per_vf = 1;
		break;
	case DSAF_MODE_DISABLE_6PORT_16VM:
		*max_vfn = 16;
		*max_q_per_vf = 1;
		break;
	default:
		*max_vfn = 1;
		*max_q_per_vf = 16;
		break;
	}
}

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
	switch (dsaf_dev->dsaf_mode) {
	case DSAF_MODE_ENABLE_FIX:
	case DSAF_MODE_DISABLE_SP:
		return 1;

	case DSAF_MODE_DISABLE_FIX:
		return 6;

	case DSAF_MODE_ENABLE_0VM:
		return 32;

	case DSAF_MODE_DISABLE_6PORT_0VM:
	case DSAF_MODE_ENABLE_16VM:
	case DSAF_MODE_DISABLE_6PORT_2VM:
	case DSAF_MODE_DISABLE_6PORT_16VM:
	case DSAF_MODE_DISABLE_6PORT_4VM:
	case DSAF_MODE_ENABLE_8VM:
		return 96;

	case DSAF_MODE_DISABLE_2PORT_16VM:
	case DSAF_MODE_DISABLE_2PORT_8VM:
	case DSAF_MODE_ENABLE_32VM:
	case DSAF_MODE_DISABLE_2PORT_64VM:
	case DSAF_MODE_ENABLE_128VM:
		return 128;

	default:
		dev_warn(dsaf_dev->dev,
			 "get ring num fail,use default!dsaf_mode=%d\n",
			 dsaf_dev->dsaf_mode);
		return 128;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}

static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
	struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;

	return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}

int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
	rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

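	/* The pktnum record registers appear to be clear-on-write: each
	 * read is followed by a write of 0x1, so hw_stats accumulates
	 * the per-interval deltas.
	 */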
	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}

/**
 * hns_rcb_get_stats - get rcb statistics
 * @queue: rcb queue
 * @data: buffer for the statistic values
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

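	/* This layout must stay in step with the names emitted by
	 * hns_rcb_get_strings(): indexes 0-11 are TX statistics and
	 * 12-27 are RX statistics.
	 */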
	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - rcb string set count
 * @stringset: ethtool string set id
 *
 * Return: rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 * hns_rcb_get_common_regs_count - rcb common regs count
 *
 * Return: regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - rcb ring regs count
 *
 * Return: regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_strings - get rcb string set
 * @stringset: string set index
 * @data: strings name buffer
 * @index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}

void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers: 16 per-port entries each */
	for (i = 0; i < 16; i++) {
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
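	/* note: regs[14] is skipped; the TX block resumes at index 15 */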
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}