/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/cdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/cacheflush.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define RCB_COMMON_REG_OFFSET 0x80000
#define TX_RING 0
#define RX_RING 1

#define RCB_RESET_WAIT_TIMES 30
#define RCB_RESET_TRY_TIMES 10

/**
 * hns_rcb_wait_fbd_clean - wait until the rings' fetched BDs are cleaned
 * @qs: ring struct pointer array
 * @q_num: number of queues in the array
 * @flag: tx or rx flag
 */
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
{
	int i, wait_cnt;
	u32 fbd_num;

	for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
		usleep_range(200, 300);
		fbd_num = 0;
		if (flag & RCB_INT_FLAG_TX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_TX_RING_FBDNUM_REG);
		if (flag & RCB_INT_FLAG_RX)
			fbd_num += dsaf_read_dev(qs[i],
						 RCB_RING_RX_RING_FBDNUM_REG);
		if (!fbd_num)
			i++;
		if (wait_cnt >= 10000)
			break;
	}

	if (i < q_num)
		dev_err(qs[i]->handle->owner_dev,
			"queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
}
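/*
 * Note on the bound above: wait_cnt is shared across all queues, so with
 * a 10000-iteration cap and a 200-300us sleep per iteration the loop
 * gives the hardware roughly 2-3 seconds in total to drain its fetched
 * descriptors before the failure is logged.
 */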
/**
 * hns_rcb_reset_ring_hw - ring reset
 * @q: ring struct pointer
 */
void hns_rcb_reset_ring_hw(struct hnae_queue *q)
{
	u32 wait_cnt;
	u32 try_cnt = 0;
	u32 could_ret;
	u32 tx_fbd_num;

	while (try_cnt++ < RCB_RESET_TRY_TIMES) {
		usleep_range(100, 200);
		tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
		if (tx_fbd_num)
			continue;

		dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

		msleep(20);
		could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

		wait_cnt = 0;
		while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

			dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);

			msleep(20);
			could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);

			wait_cnt++;
		}

		dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);

		if (could_ret)
			break;
	}

	if (try_cnt >= RCB_RESET_TRY_TIMES)
		dev_err(q->dev->dev, "port%d reset ring fail\n",
			hns_ae_get_vf_cb(q->handle)->port_index);
}

/**
 * hns_rcb_int_ctrl_hw - rcb irq enable control
 * @q: hnae queue struct pointer
 * @flag: ring flag, tx or rx
 * @mask: mask
 */
void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
			       int_mask_en);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
		dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
			       int_mask_en);
	}
}

void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
	}

	if (flag & RCB_INT_FLAG_RX) {
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
		dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
	}
}

void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
{
	u32 int_mask_en = !!mask;

	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
}

void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
{
	if (flag & RCB_INT_FLAG_TX)
		dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);

	if (flag & RCB_INT_FLAG_RX)
		dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
}

/**
 * hns_rcb_ring_enable_hw - enable ring
 * @q: rcb ring queue
 * @val: nonzero to enable BD prefetch, zero to disable it
 */
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
	dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
}

void hns_rcb_start(struct hnae_queue *q, u32 val)
{
	hns_rcb_ring_enable_hw(q, val);
}

/**
 * hns_rcb_common_init_commit_hw - commit rcb common init to the hardware
 * @rcb_common: rcb common device
 */
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
{
	wmb(); /* Sync point before breakpoint */
	dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
	wmb(); /* Sync point after breakpoint */
}
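/*
 * Programming note for the ring-init writes below: desc_dma_addr is a
 * dma_addr_t that may be only 32 bits wide on some configurations, so
 * the high word is extracted as (u32)((dma >> 31) >> 1) rather than
 * (u32)(dma >> 32); a single 32-bit shift of a 32-bit type would be
 * undefined behaviour in C, while the split shift is always defined.
 */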
/**
 * hns_rcb_ring_init - init rcb ring
 * @ring_pair: ring pair control block
 * @ring_type: ring type, RX_RING or TX_RING
 */
static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
{
	struct hnae_queue *q = &ring_pair->q;
	struct rcb_common_cb *rcb_common = ring_pair->rcb_common;
	u32 bd_size_type = rcb_common->dsaf_dev->buf_size_type;
	struct hnae_ring *ring =
		(ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == RX_RING) {
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	} else {
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
			       bd_size_type);
		dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
			       ring_pair->port_id_in_comm);
		dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
			       ring_pair->port_id_in_comm);
	}
}

/**
 * hns_rcb_init_hw - init rcb hardware
 * @ring: rcb ring
 */
void hns_rcb_init_hw(struct ring_pair_cb *ring)
{
	hns_rcb_ring_init(ring, RX_RING);
	hns_rcb_ring_init(ring, TX_RING);
}

/**
 * hns_rcb_set_port_desc_cnt - set rcb port descriptor count
 * @rcb_common: rcb_common device
 * @port_idx: port index
 * @desc_cnt: BD num
 */
static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
				      u32 port_idx, u32 desc_cnt)
{
	dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
		       desc_cnt);
}

static void hns_rcb_set_port_timeout(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
			       timeout * HNS_RCB_CLK_FREQ_MHZ);
	else
		dsaf_write_dev(rcb_common,
			       RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
			       timeout);
}
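/*
 * Note on the timeout programming above: v1 hardware has a single
 * OVERTIME register for the whole RCB common block, apparently written
 * in clock ticks (hence the HNS_RCB_CLK_FREQ_MHZ scaling of the
 * microsecond value), while v2 hardware takes the timeout directly in
 * microseconds, one register per port.
 */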
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
	if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return HNS_RCB_SERVICE_NW_ENGINE_NUM;
	else
		return HNS_RCB_DEBUG_NW_ENGINE_NUM;
}

/* clear and then enable/disable the rcb common exception irqs */
static void hns_rcb_comm_exc_irq_en(
	struct rcb_common_cb *rcb_common, int en)
{
	u32 clr_value = 0xfffffffful;
	u32 msk_value = en ? 0 : 0xfffffffful;

	/* clear ints */
	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);

	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);

	/* enable masks */
	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);

	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);

	/* tx BDs need no cacheline, so mask sf_txring_fbd_intmask (bit 1) */
	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);

	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
}

/**
 * hns_rcb_common_init_hw - init rcb common hardware
 * @rcb_common: rcb_common device
 * return 0 - success, negative - fail
 */
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
{
	u32 reg_val;
	int i;
	int port_num = hns_rcb_common_get_port_num(rcb_common);

	hns_rcb_comm_exc_irq_en(rcb_common, 0);

	reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
	if (0x1 != (reg_val & 0x1)) {
		dev_err(rcb_common->dsaf_dev->dev,
			"RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
		return -EBUSY;
	}

	for (i = 0; i < port_num; i++) {
		hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
		(void)hns_rcb_set_coalesced_frames(
			rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
		hns_rcb_set_port_timeout(
			rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
	}

	dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
		       HNS_RCB_COMMON_ENDIAN);

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
		dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
	} else {
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FNA_B, false);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
				 RCB_COM_CFG_FA_B, true);
		dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
				 RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
	}

	return 0;
}

int hns_rcb_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = -EINVAL;
	}

	return bd_size_type;
}
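/*
 * Example of the mapping above (type values come from hns_dsaf_rcb.h):
 *
 *	hns_rcb_buf_size2type(2048);	// returns HNS_BD_SIZE_2048_TYPE
 *	hns_rcb_buf_size2type(1500);	// returns -EINVAL: only the four
 *					// power-of-two sizes are supported
 */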
static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
{
	struct hnae_ring *ring;
	struct rcb_common_cb *rcb_common;
	struct ring_pair_cb *ring_pair_cb;
	u32 buf_size;
	u16 desc_num, mdnum_ppkt;
	bool irq_idx, is_ver1;

	ring_pair_cb = container_of(q, struct ring_pair_cb, q);
	is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
	if (ring_type == RX_RING) {
		ring = &q->rx_ring;
		ring->io_base = ring_pair_cb->q.io_base;
		irq_idx = HNS_RCB_IRQ_IDX_RX;
		mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
	} else {
		ring = &q->tx_ring;
		ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
			HNS_RCB_TX_REG_OFFSET;
		irq_idx = HNS_RCB_IRQ_IDX_TX;
		mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
				HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
	}

	rcb_common = ring_pair_cb->rcb_common;
	buf_size = rcb_common->dsaf_dev->buf_size;
	desc_num = rcb_common->dsaf_dev->desc_num;

	ring->desc = NULL;
	ring->desc_cb = NULL;

	ring->irq = ring_pair_cb->virq[irq_idx];
	ring->desc_dma_addr = 0;

	ring->buf_size = buf_size;
	ring->desc_num = desc_num;
	ring->max_desc_num_per_pkt = mdnum_ppkt;
	ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
	ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
}

static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
{
	ring_pair_cb->q.handle = NULL;

	hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
	hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}

static int hns_rcb_get_port_in_comm(
	struct rcb_common_cb *rcb_common, int ring_idx)
{
	int comm_index = rcb_common->comm_index;
	int port;
	int q_num;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
		port = ring_idx / q_num;
	} else {
		port = 0; /* config debug-ports port_id_in_comm to 0 */
	}

	return port;
}

#define SERVICE_RING_IRQ_IDX(v1) \
	((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
#define DEBUG_RING_IRQ_IDX(v1) \
	((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
#define DEBUG_RING_IRQ_OFFSET(v1) \
	((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
	int comm_index = rcb_common->comm_index;
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return SERVICE_RING_IRQ_IDX(is_ver1);
	else
		return DEBUG_RING_IRQ_IDX(is_ver1) +
			(comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
}

#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
	((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
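/*
 * Each ring pair owns a fixed-size register window inside the RCB common
 * space: ring i sits at base + 0x10000 + i * HNS_RCB_REG_OFFSET.  The
 * macro above is applied to both the ioremapped base (q.io_base) and the
 * physical base (q.phy_base) in hns_rcb_get_cfg() below.
 */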
/**
 * hns_rcb_get_cfg - get rcb config
 * @rcb_common: rcb common device
 */
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
{
	struct ring_pair_cb *ring_pair_cb;
	u32 i;
	u32 ring_num = rcb_common->ring_num;
	int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
	struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
	struct platform_device *pdev =
		to_platform_device(rcb_common->dsaf_dev->dev);
	bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);

	for (i = 0; i < ring_num; i++) {
		ring_pair_cb = &rcb_common->ring_pair_cb[i];
		ring_pair_cb->rcb_common = rcb_common;
		ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
		ring_pair_cb->index = i;
		ring_pair_cb->q.io_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
		ring_pair_cb->port_id_in_comm =
			hns_rcb_get_port_in_comm(rcb_common, i);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
		is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
			  platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
		ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
		is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
			  platform_get_irq(pdev, base_irq_idx + i * 3);
		ring_pair_cb->q.phy_base =
			RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
		hns_rcb_ring_pair_get_cfg(ring_pair_cb);
	}
}

/**
 * hns_rcb_get_coalesced_frames - get rcb port coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: coalesced_frames
 */
u32 hns_rcb_get_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}

/**
 * hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 *
 * Returns: time_out
 */
u32 hns_rcb_get_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx)
{
	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
		return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
		       HNS_RCB_CLK_FREQ_MHZ;
	else
		return dsaf_read_dev(rcb_common,
				     RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}

/**
 * hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @timeout: tx/rx time for coalesced time_out
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesce_usecs(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
	u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);

	if (timeout == old_timeout)
		return 0;

	if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
		if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
			dev_err(rcb_common->dsaf_dev->dev,
				"error: coalesce_usecs setting not supported!\n");
			return -EINVAL;
		}
	}
	if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce %dus not supported!\n", timeout);
		return -EINVAL;
	}
	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
	return 0;
}

/**
 * hns_rcb_set_coalesced_frames - set rcb coalesced frames
 * @rcb_common: rcb_common device
 * @port_idx: port id in comm
 * @coalesced_frames: tx/rx BD num for coalesced frames
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int hns_rcb_set_coalesced_frames(
	struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
	u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);

	if (coalesced_frames == old_waterline)
		return 0;

	if (coalesced_frames >= rcb_common->desc_num ||
	    coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
	    coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
		dev_err(rcb_common->dsaf_dev->dev,
			"error: coalesce_frames setting not supported!\n");
		return -EINVAL;
	}

	dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
		       coalesced_frames);
	return 0;
}
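/*
 * Usage sketch (hypothetical values): the two knobs above together set
 * the interrupt moderation point for a port, roughly "interrupt after N
 * frames or T microseconds, whichever comes first", e.g.
 *
 *	hns_rcb_set_coalesced_frames(rcb_common, port_idx, 50);
 *	hns_rcb_set_coalesce_usecs(rcb_common, port_idx, 3);
 */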
/**
 * hns_rcb_get_queue_mode - get max VM number and max ring number per VM
 *                          according to dsaf mode
 * @dsaf_mode: dsaf mode
 * @comm_index: rcb common index
 * @max_vfn: max vfn number
 * @max_q_per_vf: max ring number per vm
 */
void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
			    u16 *max_vfn, u16 *max_q_per_vf)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_mode) {
		case DSAF_MODE_DISABLE_6PORT_0VM:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		case DSAF_MODE_DISABLE_FIX:
			*max_vfn = 1;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_2PORT_64VM:
			*max_vfn = 64;
			*max_q_per_vf = 1;
			break;
		case DSAF_MODE_DISABLE_6PORT_16VM:
			*max_vfn = 16;
			*max_q_per_vf = 1;
			break;
		default:
			*max_vfn = 1;
			*max_q_per_vf = 16;
			break;
		}
	} else {
		*max_vfn = 1;
		*max_q_per_vf = 1;
	}
}

int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		switch (dsaf_dev->dsaf_mode) {
		case DSAF_MODE_ENABLE_FIX:
			return 1;

		case DSAF_MODE_DISABLE_FIX:
			return 6;

		case DSAF_MODE_ENABLE_0VM:
			return 32;

		case DSAF_MODE_DISABLE_6PORT_0VM:
		case DSAF_MODE_ENABLE_16VM:
		case DSAF_MODE_DISABLE_6PORT_2VM:
		case DSAF_MODE_DISABLE_6PORT_16VM:
		case DSAF_MODE_DISABLE_6PORT_4VM:
		case DSAF_MODE_ENABLE_8VM:
			return 96;

		case DSAF_MODE_DISABLE_2PORT_16VM:
		case DSAF_MODE_DISABLE_2PORT_8VM:
		case DSAF_MODE_ENABLE_32VM:
		case DSAF_MODE_DISABLE_2PORT_64VM:
		case DSAF_MODE_ENABLE_128VM:
			return 128;

		default:
			dev_warn(dsaf_dev->dev,
				 "get ring num fail, use default! dsaf_mode=%d\n",
				 dsaf_dev->dsaf_mode);
			return 128;
		}
	} else {
		return 1;
	}
}

void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
				       int comm_index)
{
	void __iomem *base_addr;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
	else
		base_addr = dsaf_dev->sds_base
			+ (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
			+ RCB_COMMON_REG_OFFSET;

	return base_addr;
}

static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
					    int comm_index)
{
	struct device_node *np = dsaf_dev->dev->of_node;
	phys_addr_t phy_addr;
	const __be32 *tmp_addr;
	u64 addr_offset = 0;
	u64 size = 0;
	int index = 0;

	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
		index = 2;
		addr_offset = RCB_COMMON_REG_OFFSET;
	} else {
		index = 1;
		addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
				RCB_COMMON_REG_OFFSET;
	}
	tmp_addr = of_get_address(np, index, &size, NULL);
	phy_addr = of_translate_address(np, tmp_addr);
	return phy_addr + addr_offset;
}
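/*
 * The physical base above is resolved from the DSAF DT node's "reg"
 * property: entry 2 backs the service RCB common block and entry 1 the
 * debug blocks, each with RCB_COMMON_REG_OFFSET (plus the per-instance
 * debug offset) added on top.  This mirrors hns_rcb_common_get_vaddr(),
 * which derives the same addresses from the already-ioremapped ppe/sds
 * bases.
 */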
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
			   int comm_index)
{
	struct rcb_common_cb *rcb_common;
	enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
	u16 max_vfn;
	u16 max_q_per_vf;
	int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);

	rcb_common =
		devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
			ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL);
	if (!rcb_common) {
		dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n");
		return -ENOMEM;
	}
	rcb_common->comm_index = comm_index;
	rcb_common->ring_num = ring_num;
	rcb_common->dsaf_dev = dsaf_dev;

	rcb_common->desc_num = dsaf_dev->desc_num;

	hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
	rcb_common->max_vfn = max_vfn;
	rcb_common->max_q_per_vf = max_q_per_vf;

	rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
	rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);

	dsaf_dev->rcb_common[comm_index] = rcb_common;
	return 0;
}

void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
			     u32 comm_index)
{
	dsaf_dev->rcb_common[comm_index] = NULL;
}

void hns_rcb_update_stats(struct hnae_queue *queue)
{
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
	struct ppe_common_cb *ppe_common
		= dsaf_dev->ppe_common[ring->rcb_common->comm_index];
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	hw_stats->rx_pkts += dsaf_read_dev(queue,
			RCB_RING_RX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);

	hw_stats->tx_pkts += dsaf_read_dev(queue,
			RCB_RING_TX_RING_PKTNUM_RECORD_REG);
	dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);

	hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
	hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
			PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
}
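/*
 * Note on the PKTNUM_RECORD accesses above: the counter is accumulated
 * into hw_stats and then 0x1 is written back, which appears to clear the
 * hardware record so the next update only picks up the delta.
 */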
/**
 * hns_rcb_get_stats - get rcb statistics
 * @queue: rcb ring queue
 * @data: statistics value
 */
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
{
	u64 *regs_buff = data;
	struct ring_pair_cb *ring =
		container_of(queue, struct ring_pair_cb, q);
	struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;

	regs_buff[0] = hw_stats->tx_pkts;
	regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
	regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
	regs_buff[3] =
		dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);

	regs_buff[4] = queue->tx_ring.stats.tx_pkts;
	regs_buff[5] = queue->tx_ring.stats.tx_bytes;
	regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
	regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
	regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
	regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
	regs_buff[10] = queue->tx_ring.stats.restart_queue;
	regs_buff[11] = queue->tx_ring.stats.tx_busy;

	regs_buff[12] = hw_stats->rx_pkts;
	regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
	regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
	regs_buff[15] =
		dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);

	regs_buff[16] = queue->rx_ring.stats.rx_pkts;
	regs_buff[17] = queue->rx_ring.stats.rx_bytes;
	regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
	regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
	regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
	regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
	regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
	regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
	regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
	regs_buff[25] = queue->rx_ring.stats.err_bd_num;
	regs_buff[26] = queue->rx_ring.stats.l2_err;
	regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
}

/**
 * hns_rcb_get_ring_sset_count - rcb string set count
 * @stringset: ethtool cmd
 * return rcb ring string set count
 */
int hns_rcb_get_ring_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return HNS_RING_STATIC_REG_NUM;

	return 0;
}

/**
 * hns_rcb_get_common_regs_count - rcb common regs count
 * return regs count
 */
int hns_rcb_get_common_regs_count(void)
{
	return HNS_RCB_COMMON_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_ring_regs_count - rcb ring regs count
 * return regs count
 */
int hns_rcb_get_ring_regs_count(void)
{
	return HNS_RCB_RING_DUMP_REG_NUM;
}

/**
 * hns_rcb_get_strings - get rcb string set
 * @stringset: string set index
 * @data: strings name value
 * @index: queue index
 */
void hns_rcb_get_strings(int stringset, u8 *data, int index)
{
	char *buff = (char *)data;

	if (stringset != ETH_SS_STATS)
		return;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
	buff = buff + ETH_GSTRING_LEN;

	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
	buff = buff + ETH_GSTRING_LEN;
	snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
}
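/*
 * The string table above must stay in step with hns_rcb_get_stats():
 * 12 tx entries followed by 16 rx entries, in exactly the order the
 * values are written into regs_buff[0..27].
 */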
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
	u32 *regs = data;
	bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
	bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
	u32 reg_tmp;
	u32 reg_num_tmp;
	u32 i = 0;

	/* rcb common registers */
	regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
	regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
	regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);

	regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
	regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
	regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
	regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
	regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
	regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);

	regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
	regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
	regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
	regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
	regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
	regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
	regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
	regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
	regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
	regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
	regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
	regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
	regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
	regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
	regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
	regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
	regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
	regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
	regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);

	regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
	regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
	regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
	regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
	regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
	regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
	regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
	regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
	regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
	regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);

	/* rcb common entry registers */
	for (i = 0; i < 16; i++) { /* total 16 model registers */
		regs[38 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
		regs[54 + i]
			= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
	}

	reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
	reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
	for (i = 0; i < reg_num_tmp; i++)	/* one register per port on v2 */
		regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp + 4 * i);

	regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
	regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);

	/* mark end of rcb common regs */
	for (i = 78; i < 80; i++)
		regs[i] = 0xcccccccc;
}
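/*
 * The 0xcccccccc padding above (and the 0xcccccc00 + ring index values
 * at the end of hns_rcb_get_ring_regs() below) serve as recognizable
 * end-of-dump markers, which makes truncated or misaligned ethtool
 * register dumps easy to spot.
 */

void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
{
	u32 *regs = data;
	struct ring_pair_cb *ring_pair
		= container_of(queue, struct ring_pair_cb, q);
	u32 i = 0;

	/* rcb ring registers */
	regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
	regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
	regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
	regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
	regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
	regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
	regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
	regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
	regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);

	regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
	regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
	regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
	regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
	regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
	/* regs[14] is skipped; the dump buffer is zeroed by the caller */
	regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
	regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
	regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
	regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
	regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);

	regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
	regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
	regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
	regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
	regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
	regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
	regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);

	regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
	regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
	regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
	regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
	regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
	regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
	regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
	regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);

	/* mark end of ring regs */
	for (i = 35; i < 40; i++)
		regs[i] = 0xcccccc00 + ring_pair->index;
}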