// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_hw_utils.c: Definitions of helper functions used across
 * hardware layer.
 */

#include "aq_hw_utils.h"

#include <linux/io-64-nonatomic-lo-hi.h>

#include "aq_hw.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_llh.h"
19
/* Update the field selected by @msk in the register at @addr.
 *
 * @val is pre-shifted into place by @shift.  For a full-width mask the
 * register is written directly; otherwise a read-modify-write is done,
 * and the write is skipped entirely when the register already holds the
 * desired value.
 */
void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			 u32 shift, u32 val)
{
	u32 old_val, new_val;

	if (msk == ~0U) {
		/* Whole register is targeted: no need to preserve bits. */
		aq_hw_write_reg(aq_hw, addr, val);
		return;
	}

	old_val = aq_hw_read_reg(aq_hw, addr);
	new_val = (old_val & ~msk) | (val << shift);

	if (new_val != old_val)
		aq_hw_write_reg(aq_hw, addr, new_val);
}
35
/* Read the register at @addr and extract the field selected by @msk,
 * right-justified by @shift.
 */
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
{
	u32 reg_val = aq_hw_read_reg(aq_hw, addr);

	return (reg_val & msk) >> shift;
}
40
/* Read a 32-bit register at byte offset @reg from the MMIO base.
 *
 * An all-ones readback may indicate the device dropped off the bus;
 * confirm by probing the dedicated alive-check register and, if that is
 * also all ones, latch AQ_HW_FLAG_ERR_UNPLUG.  The raw value is returned
 * unchanged either way.
 */
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
{
	u32 value = readl(hw->mmio + reg);

	if (value == U32_MAX) {
		u32 alive_addr = hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr;

		if (readl(hw->mmio + alive_addr) == U32_MAX)
			aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
	}

	return value;
}
51
/* Write a 32-bit @value to the register at byte offset @reg from the
 * mapped MMIO base.
 */
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
{
	writel(value, hw->mmio + reg);
}
56
/* Most 64-bit registers are in LSW, MSW form.
 * Counters are normally implemented by HW as latched pairs:
 * reading LSW first locks MSW, to overcome LSW overflow.
 */
aq_hw_read_reg64(struct aq_hw_s * hw,u32 reg)61 u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
62 {
63 u64 value = U64_MAX;
64
65 if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
66 value = readq(hw->mmio + reg);
67 else
68 value = lo_hi_readq(hw->mmio + reg);
69
70 if (value == U64_MAX &&
71 readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
72 aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
73
74 return value;
75 }
76
/* Write a 64-bit @value to the register at byte offset @reg, using a
 * native writeq() when the hardware supports 64-bit access and a
 * low-then-high pair of 32-bit writes otherwise.
 */
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
{
	if (!hw->aq_nic_cfg->aq_hw_caps->op64bit) {
		lo_hi_writeq(value, hw->mmio + reg);
		return;
	}

	writeq(value, hw->mmio + reg);
}
84
aq_hw_invalidate_descriptor_cache(struct aq_hw_s * hw)85 int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
86 {
87 int err;
88 u32 val;
89
90 /* Invalidate Descriptor Cache to prevent writing to the cached
91 * descriptors and to the data pointer of those descriptors
92 */
93 hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);
94
95 err = aq_hw_err_from_flags(hw);
96 if (err)
97 goto err_exit;
98
99 readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
100 hw, val, val == 1, 1000U, 10000U);
101
102 err_exit:
103 return err;
104 }
105
aq_hw_err_from_flags(struct aq_hw_s * hw)106 int aq_hw_err_from_flags(struct aq_hw_s *hw)
107 {
108 int err = 0;
109
110 if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
111 err = -ENXIO;
112 goto err_exit;
113 }
114 if (aq_utils_obj_test(&hw->flags, AQ_HW_FLAG_ERR_HW)) {
115 err = -EIO;
116 goto err_exit;
117 }
118
119 err_exit:
120 return err;
121 }
122
aq_hw_num_tcs(struct aq_hw_s * hw)123 int aq_hw_num_tcs(struct aq_hw_s *hw)
124 {
125 switch (hw->aq_nic_cfg->tc_mode) {
126 case AQ_TC_MODE_8TCS:
127 return 8;
128 case AQ_TC_MODE_4TCS:
129 return 4;
130 default:
131 break;
132 }
133
134 return 1;
135 }
136
aq_hw_q_per_tc(struct aq_hw_s * hw)137 int aq_hw_q_per_tc(struct aq_hw_s *hw)
138 {
139 switch (hw->aq_nic_cfg->tc_mode) {
140 case AQ_TC_MODE_8TCS:
141 return 4;
142 case AQ_TC_MODE_4TCS:
143 return 8;
144 default:
145 return 4;
146 }
147 }
148