// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2018-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

#include "aq_phy.h"

#define HW_ATL_PTP_DISABLE_MSK	BIT(10)

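/* Poll the MDIO interface busy flag until it clears.
 * readx_poll_timeout_atomic() checks every 10 us and gives up after
 * 100 ms; returns false on timeout, true once the interface is idle.
 */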
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
					val, val == 0U, 10U, 100000U);

	if (err < 0)
		return false;

	return true;
}

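/* Clause 45 read over the MAC's global MDIO interface: latch the
 * register address, issue the Address command, then the Read command,
 * and return the data-out register. Callers are expected to hold the
 * HW_ATL_FW_SM_MDIO semaphore (see aq_phy_read_reg() below).
 */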
u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
{
	u16 phy_addr = aq_hw->phy_id << 5 | mmd;

	/* Set Address register. */
	hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
				   HW_ATL_MDIO_ADDRESS_SHIFT);
	/* Send Address command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);

	/* Send Read command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
	/* Read result. */
	aq_mdio_busy_wait(aq_hw);

	return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
}

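/* Clause 45 write: latch the register address, issue the Address
 * command, load the write-data register, then issue the Write command,
 * waiting for the interface to go idle after each command.
 */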
void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
{
	u16 phy_addr = aq_hw->phy_id << 5 | mmd;

	/* Set Address register. */
	hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
				   HW_ATL_MDIO_ADDRESS_SHIFT);
	/* Send Address command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);

	hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
				   HW_ATL_MDIO_WRITE_DATA_SHIFT);
	/* Send Write command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);
}

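/* Read a PHY register with firmware arbitration: take the
 * HW_ATL_FW_SM_MDIO semaphore, do the MDIO read, then release the
 * semaphore. Returns 0xffff if the semaphore cannot be acquired.
 */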
u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
					val, val == 1U, 10U, 100000U);

	if (err < 0) {
		err = 0xffff;
		goto err_exit;
	}

	err = aq_mdio_read_word(aq_hw, mmd, address);

	hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);

err_exit:
	return err;
}

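/* Write a PHY register under the HW_ATL_FW_SM_MDIO semaphore; the
 * write is silently dropped if the semaphore cannot be acquired.
 */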
void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
					val, val == 1U, 10U, 100000U);
	if (err < 0)
		return;

	aq_mdio_write_word(aq_hw, mmd, address, data);
	hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
}

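/* Scan MDIO addresses 0..HW_ATL_PHY_ID_MAX - 1 for an attached PHY by
 * reading the PMA/PMD device identifier (register 1.3). Anything other
 * than 0xffff means a PHY answered; its address is kept in phy_id.
 */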
bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
{
	u16 val;

	for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
	     ++aq_hw->phy_id) {
		/* PMA Standard Device Identifier 2: Address 1.3 */
		val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);

		if (val != 0xffff)
			return true;
	}

	return false;
}

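/* Detect the PHY if no address is cached yet, then validate it by
 * reading the full 32-bit PMA/PMD device identifier (registers 1.2 and
 * 1.3). An all-ones ID invalidates the cached phy_id.
 */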
bool aq_phy_init(struct aq_hw_s *aq_hw)
{
	u32 dev_id;

	if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
		if (!aq_phy_init_phy_id(aq_hw))
			return false;

	/* PMA Standard Device Identifier:
	 * Address 1.2 = MSW,
	 * Address 1.3 = LSW
	 */
	dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
	dev_id <<= 16;
	dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);

	if (dev_id == 0xffffffff) {
		aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		return false;
	}

	return true;
}

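/* Disable PTP handling in the PHY by clearing bit 10
 * (HW_ATL_PTP_DISABLE_MSK) in the MDIO_MMD_VEND1 registers
 * 0x031b-0x031e; the exact meaning of these vendor-specific registers
 * is not documented here.
 */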
void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
{
	static const u16 ptp_registers[] = {
		0x031e,
		0x031d,
		0x031c,
		0x031b,
	};
	u16 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
		val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
				      ptp_registers[i]);

		aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
				 ptp_registers[i],
				 val & ~HW_ATL_PTP_DISABLE_MSK);
	}
}