// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
 * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
 */

#include "aq_phy.h"

/* Poll until the MDIO interface is idle; returns false on timeout. */
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_mdio_busy_get, aq_hw,
					val, val == 0U, 10U, 100000U);

	if (err < 0)
		return false;

	return true;
}

/* Read one 16-bit register from the given MMD via the MDIO interface. */
u16 aq_mdio_read_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr)
{
	u16 phy_addr = aq_hw->phy_id << 5 | mmd;

	/* Set Address register. */
	hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
				   HW_ATL_MDIO_ADDRESS_SHIFT);
	/* Send Address command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);

	/* Send Read command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (1 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));
	/* Read result. */
	aq_mdio_busy_wait(aq_hw);

	return (u16)hw_atl_glb_mdio_iface5_get(aq_hw);
}

/* Write one 16-bit register in the given MMD via the MDIO interface. */
void aq_mdio_write_word(struct aq_hw_s *aq_hw, u16 mmd, u16 addr, u16 data)
{
	u16 phy_addr = aq_hw->phy_id << 5 | mmd;

	/* Set Address register. */
	hw_atl_glb_mdio_iface4_set(aq_hw, (addr & HW_ATL_MDIO_ADDRESS_MSK) <<
				   HW_ATL_MDIO_ADDRESS_SHIFT);
	/* Send Address command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (3 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);

	/* Set Write data. */
	hw_atl_glb_mdio_iface3_set(aq_hw, (data & HW_ATL_MDIO_WRITE_DATA_MSK) <<
				   HW_ATL_MDIO_WRITE_DATA_SHIFT);
	/* Send Write command. */
	hw_atl_glb_mdio_iface2_set(aq_hw, HW_ATL_MDIO_EXECUTE_OPERATION_MSK |
				   (2 << HW_ATL_MDIO_OP_MODE_SHIFT) |
				   ((phy_addr & HW_ATL_MDIO_PHY_ADDRESS_MSK) <<
				    HW_ATL_MDIO_PHY_ADDRESS_SHIFT));

	aq_mdio_busy_wait(aq_hw);
}

/* Read a PHY register while holding the MDIO semaphore (HW_ATL_FW_SM_MDIO).
 * Returns 0xffff if the semaphore cannot be acquired.
 */
u16 aq_phy_read_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
					val, val == 1U, 10U, 100000U);

	if (err < 0) {
		err = 0xffff;
		goto err_exit;
	}

	err = aq_mdio_read_word(aq_hw, mmd, address);

	hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);

err_exit:
	return err;
}

/* Write a PHY register while holding the MDIO semaphore (HW_ATL_FW_SM_MDIO).
 * The write is dropped if the semaphore cannot be acquired.
 */
void aq_phy_write_reg(struct aq_hw_s *aq_hw, u16 mmd, u16 address, u16 data)
{
	int err = 0;
	u32 val;

	err = readx_poll_timeout_atomic(hw_atl_sem_mdio_get, aq_hw,
					val, val == 1U, 10U, 100000U);
	if (err < 0)
		return;

	aq_mdio_write_word(aq_hw, mmd, address, data);
	hw_atl_reg_glb_cpu_sem_set(aq_hw, 1U, HW_ATL_FW_SM_MDIO);
}

/* Probe MDIO addresses 0..HW_ATL_PHY_ID_MAX-1 for a responding PHY. */
bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw)
{
	u16 val;

	for (aq_hw->phy_id = 0; aq_hw->phy_id < HW_ATL_PHY_ID_MAX;
	     ++aq_hw->phy_id) {
		/* PMA Standard Device Identifier 2: Address 1.3 */
		val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);

		if (val != 0xffff)
			return true;
	}

	return false;
}

/* Detect the PHY address (if not yet known) and validate its PMA device
 * identifier.
 */
bool aq_phy_init(struct aq_hw_s *aq_hw)
{
	u32 dev_id;

	if (aq_hw->phy_id == HW_ATL_PHY_ID_MAX)
		if (!aq_phy_init_phy_id(aq_hw))
			return false;

	/* PMA Standard Device Identifier:
	 * Address 1.2 = MSW,
	 * Address 1.3 = LSW
	 */
	dev_id = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 2);
	dev_id <<= 16;
	dev_id |= aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, 3);

	if (dev_id == 0xffffffff) {
		aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		return false;
	}

	return true;
}
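
/* Illustrative sketch only, not part of the original driver: one way a caller
 * could use the helpers above to toggle low-power mode in the PMA/PMD control
 * register 1.0. MDIO_CTRL1 and MDIO_CTRL1_LPOWER are the standard definitions
 * from <linux/mdio.h>, assumed to be reachable via aq_phy.h (as the use of
 * MDIO_MMD_PMAPMD above suggests); the function name itself is hypothetical.
 */
static inline void aq_phy_example_set_low_power(struct aq_hw_s *aq_hw, bool on)
{
	u16 val = aq_phy_read_reg(aq_hw, MDIO_MMD_PMAPMD, MDIO_CTRL1);

	/* aq_phy_read_reg() returns 0xffff when the MDIO semaphore is
	 * unavailable; a real caller would treat that as an error.
	 */
	if (val == 0xffff)
		return;

	if (on)
		val |= MDIO_CTRL1_LPOWER;
	else
		val &= ~MDIO_CTRL1_LPOWER;

	aq_phy_write_reg(aq_hw, MDIO_MMD_PMAPMD, MDIO_CTRL1, val);
}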