// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

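/* Note the asymmetry between the two queues: receive (ARQ) buffers are
 * DMA-mapped and pre-posted to firmware up front so it always has somewhere
 * to write events, while send (ASQ) buffers are only attached to a
 * descriptor at command submission time.
 */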
/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct i40e_aq_desc *desc;
	struct iavf_dma_mem *bi;
	iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

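		/* Every pre-posted descriptor carries the BUF flag; the LB
		 * (large buffer) flag is additionally required for buffers
		 * above I40E_AQ_LARGE_BUF (512 bytes in these adminq
		 * definitions).
		 */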
		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

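	/* Writing the ring length with the ENABLE bit set is what arms the
	 * queue; the shutdown path disables it again by writing 0 to the
	 * same register.
	 */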
	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
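	/* A tail of count - 1 with head at 0 hands all but one pre-posted
	 * buffer to firmware; the customary one-slot gap keeps a full ring
	 * distinguishable from an empty one (head == tail).
	 */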
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static iavf_status i40e_init_asq(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
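	/* count doubles as the "initialized" marker tested on entry and by
	 * the shutdown path, so it is only set once setup has fully
	 * succeeded.
	 */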
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static iavf_status i40e_init_arq(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

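/* For illustration only, a caller is expected to size the queues first,
 * roughly as the iavf driver does elsewhere (the two constants below are
 * not defined in this file and stand in for the caller's choices):
 *
 *	hw->aq.num_arq_entries = IAVF_AQ_LEN;
 *	hw->aq.num_asq_entries = IAVF_AQ_LEN;
 *	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *	status = iavf_init_adminq(hw);
 */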
/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	iavf_status ret_code = 0;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	return ret_code;
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
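	/* Firmware advances the head register as it consumes commands, so
	 * every descriptor between next_to_clean and the current head has
	 * been processed and can be reclaimed.
	 */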
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}

/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	iavf_status status = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

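	/* A head value at or beyond the ring size means the queue registers
	 * no longer reflect our configuration, most likely because the
	 * device was reset underneath us.
	 */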
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}
	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

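		/* Busy-wait in 50 usec steps; hw->aq.asq_cmd_timeout (set in
		 * iavf_init_adminq) bounds the total wait in the same units.
		 */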
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

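/* A minimal direct (bufferless) command, for illustration; example_opcode
 * is a placeholder, not an opcode defined in this file:
 *
 *	struct i40e_aq_desc desc;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, example_opcode);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */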
/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 * iavf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
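	/* ntu - ntc is the number of events still unprocessed; when ntc has
	 * wrapped past ntu the difference goes negative, so the ring size is
	 * added back in to keep the count in range.
	 */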
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}