/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_status.h"
#include "i40e_alloc.h"
#include "i40e_hmc.h"
#include "i40e_type.h"

/**
 * i40e_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 **/
enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 sd_index,
					      enum i40e_sd_entry_type type,
					      u64 direct_mode_sz)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	enum i40e_memory_type mem_type;
	bool dma_mem_alloc_done = FALSE;
	struct i40e_dma_mem mem;
	u64 alloc_len;

	if (NULL == hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
		goto exit;
	}

	if (sd_index >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
		goto exit;
	}

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (I40E_SD_TYPE_PAGED == type) {
			mem_type = i40e_mem_pd;
			alloc_len = I40E_HMC_PAGED_BP_SIZE;
		} else {
			mem_type = i40e_mem_bp_jumbo;
			alloc_len = direct_mode_sz;
		}

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = TRUE;
		if (I40E_SD_TYPE_PAGED == type) {
			ret_code = i40e_allocate_virt_mem(hw,
					&sd_entry->u.pd_table.pd_entry_virt_mem,
					sizeof(struct i40e_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry =
				(struct i40e_hmc_pd_entry *)
				sd_entry->u.pd_table.pd_entry_virt_mem.va;
			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
		} else {
			i40e_memcpy(&sd_entry->u.bp.addr,
				    &mem, sizeof(struct i40e_dma_mem),
				    I40E_NONDMA_TO_NONDMA);
			sd_entry->u.bp.sd_pd_index = sd_index;
		}
		/* initialize the sd entry */
		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		/* increment the ref count */
		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* Increment backing page reference count */
	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	if (I40E_SUCCESS != ret_code)
		if (dma_mem_alloc_done)
			i40e_free_dma_mem(hw, &mem);

	return ret_code;
}
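/*
 * Illustrative sketch only, not called by the driver: how a caller selects
 * the allocation behavior above through @type.  For I40E_SD_TYPE_PAGED the
 * 4K PD page size is fixed and @direct_mode_sz is ignored, so passing 0 is
 * harmless; for I40E_SD_TYPE_DIRECT the caller supplies the backing size
 * itself.  The helper name below is hypothetical.
 */
static enum i40e_status_code
i40e_example_add_paged_sd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
			  u32 sd_index)
{
	/* paged SD: allocates the 4K PD page plus the virtual pd_entry array */
	return i40e_add_sd_table_entry(hw, hmc_info, sd_index,
				       I40E_SD_TYPE_PAGED, 0);
}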
/**
 * i40e_add_pd_table_entry - Adds page descriptor to the specified table
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 **/
enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 pd_index,
					      struct i40e_dma_mem *rsrc_pg)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_dma_mem mem;
	struct i40e_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
		goto exit;
	}

	/* find corresponding sd */
	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
	if (I40E_SD_TYPE_PAGED !=
	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
		goto exit;

	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = TRUE;
			page = rsrc_pg;
		} else {
			/* allocate a 4K backing page */
			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
						I40E_HMC_PAGED_BP_SIZE,
						I40E_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				goto exit;
			pd_entry->rsrc_pg = FALSE;
		}

		i40e_memcpy(&pd_entry->bp.addr, page,
			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
		/* Set page address and valid bit */
		page_desc = page->pa | 0x1;

		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		/* Add the backing page physical address in the pd entry */
		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
			    I40E_NONDMA_TO_DMA);

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = TRUE;
		I40E_INC_PD_REFCNT(pd_table);
	}
	I40E_INC_BP_REFCNT(&pd_entry->bp);
exit:
	return ret_code;
}
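/*
 * Illustrative sketch only, not part of the driver: how the two add paths
 * above compose for one absolute page descriptor index, mirroring the
 * sd_idx/rel_pd_idx arithmetic in i40e_add_pd_table_entry.  The helper name
 * is hypothetical; programming the new SD into the PF's HMC registers (done
 * elsewhere in the driver) and rollback of the SD entry are omitted.
 */
static enum i40e_status_code
i40e_example_map_pd(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info,
		    u32 pd_index)
{
	u32 sd_index = pd_index / I40E_HMC_PD_CNT_IN_SD;
	enum i40e_status_code ret_code;

	/* ensure the owning paged SD exists; a no-op when already valid */
	ret_code = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
					   I40E_SD_TYPE_PAGED, 0);
	if (ret_code != I40E_SUCCESS)
		return ret_code;

	/* then hook a freshly allocated 4K backing page into that SD */
	return i40e_add_pd_table_entry(hw, hmc_info, pd_index, NULL);
}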
/**
 * i40e_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) or in sd table
 *	   (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
					struct i40e_hmc_info *hmc_info,
					u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_entry *pd_entry;
	struct i40e_hmc_pd_table *pd_table;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;

	/* calculate index */
	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
		goto exit;
	}
	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
		goto exit;
	}
	/* get the entry and decrease its ref counter */
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40E_DEC_BP_REFCNT(&pd_entry->bp);
	if (pd_entry->bp.ref_cnt)
		goto exit;

	/* mark the entry invalid */
	pd_entry->valid = FALSE;
	I40E_DEC_PD_REFCNT(pd_table);
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);

	/* free memory here */
	if (!pd_entry->rsrc_pg)
		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
	if (I40E_SUCCESS != ret_code)
		goto exit;
	if (!pd_table->ref_cnt)
		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
exit:
	return ret_code;
}

/**
 * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 **/
enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
					     u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
	if (sd_entry->u.bp.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}
	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);

	/* mark the entry invalid */
	sd_entry->valid = FALSE;
exit:
	return ret_code;
}

/**
 * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
					    struct i40e_hmc_info *hmc_info,
					    u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	/* get the entry and decrease its ref counter */
	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);

	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
}
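/*
 * Illustrative sketch only, not part of the driver: the two-step teardown of
 * a direct-mode SD's backing page.  i40e_prep_remove_sd_bp only succeeds once
 * the backing-page ref count drops to zero; only then is the entry cleared in
 * hardware and the DMA memory freed.  The helper name is hypothetical and the
 * PF-only path is assumed (is_pf == TRUE).
 */
static enum i40e_status_code
i40e_example_remove_direct_sd(struct i40e_hw *hw,
			      struct i40e_hmc_info *hmc_info, u32 sd_index)
{
	if (i40e_prep_remove_sd_bp(hmc_info, sd_index) != I40E_SUCCESS)
		return I40E_SUCCESS;	/* still referenced, nothing to free */

	return i40e_remove_sd_bp_new(hw, hmc_info, sd_index, TRUE);
}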
/**
 * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 **/
enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.ref_cnt) {
		ret_code = I40E_ERR_NOT_READY;
		goto exit;
	}

	/* mark the entry invalid */
	sd_entry->valid = FALSE;

	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
exit:
	return ret_code;
}

/**
 * i40e_remove_pd_page_new - Removes a PD page from sd entry.
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 * @is_pf: used to distinguish between VF and PF
 **/
enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
					      struct i40e_hmc_info *hmc_info,
					      u32 idx, bool is_pf)
{
	struct i40e_hmc_sd_entry *sd_entry;

	if (!is_pf)
		return I40E_NOT_SUPPORTED;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);

	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
}
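/*
 * Illustrative sketch only, not part of the driver: teardown of a fully
 * populated paged SD.  Each PD backing page is released first (which also
 * clears its slot in the PD page and invalidates it through PMPDINV), and
 * the PD page itself is freed only once its ref count reaches zero.  The
 * helper name is hypothetical and the PF-only path is assumed.
 */
static enum i40e_status_code
i40e_example_remove_paged_sd(struct i40e_hw *hw,
			     struct i40e_hmc_info *hmc_info, u32 sd_index)
{
	u32 pd_index = sd_index * I40E_HMC_PD_CNT_IN_SD;
	enum i40e_status_code ret_code;
	u32 i;

	/* drop every PD backing page owned by this SD */
	for (i = 0; i < I40E_HMC_PD_CNT_IN_SD; i++) {
		ret_code = i40e_remove_pd_bp(hw, hmc_info, pd_index + i);
		if (ret_code != I40E_SUCCESS)
			return ret_code;
	}

	/* free the PD page once nothing references it anymore */
	if (i40e_prep_remove_pd_page(hmc_info, sd_index) != I40E_SUCCESS)
		return I40E_SUCCESS;	/* still referenced elsewhere */

	return i40e_remove_pd_page_new(hw, hmc_info, sd_index, TRUE);
}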