/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory for the function required, based
 * on the number of resources it must provide context for.
 **/
static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
				     u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
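/* Worked example (I40E_HMC_L2OBJ_BASE_ALIGNMENT is 512 bytes): an offset of
 * 0x7300 is 0x100 past a 0x200 boundary, so i40e_align_l2obj_base() adds
 * 0x200 - 0x100 and returns 0x7400.  Already-aligned offsets come back
 * unchanged, which lets i40e_calculate_l2fpm_size() re-align after every
 * region without over-counting.
 */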
/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
			      u32 rxq_num, u32 fcoe_cntx_num,
			      u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	i40e_status ret_code = 0;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;
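	/* The FPM layout assembled here is sequential: each object region
	 * begins at the 512-byte aligned end of the previous one,
	 * i.e. (illustrative):
	 *
	 *	0 .................... Tx queue contexts
	 *	align(Tx end) ........ Rx queue contexts
	 *	align(Rx end) ........ FCoE DDP contexts
	 *	align(FCoE ctx end) .. FCoE filters
	 */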
	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
		       fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		/* one segment descriptor covers I40E_HMC_DIRECT_BP_SIZE bytes
		 * of FPM, so round the total size up to whole SDs
		 */
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by the pd after this
 *	   function returns.
 **/
static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
				       struct i40e_hmc_info *hmc_info,
				       u32 idx)
{
	i40e_status ret_code = 0;

	if (!i40e_prep_remove_pd_page(hmc_info, idx))
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);

	return ret_code;
}
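/* Background for the removal helpers here: the FPM address space is carved
 * into 2MB segment descriptors (SDs).  A direct-mode SD is backed by one
 * contiguous 2MB page, while a paged-mode SD points at a page descriptor
 * (PD) table whose entries are each backed by a 4KB page (see the
 * I40E_HMC_* sizes in i40e_hmc.h; values noted here for orientation only).
 */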
/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
 *	   cleared) and PMSDDATAHIGH registers to invalidate the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after
 *	   this function returns.
 **/
static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
				     struct i40e_hmc_info *hmc_info,
				     u32 idx)
{
	i40e_status ret_code = 0;

	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;
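	/* The loop below touches each SD in [sd_idx, sd_lmt): it adds the
	 * SD table entry, populates every in-range PD for paged SDs
	 * (unwinding the partial PD set on failure), and programs the SD
	 * into the PF's SD registers the first time the entry becomes
	 * valid.
	 */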
	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
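/* Typical PF bring-up sequence (illustrative sketch; the real driver takes
 * the queue counts from the function capabilities, e.g.
 * hw->func_caps.num_tx_qp):
 *
 *	ret = i40e_init_lan_hmc(hw, num_tx_qp, num_rx_qp, 0, 0);
 *	if (!ret)
 *		ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
 */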
/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
				   enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	i40e_status ret_code = 0;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
		       ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created.
	 * The BASE registers take the object's FPM offset in units of
	 * 512 bytes, hence the division below.
	 */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
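/* Teardown mirrors creation in reverse order: the delete path below removes
 * PD backing pages before their parent PD pages and SD entries, so the
 * hardware never holds a valid SD that points at freed pages.
 */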
/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 **/
static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	i40e_status ret_code = 0;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
		       ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}
/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	i40e_status ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

/* One packed field of an HMC context: where the value lives in the
 * driver-side shadow struct (offset, size_of) and where it lands in the
 * little-endian hardware context image (width in bits, lsb bit position).
 */
struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
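/* Each {field, width, lsb} entry above is consumed by i40e_set_hmc_context()
 * further down: the size_of recorded by I40E_HMC_STORE() routes the field to
 * the byte/word/dword/qword write helper, which packs the value into the
 * little-endian context image at the given bit position.
 */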
/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}
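/* Worked example for i40e_write_word() (illustrative): the Tx qlen entry
 * has width 13 and lsb 33 + 128 = 161, and its shadow field is a u16, so
 * size_of is 2 and i40e_set_hmc_context() dispatches it here.  The value is
 * masked to 13 bits, shifted left by 161 % 8 = 1, and merged into the two
 * context bytes starting at offset 161 / 8 = 20.
 */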
/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw: the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
					  u8 *context_bytes,
					  enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);

	return 0;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the driver-side struct whose values are written into the context
 **/
static i40e_status i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return 0;
}
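/* Address resolution sketch for the helper below (illustrative numbers):
 * with an object size of 32 bytes and an FPM base of 0x2200, object 5 sits
 * at FPM offset 0x2200 + 5 * 32 = 0x22a0.  A direct-mode SD maps that
 * straight into its 2MB backing page; a paged-mode SD first picks
 * PD 0x22a0 / 4096 = 2, then uses offset 0x22a0 % 4096 = 0x2a0 within that
 * 4KB page.
 */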
/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the HW structure
 * @object_base: pointer that receives the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw,
				   u8 **object_base,
				   enum i40e_hmc_lan_rsrc_type rsrc_type,
				   u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	i40e_status ret_code = 0;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
		       ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
					    u16 queue)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
					  u16 queue,
					  struct i40e_hmc_obj_txq *s)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}
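/* Typical use when bringing up a Tx ring (illustrative sketch; the real
 * driver fills many more fields of the shadow struct, and pf_q is a
 * hypothetical PF-relative queue index):
 *
 *	struct i40e_hmc_obj_txq tx_ctx;
 *
 *	memset(&tx_ctx, 0, sizeof(tx_ctx));
 *	tx_ctx.base = ring_dma / 128;
 *	tx_ctx.qlen = ring_count;
 *	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
 *	if (!err)
 *		err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
 */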

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
					    u16 queue)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
					  u16 queue,
					  struct i40e_hmc_obj_rxq *s)
{
	i40e_status err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}