/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
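
/*
 * Illustrative note (not part of the driver): with the 512-byte
 * I40E_HMC_L2OBJ_BASE_ALIGNMENT, an offset of 600 leaves a remainder of 88
 * and is padded by 512 - 88 = 424 up to 1024.  Because the alignment is a
 * power of two, the same rounding can be written branchlessly; a minimal
 * sketch of an equivalent helper:
 */
#if 0	/* example only */
static u64 i40e_example_align_pow2(u64 offset)
{
	/* round up to the next multiple of the power-of-two alignment */
	return (offset + I40E_HMC_L2OBJ_BASE_ALIGNMENT - 1) &
	       ~((u64)I40E_HMC_L2OBJ_BASE_ALIGNMENT - 1);
}
#endif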

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory the function requires, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
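
/*
 * Worked example (illustrative only; the per-object sizes are assumed here,
 * the real values come from i40e_lan_hmc.h): if a Tx context took 128 bytes
 * and an Rx context 32 bytes, then 64 Tx and 64 Rx queues with no FCoE
 * resources would need 64 * 128 = 8192 bytes (already 512-byte aligned)
 * plus 64 * 32 = 2048 bytes, i.e. 10240 bytes of FPM for the function.
 */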

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto free_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
free_hmc_out:
	if (hw->hmc.hmc_obj_virt_mem.va)
		i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);

	return ret_code;
}
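
/*
 * Usage sketch (illustrative only, not part of the driver; the wrapper and
 * queue counts are hypothetical): a PF driver typically sizes the HMC once
 * at init time and then lays it out, e.g.:
 */
#if 0	/* example only */
static enum i40e_status_code i40e_example_hmc_init(struct i40e_hw *hw,
						   u32 num_tx, u32 num_rx)
{
	enum i40e_status_code ret;

	/* no FCoE resources requested in this example */
	ret = i40e_init_lan_hmc(hw, num_tx, num_rx, 0, 0);
	if (ret != I40E_SUCCESS)
		return ret;

	/* prefer one big direct SD, let the HMC code fall back to paged */
	return i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
}
#endif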

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by the pd after this
 *	   function returns.
 **/
static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID set
 *	   to 0) and PMSDDATAHIGH registers to invalidate the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by the backing storage after
 *	   this function returns.
 **/
static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This covers cases where the caller wants an SD smaller than the
	 * full 2M of memory.  If no size is filled out, the function
	 * defaults the SD size to 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
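
/*
 * Illustrative sketch (not part of the driver; the wrapper is hypothetical):
 * backing store for a single resource type is requested by filling the
 * create-object info struct, e.g. direct-mode SDs for Tx contexts:
 */
#if 0	/* example only */
static enum i40e_status_code i40e_example_create_tx_backing(struct i40e_hw *hw,
							    u32 num_tx)
{
	struct i40e_hmc_lan_create_obj_info info;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_TX;
	info.start_idx = 0;
	info.count = num_tx;
	info.entry_type = I40E_SD_TYPE_DIRECT;
	info.direct_mode_sz = 0;	/* 0 selects the default 2M SD size */

	return i40e_create_lan_hmc_object(hw, &info);
}
#endif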

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
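
/*
 * Illustrative note (not part of the driver): the *BASE registers above hold
 * the FPM base address in 512-byte units, which is why each object base is
 * divided by 512.  An Rx context base at FPM offset 0x4000, for instance,
 * would be programmed as 0x4000 / 512 = 0x20.  The 512-byte object alignment
 * enforced by i40e_align_l2obj_base() guarantees the division is exact.
 */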

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees the memory for PDs and
 * backing storage.  After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping information
 * about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}
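
/*
 * Usage sketch (illustrative only): the expected lifecycle pairs the entry
 * points of this file as follows:
 *
 *	i40e_init_lan_hmc()      - size the objects, allocate bookkeeping
 *	i40e_configure_lan_hmc() - build SDs/PDs, program the FPM registers
 *	i40e_set/clear_lan_*_queue_context() - while the PF is active
 *	i40e_shutdown_lan_hmc()  - delete the objects, free bookkeeping
 */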

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};
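
/*
 * Illustrative note (not part of the driver): I40E_HMC_STORE supplies the
 * first two initializers of an i40e_context_ele, so a table entry such as
 *
 *	{I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0},
 *
 * expands to
 *
 *	{offsetof(struct i40e_hmc_obj_txq, head),
 *	 FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0},
 *
 * i.e. the 13-bit hardware field starting at context bit 0 is backed by the
 * 'head' member of the shadow struct.
 */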

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),         13,    0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),         8,   13   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),         57,   32   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),         13,   89   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),         7,  102   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),         5,  109   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),         2,  114   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),         1,  116   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),      1,  117   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),        1,  118   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),        1,  119   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),      4,  120   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),      2,  124   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),        1,  127   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),        14,  174   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena),  1,  193   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena),  1,  194   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),   1,  195   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),   1,  196   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),    3,  198   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),       1,  201   },
	{ 0 }
};
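
/*
 * Illustrative note (not part of the driver): each Width/LSB pair above
 * describes a field in a packed little-endian bit string.  Taking the rxq
 * 'dbuff' entry (width 7, lsb 102): the field begins in context byte
 * 102 / 8 = 12 at bit 102 % 8 = 6, so it straddles bytes 12 and 13 and is
 * moved with the shift-and-mask helpers that follow.
 */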

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
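
/*
 * Worked example (illustrative only, field values are hypothetical): for a
 * 3-bit field with lsb 12, shift_width = 12 % 8 = 4 and mask = 0x07.  A
 * source value of 5 becomes 0x50 after shifting, the shifted mask is 0x70,
 * and only bits 6:4 of context byte 12 / 8 = 1 are replaced; the remaining
 * bits of that byte are preserved by the dest_byte &= ~mask step.
 */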

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	/* keep only the field bits, then shift them down to bit 0 */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so keep only the
	 * field bits before swizzling
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so keep only the
	 * field bits before swizzling
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so keep only the
	 * field bits before swizzling
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw: the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be read from
 * @dest: the struct holding the values to be written
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to receive the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer. This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
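
/*
 * Illustrative note (not part of the driver; the Tx context size is an
 * assumed example value): with a Tx object base of 0 and 128-byte Tx
 * contexts, queue 10 sits at FPM offset 10 * 128 = 1280.  In direct mode
 * that offset is taken modulo the 2M backing page (I40E_HMC_DIRECT_BP_SIZE);
 * in paged mode it is taken modulo the 4KB backing page
 * (I40E_HMC_PAGED_BP_SIZE) within the PD located above.
 */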

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values to write
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values to write
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}
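
/*
 * Usage sketch (illustrative only; the wrapper, ring parameters and the
 * 128-byte base unit are assumptions, not taken from this file): a driver
 * typically clears the stale context and then writes a freshly filled one
 * when bringing up a Tx queue:
 */
#if 0	/* example only */
static enum i40e_status_code i40e_example_program_txq(struct i40e_hw *hw,
						      u16 queue, u64 ring_dma,
						      u16 ring_count)
{
	struct i40e_hmc_obj_txq tx_ctx;
	enum i40e_status_code err;

	i40e_memset(&tx_ctx, 0, sizeof(tx_ctx), I40E_NONDMA_MEM);
	tx_ctx.new_context = 1;
	tx_ctx.base = ring_dma / 128;	/* base assumed in 128-byte units */
	tx_ctx.qlen = ring_count;

	err = i40e_clear_lan_tx_queue_context(hw, queue);
	if (err != I40E_SUCCESS)
		return err;

	return i40e_set_lan_tx_queue_context(hw, queue, &tx_ctx);
}
#endif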