/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
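
/*
 * Worked example (editorial note, not part of the driver): with
 * I40E_HMC_L2OBJ_BASE_ALIGNMENT == 512 (0x200), an offset of 0x1234 has a
 * remainder of 0x34, so the function returns 0x1234 + (0x200 - 0x34) =
 * 0x1400, the next 512-byte boundary. An already-aligned offset such as
 * 0x1400 is returned unchanged.
 */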

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory required for the function, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
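
/*
 * Sizing sketch (illustrative; the real object sizes are read at runtime
 * from the GLHMC_*OBJSZ registers as powers of two): assuming a 128-byte
 * Tx queue context and a 32-byte Rx queue context, 1536 Tx and 1536 Rx
 * queues need 1536 * 128 = 196608 bytes plus 1536 * 32 = 49152 bytes, with
 * each segment rounded up to a 512-byte boundary before the next is placed
 * (both products are already multiples of 512 here), for 245760 bytes of
 * FPM space before any FCoE objects are added.
 */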

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
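
/*
 * Usage sketch (illustrative, not part of the driver): a PF driver is
 * expected to size and program the HMC once during init, e.g.:
 *
 *	enum i40e_status_code ret;
 *
 *	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 *				hw->func_caps.num_rx_qp, 0, 0);
 *	if (ret == I40E_SUCCESS)
 *		ret = i40e_configure_lan_hmc(hw,
 *					     I40E_HMC_MODEL_DIRECT_PREFERRED);
 *
 * The zero FCoE counts mirror what LAN-only drivers typically pass; the
 * hw->func_caps fields are an assumption about the caller's bookkeeping.
 */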

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in
 *	   FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. The caller can deallocate the memory used by the pd after this
 *	   function returns.
 **/
static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to registers PMSDCMD, PMSDDATALOW (with
 *	   PMSDDATALOW.PMSDVALID set to 0) and PMSDDATAHIGH to invalidate
 *	   the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. The caller can deallocate the memory used by the backing storage
 *	   after this function returns.
 **/
static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This covers cases where the caller may not want an SD with the
	 * full 2M of memory but something smaller. If no size is filled
	 * out, the function defaults the SD size to 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
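
/*
 * Layout note (editorial): each segment descriptor (SD) covers 2MB of FPM
 * address space, either as one direct 2MB backing page or, in paged mode,
 * through a PD table whose I40E_HMC_MAX_BP_COUNT entries each point at a
 * 4KB backing page; this is why i40e_create_lan_hmc_object() clamps
 * [pd_idx, pd_lmt) to the window of I40E_HMC_MAX_BP_COUNT PDs belonging
 * to the j-th SD.
 */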

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* on success, fall through to the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
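
/*
 * Editorial note: the FPM base registers hold the object base in 512-byte
 * units, which is why each write above divides obj->base by 512; for
 * example, a base of 0x30000 within the FPM is programmed as 0x180.
 */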

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs. It frees
 * the memory for PDs and backing storage. After this function returns,
 * the caller should deallocate the memory previously allocated for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};
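
/*
 * Expansion example (editorial note): the first Tx queue context entry
 * below,
 *
 *	{I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0},
 *
 * expands to
 *
 *	{offsetof(struct i40e_hmc_obj_txq, head),
 *	 FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0},
 *
 * i.e. "the 13-bit hardware field at context bit 0 shuttles to/from the
 * 'head' member of the shadow struct".
 */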

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field	      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),	13,	0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),	1,	30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),	57,	32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),	1,	89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1,	90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),	1,	91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1,	92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),	8,	96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),	13,	0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),	1,	32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),	13,	33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1,	46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1,	47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1,	48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64,	64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),		32,	0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),	10,	84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),	1,	94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field	  Width	   LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),	13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),	8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),	57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),	13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),	7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),	5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),	2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),	1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),	1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),	1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),	1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),	4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),	2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),	1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),	14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),	3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),	1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
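
/*
 * Worked example (editorial note): for a field with width = 3 and lsb = 13,
 * shift_width = 13 % 8 = 5 and mask = 0b00000111. A source value of 0b101
 * becomes 0b10100000 after the shift and is merged into byte 13 / 8 = 1 of
 * the context with mask 0b11100000, leaving the other five bits of that
 * byte untouched.
 */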

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
	dest_byte &= mask;	/* keep only the field bits */

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_word &= CPU_TO_LE16(mask);	/* keep only the field bits */

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}
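
/*
 * Worked example (editorial note): reading the same width = 3, lsb = 13
 * field used in the write example above, the word at byte 13 / 8 = 1 is
 * masked with 0b11100000 (via the little-endian-safe mask), converted to
 * host order, and shifted right by 5, recovering the original 3-bit value.
 */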

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_dword &= CPU_TO_LE32(mask);	/* keep only the field bits */

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= CPU_TO_LE64(mask);	/* keep only the field bits */

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw: the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be read from
 * @dest: the struct containing the values to be written
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to be filled in with the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer. This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
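
/*
 * Address math sketch (illustrative): for a direct-mode SD and a Tx queue
 * object with base 0 and an assumed size of 128 bytes, queue 10 lives at
 * FPM offset 10 * 128 = 1280, so *object_base ends up pointing 1280 bytes
 * into the 2MB backing page (1280 % I40E_HMC_DIRECT_BP_SIZE).
 */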

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}
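
/*
 * Usage sketch (illustrative, not part of the driver): programming a Tx
 * queue typically clears the stale context and then writes a populated
 * shadow struct, e.g.:
 *
 *	struct i40e_hmc_obj_txq txq = { 0 };
 *
 *	txq.base = ring_dma_addr / 128;	// base is kept in 128-byte units
 *	txq.qlen = ring_count;
 *	txq.new_context = 1;
 *
 *	if (i40e_clear_lan_tx_queue_context(hw, queue) == I40E_SUCCESS)
 *		(void)i40e_set_lan_tx_queue_context(hw, queue, &txq);
 *
 * The field assignments above are an abbreviated assumption about the
 * caller (ring_dma_addr, ring_count, and queue are hypothetical names);
 * real drivers fill in many more context fields.
 */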