xref: /freebsd/sys/dev/ixl/i40e_lan_hmc.c (revision 98e0ffaefb0f241cda3a72395d3be04192ae0d47)
/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
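
/*
 * Worked example (illustrative only, assuming I40E_HMC_L2OBJ_BASE_ALIGNMENT
 * is 512, per the 512-byte alignment noted above): an offset of 0x1234
 * (4660) leaves a remainder of 4660 % 512 = 52, so 512 - 52 = 460 bytes of
 * pad are added and the function returns 0x1400 (5120).  An already-aligned
 * offset such as 0x1400 is returned unchanged.
 */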

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory required for the function, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
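
/*
 * Worked example (illustrative only; the real object sizes come from the
 * I40E_GLHMC_*OBJSZ registers): assuming a 128-byte Tx queue context and a
 * 32-byte Rx queue context, 128 Tx queues take 16384 bytes (already
 * 512-byte aligned), 128 Rx queues add 4096 bytes, and with no FCoE objects
 * the total l2fpm size is 20480 bytes.
 */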

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = (u64)1 << size_exp;

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
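
/*
 * Usage sketch (illustrative only, not part of the shared code): a driver
 * typically sizes the HMC from its queue configuration during attach and
 * then programs the backing store with i40e_configure_lan_hmc() below.
 * The queue counts shown here are hypothetical.
 */
#if 0
	enum i40e_status_code status;

	/* size contexts for 64 Tx and 64 Rx queues, no FCoE resources */
	status = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
	if (status == I40E_SUCCESS)
		status = i40e_configure_lan_hmc(hw,
						I40E_HMC_MODEL_DIRECT_PREFERRED);
#endif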

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by pd after this function
 *	   returns.
 **/
static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID set
 *	   to 0) and PMSDDATAHIGH registers to invalidate the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This covers cases where the caller wants an SD backed by something
	 * smaller than the full 2M of memory. If no size is filled out, the
	 * SD size defaults to 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			}
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else: success, fall through to the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
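
/*
 * Note on the register writes above: each FPM base is divided by 512
 * because the GLHMC_*BASE registers take the base in 512-byte units, which
 * matches the 512-byte alignment enforced by i40e_align_l2obj_base().
 * Also note that with I40E_HMC_MODEL_DIRECT_PREFERRED, a failed direct-mode
 * allocation falls back to the paged layout at try_type_paged rather than
 * failing the configuration outright.
 */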

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate memory previously allocated for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};
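
/*
 * For example, the table entry
 *	{I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0}
 * expands to
 *	{offsetof(struct i40e_hmc_obj_txq, head),
 *	 FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0}
 * i.e. the struct offset and byte size of the 'head' field, a 13-bit
 * on-wire width, and bit 0 as the field's LSB within the context.
 */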

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u8)1 << ce_info->width) - 1;

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
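
/*
 * Worked example (illustrative only): for a 4-bit field with lsb = 10, the
 * byte touched is hmc_bits[1] (10 / 8) and shift_width is 2 (10 % 8).  The
 * mask 0x0f becomes 0x3c after shifting, so only bits 2..5 of that byte are
 * replaced while the surrounding bits are preserved.  The word, dword, and
 * qword variants below apply the same mask-and-merge technique to wider
 * fields, with an endianness swizzle on the destination.
 */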

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u16)1 << ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = ((u32)1 << ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = ((u64)1 << ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u8)1 << ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	/* keep only the bits of this field (the original "&= ~mask" kept
	 * everything except the field)
	 */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = ((u16)1 << ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly; keep only the bits of this field
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = ((u32)1 << ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly; keep only the bits of this field
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = ((u64)1 << ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly; keep only the bits of this field
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw:       the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info:  a description of the struct to be read from
 * @dest:     the struct whose fields supply the new values
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to be filled with the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info     *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		/* set the error before logging so the message shows it */
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
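
/*
 * Worked example (illustrative only): with a 128-byte Tx context size and a
 * base of 0, queue 5 lives at FPM offset 640.  In direct mode that is
 * simply 640 bytes into the single backing page; in paged mode the offset
 * selects page 0 at byte 640 within that 4KB backing page, assuming
 * I40E_HMC_PAGED_BP_SIZE is 4096.
 */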

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}
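/*
 * Usage sketch (illustrative only, not part of the shared code): a driver
 * bringing up a Tx queue typically zeroes the queue's HMC context and then
 * writes a populated i40e_hmc_obj_txq into it.  The field values and the
 * ring_dma_addr variable shown here are hypothetical.
 */
#if 0
	struct i40e_hmc_obj_txq txq_ctx = {0};
	enum i40e_status_code status;
	u16 queue = 0;

	txq_ctx.base = ring_dma_addr / 128;	/* base is in 128-byte units */
	txq_ctx.qlen = 1024;			/* descriptors in the ring */
	txq_ctx.new_context = 1;

	status = i40e_clear_lan_tx_queue_context(hw, queue);
	if (status == I40E_SUCCESS)
		status = i40e_set_lan_tx_queue_context(hw, queue, &txq_ctx);
#endif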
1414