/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2021 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"

/**
 * irdma_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *sd_idx,
			  u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
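	/*
	 * Convert the FPM byte range into segment descriptor indices;
	 * sd_limit is exclusive (index of the last SD touched, plus one).
	 * As a worked sketch, assuming the 2MB direct backing-page size
	 * this driver uses for IRDMA_HMC_DIRECT_BP_SIZE: a range spanning
	 * bytes 0x1FFFFF through 0x200001 touches SDs 0 and 1, so
	 * sd_idx = 0 and sd_limit = 2.
	 */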
	*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}

/**
 * irdma_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *pd_idx,
			  u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
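	/*
	 * Same conversion as irdma_find_sd_index_limit(), but in units of
	 * 4K paged backing pages; pd_limit is likewise exclusive.
	 */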
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}

/**
 * irdma_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
	entry->data = pa | (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
	    (((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S) |
	    (1 << IRDMA_PFHMC_SDDATALOW_PMSDVALID_S);
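	/*
	 * Command word: the SD index in the low bits plus the PMSDWR write
	 * strobe. Bit 15 is presumably the SD partition select (it lines up
	 * with PMSDPARTSEL in related Intel HMC hardware), but the driver
	 * encodes it as a bare (1 << 15), so treat that reading as an
	 * assumption.
	 */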
	entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
}

/**
 * irdma_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
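	/*
	 * Same encoding as irdma_set_sd_entry(), but with no physical
	 * address and PMSDVALID left clear, so writing this entry marks
	 * the SD invalid in hardware.
	 */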
	entry->data = (IRDMA_HMC_MAX_BP_COUNT << IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT_S) |
	    (((type == IRDMA_SD_TYPE_PAGED) ? 0 : 1) << IRDMA_PFHMC_SDDATALOW_PMSDTYPE_S);
	entry->cmd = (idx | (1 << IRDMA_PFHMC_SDCMD_PMSDWR_S) | (1 << 15));
}

/**
 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
 * @dev: pointer to our device struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 */
static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
			   u32 pd_idx)
{
	u32 val = LS_32(sd_idx, IRDMA_PFHMC_PDINV_PMSDIDX) |
	    LS_32(1, IRDMA_PFHMC_PDINV_PMSDPARTSEL) |
	    LS_32(pd_idx, IRDMA_PFHMC_PDINV_PMPDIDX);

	writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}

/**
 * irdma_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
int
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
		 enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}

/**
 * irdma_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static int
irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
		 struct irdma_hmc_info *hmc_info, u32 sd_index,
		 u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {0};
	u64 pa;
	u32 i;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry || (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
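		/*
		 * A single update-SD CQP command carries at most
		 * IRDMA_MAX_SD_ENTRIES entries; flush each full batch
		 * before accumulating more.
		 */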
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static int
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if (!info->add_sd_cnt)
		return 0;
	return irdma_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0], info->add_sd_cnt,
				true);
}

/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
int
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error type %u, start = %u, req cnt %u, cnt = %u\n",
			    info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
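			/*
			 * Clamp the object's global PD range to the window
			 * owned by this SD: IRDMA_HMC_MAX_BP_COUNT page
			 * slots starting at j * IRDMA_HMC_MAX_BP_COUNT.
			 */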
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
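	/*
	 * Unwind in reverse: stage removal of every SD set up before the
	 * failure so HMC bookkeeping returns to its state on entry.
	 */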
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static int
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
			struct irdma_hmc_del_obj_info *info,
			bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	struct irdma_dma_mem *mem;
	u32 i, sd_idx;

	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd sd_grp\n");
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];

		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
		    &sd_entry->u.pd_table.pd_page_addr :
		    &sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd mem\n");
		else
			irdma_free_dma_mem(dev->hw, mem);
	}

	return ret_code;
}

/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees the memory for PDs
 * and backing storage.  After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping
 * information about PDs and backing storage.
 */
int
irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
		     struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d]  >= [type %04d].cnt[%04d]\n",
			    info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\n",
			    info->start_idx, info->count, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

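	/*
	 * First pass: free the backing page behind every valid paged PD in
	 * the range before the SDs themselves are torn down.
	 */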
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC, "invalid sd_idx\n");
		return -EINVAL;
	}

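	/*
	 * Second pass: stage each SD in the range for removal. Direct SDs
	 * drop a backing-page reference; paged SDs are marked invalid and,
	 * for a non-PF PBLE table, have their PD bookkeeping freed. The
	 * actual CQP clear and DMA free happen in irdma_finish_del_sd_reg().
	 */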
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
				    (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}

/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
int
irdma_add_sd_table_entry(struct irdma_hw *hw,
			 struct irdma_hmc_info *hmc_info, u32 sd_index,
			 enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = alloc_len;
		dma_mem.va = irdma_allocate_dma_mem(hw, &dma_mem, dma_mem.size,
						    IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		if (!dma_mem.va)
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
			    &sd_entry->u.pd_table.pd_entry_virt_mem;

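			/*
			 * One tracking entry per PD slot; the 512 below is
			 * understood to match IRDMA_HMC_PD_CNT_IN_SD, since
			 * a paged 2M SD covers 512 4K backing pages.
			 */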
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_ATOMIC);
			if (!vmem->va) {
				irdma_free_dma_mem(hw, &dma_mem);
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			irdma_memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
				     sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			irdma_memcpy(&sd_entry->u.bp.addr, &dma_mem,
				     sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}

/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the irdma_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for the pd should be pinned down, physically
 *	   contiguous, aligned on a 4K boundary, and zeroed.
 *	2. It should be 4K in size.
 */
int
irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			 struct irdma_hmc_info *hmc_info, u32 pd_index,
			 struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = IRDMA_HMC_PAGED_BP_SIZE;
			page->va = irdma_allocate_dma_mem(dev->hw, page,
							  page->size,
							  IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		irdma_memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
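		/*
		 * The low bit of a PD descriptor marks it valid. Write the
		 * descriptor into this PD's slot within the SD's PD page,
		 * then tell the hardware to drop any cached copy of it.
		 */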
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		irdma_memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}

/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in
 *	   the sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after
 *	   this function returns.
 */
int
irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		   struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
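	/*
	 * Drop one reference; only the last release clears the descriptor,
	 * invalidates the hardware's cached copy, and frees the page.
	 */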
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	irdma_memset(pd_addr, 0, sizeof(u64));
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		irdma_free_dma_mem(dev->hw, mem);
	}
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}

/**
 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
int
irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return -EBUSY;

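	/*
	 * Marking the entry invalid here only stages the removal; the SD
	 * is cleared via CQP and its DMA memory freed later, in
	 * irdma_finish_del_sd_reg().
	 */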
	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}

/**
 * irdma_prep_remove_pd_page - Prepares to remove a PD page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
int
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.use_cnt)
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}