/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"

/**
 * irdma_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *sd_idx,
			  u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
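	/*
	 * sd_limit is one past the last segment descriptor touched:
	 * dividing the last byte (fpm_limit - 1) by
	 * IRDMA_HMC_DIRECT_BP_SIZE and adding 1 rounds up, so a
	 * partially covered SD still counts.
	 */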
	*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}

/**
 * irdma_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *pd_idx,
			  u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
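	/* Same round-up as the SD case, at IRDMA_HMC_PAGED_BP_SIZE (4K page) granularity. */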
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}

/**
 * irdma_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
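	/*
	 * Build the SDDATALOW word: the backing-page physical address OR'd
	 * with the backing-page count, the SD type (0 = paged, 1 = direct)
	 * and the valid bit; the companion cmd word selects the SD slot
	 * and requests a write.
	 */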
	entry->data = pa |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
		       type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
	    IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}

/**
 * irdma_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
	entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
		       type == IRDMA_SD_TYPE_PAGED ? 0 : 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
	    IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}

/**
 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
 * @dev: pointer to our device struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 */
static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
			   u32 pd_idx)
{
	u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
		  FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);

	writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}

/**
 * irdma_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
int
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
		 enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}

/**
 * irdma_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static int
irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
		 struct irdma_hmc_info *hmc_info, u32 sd_index,
		 u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {0};
	u64 pa;
	u32 i;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
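	/*
	 * Accumulate SD updates in sdinfo and flush them to the CQP in
	 * batches of IRDMA_MAX_SD_ENTRIES, with a final flush for any
	 * remainder after the loop.
	 */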
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
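		/*
		 * Program only entries the soft state has marked valid;
		 * clear only entries already invalidated. Anything else
		 * needs no hardware update.
		 */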
		if (!sd_entry || (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static int
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if (!info->add_sd_cnt)
		return 0;
	return irdma_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0], info->add_sd_cnt,
				true);
}

/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
int
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error type %u, start = %u, req cnt %u, cnt = %u\n",
			    info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

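	/*
	 * For each SD backing the object range: add (or reuse) the SD
	 * table entry, and, for paged SDs of the local HMC function
	 * (PBLE excluded), populate the PD entries that fall inside
	 * this SD.
	 */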
	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
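			/*
			 * Clamp the object's global PD range to the window
			 * covered by SD j: each SD spans
			 * IRDMA_HMC_MAX_BP_COUNT page descriptors.
			 */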
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
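	/*
	 * Unwind in reverse: mark every SD already processed invalid
	 * again, clearing the PD entries staged under paged SDs.
	 */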
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static int
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
			struct irdma_hmc_del_obj_info *info,
			bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	struct irdma_dma_mem *mem;
	u32 i, sd_idx;

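	/* In the reset path, skip the CQP SD clear and only free the backing memory. */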
	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd_grp\n");
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];

		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
		    &sd_entry->u.pd_table.pd_page_addr :
		    &sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd mem\n");
		else
			irdma_free_dma_mem(dev->hw, mem);
	}

	return ret_code;
}

/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs.  It frees the memory for PDs
 * and backing storage.  After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping
 * information about PDs and backing storage.
 */
int
irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
		     struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d] + count %04d > [type %04d].cnt[%04d]\n",
			    info->start_idx, info->count, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

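	/*
	 * First pass: remove the backing pages of every valid PD in the
	 * range that lives under a paged SD; direct SDs are handled in
	 * the SD pass below.
	 */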
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC, "invalid sd_idx\n");
		return -EINVAL;
	}

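	/*
	 * Second pass: stage each valid SD for removal (recording its
	 * index for the CQP update) and drop the soft PD bookkeeping for
	 * paged SDs that belong to another function's PBLE resources.
	 */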
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
				    (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}

/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
int
irdma_add_sd_table_entry(struct irdma_hw *hw,
			 struct irdma_hmc_info *hmc_info, u32 sd_index,
			 enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = alloc_len;
		dma_mem.va = irdma_allocate_dma_mem(hw, &dma_mem, dma_mem.size,
						    IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		if (!dma_mem.va)
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
			    &sd_entry->u.pd_table.pd_entry_virt_mem;

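			/* One soft PD entry per PD slot in the SD (512, matching IRDMA_HMC_PD_CNT_IN_SD). */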
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				irdma_free_dma_mem(hw, &dma_mem);
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			irdma_memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
				     sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			irdma_memcpy(&sd_entry->u.bp.addr, &dma_mem,
				     sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}

/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds the pd_entry to the pd_table
 *	3. Marks the entry valid in the irdma_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for the pd should be pinned down, physically contiguous,
 *	   aligned on a 4K boundary and zeroed.
 *	2. It should be 4K in size.
 */
int
irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			 struct irdma_hmc_info *hmc_info, u32 pd_index,
			 struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = IRDMA_HMC_PAGED_BP_SIZE;
			page->va = irdma_allocate_dma_mem(dev->hw, page,
							  page->size,
							  IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		irdma_memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
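		/* Bit 0 of the descriptor written to the PD page marks the entry valid. */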
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		irdma_memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}

/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in the
 *	   sd table (for direct address mode) invalid.
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for the pd_entry
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
int
irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		   struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
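	/* Tear down only when the last user of the backing page is gone. */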
	if (--pd_entry->bp.use_cnt)
		return 0;

	pd_entry->valid = false;
	pd_table->use_cnt--;
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	irdma_memset(pd_addr, 0, sizeof(u64));
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		irdma_free_dma_mem(dev->hw, mem);
	}
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}

/**
 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
int
irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return -EBUSY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}

/**
 * irdma_prep_remove_pd_page - Prepares to remove a PD page from a sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
int
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.use_cnt)
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}
