// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *) pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
			struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
	/* Start pbles on a 4k boundary */
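	/* each pble entry is 8 bytes, so shifting the byte remainder by 3
	 * converts it to the number of pbles skipped for alignment
	 */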
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
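	/* the sd index comes from the direct backing-page size, the pd index
	 * from the 4K paged backing-page size; rel_pd_idx is the pd's offset
	 * within its sd
	 */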
	idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

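	/* byte offset of this chunk within the sd's contiguous backing
	 * memory (rel_pd_idx 4K pages in)
	 */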
	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%p fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

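	/* fpm addresses are byte offsets; each pble entry is 8 bytes */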
	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

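	/* allocate DMA-mapped 4K pages to back this chunk, then add a pd
	 * table entry for each page that is not already valid
	 */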
	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - get the sd entry type to use for an sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

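	/* a direct sd needs a full, sd-aligned span of pds; prior to GEN_3
	 * it can only be used by a privileged function
	 */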
	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
		sd_entry_type = (!idx->rel_pd_idx &&
				 pages == IRDMA_HMC_PD_CNT_IN_SD) ?
				 IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	else
		sd_entry_type = (!idx->rel_pd_idx &&
				 pages == IRDMA_HMC_PD_CNT_IN_SD &&
				 dev->privileged) ?
				 IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
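	/* take the remainder of the current sd, capped by how many 4K pages
	 * the remaining unallocated pbles can fill (512 pbles per page)
	 */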
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
		     sd_entry->u.pd_table.pd_page_addr.pa :
		     sd_entry->u.bp.addr.pa;
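	/* program the new sd entry into hardware where this function is
	 * permitted to do so (privileged on older devices, or GEN_3+)
	 */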
	if ((dev->privileged && !sd_entry->valid) ||
	    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
					    sd_reg_val, idx->sd_idx,
					    sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;

	/* number of full 512-entry (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
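	/* the root pble holds one 8-byte index entry per leaf */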
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
			  lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routines
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @lvl: Bitmask for requested pble level
 */
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, u8 lvl)
{
	int status = 0;

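	/* prefer a single contiguous level-1 range; fall back to a level-2
	 * tree only when that fails, the caller allows it, and more than one
	 * page of pbles is needed
	 */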
	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: number of pbles requested
 * @lvl: requested pble level mask
 */
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   u8 lvl)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pbles without acquiring
	 * additional sds
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
	if (!status)
		goto exit;

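	/* each added sd covers 1 << 18 pbles (8 bytes each), so this bounds
	 * how many sds the request could possibly need
	 */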
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
		/* if level1_only, only go through it once */
		if (!status || lvl)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}