// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;

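/*
 * Return the current queue page and advance the iterator by one HEA
 * queue page.  Returns NULL once the end of the queue is reached, or
 * if the iterator has left a page boundary, which indicates a
 * corrupted queue layout.
 */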
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		pr_err("not on page boundary\n");
		retvalue = NULL;
	}
	return retvalue;
}

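/*
 * Allocate the backing memory for a hardware queue: nr_of_pages HEA
 * queue pages of size pagesize, carved out of zeroed kernel pages.
 * Returns 0 on success or a negative errno.
 */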
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			  const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kcalloc(nr_of_pages, sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages)
		return -ENOMEM;

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);

		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;
out_nomem:
	/* the array is zeroed by kcalloc; the first NULL entry ends cleanup */
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	kfree(queue->queue_pages);
	return -ENOMEM;
}

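/*
 * Free the kernel pages backing a hardware queue.  Only every
 * pages_per_kpage-th entry points at the start of a kernel page; the
 * entries in between address sub-pages within the same kernel page.
 */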
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;

	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}

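/*
 * Allocate a completion queue: reserve the firmware resource, build
 * the backing hw_queue and register each queue page with the
 * hypervisor.  The final hw_qpageit_get_inc() call must return NULL,
 * proving that exactly attr.nr_pages pages were registered.
 */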
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	u64 hret, rpage;
	u32 counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		goto out_nomem;

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}

static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}

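/*
 * Allocate an event queue of the given type and register its backing
 * pages with the hypervisor, mirroring the scheme used for completion
 * queues in ehea_create_cq() above.
 */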
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq)
		return NULL;

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);

	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);

	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}

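/*
 * WQE sizes are communicated by firmware as an encoded value: the
 * real size in bytes is 128 << wqe_enc_size.
 */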
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}

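/*
 * Allocate a queue pair: reserve the firmware QP resource, then build
 * and register the send queue and up to three receive queues
 * according to init_attr->rq_count.
 */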
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}

static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);

	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}

static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0

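/*
 * Walk the whole three-level busmap and assign a new, contiguous bus
 * address to every valid section entry, starting at EHEA_BUSMAP_START.
 * Directory and top levels left without any valid entries are freed
 * along the way.
 */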
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}

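/*
 * Mark the sections covering [pfn, pfn + nr_pages) as valid (add) or
 * invalid (remove) in the busmap, then rebuild the map so that all
 * valid sections receive contiguous bus addresses.  Caller must hold
 * ehea_busmap_mutex.
 */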
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);

			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

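/*
 * Return 1 if pfn is the first page of an eHEA hugepage (16GB),
 * 0 otherwise.
 */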
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

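/*
 * walk_system_ram_range() callback: add a chunk of system RAM to the
 * busmap.  Chunks of at least EHEA_HUGEPAGE_SIZE are scanned for
 * hugepages, which are skipped; only the regular memory before and
 * after them is added.
 */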
static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else {
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
		}
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}

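/*
 * Translate a kernel virtual address into the bus address that was
 * assigned to its memory section in the busmap, or EHEA_INVAL_ADDR if
 * the section is not mapped.
 */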
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}

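/*
 * Register one memory section with the MR: walk its EHEA-sized pages
 * in batches of EHEA_MAX_RPAGE, filling the page table at pt with
 * their physical addresses and handing each batch to the hypervisor.
 */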
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);

	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

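/*
 * Register all memory currently tracked in the busmap as one kernel
 * memory region.  On success mr->vaddr is set to EHEA_BUSMAP_START,
 * matching the contiguous bus addresses assigned by
 * ehea_rebuild_busmap().
 */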
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}

static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

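/*
 * Fetch the error data block for a resource handle from firmware, log
 * it, and extract the AER/AERR words for the caller.  Returns the
 * resource type from the block, or 0 if no data could be fetched.
 */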
u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else {
		pr_err("Error data could not be fetched: %llX\n", res_handle);
	}

	free_page((unsigned long)rblock);
out:
	return type;
}
1003