/* xref: /linux/drivers/misc/cxl/guest.c (revision 14baf4d9c739e6e69150512d2eb23c71fffcc192) */
/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
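
/*
 * cxl backend for IBM PowerVM guests. Every operation the bare-metal
 * driver performs through PSL MMIO registers goes through an hcall
 * here (the cxl_h_* wrappers from hcalls.h); the hypervisor owns the
 * privileged state of the adapter.
 */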

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"


static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
					u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

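/*
 * VPD is collected through a scatter/gather list: one page holds the
 * sg_list descriptors (big-endian physical address + length pairs),
 * each pointing at a zeroed SG_BUFFER_SIZE buffer for the hypervisor
 * to fill in.
 */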
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available vpd (entries: %i): %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * The hcall returns the total size of the available VPD
		 * in 'out' and fills the buffer with as much data as it
		 * can hold.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

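/*
 * A PSL interrupt may signal a translation fault or an error; the
 * fault details are first retrieved from the hypervisor, then handed
 * to cxl_irq(), the handler shared with the bare-metal backend.
 */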
static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq(irq, ctx, &irq_info);
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}


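/*
 * Interrupt numbers are managed as a set of ranges provided by
 * firmware, with one allocation bitmap per range; allocation is a
 * first-fit search for a contiguous run of free bits.
 */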
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

static int guest_reset(struct cxl *adapter)
{
	int rc;

	pr_devel("Adapter reset request\n");
	rc = cxl_h_reset_adapter(adapter->guest->handle);
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

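/*
 * Allocate 'num' interrupts in at most CXL_IRQ_RANGES contiguous runs:
 * ask for the largest run first and halve the request on each failure.
 * On a fragmented map, num = 7 could for instance be satisfied as runs
 * of 3 + 2 + 2. If the range slots are exhausted before num reaches
 * zero, everything allocated so far is rolled back.
 */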
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

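/*
 * Fault acknowledgment is delegated to the hypervisor: the control
 * bits from the upper word of the TFC value are passed down, and any
 * non-zero reset mask is collapsed into a single "reset the PSL" flag.
 */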
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

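/*
 * AFU interrupts are masked around the attach hcall so that nothing
 * fires before the hypervisor has accepted the process element. The
 * hardware IRQ numbers are translated back to Linux virqs via the
 * default IRQ domain, hence irq_find_mapping(NULL, ...).
 */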
static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}

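/*
 * AFU configuration record reads are bounced through a zeroed page
 * given to the hypervisor, then converted from the record's
 * little-endian layout to host byte order.
 */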
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

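/*
 * Attaching a context means building a process element in memory and
 * handing it to the hypervisor, which returns a process token plus
 * the location and size of the per-process problem state area.
 */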
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	u64 flags = 0;

	/* Must be 8-byte aligned and cannot cross a 4096-byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags         = cpu_to_be64(flags);
	elem->common.tid    = cpu_to_be32(0); /* Unused */
	elem->common.pid    = cpu_to_be32(pid);
	elem->common.csrp   = cpu_to_be64(0); /* disable */
	elem->common.aurp0  = cpu_to_be64(0); /* disable */
	elem->common.aurp1  = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.sstp0  = cpu_to_be64(ctx->sstp0);
	elem->common.sstp1  = cpu_to_be64(ctx->sstp1);
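	/*
	 * The first interrupt of range 0 is the PSL interrupt used for
	 * fault notifications; all others are advertised as application
	 * ISNs, recorded MSB-first in the bitmap, relative to the
	 * adapter's interrupt base.
	 */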
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no device tree property to read pp_size
			 * from, so we only find out at the first attach.
			 * Compared to bare-metal, that is too late and we
			 * should really take a lock here. However, on
			 * powerVM, pp_size is only used for display in
			 * /sys. A better interface (likely a new property)
			 * is being discussed with pHyp for their next
			 * release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: the process element is bytes 4-7 of the process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx\n",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}

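/*
 * The AFU error buffer is fetched into a bounce page; the copy back to
 * the caller is clamped to ERR_BUFF_MAX_COPY_SIZE, adjusted by the
 * sub-8-byte part of the offset that the hcall consumes.
 */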
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

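/*
 * Only the per-process (p2n) MMIO area is mapped by the guest; the
 * privileged register areas remain under hypervisor control in this
 * environment.
 */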
static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

static bool guest_link_ok(struct cxl *cxl)
{
	return true;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}

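/*
 * Bring up one AFU slice: allocate it, read its handle and properties
 * from the device tree, map its MMIO and register the slice error
 * interrupt before exposing it through chardev and sysfs. The error
 * unwinding is split around cxl_register_afu(): once that has been
 * called, the structure may only be released via device_unregister().
 */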
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it, so we'll initialize the driver but the first
	 * attach will fail.
	 * A better way (likely a new property) is being discussed with
	 * pHyp.
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	if (!afu)
		return;

	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest->irq_avail) {
		for (i = 0; i < adapter->guest->irq_nranges; i++) {
			cur = &adapter->guest->irq_avail[i];
			kfree(cur->bitmap);
		}
		kfree(adapter->guest->irq_avail);
	}
	kfree(adapter->guest->status);
	cxl_remove_adapter_nr(adapter);
	kfree(adapter->guest);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING: Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}


ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

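/*
 * Probe one adapter from its device tree node: read its handle and
 * properties, then register it with the driver core. As with the AFU
 * path, once cxl_register_adapter() has been called the structure is
 * only freed through release_adapter().
 */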
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

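/*
 * Backend ops plugged into the generic cxl core. setup_irq is left
 * NULL: under PowerVM the hypervisor programs the interrupt entries
 * from the process element, so there is nothing for the guest to do.
 */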
const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
};
951