xref: /linux/drivers/misc/cxl/guest.c (revision 4752876c71701b7663a5ded789058ab2c05f7d0f)
/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"


static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
					u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

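/*
 * Read VPD for either the adapter or an AFU (callers pass exactly one of
 * the two). A scatter/gather list of zeroed pages, capped at
 * SG_MAX_ENTRIES, is handed to the hypervisor, which fills it with as
 * much VPD as fits and reports in 'out' the total size available. On
 * success the data is copied back into 'buf' and the number of bytes
 * copied is returned.
 */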
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq(irq, ctx, &irq_info);
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}


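/*
 * Hardware interrupt numbers for the guest are managed as bitmaps, one
 * per available range. irq_alloc_range() finds a contiguous block of
 * 'len' free interrupts and irq_free_range() gives one back. Callers
 * are expected to hold adapter->guest->irq_alloc_lock.
 */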
static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

static int guest_reset(struct cxl *adapter)
{
	int rc;

	pr_devel("Adapter reset request\n");
	rc = cxl_h_reset_adapter(adapter->guest->handle);
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

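/*
 * Allocate 'num' hardware interrupts, spread over at most CXL_IRQ_RANGES
 * contiguous ranges. If no contiguous block of the requested size can be
 * found, the request is halved and retried, so the interrupts may end up
 * split across several smaller ranges. Fails with -ENOSPC if 'num'
 * interrupts cannot be placed within CXL_IRQ_RANGES ranges.
 */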
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				 guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

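/*
 * Acknowledge a fault on behalf of the context: the upper 32 bits of the
 * TFC value and a flag indicating whether a PSL reset is requested are
 * passed to the hypervisor, which owns the underlying registers.
 */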
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

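/*
 * Walk every interrupt range attached to the context and disable (or
 * re-enable) the corresponding Linux virtual interrupts. The hardware
 * interrupt numbers are translated through the default IRQ domain.
 */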
static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}

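/*
 * Read 'sz' bytes from AFU configuration record 'cr_idx' at 'offset'.
 * The hypervisor copies the data into a bounce page, which is then
 * byte-swapped as needed: configuration record data is little-endian.
 */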
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

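/*
 * Build the process element for an AFU-directed attach in a zeroed page
 * (which satisfies the alignment requirement noted below), fill in the
 * translation flags, PID and interrupt bitmap, then ask the hypervisor
 * to attach the process. On success, the returned process token
 * identifies the element for later hcalls and the MMIO address/size
 * describe the per-process problem state area.
 */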
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	__be64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags         = cpu_to_be64(flags);
	elem->common.tid    = cpu_to_be32(0); /* Unused */
	elem->common.pid    = cpu_to_be32(pid);
	elem->common.csrp   = cpu_to_be64(0); /* disable */
	elem->common.aurp0  = cpu_to_be64(0); /* disable */
	elem->common.aurp1  = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.sstp0  = cpu_to_be64(ctx->sstp0);
	elem->common.sstp1  = cpu_to_be64(ctx->sstp1);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
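/*
 * Read the AFU error buffer through the hypervisor. The data lands in a
 * bounce page first, and at most ERR_BUFF_MAX_COPY_SIZE bytes are copied
 * back to the caller's buffer.
 */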
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

static bool guest_link_ok(struct cxl *cxl)
{
	return true;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}

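/*
 * Create and register one AFU slice for the guest: read its handle and
 * properties from the device tree, map its MMIO region, register the
 * slice error interrupt and add the sysfs/chardev entries. Once
 * cxl_register_afu() has been called, the device release callback owns
 * the structure, which is what the 'free' flag in the error path tracks.
 */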
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest->irq_avail) {
		for (i = 0; i < adapter->guest->irq_nranges; i++) {
			cur = &adapter->guest->irq_avail[i];
			kfree(cur->bitmap);
		}
		kfree(adapter->guest->irq_avail);
	}
	kfree(adapter->guest->status);
	cxl_remove_adapter_nr(adapter);
	kfree(adapter->guest);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING:Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

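/*
 * Probe one virtual CXL adapter: allocate the cxl structure, read the
 * adapter handle and properties from the device tree, then register the
 * device and its sysfs attributes. As with the AFUs, once the device is
 * registered the release callback is responsible for freeing it.
 */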
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

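/*
 * Backend operations used when cxl runs as a powerVM guest: everything
 * that would touch PSL registers directly on bare metal goes through
 * hypervisor calls here instead.
 */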
const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
};