xref: /linux/arch/powerpc/platforms/pseries/mobility.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Partition Mobility/Migration
 *
 * Copyright (C) 2010 Nathan Fontenot
 * Copyright (C) 2010 IBM Corporation
 */


#define pr_fmt(fmt) "mobility: " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>

#include <asm/machdep.h>
#include <asm/nmi.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "vas.h"	/* vas_migration_handler() */
#include "../../kernel/cacheinfo.h"

static struct kobject *mobility_kobj;

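/*
 * Header of the work area passed to the ibm,update-properties RTAS call;
 * the property data to be parsed follows this structure in rtas_data_buf.
 */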
struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

#define NODE_ACTION_MASK	0xff000000
#define NODE_COUNT_MASK		0x00ffffff

#define DELETE_DT_NODE	0x01000000
#define UPDATE_DT_NODE	0x02000000
#define ADD_DT_NODE	0x03000000

#define MIGRATION_SCOPE	(1)
#define PRRN_SCOPE -2

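/*
 * Percentage factor applied to the hardlockup watchdog timeout while the
 * partition is suspended for migration (see pseries_migrate_partition());
 * 0 leaves the timeout unchanged. Tunable via the kernel.nmi_wd_lpm_factor
 * sysctl registered below.
 */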
#ifdef CONFIG_PPC_WATCHDOG
static unsigned int nmi_wd_lpm_factor = 200;

#ifdef CONFIG_SYSCTL
static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
	{
		.procname	= "nmi_wd_lpm_factor",
		.data		= &nmi_wd_lpm_factor,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
	},
};

static int __init register_nmi_wd_lpm_factor_sysctl(void)
{
	register_sysctl("kernel", nmi_wd_lpm_factor_ctl_table);

	return 0;
}
device_initcall(register_nmi_wd_lpm_factor_sysctl);
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_PPC_WATCHDOG */

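/*
 * Copy the caller's buffer into the globally shared rtas_data_buf, make the
 * RTAS call, and copy the result back, all under rtas_data_buf_lock.
 */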
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}

static int delete_dt_node(struct device_node *dn)
{
	struct device_node *pdn;
	bool is_platfac;

	pdn = of_get_parent(dn);
	is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
		     of_node_is_type(pdn, "ibm,platform-facilities");
	of_node_put(pdn);

	/*
	 * The drivers that bind to nodes in the platform-facilities
	 * hierarchy don't support node removal, and the removal directive
	 * from firmware is always followed by an add of an equivalent
	 * node. The capability (e.g. RNG, encryption, compression)
	 * represented by the node is never interrupted by the migration.
	 * So ignore changes to this part of the tree.
	 */
	if (is_platfac) {
		pr_notice("ignoring remove operation for %pOFfp\n", dn);
		return 0;
	}

	pr_debug("removing node %pOFfp\n", dn);
	dlpar_detach_node(dn);
	return 0;
}

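/*
 * Accumulate one property value from the ibm,update-properties work area
 * into *prop. Partial values (flagged by a negative length) are buffered
 * across calls; the device tree is only updated once the value is complete.
 */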
static int update_dt_property(struct device_node *dn, struct property **prop,
			      const char *name, u32 vd, char *value)
{
	struct property *new_prop = *prop;
	int more = 0;

	/* A negative 'vd' value indicates that only part of the new property
	 * value is contained in the buffer and we need to call
	 * ibm,update-properties again to get the rest of the value.
	 *
	 * A negative value is also the two's complement of the actual value.
	 */
	if (vd & 0x80000000) {
		vd = ~vd + 1;
		more = 1;
	}

	if (new_prop) {
		/* partial property fixup */
		char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
		if (!new_data)
			return -ENOMEM;

		memcpy(new_data, new_prop->value, new_prop->length);
		memcpy(new_data + new_prop->length, value, vd);

		kfree(new_prop->value);
		new_prop->value = new_data;
		new_prop->length += vd;
	} else {
		new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
		if (!new_prop)
			return -ENOMEM;

		new_prop->name = kstrdup(name, GFP_KERNEL);
		if (!new_prop->name) {
			kfree(new_prop);
			return -ENOMEM;
		}

		new_prop->length = vd;
		new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
		if (!new_prop->value) {
			kfree(new_prop->name);
			kfree(new_prop);
			return -ENOMEM;
		}

		memcpy(new_prop->value, value, vd);
		*prop = new_prop;
	}

	if (!more) {
		pr_debug("updating node %pOF property %s\n", dn, name);
		of_update_property(dn, new_prop);
		*prop = NULL;
	}

	return 0;
}

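/*
 * Retrieve property updates for one device node via ibm,update-properties
 * and apply them, repeating the RTAS call while it indicates more data is
 * pending (return code 1).
 */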
static int update_dt_node(struct device_node *dn, s32 scope)
{
	struct update_props_workarea *upwa;
	struct property *prop = NULL;
	int i, rc, rtas_rc;
	char *prop_data;
	char *rtas_buf;
	int update_properties_token;
	u32 nprops;
	u32 vd;

	update_properties_token = rtas_function_token(RTAS_FN_IBM_UPDATE_PROPERTIES);
	if (update_properties_token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	upwa = (struct update_props_workarea *)&rtas_buf[0];
	upwa->phandle = cpu_to_be32(dn->phandle);

	do {
		rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
					scope);
		if (rtas_rc < 0)
			break;

		prop_data = rtas_buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* On the first call to ibm,update-properties for a node, the
		 * first property value descriptor contains an empty
		 * property name, the property value length encoded as a u32,
		 * and the property value, which is the node path being
		 * updated.
		 */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			switch (vd) {
			case 0x00000000:
				/* name only property, nothing to do */
				break;

			case 0x80000000:
				of_remove_property(dn, of_find_property(dn,
							prop_name, NULL));
				prop = NULL;
				break;

			default:
				rc = update_dt_property(dn, &prop, prop_name,
							vd, prop_data);
				if (rc) {
					pr_err("updating %s property failed: %d\n",
					       prop_name, rc);
				}

				prop_data += vd;
				break;
			}

			cond_resched();
		}

		cond_resched();
	} while (rtas_rc == 1);

	kfree(rtas_buf);
	return 0;
}

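/*
 * Fetch the node identified by drc_index from firmware via the
 * configure-connector sequence and attach it beneath parent_dn.
 */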
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
	struct device_node *dn;
	int rc;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn)
		return -ENOENT;

	/*
	 * Since delete_dt_node() ignores this node type, this is the
	 * necessary counterpart. We also know that a platform-facilities
	 * node returned from dlpar_configure_connector() has children
	 * attached, and dlpar_attach_node() only adds the parent, leaking
	 * the children. So ignore these on the add side for now.
	 */
	if (of_node_is_type(dn, "ibm,platform-facilities")) {
		pr_notice("ignoring add operation for %pOF\n", dn);
		dlpar_free_cc_nodes(dn);
		return 0;
	}

	rc = dlpar_attach_node(dn, parent_dn);
	if (rc)
		dlpar_free_cc_nodes(dn);

	pr_debug("added node %pOFfp\n", dn);

	return rc;
}

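/*
 * Walk the list of affected nodes returned by ibm,update-nodes and apply the
 * delete/update/add actions encoded in the work area. Each 32-bit header
 * word packs an action in the high byte and a node count in the low 24 bits.
 */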
static int pseries_devicetree_update(s32 scope)
{
	char *rtas_buf;
	__be32 *data;
	int update_nodes_token;
	int rc;

	update_nodes_token = rtas_function_token(RTAS_FN_IBM_UPDATE_NODES);
	if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
		return 0;

	rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!rtas_buf)
		return -ENOMEM;

	do {
		rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)rtas_buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			int i;
			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

			data++;

			for (i = 0; i < node_count; i++) {
				struct device_node *np;
				__be32 phandle = *data++;
				__be32 drc_index;

				np = of_find_node_by_phandle(be32_to_cpu(phandle));
				if (!np) {
					pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
						be32_to_cpu(phandle), action);
					continue;
				}

				switch (action) {
				case DELETE_DT_NODE:
					delete_dt_node(np);
					break;
				case UPDATE_DT_NODE:
					update_dt_node(np, scope);
					break;
				case ADD_DT_NODE:
					drc_index = *data++;
					add_dt_node(np, drc_index);
					break;
				}

				of_node_put(np);
				cond_resched();
			}
		}

		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
	return rc;
}

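/*
 * Bring the kernel back in sync with the platform after a migration:
 * activate any staged firmware, refresh the device tree (with CPU hotplug
 * blocked and the cacheinfo hierarchy torn down around the update), then
 * re-evaluate security mitigations and hv-24x7 system information.
 */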
void post_mobility_fixup(void)
{
	int rc;

	rtas_activate_firmware();

	/*
	 * We don't want CPUs to go online/offline while the device
	 * tree is being updated.
	 */
	cpus_read_lock();

	/*
	 * It's common for the destination firmware to replace cache
	 * nodes.  Release all of the cacheinfo hierarchy's references
	 * before updating the device tree.
	 */
	cacheinfo_teardown();

	rc = pseries_devicetree_update(MIGRATION_SCOPE);
	if (rc)
		pr_err("device tree update failed: %d\n", rc);

	cacheinfo_rebuild();

	cpus_read_unlock();

	/* Possibly switch to a new L1 flush type */
	pseries_setup_security_mitigations();

	/* Reinitialise system information for hv-24x7 */
	read_24x7_sys_info();

	return;
}

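/*
 * Query the state of the VASI stream identified by handle and translate the
 * hypervisor return code into an errno; on success the raw state value is
 * stored in *res.
 */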
static int poll_vasi_state(u64 handle, unsigned long *res)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long hvrc;
	int ret;

	hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
	switch (hvrc) {
	case H_SUCCESS:
		ret = 0;
		*res = retbuf[0];
		break;
	case H_PARAMETER:
		ret = -EINVAL;
		break;
	case H_FUNCTION:
		ret = -EOPNOTSUPP;
		break;
	case H_HARDWARE:
	default:
		pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
		ret = -EIO;
		break;
	}
	return ret;
}

static int wait_for_vasi_session_suspending(u64 handle)
{
	unsigned long state;
	int ret;

	/*
	 * Wait for transition from H_VASI_ENABLED to
	 * H_VASI_SUSPENDING. Treat anything else as an error.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		if (ret != 0 || state == H_VASI_SUSPENDING) {
			break;
		} else if (state == H_VASI_ENABLED) {
			ssleep(1);
		} else {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			ret = -EIO;
			break;
		}
	}

	/*
	 * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
	 * ibm,suspend-me are also unimplemented, we'll recover then.
	 */
	if (ret == -EOPNOTSUPP)
		ret = 0;

	return ret;
}

static void wait_for_vasi_session_completed(u64 handle)
{
	unsigned long state = 0;
	int ret;

	pr_info("waiting for memory transfer to complete...\n");

	/*
	 * Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED.
	 */
	while (true) {
		ret = poll_vasi_state(handle, &state);

		/*
		 * If the memory transfer is already complete and the migration
		 * has been cleaned up by the hypervisor, H_PARAMETER is
		 * returned, which poll_vasi_state() translates to -EINVAL.
		 */
		if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) {
			pr_info("memory transfer completed.\n");
			break;
		}

		if (ret) {
			pr_err("H_VASI_STATE return error (%d)\n", ret);
			break;
		}

		if (state != H_VASI_RESUMED) {
			pr_err("unexpected H_VASI_STATE result %lu\n", state);
			break;
		}

		msleep(500);
	}
}

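/* Wake the target CPU out of H_JOIN with an H_PROD hypercall. */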
static void prod_single(unsigned int target_cpu)
{
	long hvrc;
	int hwid;

	hwid = get_hard_smp_processor_id(target_cpu);
	hvrc = plpar_hcall_norets(H_PROD, hwid);
	if (hvrc == H_SUCCESS)
		return;
	pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
			   target_cpu, hwid, hvrc);
}

static void prod_others(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			prod_single(cpu);
	}
}

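/*
 * Shrink the SLB to SLB_MIN_SIZE ahead of the suspend and return the
 * previous size so it can be restored if the suspend fails. Builds without
 * the hash MMU have no SLB to manage.
 */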
static u16 clamp_slb_size(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	u16 prev = mmu_slb_size;

	slb_set_size(SLB_MIN_SIZE);

	return prev;
#else
	return 0;
#endif
}

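/*
 * Issue ibm,suspend-me on this CPU once all others are offline or parked in
 * H_JOIN; called from do_join() on the CPU that receives H_CONTINUE.
 */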
static int do_suspend(void)
{
	u16 saved_slb_size;
	int status;
	int ret;

	pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());

	/*
	 * The destination processor model may have fewer SLB entries
	 * than the source. We reduce mmu_slb_size to a safe minimum
	 * before suspending in order to minimize the possibility of
	 * programming non-existent entries on the destination. If
	 * suspend fails, we restore it before returning. On success
	 * the OF reconfig path will update it from the new device
	 * tree after resuming on the destination.
	 */
	saved_slb_size = clamp_slb_size();

	ret = rtas_ibm_suspend_me(&status);
	if (ret != 0) {
		pr_err("ibm,suspend-me error: %d\n", status);
		slb_set_size(saved_slb_size);
	}

	return ret;
}

/**
 * struct pseries_suspend_info - State shared between CPUs for join/suspend.
 * @counter: Threads are to increment this upon resuming from suspend
 *           or if an error is received from H_JOIN. The thread which performs
 *           the first increment (i.e. sets it to 1) is responsible for
 *           waking the other threads.
 * @done: False if join/suspend is in progress. True if the operation is
 *        complete (successful or not).
 */
struct pseries_suspend_info {
	atomic_t counter;
	bool done;
};

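/*
 * Per-CPU callback run under stop_machine(). Every CPU parks in H_JOIN; the
 * one that gets H_CONTINUE performs the suspend, and the first CPU to resume
 * marks the operation done and prods the rest back out of H_JOIN.
 */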
static int do_join(void *arg)
{
	struct pseries_suspend_info *info = arg;
	atomic_t *counter = &info->counter;
	long hvrc;
	int ret;

retry:
	/* Must ensure MSR.EE off for H_JOIN. */
	hard_irq_disable();
	hvrc = plpar_hcall_norets(H_JOIN);

	switch (hvrc) {
	case H_CONTINUE:
		/*
		 * All other CPUs are offline or in H_JOIN. This CPU
		 * attempts the suspend.
		 */
		ret = do_suspend();
		break;
	case H_SUCCESS:
		/*
		 * The suspend is complete and this cpu has received a
		 * prod, or we've received a stray prod from unrelated
		 * code (e.g. paravirt spinlocks) and we need to join
		 * again.
		 *
		 * This barrier orders the return from H_JOIN above vs
		 * the load of info->done. It pairs with the barrier
		 * in the wakeup/prod path below.
		 */
		smp_mb();
		if (READ_ONCE(info->done) == false) {
			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
					    smp_processor_id());
			goto retry;
		}
		ret = 0;
		break;
	case H_BAD_MODE:
	case H_HARDWARE:
	default:
		ret = -EIO;
		pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
				   hvrc, smp_processor_id());
		break;
	}

	if (atomic_inc_return(counter) == 1) {
		pr_info("CPU %u waking all threads\n", smp_processor_id());
		WRITE_ONCE(info->done, true);
		/*
		 * This barrier orders the store to info->done vs subsequent
		 * H_PRODs to wake the other CPUs. It pairs with the barrier
		 * in the H_SUCCESS case above.
		 */
		smp_mb();
		prod_others();
	}
	/*
	 * Execution may have been suspended for several seconds, so reset
	 * the watchdogs. touch_nmi_watchdog() also touches the soft lockup
	 * watchdog.
	 */
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();

	return ret;
}

/*
 * Abort reason code byte 0. We use only the 'Migrating partition' value.
 */
enum vasi_aborting_entity {
	ORCHESTRATOR        = 1,
	VSP_SOURCE          = 2,
	PARTITION_FIRMWARE  = 3,
	PLATFORM_FIRMWARE   = 4,
	VSP_TARGET          = 5,
	MIGRATING_PARTITION = 6,
};

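/*
 * Ask the hypervisor to abort the migration via H_VASI_SIGNAL, encoding the
 * aborting entity in the top byte of the reason code and the (absolute)
 * errno in the low 24 bits.
 */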
static void pseries_cancel_migration(u64 handle, int err)
{
	u32 reason_code;
	u32 detail;
	u8 entity;
	long hvrc;

	entity = MIGRATING_PARTITION;
	detail = abs(err) & 0xffffff;
	reason_code = (entity << 24) | detail;

	hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
				  H_VASI_SIGNAL_CANCEL, reason_code);
	if (hvrc)
		pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}

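/*
 * Run the join/suspend sequence, retrying up to max_attempts times with an
 * exponentially increasing delay as long as the VASI stream remains in the
 * Suspending state.
 */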
static int pseries_suspend(u64 handle)
{
	const unsigned int max_attempts = 5;
	unsigned int retry_interval_ms = 1;
	unsigned int attempt = 1;
	int ret;

	while (true) {
		struct pseries_suspend_info info;
		unsigned long vasi_state;
		int vasi_err;

		info = (struct pseries_suspend_info) {
			.counter = ATOMIC_INIT(0),
			.done = false,
		};

		ret = stop_machine(do_join, &info, cpu_online_mask);
		if (ret == 0)
			break;
		/*
		 * Encountered an error. If the VASI stream is still
		 * in Suspending state, it's likely a transient
		 * condition related to some device in the partition
		 * and we can retry in the hope that the cause has
		 * cleared after some delay.
		 *
		 * A better design would allow drivers etc to prepare
		 * for the suspend and avoid conditions which prevent
		 * the suspend from succeeding. For now, we have this
		 * mitigation.
		 */
		pr_notice("Partition suspend attempt %u of %u error: %d\n",
			  attempt, max_attempts, ret);

		if (attempt == max_attempts)
			break;

		vasi_err = poll_vasi_state(handle, &vasi_state);
		if (vasi_err == 0) {
			if (vasi_state != H_VASI_SUSPENDING) {
				pr_notice("VASI state %lu after failed suspend\n",
					  vasi_state);
				break;
			}
		} else if (vasi_err != -EOPNOTSUPP) {
			pr_err("VASI state poll error: %d", vasi_err);
			break;
		}

		pr_notice("Will retry partition suspend after %u ms\n",
			  retry_interval_ms);

		msleep(retry_interval_ms);
		retry_interval_ms *= 10;
		attempt++;
	}

	return ret;
}

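/*
 * Top-level driver of a live migration: quiesce VAS windows, wait for the
 * VASI stream to reach Suspending, widen the hardlockup watchdog timeout,
 * suspend, and then either run the post-migration fixups and wait for the
 * memory transfer to finish, or cancel the migration on error.
 */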
static int pseries_migrate_partition(u64 handle)
{
	int ret;
	unsigned int factor = 0;

#ifdef CONFIG_PPC_WATCHDOG
	factor = nmi_wd_lpm_factor;
#endif
	/*
	 * When the migration is initiated, the hypervisor changes VAS
	 * mappings to prepare before the OS gets the notification and
	 * closes all VAS windows. NX generates continuous faults during
	 * this time and user space cannot differentiate these faults
	 * from the migration event. So reduce this time window by
	 * closing VAS windows at the beginning of this function.
	 */
	vas_migration_handler(VAS_SUSPEND);

	ret = wait_for_vasi_session_suspending(handle);
	if (ret)
		goto out;

	if (factor)
		watchdog_hardlockup_set_timeout_pct(factor);

	ret = pseries_suspend(handle);
	if (ret == 0) {
		post_mobility_fixup();
		/*
		 * Wait until the memory transfer is complete, so that the user
		 * space process returns from the syscall after the transfer is
		 * complete. This allows the user hooks to be executed at the
		 * right time.
		 */
		wait_for_vasi_session_completed(handle);
	} else
		pseries_cancel_migration(handle, ret);

	if (factor)
		watchdog_hardlockup_set_timeout_pct(0);

out:
	vas_migration_handler(VAS_RESUME);

	return ret;
}

int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
	return pseries_migrate_partition(handle);
}

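/*
 * Write handler for /sys/kernel/mobility/migration; the value written is the
 * VASI stream id of the migration to perform.
 */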
static ssize_t migration_store(const struct class *class,
			       const struct class_attribute *attr, const char *buf,
			       size_t count)
{
	u64 streamid;
	int rc;

	rc = kstrtou64(buf, 0, &streamid);
	if (rc)
		return rc;

	rc = pseries_migrate_partition(streamid);
	if (rc)
		return rc;

	return count;
}

/*
 * Used by drmgr to determine the kernel behavior of the migration interface.
 *
 * Version 1: Performs all PAPR requirements for migration including
 *	firmware activation and device tree update.
 */
#define MIGRATION_API_VERSION	1

static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));

static int __init mobility_sysfs_init(void)
{
	int rc;

	mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
	if (!mobility_kobj)
		return -ENOMEM;

	rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
	if (rc)
		pr_err("unable to create migration sysfs file (%d)\n", rc);

	rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
	if (rc)
		pr_err("unable to create api_version sysfs file (%d)\n", rc);

	return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);