xref: /linux/drivers/acpi/nfit/intel.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2018 Intel Corporation. All rights reserved. */
3 #include <linux/libnvdimm.h>
4 #include <linux/ndctl.h>
5 #include <linux/acpi.h>
6 #include <linux/memregion.h>
7 #include <asm/smp.h>
8 #include "intel.h"
9 #include "nfit.h"
10 
11 static ssize_t firmware_activate_noidle_show(struct device *dev,
12 		struct device_attribute *attr, char *buf)
13 {
14 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
15 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
16 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
17 
18 	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
19 }
20 
21 static ssize_t firmware_activate_noidle_store(struct device *dev,
22 		struct device_attribute *attr, const char *buf, size_t size)
23 {
24 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
25 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
26 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
27 	ssize_t rc;
28 	bool val;
29 
30 	rc = kstrtobool(buf, &val);
31 	if (rc)
32 		return rc;
33 	if (val != acpi_desc->fwa_noidle)
34 		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
35 	acpi_desc->fwa_noidle = val;
36 	return size;
37 }
/* Instantiate dev_attr_firmware_activate_noidle from the show/store pair above */
DEVICE_ATTR_RW(firmware_activate_noidle);
39 
/*
 * Report whether this bus supports Intel firmware activation.  The
 * Intel bus family must be present, and the advertised DSM mask must
 * match the full firmware-activate command set exactly — a partial set
 * is treated as unsupported.
 */
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	/* all-or-nothing: every activate command must be implemented */
	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}
52 
/*
 * Translate the Intel "get security state" DSM payload into generic
 * NVDIMM_SECURITY_* flag bits for the given passphrase type.
 * Returns 0 (no flags) when the DSM is unavailable or fails.
 */
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		/* master-passphrase status is reported in extended_state */
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		/* both "frozen" and "passphrase-limit" report as FROZEN */
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}
119 
120 static int intel_security_freeze(struct nvdimm *nvdimm)
121 {
122 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
123 	struct {
124 		struct nd_cmd_pkg pkg;
125 		struct nd_intel_freeze_lock cmd;
126 	} nd_cmd = {
127 		.pkg = {
128 			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
129 			.nd_family = NVDIMM_FAMILY_INTEL,
130 			.nd_size_out = ND_INTEL_STATUS_SIZE,
131 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
132 		},
133 	};
134 	int rc;
135 
136 	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
137 		return -ENOTTY;
138 
139 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
140 	if (rc < 0)
141 		return rc;
142 	if (nd_cmd.cmd.status)
143 		return -EIO;
144 	return 0;
145 }
146 
147 static int intel_security_change_key(struct nvdimm *nvdimm,
148 		const struct nvdimm_key_data *old_data,
149 		const struct nvdimm_key_data *new_data,
150 		enum nvdimm_passphrase_type ptype)
151 {
152 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
153 	unsigned int cmd = ptype == NVDIMM_MASTER ?
154 		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
155 		NVDIMM_INTEL_SET_PASSPHRASE;
156 	struct {
157 		struct nd_cmd_pkg pkg;
158 		struct nd_intel_set_passphrase cmd;
159 	} nd_cmd = {
160 		.pkg = {
161 			.nd_family = NVDIMM_FAMILY_INTEL,
162 			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
163 			.nd_size_out = ND_INTEL_STATUS_SIZE,
164 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
165 			.nd_command = cmd,
166 		},
167 	};
168 	int rc;
169 
170 	if (!test_bit(cmd, &nfit_mem->dsm_mask))
171 		return -ENOTTY;
172 
173 	memcpy(nd_cmd.cmd.old_pass, old_data->data,
174 			sizeof(nd_cmd.cmd.old_pass));
175 	memcpy(nd_cmd.cmd.new_pass, new_data->data,
176 			sizeof(nd_cmd.cmd.new_pass));
177 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
178 	if (rc < 0)
179 		return rc;
180 
181 	switch (nd_cmd.cmd.status) {
182 	case 0:
183 		return 0;
184 	case ND_INTEL_STATUS_INVALID_PASS:
185 		return -EINVAL;
186 	case ND_INTEL_STATUS_NOT_SUPPORTED:
187 		return -EOPNOTSUPP;
188 	case ND_INTEL_STATUS_INVALID_STATE:
189 	default:
190 		return -EIO;
191 	}
192 }
193 
194 static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
195 		const struct nvdimm_key_data *key_data)
196 {
197 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
198 	struct {
199 		struct nd_cmd_pkg pkg;
200 		struct nd_intel_unlock_unit cmd;
201 	} nd_cmd = {
202 		.pkg = {
203 			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
204 			.nd_family = NVDIMM_FAMILY_INTEL,
205 			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
206 			.nd_size_out = ND_INTEL_STATUS_SIZE,
207 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
208 		},
209 	};
210 	int rc;
211 
212 	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
213 		return -ENOTTY;
214 
215 	memcpy(nd_cmd.cmd.passphrase, key_data->data,
216 			sizeof(nd_cmd.cmd.passphrase));
217 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
218 	if (rc < 0)
219 		return rc;
220 	switch (nd_cmd.cmd.status) {
221 	case 0:
222 		break;
223 	case ND_INTEL_STATUS_INVALID_PASS:
224 		return -EINVAL;
225 	default:
226 		return -EIO;
227 	}
228 
229 	return 0;
230 }
231 
/*
 * Disable the user passphrase via the Intel "disable passphrase" DSM.
 * The current passphrase must be supplied.  Firmware status maps to
 * -EINVAL (bad passphrase) or -ENXIO (invalid state / other failure).
 */
static int intel_security_disable(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_disable_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}
271 
/*
 * Cryptographically erase the DIMM via the Intel secure-erase DSM
 * (user or master variant, selected by ptype).  Requires the matching
 * passphrase.  Firmware status maps to -EOPNOTSUPP, -EINVAL (bad
 * passphrase), or -ENXIO.
 */
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	/* user vs. master erase use distinct DSM function numbers */
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	return 0;
}
316 
/*
 * Poll the progress of a previously started overwrite via the Intel
 * "query overwrite" DSM.  Returns -EBUSY while the overwrite is still
 * running, 0 once it has completed, -ENXIO on other firmware errors.
 */
static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_query_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
		return -EBUSY;
	default:
		return -ENXIO;
	}

	return 0;
}
351 
352 static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
353 		const struct nvdimm_key_data *nkey)
354 {
355 	int rc;
356 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
357 	struct {
358 		struct nd_cmd_pkg pkg;
359 		struct nd_intel_overwrite cmd;
360 	} nd_cmd = {
361 		.pkg = {
362 			.nd_command = NVDIMM_INTEL_OVERWRITE,
363 			.nd_family = NVDIMM_FAMILY_INTEL,
364 			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
365 			.nd_size_out = ND_INTEL_STATUS_SIZE,
366 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
367 		},
368 	};
369 
370 	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
371 		return -ENOTTY;
372 
373 	memcpy(nd_cmd.cmd.passphrase, nkey->data,
374 			sizeof(nd_cmd.cmd.passphrase));
375 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
376 	if (rc < 0)
377 		return rc;
378 
379 	switch (nd_cmd.cmd.status) {
380 	case 0:
381 		return 0;
382 	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
383 		return -ENOTSUPP;
384 	case ND_INTEL_STATUS_INVALID_PASS:
385 		return -EINVAL;
386 	case ND_INTEL_STATUS_INVALID_STATE:
387 	default:
388 		return -ENXIO;
389 	}
390 }
391 
/*
 * Security-op vtable handed to the nvdimm core via intel_security_ops.
 * NOTE(review): unlock/erase/overwrite/query_overwrite are compiled in
 * only on X86 — presumably tied to arch-specific cache management (cf.
 * the <asm/smp.h> include); confirm before relying on other arches.
 */
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
406 
407 static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
408 		struct nd_intel_bus_fw_activate_businfo *info)
409 {
410 	struct {
411 		struct nd_cmd_pkg pkg;
412 		struct nd_intel_bus_fw_activate_businfo cmd;
413 	} nd_cmd = {
414 		.pkg = {
415 			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
416 			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
417 			.nd_size_out =
418 				sizeof(struct nd_intel_bus_fw_activate_businfo),
419 			.nd_fw_size =
420 				sizeof(struct nd_intel_bus_fw_activate_businfo),
421 		},
422 	};
423 	int rc;
424 
425 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
426 			NULL);
427 	*info = nd_cmd.cmd;
428 	return rc;
429 }
430 
/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
/*
 * Return the bus-level firmware activation state, refreshing the
 * cached state (and, on first read, the capability) from platform
 * firmware when the cache is invalid or busy.
 */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		/*
		 * Armed, but the activation timeout exceeds what the
		 * platform can quiesce for — flag as overflow.
		 */
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}
503 
504 static enum nvdimm_fwa_capability intel_bus_fwa_capability(
505 		struct nvdimm_bus_descriptor *nd_desc)
506 {
507 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
508 
509 	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
510 		return acpi_desc->fwa_cap;
511 
512 	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
513 		return acpi_desc->fwa_cap;
514 
515 	return NVDIMM_FWA_CAP_INVALID;
516 }
517 
/*
 * Trigger firmware activation across the bus.  Only legal from the
 * ARMED / ARM_OVERFLOW states.  On return — success or failure — all
 * cached state is invalidated so it is re-read from firmware.
 */
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}
573 
/* Bus-level firmware-activate vtable, exported via intel_bus_fw_ops */
static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
581 
582 static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
583 		struct nd_intel_fw_activate_dimminfo *info)
584 {
585 	struct {
586 		struct nd_cmd_pkg pkg;
587 		struct nd_intel_fw_activate_dimminfo cmd;
588 	} nd_cmd = {
589 		.pkg = {
590 			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
591 			.nd_family = NVDIMM_FAMILY_INTEL,
592 			.nd_size_out =
593 				sizeof(struct nd_intel_fw_activate_dimminfo),
594 			.nd_fw_size =
595 				sizeof(struct nd_intel_fw_activate_dimminfo),
596 		},
597 	};
598 	int rc;
599 
600 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
601 	*info = nd_cmd.cmd;
602 	return rc;
603 }
604 
/*
 * Return the per-DIMM firmware activation state, refreshing the cached
 * state and last-activation result from firmware when the cache is
 * invalid, busy, or stale (a bus-level activation has occurred since
 * the last read, tracked via fwa_count).
 */
static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct nd_intel_fw_activate_dimminfo info;
	int rc;

	/*
	 * Similar to the bus state, since activate is synchronous the
	 * busy state should resolve within the context of 'activate'.
	 */
	switch (nfit_mem->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* If no activations occurred the old state is still valid */
		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
			return nfit_mem->fwa_state;
	}

	rc = intel_fwa_dimminfo(nvdimm, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	/* translate the firmware state code into the generic enum */
	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
		break;
	default:
		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
		break;
	}

	/* cache the result of the last activation alongside the state */
	switch (info.result) {
	case ND_INTEL_DIMM_FWA_NONE:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
		break;
	case ND_INTEL_DIMM_FWA_SUCCESS:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
		break;
	case ND_INTEL_DIMM_FWA_NOTSTAGED:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
		break;
	case ND_INTEL_DIMM_FWA_NEEDRESET:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
		break;
	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
	case ND_INTEL_DIMM_FWA_ABORT:
	case ND_INTEL_DIMM_FWA_NOTSUPP:
	case ND_INTEL_DIMM_FWA_ERROR:
	default:
		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
		break;
	}

	/* mark the cache current with respect to bus-level activations */
	nfit_mem->fwa_count = acpi_desc->fwa_count;

	return nfit_mem->fwa_state;
}
671 
672 static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
673 {
674 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
675 	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
676 
677 	if (nfit_mem->fwa_count == acpi_desc->fwa_count
678 			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
679 		return nfit_mem->fwa_result;
680 
681 	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
682 		return nfit_mem->fwa_result;
683 
684 	return NVDIMM_FWA_RESULT_INVALID;
685 }
686 
/*
 * Arm or disarm this DIMM for firmware activation via the Intel
 * "activate arm" DSM.  No-ops when the DIMM is already in the
 * requested state; rejects the request while busy or invalid.
 */
static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_fw_activate_arm cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
			.nd_size_out =
				sizeof(struct nd_intel_fw_activate_arm),
			.nd_fw_size =
				sizeof(struct nd_intel_fw_activate_arm),
		},
		.cmd = {
			.activate_arm = arm == NVDIMM_FWA_ARM
				? ND_INTEL_DIMM_FWA_ARM
				: ND_INTEL_DIMM_FWA_DISARM,
		},
	};
	int rc;

	switch (intel_fwa_state(nvdimm)) {
	case NVDIMM_FWA_INVALID:
		return -ENXIO;
	case NVDIMM_FWA_BUSY:
		return -EBUSY;
	case NVDIMM_FWA_IDLE:
		/* already disarmed — nothing to do */
		if (arm == NVDIMM_FWA_DISARM)
			return 0;
		break;
	case NVDIMM_FWA_ARMED:
		/* already armed — nothing to do */
		if (arm == NVDIMM_FWA_ARM)
			return 0;
		break;
	default:
		return -ENXIO;
	}

	/*
	 * Invalidate the bus-level state, now that we're committed to
	 * changing the 'arm' state.
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);

	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
			? "arm" : "disarm", rc);
	return rc;
}
742 
/* Per-DIMM firmware-activate vtable, exported via intel_fw_ops */
static const struct nvdimm_fw_ops __intel_fw_ops = {
	.activate_state = intel_fwa_state,
	.activate_result = intel_fwa_result,
	.arm = intel_fwa_arm,
};

const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
750