// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *	Keith Busch <kbusch@kernel.org>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/unaligned.h>

#include "nvme.h"

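/*
 * Translate a block layer pr_type into the matching NVMe reservation type.
 * Returns 0 for values that have no NVMe equivalent.
 */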
static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

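/*
 * Translate an NVMe reservation type back into the block layer pr_type.
 * Returns 0 for values that have no block layer equivalent.
 */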
static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

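/*
 * Multipath (ns_head) variant: pick a usable path under the head's SRCU
 * read lock, fill in the namespace ID and issue the command synchronously.
 * Returns -EWOULDBLOCK when no path is available.
 */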
static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

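/*
 * Single-path variant: issue the command directly on the namespace queue.
 */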
static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

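/*
 * Map an NVMe completion status to the block layer's pr_status codes,
 * or to a negative errno for invalid-parameter style failures.
 */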
static int nvme_status_to_pr_err(int status)
{
	if (nvme_is_path_error(status))
		return PR_STS_PATH_FAILED;

	switch (status & NVME_SCT_SC_MASK) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

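/*
 * Build a reservation command with the given opcode and dwords and send it
 * down the appropriate path (ns_head for multipath disks, plain ns
 * otherwise). Returns the raw NVMe status or a negative errno.
 */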
static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10,
		u32 cdw11, u8 op, void *data, unsigned int data_len)
{
	struct nvme_command c = { 0 };

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);
	c.common.cdw11 = cpu_to_le32(cdw11);

	if (nvme_disk_is_ns_head(bdev->bd_disk))
		return nvme_send_ns_head_pr_command(bdev, &c, data, data_len);
	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
				data, data_len);
}

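/*
 * Same as __nvme_send_pr_command(), but translate the NVMe status into a
 * pr_status value for callers that do not need the raw status.
 */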
static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11,
		u8 op, void *data, unsigned int data_len)
{
	int ret;

	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len);
	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

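/*
 * PR_REGISTER: register a new key, or replace the existing key when
 * old_key is non-zero. CPTPL is set so registrations persist across
 * power loss.
 */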
static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		unsigned int flags)
{
	struct nvmet_pr_register_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(old_key);
	data.nrkey = cpu_to_le64(new_key);

	cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE :
		NVME_PR_REGISTER_ACT_REG;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
	cdw10 |= NVME_PR_CPTPL_PERSIST;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register,
			&data, sizeof(data));
}

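/*
 * PR_RESERVE: acquire a reservation of the requested type using the
 * caller's registration key.
 */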
static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

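/*
 * PR_PREEMPT: preempt the registration (and any reservation) held under
 * key "new" using our own key "old"; "abort" selects the preempt-and-abort
 * action.
 */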
static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	struct nvmet_pr_acquire_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(old);
	data.prkey = cpu_to_le64(new);

	cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT :
			NVME_PR_ACQUIRE_ACT_PREEMPT;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
			&data, sizeof(data));
}

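/*
 * PR_CLEAR: release the reservation and remove all registrations. A zero
 * key sets the ignore-key bit instead of supplying a current key.
 */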
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_CLEAR;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

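/*
 * PR_RELEASE: release a reservation of the given type. As with clear, a
 * zero key is signalled via the ignore-key bit.
 */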
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct nvmet_pr_release_data data = { 0 };
	u32 cdw10;

	data.crkey = cpu_to_le64(key);

	cdw10 = NVME_PR_RELEASE_ACT_RELEASE;
	cdw10 |= nvme_pr_type_from_blk(type) << 8;
	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;

	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
			&data, sizeof(data));
}

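/*
 * Issue a Reservation Report. The extended data structure (128-bit host
 * IDs) is requested first; if the controller rejects it with a
 * host-identifier inconsistent-format error, retry with the basic
 * structure. *eds reports which format the returned buffer uses.
 */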
static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	u32 cdw10, cdw11;
	int ret;

	cdw10 = nvme_bytes_to_numd(data_len);
	cdw11 = NVME_EXTENDED_DATA_STRUCT;
	*eds = true;

retry:
	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report,
			data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    cdw11 == NVME_EXTENDED_DATA_STRUCT) {
		cdw11 = 0;
		*eds = false;
		goto retry;
	}

	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
}

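/*
 * PR_READ_KEYS: report the registered reservation keys. The buffer is
 * sized for the extended layout, which is large enough for either format.
 */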
static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	size_t rse_len;
	u32 num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	if (rse_len > U32_MAX)
		return -EINVAL;

	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

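/*
 * PR_READ_RESERVATION: return the current reservation holder. A small
 * report is issued first just to learn the registrant count; if the count
 * changes between that sizing call and the full report, start over.
 */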
static int nvme_pr_read_reservation(struct block_device *bdev,
		struct pr_held_reservation *resv)
{
	struct nvme_reservation_status_ext tmp_rse, *rse;
	int ret, i, num_regs;
	u32 rse_len;
	bool eds;

get_num_regs:
	/*
	 * Get the number of registrations so we know how big to allocate
	 * the response buffer.
	 */
	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
	if (ret)
		return ret;

	num_regs = get_unaligned_le16(&tmp_rse.regctl);
	if (!num_regs) {
		resv->generation = le32_to_cpu(tmp_rse.gen);
		return 0;
	}

	rse_len = struct_size(rse, regctl_eds, num_regs);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	if (num_regs != get_unaligned_le16(&rse->regctl)) {
		kfree(rse);
		goto get_num_regs;
	}

	resv->generation = le32_to_cpu(rse->gen);
	resv->type = block_pr_type_from_nvme(rse->rtype);

	for (i = 0; i < num_regs; i++) {
		if (eds) {
			if (rse->regctl_eds[i].rcsts) {
				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
				break;
			}
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			if (rs->regctl_ds[i].rcsts) {
				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
				break;
			}
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

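/*
 * Persistent reservation operations exposed to the block layer, hooked up
 * via the .pr_ops member of the driver's block_device_operations.
 */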
const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
	.pr_read_keys	= nvme_pr_read_keys,
	.pr_read_reservation = nvme_pr_read_reservation,
};