xref: /linux/drivers/nvme/target/pr.c (revision 5a47c2080a7316f184107464e4f76737c0c05186)
1*5a47c208SGuixin Liu // SPDX-License-Identifier: GPL-2.0
2*5a47c208SGuixin Liu /*
3*5a47c208SGuixin Liu  * NVMe over Fabrics Persistent Reservation.
4*5a47c208SGuixin Liu  * Copyright (c) 2024 Guixin Liu, Alibaba Group.
5*5a47c208SGuixin Liu  * All rights reserved.
6*5a47c208SGuixin Liu  */
7*5a47c208SGuixin Liu #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8*5a47c208SGuixin Liu #include <linux/unaligned.h>
9*5a47c208SGuixin Liu #include "nvmet.h"
10*5a47c208SGuixin Liu 
11*5a47c208SGuixin Liu #define NVMET_PR_NOTIFI_MASK_ALL \
12*5a47c208SGuixin Liu 	(1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \
13*5a47c208SGuixin Liu 	 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \
14*5a47c208SGuixin Liu 	 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED)
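/*
 * Illustration (assuming the enum nvme_pr_notify_bit values in
 * include/linux/nvme.h follow the spec's Reservation Notification Mask
 * layout, i.e. REG_PREEMPTED = 1, RESV_RELEASED = 2, RESV_PREEMPTED = 3):
 *
 *	NVMET_PR_NOTIFI_MASK_ALL == (1 << 1) | (1 << 2) | (1 << 3) == 0x0e
 *
 * which covers all three maskable reservation notification types.
 */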
15*5a47c208SGuixin Liu 
16*5a47c208SGuixin Liu static inline bool nvmet_pr_parse_ignore_key(u32 cdw10)
17*5a47c208SGuixin Liu {
18*5a47c208SGuixin Liu 	/* Ignore existing key, bit 03. */
19*5a47c208SGuixin Liu 	return (cdw10 >> 3) & 1;
20*5a47c208SGuixin Liu }
21*5a47c208SGuixin Liu 
22*5a47c208SGuixin Liu static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr)
23*5a47c208SGuixin Liu {
24*5a47c208SGuixin Liu 	return container_of(pr, struct nvmet_ns, pr);
25*5a47c208SGuixin Liu }
26*5a47c208SGuixin Liu 
27*5a47c208SGuixin Liu static struct nvmet_pr_registrant *
28*5a47c208SGuixin Liu nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid)
29*5a47c208SGuixin Liu {
30*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
31*5a47c208SGuixin Liu 
32*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
33*5a47c208SGuixin Liu 		if (uuid_equal(&reg->hostid, hostid))
34*5a47c208SGuixin Liu 			return reg;
35*5a47c208SGuixin Liu 	}
36*5a47c208SGuixin Liu 	return NULL;
37*5a47c208SGuixin Liu }
38*5a47c208SGuixin Liu 
39*5a47c208SGuixin Liu u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
40*5a47c208SGuixin Liu {
41*5a47c208SGuixin Liu 	u32 nsid = le32_to_cpu(req->cmd->common.nsid);
42*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
43*5a47c208SGuixin Liu 	struct nvmet_ns *ns;
44*5a47c208SGuixin Liu 	unsigned long idx;
45*5a47c208SGuixin Liu 	u16 status;
46*5a47c208SGuixin Liu 
47*5a47c208SGuixin Liu 	if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) {
48*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
49*5a47c208SGuixin Liu 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
50*5a47c208SGuixin Liu 	}
51*5a47c208SGuixin Liu 
52*5a47c208SGuixin Liu 	if (nsid != U32_MAX) {
53*5a47c208SGuixin Liu 		status = nvmet_req_find_ns(req);
54*5a47c208SGuixin Liu 		if (status)
55*5a47c208SGuixin Liu 			return status;
56*5a47c208SGuixin Liu 		if (!req->ns->pr.enable)
57*5a47c208SGuixin Liu 			return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
58*5a47c208SGuixin Liu 
59*5a47c208SGuixin Liu 		WRITE_ONCE(req->ns->pr.notify_mask, mask);
60*5a47c208SGuixin Liu 		goto success;
61*5a47c208SGuixin Liu 	}
62*5a47c208SGuixin Liu 
63*5a47c208SGuixin Liu 	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
64*5a47c208SGuixin Liu 		if (ns->pr.enable)
65*5a47c208SGuixin Liu 			WRITE_ONCE(ns->pr.notify_mask, mask);
66*5a47c208SGuixin Liu 	}
67*5a47c208SGuixin Liu 
68*5a47c208SGuixin Liu success:
69*5a47c208SGuixin Liu 	nvmet_set_result(req, mask);
70*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
71*5a47c208SGuixin Liu }
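/*
 * Usage sketch (hedged, not from the original source): a host sets this
 * mask with Set Features, feature ID NVME_FEAT_RESV_MASK (0x82), passing
 * the mask in CDW11. For example, CDW11 = NVMET_PR_NOTIFI_MASK_ALL with
 * NSID = 0xffffffff suppresses all three notification types on every
 * PR-enabled namespace (the xa_for_each() path above); any other NSID
 * updates just that namespace.
 */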
72*5a47c208SGuixin Liu 
73*5a47c208SGuixin Liu u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req)
74*5a47c208SGuixin Liu {
75*5a47c208SGuixin Liu 	u16 status;
76*5a47c208SGuixin Liu 
77*5a47c208SGuixin Liu 	status = nvmet_req_find_ns(req);
78*5a47c208SGuixin Liu 	if (status)
79*5a47c208SGuixin Liu 		return status;
80*5a47c208SGuixin Liu 
81*5a47c208SGuixin Liu 	if (!req->ns->pr.enable)
82*5a47c208SGuixin Liu 		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
83*5a47c208SGuixin Liu 
84*5a47c208SGuixin Liu 	nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask));
85*5a47c208SGuixin Liu 	return status;
86*5a47c208SGuixin Liu }
87*5a47c208SGuixin Liu 
88*5a47c208SGuixin Liu void nvmet_execute_get_log_page_resv(struct nvmet_req *req)
89*5a47c208SGuixin Liu {
90*5a47c208SGuixin Liu 	struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr;
91*5a47c208SGuixin Liu 	struct nvme_pr_log next_log = {0};
92*5a47c208SGuixin Liu 	struct nvme_pr_log log = {0};
93*5a47c208SGuixin Liu 	u16 status = NVME_SC_SUCCESS;
94*5a47c208SGuixin Liu 	u64 lost_count;
95*5a47c208SGuixin Liu 	u64 cur_count;
96*5a47c208SGuixin Liu 	u64 next_count;
97*5a47c208SGuixin Liu 
98*5a47c208SGuixin Liu 	mutex_lock(&log_mgr->lock);
99*5a47c208SGuixin Liu 	if (!kfifo_get(&log_mgr->log_queue, &log))
100*5a47c208SGuixin Liu 		goto out;
101*5a47c208SGuixin Liu 
102*5a47c208SGuixin Liu 	/*
103*5a47c208SGuixin Liu 	 * The kfifo does not let us read its most recently queued entry.
104*5a47c208SGuixin Liu 	 * Instead, use the current count and the count from the next log
105*5a47c208SGuixin Liu 	 * to calculate the number of lost logs, handling counter wrap as
106*5a47c208SGuixin Liu 	 * well. If there is no subsequent log, the number of lost logs is
107*5a47c208SGuixin Liu 	 * equal to the lost_count within the nvmet_pr_log_mgr.
108*5a47c208SGuixin Liu 	 */
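	/*
	 * Worked example (illustrative only): if the log just dequeued has
	 * count 10 and kfifo_peek() sees count 14, then logs 11..13 were
	 * dropped, so lost_count = 14 - 10 - 1 = 3. Across a counter wrap,
	 * e.g. cur_count = U64_MAX - 1 and next_count = 2, the else branch
	 * gives U64_MAX - cur_count + next_count - 1 = 2, matching the two
	 * dropped entries (U64_MAX and 1; the counter skips 0).
	 */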
109*5a47c208SGuixin Liu 	cur_count = le64_to_cpu(log.count);
110*5a47c208SGuixin Liu 	if (kfifo_peek(&log_mgr->log_queue, &next_log)) {
111*5a47c208SGuixin Liu 		next_count = le64_to_cpu(next_log.count);
112*5a47c208SGuixin Liu 		if (next_count > cur_count)
113*5a47c208SGuixin Liu 			lost_count = next_count - cur_count - 1;
114*5a47c208SGuixin Liu 		else
115*5a47c208SGuixin Liu 			lost_count = U64_MAX - cur_count + next_count - 1;
116*5a47c208SGuixin Liu 	} else {
117*5a47c208SGuixin Liu 		lost_count = log_mgr->lost_count;
118*5a47c208SGuixin Liu 	}
119*5a47c208SGuixin Liu 
120*5a47c208SGuixin Liu 	log.count = cpu_to_le64((cur_count + lost_count) == 0 ?
121*5a47c208SGuixin Liu 				1 : (cur_count + lost_count));
122*5a47c208SGuixin Liu 	log_mgr->lost_count -= lost_count;
123*5a47c208SGuixin Liu 
124*5a47c208SGuixin Liu 	log.nr_pages = kfifo_len(&log_mgr->log_queue);
125*5a47c208SGuixin Liu 
126*5a47c208SGuixin Liu out:
127*5a47c208SGuixin Liu 	status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log));
128*5a47c208SGuixin Liu 	mutex_unlock(&log_mgr->lock);
129*5a47c208SGuixin Liu 	nvmet_req_complete(req, status);
130*5a47c208SGuixin Liu }
131*5a47c208SGuixin Liu 
132*5a47c208SGuixin Liu static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type,
133*5a47c208SGuixin Liu 				  u32 nsid)
134*5a47c208SGuixin Liu {
135*5a47c208SGuixin Liu 	struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr;
136*5a47c208SGuixin Liu 	struct nvme_pr_log log = {0};
137*5a47c208SGuixin Liu 
138*5a47c208SGuixin Liu 	mutex_lock(&log_mgr->lock);
139*5a47c208SGuixin Liu 	log_mgr->counter++;
140*5a47c208SGuixin Liu 	if (log_mgr->counter == 0)
141*5a47c208SGuixin Liu 		log_mgr->counter = 1;
142*5a47c208SGuixin Liu 
143*5a47c208SGuixin Liu 	log.count = cpu_to_le64(log_mgr->counter);
144*5a47c208SGuixin Liu 	log.type = log_type;
145*5a47c208SGuixin Liu 	log.nsid = cpu_to_le32(nsid);
146*5a47c208SGuixin Liu 
147*5a47c208SGuixin Liu 	if (!kfifo_put(&log_mgr->log_queue, log)) {
148*5a47c208SGuixin Liu 		pr_info("a reservation log was lost, cntlid:%d, log_type:%d, nsid:%d\n",
149*5a47c208SGuixin Liu 			ctrl->cntlid, log_type, nsid);
150*5a47c208SGuixin Liu 		log_mgr->lost_count++;
151*5a47c208SGuixin Liu 	}
152*5a47c208SGuixin Liu 
153*5a47c208SGuixin Liu 	mutex_unlock(&log_mgr->lock);
154*5a47c208SGuixin Liu }
155*5a47c208SGuixin Liu 
156*5a47c208SGuixin Liu static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid)
157*5a47c208SGuixin Liu {
158*5a47c208SGuixin Liu 	struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
159*5a47c208SGuixin Liu 	struct nvmet_subsys *subsys = ns->subsys;
160*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl;
161*5a47c208SGuixin Liu 
162*5a47c208SGuixin Liu 	if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask))
163*5a47c208SGuixin Liu 		return;
164*5a47c208SGuixin Liu 
165*5a47c208SGuixin Liu 	mutex_lock(&subsys->lock);
166*5a47c208SGuixin Liu 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
167*5a47c208SGuixin Liu 		if (!uuid_equal(&ctrl->hostid, hostid) &&
168*5a47c208SGuixin Liu 		    nvmet_pr_find_registrant(pr, &ctrl->hostid)) {
169*5a47c208SGuixin Liu 			nvmet_pr_add_resv_log(ctrl,
170*5a47c208SGuixin Liu 				NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid);
171*5a47c208SGuixin Liu 			nvmet_add_async_event(ctrl, NVME_AER_CSS,
172*5a47c208SGuixin Liu 				NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
173*5a47c208SGuixin Liu 				NVME_LOG_RESERVATION);
174*5a47c208SGuixin Liu 		}
175*5a47c208SGuixin Liu 	}
176*5a47c208SGuixin Liu 	mutex_unlock(&subsys->lock);
177*5a47c208SGuixin Liu }
178*5a47c208SGuixin Liu 
179*5a47c208SGuixin Liu static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid,
180*5a47c208SGuixin Liu 					  u8 log_type)
181*5a47c208SGuixin Liu {
182*5a47c208SGuixin Liu 	struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
183*5a47c208SGuixin Liu 	struct nvmet_subsys *subsys = ns->subsys;
184*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl;
185*5a47c208SGuixin Liu 
186*5a47c208SGuixin Liu 	mutex_lock(&subsys->lock);
187*5a47c208SGuixin Liu 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
188*5a47c208SGuixin Liu 		if (uuid_equal(hostid, &ctrl->hostid)) {
189*5a47c208SGuixin Liu 			nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid);
190*5a47c208SGuixin Liu 			nvmet_add_async_event(ctrl, NVME_AER_CSS,
191*5a47c208SGuixin Liu 				NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
192*5a47c208SGuixin Liu 				NVME_LOG_RESERVATION);
193*5a47c208SGuixin Liu 		}
194*5a47c208SGuixin Liu 	}
195*5a47c208SGuixin Liu 	mutex_unlock(&subsys->lock);
196*5a47c208SGuixin Liu }
197*5a47c208SGuixin Liu 
198*5a47c208SGuixin Liu static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid)
199*5a47c208SGuixin Liu {
200*5a47c208SGuixin Liu 	if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask))
201*5a47c208SGuixin Liu 		return;
202*5a47c208SGuixin Liu 
203*5a47c208SGuixin Liu 	nvmet_pr_send_event_to_host(pr, hostid,
204*5a47c208SGuixin Liu 		NVME_PR_LOG_RESERVATOIN_PREEMPTED);
205*5a47c208SGuixin Liu }
206*5a47c208SGuixin Liu 
207*5a47c208SGuixin Liu static void nvmet_pr_registration_preempted(struct nvmet_pr *pr,
208*5a47c208SGuixin Liu 					    uuid_t *hostid)
209*5a47c208SGuixin Liu {
210*5a47c208SGuixin Liu 	if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask))
211*5a47c208SGuixin Liu 		return;
212*5a47c208SGuixin Liu 
213*5a47c208SGuixin Liu 	nvmet_pr_send_event_to_host(pr, hostid,
214*5a47c208SGuixin Liu 		NVME_PR_LOG_REGISTRATION_PREEMPTED);
215*5a47c208SGuixin Liu }
216*5a47c208SGuixin Liu 
217*5a47c208SGuixin Liu static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype,
218*5a47c208SGuixin Liu 					   struct nvmet_pr_registrant *reg)
219*5a47c208SGuixin Liu {
220*5a47c208SGuixin Liu 	reg->rtype = new_rtype;
221*5a47c208SGuixin Liu 	rcu_assign_pointer(pr->holder, reg);
222*5a47c208SGuixin Liu }
223*5a47c208SGuixin Liu 
224*5a47c208SGuixin Liu static u16 nvmet_pr_register(struct nvmet_req *req,
225*5a47c208SGuixin Liu 			     struct nvmet_pr_register_data *d)
226*5a47c208SGuixin Liu {
227*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
228*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *new, *reg;
229*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
230*5a47c208SGuixin Liu 	u16 status = NVME_SC_SUCCESS;
231*5a47c208SGuixin Liu 	u64 nrkey = le64_to_cpu(d->nrkey);
232*5a47c208SGuixin Liu 
233*5a47c208SGuixin Liu 	new = kmalloc(sizeof(*new), GFP_KERNEL);
234*5a47c208SGuixin Liu 	if (!new)
235*5a47c208SGuixin Liu 		return NVME_SC_INTERNAL;
236*5a47c208SGuixin Liu 
237*5a47c208SGuixin Liu 	down(&pr->pr_sem);
238*5a47c208SGuixin Liu 	reg = nvmet_pr_find_registrant(pr, &ctrl->hostid);
239*5a47c208SGuixin Liu 	if (reg) {
240*5a47c208SGuixin Liu 		if (reg->rkey != nrkey)
241*5a47c208SGuixin Liu 			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
242*5a47c208SGuixin Liu 		kfree(new);
243*5a47c208SGuixin Liu 		goto out;
244*5a47c208SGuixin Liu 	}
245*5a47c208SGuixin Liu 
246*5a47c208SGuixin Liu 	memset(new, 0, sizeof(*new));
247*5a47c208SGuixin Liu 	INIT_LIST_HEAD(&new->entry);
248*5a47c208SGuixin Liu 	new->rkey = nrkey;
249*5a47c208SGuixin Liu 	uuid_copy(&new->hostid, &ctrl->hostid);
250*5a47c208SGuixin Liu 	list_add_tail_rcu(&new->entry, &pr->registrant_list);
251*5a47c208SGuixin Liu 
252*5a47c208SGuixin Liu out:
253*5a47c208SGuixin Liu 	up(&pr->pr_sem);
254*5a47c208SGuixin Liu 	return status;
255*5a47c208SGuixin Liu }
256*5a47c208SGuixin Liu 
257*5a47c208SGuixin Liu static void nvmet_pr_unregister_one(struct nvmet_pr *pr,
258*5a47c208SGuixin Liu 				    struct nvmet_pr_registrant *reg)
259*5a47c208SGuixin Liu {
260*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *first_reg;
261*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
262*5a47c208SGuixin Liu 	u8 original_rtype;
263*5a47c208SGuixin Liu 
264*5a47c208SGuixin Liu 	list_del_rcu(&reg->entry);
265*5a47c208SGuixin Liu 
266*5a47c208SGuixin Liu 	holder = rcu_dereference_protected(pr->holder, 1);
267*5a47c208SGuixin Liu 	if (reg != holder)
268*5a47c208SGuixin Liu 		goto out;
269*5a47c208SGuixin Liu 
270*5a47c208SGuixin Liu 	original_rtype = holder->rtype;
271*5a47c208SGuixin Liu 	if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
272*5a47c208SGuixin Liu 	    original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
273*5a47c208SGuixin Liu 		first_reg = list_first_or_null_rcu(&pr->registrant_list,
274*5a47c208SGuixin Liu 				struct nvmet_pr_registrant, entry);
275*5a47c208SGuixin Liu 		if (first_reg)
276*5a47c208SGuixin Liu 			first_reg->rtype = original_rtype;
277*5a47c208SGuixin Liu 		rcu_assign_pointer(pr->holder, first_reg);
278*5a47c208SGuixin Liu 	} else {
279*5a47c208SGuixin Liu 		rcu_assign_pointer(pr->holder, NULL);
280*5a47c208SGuixin Liu 
281*5a47c208SGuixin Liu 		if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY ||
282*5a47c208SGuixin Liu 		    original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY)
283*5a47c208SGuixin Liu 			nvmet_pr_resv_released(pr, &reg->hostid);
284*5a47c208SGuixin Liu 	}
285*5a47c208SGuixin Liu out:
286*5a47c208SGuixin Liu 	kfree_rcu(reg, rcu);
287*5a47c208SGuixin Liu }
288*5a47c208SGuixin Liu 
289*5a47c208SGuixin Liu static u16 nvmet_pr_unregister(struct nvmet_req *req,
290*5a47c208SGuixin Liu 			       struct nvmet_pr_register_data *d,
291*5a47c208SGuixin Liu 			       bool ignore_key)
292*5a47c208SGuixin Liu {
293*5a47c208SGuixin Liu 	u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
294*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
295*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
296*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
297*5a47c208SGuixin Liu 
298*5a47c208SGuixin Liu 	down(&pr->pr_sem);
299*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
300*5a47c208SGuixin Liu 		if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
301*5a47c208SGuixin Liu 			if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) {
302*5a47c208SGuixin Liu 				status = NVME_SC_SUCCESS;
303*5a47c208SGuixin Liu 				nvmet_pr_unregister_one(pr, reg);
304*5a47c208SGuixin Liu 			}
305*5a47c208SGuixin Liu 			break;
306*5a47c208SGuixin Liu 		}
307*5a47c208SGuixin Liu 	}
308*5a47c208SGuixin Liu 	up(&pr->pr_sem);
309*5a47c208SGuixin Liu 
310*5a47c208SGuixin Liu 	return status;
311*5a47c208SGuixin Liu }
312*5a47c208SGuixin Liu 
313*5a47c208SGuixin Liu static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg,
314*5a47c208SGuixin Liu 				     void *attr)
315*5a47c208SGuixin Liu {
316*5a47c208SGuixin Liu 	reg->rkey = *(u64 *)attr;
317*5a47c208SGuixin Liu }
318*5a47c208SGuixin Liu 
319*5a47c208SGuixin Liu static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr,
320*5a47c208SGuixin Liu 			struct nvmet_pr_registrant *reg,
321*5a47c208SGuixin Liu 			void (*change_attr)(struct nvmet_pr_registrant *reg,
322*5a47c208SGuixin Liu 			void *attr),
323*5a47c208SGuixin Liu 			void *attr)
324*5a47c208SGuixin Liu {
325*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
326*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *new;
327*5a47c208SGuixin Liu 
328*5a47c208SGuixin Liu 	holder = rcu_dereference_protected(pr->holder, 1);
329*5a47c208SGuixin Liu 	if (reg != holder) {
330*5a47c208SGuixin Liu 		change_attr(reg, attr);
331*5a47c208SGuixin Liu 		return NVME_SC_SUCCESS;
332*5a47c208SGuixin Liu 	}
333*5a47c208SGuixin Liu 
334*5a47c208SGuixin Liu 	new = kmalloc(sizeof(*new), GFP_ATOMIC);
335*5a47c208SGuixin Liu 	if (!new)
336*5a47c208SGuixin Liu 		return NVME_SC_INTERNAL;
337*5a47c208SGuixin Liu 
338*5a47c208SGuixin Liu 	new->rkey = holder->rkey;
339*5a47c208SGuixin Liu 	new->rtype = holder->rtype;
340*5a47c208SGuixin Liu 	uuid_copy(&new->hostid, &holder->hostid);
341*5a47c208SGuixin Liu 	INIT_LIST_HEAD(&new->entry);
342*5a47c208SGuixin Liu 
343*5a47c208SGuixin Liu 	change_attr(new, attr);
344*5a47c208SGuixin Liu 	list_replace_rcu(&holder->entry, &new->entry);
345*5a47c208SGuixin Liu 	rcu_assign_pointer(pr->holder, new);
346*5a47c208SGuixin Liu 	kfree_rcu(holder, rcu);
347*5a47c208SGuixin Liu 
348*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
349*5a47c208SGuixin Liu }
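/*
 * Note (an interpretation, not from the original source): when the
 * registrant being updated is the current reservation holder, it is also
 * dereferenced locklessly via rcu_dereference(pr->holder) in the I/O and
 * report paths. Rather than modify it in place, the function publishes a
 * fully initialised copy with list_replace_rcu()/rcu_assign_pointer() and
 * frees the old entry after a grace period with kfree_rcu(), so lockless
 * readers never observe a partially updated holder.
 */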
350*5a47c208SGuixin Liu 
351*5a47c208SGuixin Liu static u16 nvmet_pr_replace(struct nvmet_req *req,
352*5a47c208SGuixin Liu 			    struct nvmet_pr_register_data *d,
353*5a47c208SGuixin Liu 			    bool ignore_key)
354*5a47c208SGuixin Liu {
355*5a47c208SGuixin Liu 	u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
356*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
357*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
358*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
359*5a47c208SGuixin Liu 	u64 nrkey = le64_to_cpu(d->nrkey);
360*5a47c208SGuixin Liu 
361*5a47c208SGuixin Liu 	down(&pr->pr_sem);
362*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
363*5a47c208SGuixin Liu 		if (uuid_equal(&reg->hostid, &ctrl->hostid)) {
364*5a47c208SGuixin Liu 			if (ignore_key || reg->rkey == le64_to_cpu(d->crkey))
365*5a47c208SGuixin Liu 				status = nvmet_pr_update_reg_attr(pr, reg,
366*5a47c208SGuixin Liu 						nvmet_pr_update_reg_rkey,
367*5a47c208SGuixin Liu 						&nrkey);
368*5a47c208SGuixin Liu 			break;
369*5a47c208SGuixin Liu 		}
370*5a47c208SGuixin Liu 	}
371*5a47c208SGuixin Liu 	up(&pr->pr_sem);
372*5a47c208SGuixin Liu 	return status;
373*5a47c208SGuixin Liu }
374*5a47c208SGuixin Liu 
375*5a47c208SGuixin Liu static void nvmet_execute_pr_register(struct nvmet_req *req)
376*5a47c208SGuixin Liu {
377*5a47c208SGuixin Liu 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
378*5a47c208SGuixin Liu 	bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
379*5a47c208SGuixin Liu 	struct nvmet_pr_register_data *d;
380*5a47c208SGuixin Liu 	u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bits 02:00 */
381*5a47c208SGuixin Liu 	u16 status;
382*5a47c208SGuixin Liu 
383*5a47c208SGuixin Liu 	d = kmalloc(sizeof(*d), GFP_KERNEL);
384*5a47c208SGuixin Liu 	if (!d) {
385*5a47c208SGuixin Liu 		status = NVME_SC_INTERNAL;
386*5a47c208SGuixin Liu 		goto out;
387*5a47c208SGuixin Liu 	}
388*5a47c208SGuixin Liu 
389*5a47c208SGuixin Liu 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
390*5a47c208SGuixin Liu 	if (status)
391*5a47c208SGuixin Liu 		goto free_data;
392*5a47c208SGuixin Liu 
393*5a47c208SGuixin Liu 	switch (reg_act) {
394*5a47c208SGuixin Liu 	case NVME_PR_REGISTER_ACT_REG:
395*5a47c208SGuixin Liu 		status = nvmet_pr_register(req, d);
396*5a47c208SGuixin Liu 		break;
397*5a47c208SGuixin Liu 	case NVME_PR_REGISTER_ACT_UNREG:
398*5a47c208SGuixin Liu 		status = nvmet_pr_unregister(req, d, ignore_key);
399*5a47c208SGuixin Liu 		break;
400*5a47c208SGuixin Liu 	case NVME_PR_REGISTER_ACT_REPLACE:
401*5a47c208SGuixin Liu 		status = nvmet_pr_replace(req, d, ignore_key);
402*5a47c208SGuixin Liu 		break;
403*5a47c208SGuixin Liu 	default:
404*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
405*5a47c208SGuixin Liu 		status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
406*5a47c208SGuixin Liu 		break;
407*5a47c208SGuixin Liu 	}
408*5a47c208SGuixin Liu free_data:
409*5a47c208SGuixin Liu 	kfree(d);
410*5a47c208SGuixin Liu out:
411*5a47c208SGuixin Liu 	if (!status)
412*5a47c208SGuixin Liu 		atomic_inc(&req->ns->pr.generation);
413*5a47c208SGuixin Liu 	nvmet_req_complete(req, status);
414*5a47c208SGuixin Liu }
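/*
 * Host-side sketch (illustrative; exact option names per the nvme-cli
 * documentation): a Reservation Register command carries the action in
 * CDW10 bits 02:00, IEKEY in bit 03, and a 16-byte payload of
 * { crkey, nrkey }. Registering a key with something like
 *
 *	nvme resv-register /dev/nvme0n1 --nrkey=0xa --rrega=0
 *
 * is idempotent here: repeating it with the same key succeeds, while
 * re-registering with a different key fails with Reservation Conflict,
 * as implemented in nvmet_pr_register() above.
 */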
415*5a47c208SGuixin Liu 
416*5a47c208SGuixin Liu static u16 nvmet_pr_acquire(struct nvmet_req *req,
417*5a47c208SGuixin Liu 			    struct nvmet_pr_registrant *reg,
418*5a47c208SGuixin Liu 			    u8 rtype)
419*5a47c208SGuixin Liu {
420*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
421*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
422*5a47c208SGuixin Liu 
423*5a47c208SGuixin Liu 	holder = rcu_dereference_protected(pr->holder, 1);
424*5a47c208SGuixin Liu 	if (holder && reg != holder)
425*5a47c208SGuixin Liu 		return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
426*5a47c208SGuixin Liu 	if (holder && reg == holder) {
427*5a47c208SGuixin Liu 		if (holder->rtype == rtype)
428*5a47c208SGuixin Liu 			return NVME_SC_SUCCESS;
429*5a47c208SGuixin Liu 		return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
430*5a47c208SGuixin Liu 	}
431*5a47c208SGuixin Liu 
432*5a47c208SGuixin Liu 	nvmet_pr_set_new_holder(pr, rtype, reg);
433*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
434*5a47c208SGuixin Liu }
435*5a47c208SGuixin Liu 
436*5a47c208SGuixin Liu static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref)
437*5a47c208SGuixin Liu {
438*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref =
439*5a47c208SGuixin Liu 		container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
440*5a47c208SGuixin Liu 
441*5a47c208SGuixin Liu 	complete(&pc_ref->confirm_done);
442*5a47c208SGuixin Liu }
443*5a47c208SGuixin Liu 
444*5a47c208SGuixin Liu static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid)
445*5a47c208SGuixin Liu {
446*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
447*5a47c208SGuixin Liu 	struct nvmet_ns *ns = req->ns;
448*5a47c208SGuixin Liu 	unsigned long idx;
449*5a47c208SGuixin Liu 
450*5a47c208SGuixin Liu 	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
451*5a47c208SGuixin Liu 		if (uuid_equal(&pc_ref->hostid, hostid)) {
452*5a47c208SGuixin Liu 			percpu_ref_kill_and_confirm(&pc_ref->ref,
453*5a47c208SGuixin Liu 						nvmet_pr_confirm_ns_pc_ref);
454*5a47c208SGuixin Liu 			wait_for_completion(&pc_ref->confirm_done);
455*5a47c208SGuixin Liu 		}
456*5a47c208SGuixin Liu 	}
457*5a47c208SGuixin Liu }
458*5a47c208SGuixin Liu 
459*5a47c208SGuixin Liu static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey,
460*5a47c208SGuixin Liu 					    uuid_t *send_hostid,
461*5a47c208SGuixin Liu 					    bool abort)
462*5a47c208SGuixin Liu {
463*5a47c208SGuixin Liu 	u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
464*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg, *tmp;
465*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
466*5a47c208SGuixin Liu 	uuid_t hostid;
467*5a47c208SGuixin Liu 
468*5a47c208SGuixin Liu 	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
469*5a47c208SGuixin Liu 		if (reg->rkey == prkey) {
470*5a47c208SGuixin Liu 			status = NVME_SC_SUCCESS;
471*5a47c208SGuixin Liu 			uuid_copy(&hostid, &reg->hostid);
472*5a47c208SGuixin Liu 			if (abort)
473*5a47c208SGuixin Liu 				nvmet_pr_set_ctrl_to_abort(req, &hostid);
474*5a47c208SGuixin Liu 			nvmet_pr_unregister_one(pr, reg);
475*5a47c208SGuixin Liu 			if (!uuid_equal(&hostid, send_hostid))
476*5a47c208SGuixin Liu 				nvmet_pr_registration_preempted(pr, &hostid);
477*5a47c208SGuixin Liu 		}
478*5a47c208SGuixin Liu 	}
479*5a47c208SGuixin Liu 	return status;
480*5a47c208SGuixin Liu }
481*5a47c208SGuixin Liu 
482*5a47c208SGuixin Liu static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req,
483*5a47c208SGuixin Liu 					       u64 prkey,
484*5a47c208SGuixin Liu 					       uuid_t *send_hostid,
485*5a47c208SGuixin Liu 					       bool abort)
486*5a47c208SGuixin Liu {
487*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg, *tmp;
488*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
489*5a47c208SGuixin Liu 	uuid_t hostid;
490*5a47c208SGuixin Liu 
491*5a47c208SGuixin Liu 	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
492*5a47c208SGuixin Liu 		if (reg->rkey == prkey &&
493*5a47c208SGuixin Liu 		    !uuid_equal(&reg->hostid, send_hostid)) {
494*5a47c208SGuixin Liu 			uuid_copy(&hostid, &reg->hostid);
495*5a47c208SGuixin Liu 			if (abort)
496*5a47c208SGuixin Liu 				nvmet_pr_set_ctrl_to_abort(req, &hostid);
497*5a47c208SGuixin Liu 			nvmet_pr_unregister_one(pr, reg);
498*5a47c208SGuixin Liu 			nvmet_pr_registration_preempted(pr, &hostid);
499*5a47c208SGuixin Liu 		}
500*5a47c208SGuixin Liu 	}
501*5a47c208SGuixin Liu }
502*5a47c208SGuixin Liu 
503*5a47c208SGuixin Liu static void nvmet_pr_unreg_all_others(struct nvmet_req *req,
504*5a47c208SGuixin Liu 				      uuid_t *send_hostid,
505*5a47c208SGuixin Liu 				      bool abort)
506*5a47c208SGuixin Liu {
507*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg, *tmp;
508*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
509*5a47c208SGuixin Liu 	uuid_t hostid;
510*5a47c208SGuixin Liu 
511*5a47c208SGuixin Liu 	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
512*5a47c208SGuixin Liu 		if (!uuid_equal(&reg->hostid, send_hostid)) {
513*5a47c208SGuixin Liu 			uuid_copy(&hostid, &reg->hostid);
514*5a47c208SGuixin Liu 			if (abort)
515*5a47c208SGuixin Liu 				nvmet_pr_set_ctrl_to_abort(req, &hostid);
516*5a47c208SGuixin Liu 			nvmet_pr_unregister_one(pr, reg);
517*5a47c208SGuixin Liu 			nvmet_pr_registration_preempted(pr, &hostid);
518*5a47c208SGuixin Liu 		}
519*5a47c208SGuixin Liu 	}
520*5a47c208SGuixin Liu }
521*5a47c208SGuixin Liu 
522*5a47c208SGuixin Liu static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg,
523*5a47c208SGuixin Liu 					 void *attr)
524*5a47c208SGuixin Liu {
525*5a47c208SGuixin Liu 	u8 new_rtype = *(u8 *)attr;
526*5a47c208SGuixin Liu 
527*5a47c208SGuixin Liu 	reg->rtype = new_rtype;
528*5a47c208SGuixin Liu }
529*5a47c208SGuixin Liu 
530*5a47c208SGuixin Liu static u16 nvmet_pr_preempt(struct nvmet_req *req,
531*5a47c208SGuixin Liu 			    struct nvmet_pr_registrant *reg,
532*5a47c208SGuixin Liu 			    u8 rtype,
533*5a47c208SGuixin Liu 			    struct nvmet_pr_acquire_data *d,
534*5a47c208SGuixin Liu 			    bool abort)
535*5a47c208SGuixin Liu {
536*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
537*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
538*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
539*5a47c208SGuixin Liu 	enum nvme_pr_type original_rtype;
540*5a47c208SGuixin Liu 	u64 prkey = le64_to_cpu(d->prkey);
541*5a47c208SGuixin Liu 	u16 status;
542*5a47c208SGuixin Liu 
543*5a47c208SGuixin Liu 	holder = rcu_dereference_protected(pr->holder, 1);
544*5a47c208SGuixin Liu 	if (!holder)
545*5a47c208SGuixin Liu 		return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
546*5a47c208SGuixin Liu 					&ctrl->hostid, abort);
547*5a47c208SGuixin Liu 
548*5a47c208SGuixin Liu 	original_rtype = holder->rtype;
549*5a47c208SGuixin Liu 	if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
550*5a47c208SGuixin Liu 	    original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
551*5a47c208SGuixin Liu 		if (!prkey) {
552*5a47c208SGuixin Liu 			/*
553*5a47c208SGuixin Liu 			 * To prevent possible access from other hosts, and to
554*5a47c208SGuixin Liu 			 * avoid terminating the holder, set the new holder
555*5a47c208SGuixin Liu 			 * before unregistering the others.
556*5a47c208SGuixin Liu 			 */
557*5a47c208SGuixin Liu 			nvmet_pr_set_new_holder(pr, rtype, reg);
558*5a47c208SGuixin Liu 			nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort);
559*5a47c208SGuixin Liu 			return NVME_SC_SUCCESS;
560*5a47c208SGuixin Liu 		}
561*5a47c208SGuixin Liu 		return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
562*5a47c208SGuixin Liu 				&ctrl->hostid, abort);
563*5a47c208SGuixin Liu 	}
564*5a47c208SGuixin Liu 
565*5a47c208SGuixin Liu 	if (holder == reg) {
566*5a47c208SGuixin Liu 		status = nvmet_pr_update_reg_attr(pr, holder,
567*5a47c208SGuixin Liu 				nvmet_pr_update_holder_rtype, &rtype);
568*5a47c208SGuixin Liu 		if (!status && original_rtype != rtype)
569*5a47c208SGuixin Liu 			nvmet_pr_resv_released(pr, &reg->hostid);
570*5a47c208SGuixin Liu 		return status;
571*5a47c208SGuixin Liu 	}
572*5a47c208SGuixin Liu 
573*5a47c208SGuixin Liu 	if (prkey == holder->rkey) {
574*5a47c208SGuixin Liu 		/*
575*5a47c208SGuixin Liu 		 * Same as before, set the new holder first.
576*5a47c208SGuixin Liu 		 */
577*5a47c208SGuixin Liu 		nvmet_pr_set_new_holder(pr, rtype, reg);
578*5a47c208SGuixin Liu 		nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid,
579*5a47c208SGuixin Liu 						abort);
580*5a47c208SGuixin Liu 		if (original_rtype != rtype)
581*5a47c208SGuixin Liu 			nvmet_pr_resv_released(pr, &reg->hostid);
582*5a47c208SGuixin Liu 		return NVME_SC_SUCCESS;
583*5a47c208SGuixin Liu 	}
584*5a47c208SGuixin Liu 
585*5a47c208SGuixin Liu 	if (prkey)
586*5a47c208SGuixin Liu 		return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
587*5a47c208SGuixin Liu 					&ctrl->hostid, abort);
588*5a47c208SGuixin Liu 	return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
589*5a47c208SGuixin Liu }
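/*
 * Summary of the preempt cases above (restating the code, for reference):
 * with no current holder, every registrant whose key matches PRKEY is
 * unregistered. Under an all-registrants reservation, PRKEY == 0 makes the
 * sender the new holder and removes everyone else, while a non-zero PRKEY
 * removes all registrants holding that key. If the sender already holds
 * the reservation, only the reservation type is changed. If PRKEY matches
 * the holder's key, the sender takes over the reservation and the other
 * registrants with that key are removed. Otherwise a non-zero PRKEY
 * removes matching registrants, and PRKEY == 0 is rejected as an invalid
 * field.
 */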
590*5a47c208SGuixin Liu 
591*5a47c208SGuixin Liu static void nvmet_pr_do_abort(struct work_struct *w)
592*5a47c208SGuixin Liu {
593*5a47c208SGuixin Liu 	struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work);
594*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
595*5a47c208SGuixin Liu 	struct nvmet_ns *ns = req->ns;
596*5a47c208SGuixin Liu 	unsigned long idx;
597*5a47c208SGuixin Liu 
598*5a47c208SGuixin Liu 	/*
599*5a47c208SGuixin Liu 	 * The target does not support abort; just wait for the per-controller refs to drop to zero.
600*5a47c208SGuixin Liu 	 */
601*5a47c208SGuixin Liu 	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
602*5a47c208SGuixin Liu 		if (percpu_ref_is_dying(&pc_ref->ref)) {
603*5a47c208SGuixin Liu 			wait_for_completion(&pc_ref->free_done);
604*5a47c208SGuixin Liu 			reinit_completion(&pc_ref->confirm_done);
605*5a47c208SGuixin Liu 			reinit_completion(&pc_ref->free_done);
606*5a47c208SGuixin Liu 			percpu_ref_resurrect(&pc_ref->ref);
607*5a47c208SGuixin Liu 		}
608*5a47c208SGuixin Liu 	}
609*5a47c208SGuixin Liu 
610*5a47c208SGuixin Liu 	up(&ns->pr.pr_sem);
611*5a47c208SGuixin Liu 	nvmet_req_complete(req, NVME_SC_SUCCESS);
612*5a47c208SGuixin Liu }
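/*
 * Lifecycle of the per-controller refs used for preempt-and-abort (a
 * reading of the code, not normative): every I/O command on a PR-enabled
 * namespace takes a reference via nvmet_pr_get_ns_pc_ref(), and the
 * completion path is expected to drop it. Preempt-and-abort kills the refs
 * of the preempted hosts in nvmet_pr_set_ctrl_to_abort() and waits for
 * confirm_done; once every in-flight command has finished, free_done
 * completes and the work item above resurrects the ref so those
 * controllers can issue new commands again.
 */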
613*5a47c208SGuixin Liu 
614*5a47c208SGuixin Liu static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req,
615*5a47c208SGuixin Liu 				      struct nvmet_pr_registrant *reg,
616*5a47c208SGuixin Liu 				      u8 acquire_act,
617*5a47c208SGuixin Liu 				      u8 rtype,
618*5a47c208SGuixin Liu 				      struct nvmet_pr_acquire_data *d)
619*5a47c208SGuixin Liu {
620*5a47c208SGuixin Liu 	u16 status;
621*5a47c208SGuixin Liu 
622*5a47c208SGuixin Liu 	switch (acquire_act) {
623*5a47c208SGuixin Liu 	case NVME_PR_ACQUIRE_ACT_ACQUIRE:
624*5a47c208SGuixin Liu 		status = nvmet_pr_acquire(req, reg, rtype);
625*5a47c208SGuixin Liu 		goto out;
626*5a47c208SGuixin Liu 	case NVME_PR_ACQUIRE_ACT_PREEMPT:
627*5a47c208SGuixin Liu 		status = nvmet_pr_preempt(req, reg, rtype, d, false);
628*5a47c208SGuixin Liu 		goto inc_gen;
629*5a47c208SGuixin Liu 	case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT:
630*5a47c208SGuixin Liu 		status = nvmet_pr_preempt(req, reg, rtype, d, true);
631*5a47c208SGuixin Liu 		goto inc_gen;
632*5a47c208SGuixin Liu 	default:
633*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
634*5a47c208SGuixin Liu 		status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
635*5a47c208SGuixin Liu 		goto out;
636*5a47c208SGuixin Liu 	}
637*5a47c208SGuixin Liu inc_gen:
638*5a47c208SGuixin Liu 	if (!status)
639*5a47c208SGuixin Liu 		atomic_inc(&req->ns->pr.generation);
640*5a47c208SGuixin Liu out:
641*5a47c208SGuixin Liu 	return status;
642*5a47c208SGuixin Liu }
643*5a47c208SGuixin Liu 
644*5a47c208SGuixin Liu static void nvmet_execute_pr_acquire(struct nvmet_req *req)
645*5a47c208SGuixin Liu {
646*5a47c208SGuixin Liu 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
647*5a47c208SGuixin Liu 	bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
648*5a47c208SGuixin Liu 	/* Reservation type, bits 15:08 */
649*5a47c208SGuixin Liu 	u8 rtype = (u8)((cdw10 >> 8) & 0xff);
650*5a47c208SGuixin Liu 	/* Reservation acquire action, bits 02:00 */
651*5a47c208SGuixin Liu 	u8 acquire_act = cdw10 & 0x07;
652*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
653*5a47c208SGuixin Liu 	struct nvmet_pr_acquire_data *d = NULL;
654*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
655*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
656*5a47c208SGuixin Liu 	u16 status = NVME_SC_SUCCESS;
657*5a47c208SGuixin Liu 
658*5a47c208SGuixin Liu 	if (ignore_key ||
659*5a47c208SGuixin Liu 	    rtype < NVME_PR_WRITE_EXCLUSIVE ||
660*5a47c208SGuixin Liu 	    rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
661*5a47c208SGuixin Liu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
662*5a47c208SGuixin Liu 		goto out;
663*5a47c208SGuixin Liu 	}
664*5a47c208SGuixin Liu 
665*5a47c208SGuixin Liu 	d = kmalloc(sizeof(*d), GFP_KERNEL);
666*5a47c208SGuixin Liu 	if (!d) {
667*5a47c208SGuixin Liu 		status = NVME_SC_INTERNAL;
668*5a47c208SGuixin Liu 		goto out;
669*5a47c208SGuixin Liu 	}
670*5a47c208SGuixin Liu 
671*5a47c208SGuixin Liu 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
672*5a47c208SGuixin Liu 	if (status)
673*5a47c208SGuixin Liu 		goto free_data;
674*5a47c208SGuixin Liu 
675*5a47c208SGuixin Liu 	status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
676*5a47c208SGuixin Liu 	down(&pr->pr_sem);
677*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
678*5a47c208SGuixin Liu 		if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
679*5a47c208SGuixin Liu 		    reg->rkey == le64_to_cpu(d->crkey)) {
680*5a47c208SGuixin Liu 			status = __nvmet_execute_pr_acquire(req, reg,
681*5a47c208SGuixin Liu 					acquire_act, rtype, d);
682*5a47c208SGuixin Liu 			break;
683*5a47c208SGuixin Liu 		}
684*5a47c208SGuixin Liu 	}
685*5a47c208SGuixin Liu 
686*5a47c208SGuixin Liu 	if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) {
687*5a47c208SGuixin Liu 		kfree(d);
688*5a47c208SGuixin Liu 		INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort);
689*5a47c208SGuixin Liu 		queue_work(nvmet_wq, &req->r.abort_work);
690*5a47c208SGuixin Liu 		return;
691*5a47c208SGuixin Liu 	}
692*5a47c208SGuixin Liu 
693*5a47c208SGuixin Liu 	up(&pr->pr_sem);
694*5a47c208SGuixin Liu 
695*5a47c208SGuixin Liu free_data:
696*5a47c208SGuixin Liu 	kfree(d);
697*5a47c208SGuixin Liu out:
698*5a47c208SGuixin Liu 	nvmet_req_complete(req, status);
699*5a47c208SGuixin Liu }
700*5a47c208SGuixin Liu 
701*5a47c208SGuixin Liu static u16 nvmet_pr_release(struct nvmet_req *req,
702*5a47c208SGuixin Liu 			    struct nvmet_pr_registrant *reg,
703*5a47c208SGuixin Liu 			    u8 rtype)
704*5a47c208SGuixin Liu {
705*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
706*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
707*5a47c208SGuixin Liu 	u8 original_rtype;
708*5a47c208SGuixin Liu 
709*5a47c208SGuixin Liu 	holder = rcu_dereference_protected(pr->holder, 1);
710*5a47c208SGuixin Liu 	if (!holder || reg != holder)
711*5a47c208SGuixin Liu 		return NVME_SC_SUCCESS;
712*5a47c208SGuixin Liu 
713*5a47c208SGuixin Liu 	original_rtype = holder->rtype;
714*5a47c208SGuixin Liu 	if (original_rtype != rtype)
715*5a47c208SGuixin Liu 		return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
716*5a47c208SGuixin Liu 
717*5a47c208SGuixin Liu 	rcu_assign_pointer(pr->holder, NULL);
718*5a47c208SGuixin Liu 
719*5a47c208SGuixin Liu 	if (original_rtype != NVME_PR_WRITE_EXCLUSIVE &&
720*5a47c208SGuixin Liu 	    original_rtype != NVME_PR_EXCLUSIVE_ACCESS)
721*5a47c208SGuixin Liu 		nvmet_pr_resv_released(pr, &reg->hostid);
722*5a47c208SGuixin Liu 
723*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
724*5a47c208SGuixin Liu }
725*5a47c208SGuixin Liu 
726*5a47c208SGuixin Liu static void nvmet_pr_clear(struct nvmet_req *req)
727*5a47c208SGuixin Liu {
728*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg, *tmp;
729*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
730*5a47c208SGuixin Liu 
731*5a47c208SGuixin Liu 	rcu_assign_pointer(pr->holder, NULL);
732*5a47c208SGuixin Liu 
733*5a47c208SGuixin Liu 	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
734*5a47c208SGuixin Liu 		list_del_rcu(&reg->entry);
735*5a47c208SGuixin Liu 		if (!uuid_equal(&req->sq->ctrl->hostid, &reg->hostid))
736*5a47c208SGuixin Liu 			nvmet_pr_resv_preempted(pr, &reg->hostid);
737*5a47c208SGuixin Liu 		kfree_rcu(reg, rcu);
738*5a47c208SGuixin Liu 	}
739*5a47c208SGuixin Liu 
740*5a47c208SGuixin Liu 	atomic_inc(&pr->generation);
741*5a47c208SGuixin Liu }
742*5a47c208SGuixin Liu 
743*5a47c208SGuixin Liu static u16 __nvmet_execute_pr_release(struct nvmet_req *req,
744*5a47c208SGuixin Liu 				      struct nvmet_pr_registrant *reg,
745*5a47c208SGuixin Liu 				      u8 release_act, u8 rtype)
746*5a47c208SGuixin Liu {
747*5a47c208SGuixin Liu 	switch (release_act) {
748*5a47c208SGuixin Liu 	case NVME_PR_RELEASE_ACT_RELEASE:
749*5a47c208SGuixin Liu 		return nvmet_pr_release(req, reg, rtype);
750*5a47c208SGuixin Liu 	case NVME_PR_RELEASE_ACT_CLEAR:
751*5a47c208SGuixin Liu 		nvmet_pr_clear(req);
752*5a47c208SGuixin Liu 		return NVME_SC_SUCCESS;
753*5a47c208SGuixin Liu 	default:
754*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
755*5a47c208SGuixin Liu 		return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
756*5a47c208SGuixin Liu 	}
757*5a47c208SGuixin Liu }
758*5a47c208SGuixin Liu 
759*5a47c208SGuixin Liu static void nvmet_execute_pr_release(struct nvmet_req *req)
760*5a47c208SGuixin Liu {
761*5a47c208SGuixin Liu 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
762*5a47c208SGuixin Liu 	bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
763*5a47c208SGuixin Liu 	u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bits 15:08 */
764*5a47c208SGuixin Liu 	u8 release_act = cdw10 & 0x07; /* Reservation release action, bits 02:00 */
765*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
766*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
767*5a47c208SGuixin Liu 	struct nvmet_pr_release_data *d;
768*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
769*5a47c208SGuixin Liu 	u16 status;
770*5a47c208SGuixin Liu 
771*5a47c208SGuixin Liu 	if (ignore_key) {
772*5a47c208SGuixin Liu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
773*5a47c208SGuixin Liu 		goto out;
774*5a47c208SGuixin Liu 	}
775*5a47c208SGuixin Liu 
776*5a47c208SGuixin Liu 	d = kmalloc(sizeof(*d), GFP_KERNEL);
777*5a47c208SGuixin Liu 	if (!d) {
778*5a47c208SGuixin Liu 		status = NVME_SC_INTERNAL;
779*5a47c208SGuixin Liu 		goto out;
780*5a47c208SGuixin Liu 	}
781*5a47c208SGuixin Liu 
782*5a47c208SGuixin Liu 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
783*5a47c208SGuixin Liu 	if (status)
784*5a47c208SGuixin Liu 		goto free_data;
785*5a47c208SGuixin Liu 
786*5a47c208SGuixin Liu 	status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
787*5a47c208SGuixin Liu 	down(&pr->pr_sem);
788*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
789*5a47c208SGuixin Liu 		if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
790*5a47c208SGuixin Liu 		    reg->rkey == le64_to_cpu(d->crkey)) {
791*5a47c208SGuixin Liu 			status = __nvmet_execute_pr_release(req, reg,
792*5a47c208SGuixin Liu 					release_act, rtype);
793*5a47c208SGuixin Liu 			break;
794*5a47c208SGuixin Liu 		}
795*5a47c208SGuixin Liu 	}
796*5a47c208SGuixin Liu 	up(&pr->pr_sem);
797*5a47c208SGuixin Liu free_data:
798*5a47c208SGuixin Liu 	kfree(d);
799*5a47c208SGuixin Liu out:
800*5a47c208SGuixin Liu 	nvmet_req_complete(req, status);
801*5a47c208SGuixin Liu }
802*5a47c208SGuixin Liu 
803*5a47c208SGuixin Liu static void nvmet_execute_pr_report(struct nvmet_req *req)
804*5a47c208SGuixin Liu {
805*5a47c208SGuixin Liu 	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
806*5a47c208SGuixin Liu 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
807*5a47c208SGuixin Liu 	u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is the 0's based number of dwords */
808*5a47c208SGuixin Liu 	u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */
809*5a47c208SGuixin Liu 	struct nvme_registered_ctrl_ext *ctrl_eds;
810*5a47c208SGuixin Liu 	struct nvme_reservation_status_ext *data;
811*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &req->ns->pr;
812*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
813*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg;
814*5a47c208SGuixin Liu 	u16 num_ctrls = 0;
815*5a47c208SGuixin Liu 	u16 status;
816*5a47c208SGuixin Liu 	u8 rtype;
817*5a47c208SGuixin Liu 
818*5a47c208SGuixin Liu 	/* nvmet hostid (uuid_t) is 128 bits, so require the extended data structure. */
819*5a47c208SGuixin Liu 	if (!eds) {
820*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw11);
821*5a47c208SGuixin Liu 		status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR;
822*5a47c208SGuixin Liu 		goto out;
823*5a47c208SGuixin Liu 	}
824*5a47c208SGuixin Liu 
825*5a47c208SGuixin Liu 	if (num_bytes < sizeof(struct nvme_reservation_status_ext)) {
826*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, cdw10);
827*5a47c208SGuixin Liu 		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
828*5a47c208SGuixin Liu 		goto out;
829*5a47c208SGuixin Liu 	}
830*5a47c208SGuixin Liu 
831*5a47c208SGuixin Liu 	data = kmalloc(num_bytes, GFP_KERNEL);
832*5a47c208SGuixin Liu 	if (!data) {
833*5a47c208SGuixin Liu 		status = NVME_SC_INTERNAL;
834*5a47c208SGuixin Liu 		goto out;
835*5a47c208SGuixin Liu 	}
836*5a47c208SGuixin Liu 	memset(data, 0, num_bytes);
837*5a47c208SGuixin Liu 	data->gen = cpu_to_le32(atomic_read(&pr->generation));
838*5a47c208SGuixin Liu 	data->ptpls = 0;
839*5a47c208SGuixin Liu 	ctrl_eds = data->regctl_eds;
840*5a47c208SGuixin Liu 
841*5a47c208SGuixin Liu 	rcu_read_lock();
842*5a47c208SGuixin Liu 	holder = rcu_dereference(pr->holder);
843*5a47c208SGuixin Liu 	rtype = holder ? holder->rtype : 0;
844*5a47c208SGuixin Liu 	data->rtype = rtype;
845*5a47c208SGuixin Liu 
846*5a47c208SGuixin Liu 	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
847*5a47c208SGuixin Liu 		num_ctrls++;
848*5a47c208SGuixin Liu 		/*
849*5a47c208SGuixin Liu 		 * Continue so we still count the total number of registrants.
850*5a47c208SGuixin Liu 		 */
851*5a47c208SGuixin Liu 		if (((void *)ctrl_eds + sizeof(*ctrl_eds)) >
852*5a47c208SGuixin Liu 		    ((void *)data + num_bytes))
853*5a47c208SGuixin Liu 			continue;
854*5a47c208SGuixin Liu 		/*
855*5a47c208SGuixin Liu 		 * Dynamic controller, set cntlid to 0xffff.
856*5a47c208SGuixin Liu 		 */
857*5a47c208SGuixin Liu 		ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
858*5a47c208SGuixin Liu 		if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
859*5a47c208SGuixin Liu 		    rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS)
860*5a47c208SGuixin Liu 			ctrl_eds->rcsts = 1;
861*5a47c208SGuixin Liu 		if (reg == holder)
862*5a47c208SGuixin Liu 			ctrl_eds->rcsts = 1;
863*5a47c208SGuixin Liu 		uuid_copy((uuid_t *)&ctrl_eds->hostid, &reg->hostid);
864*5a47c208SGuixin Liu 		ctrl_eds->rkey = cpu_to_le64(reg->rkey);
865*5a47c208SGuixin Liu 		ctrl_eds++;
866*5a47c208SGuixin Liu 	}
867*5a47c208SGuixin Liu 	rcu_read_unlock();
868*5a47c208SGuixin Liu 
869*5a47c208SGuixin Liu 	put_unaligned_le16(num_ctrls, data->regctl);
870*5a47c208SGuixin Liu 	status = nvmet_copy_to_sgl(req, 0, data, num_bytes);
871*5a47c208SGuixin Liu 	kfree(data);
872*5a47c208SGuixin Liu out:
873*5a47c208SGuixin Liu 	nvmet_req_complete(req, status);
874*5a47c208SGuixin Liu }
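/*
 * Sizing sketch (assuming the extended data structure layout from the NVMe
 * spec: a 64-byte status header followed by 64-byte registered controller
 * entries): reporting N registrants needs 64 * (N + 1) bytes. For example,
 * NUMD (CDW10) = 0x1f gives num_bytes = 4 * 0x20 = 128, enough for the
 * header plus one entry; additional registrants are counted but truncated,
 * and the regctl field still reports the full count so the host can retry
 * with a larger buffer.
 */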
875*5a47c208SGuixin Liu 
876*5a47c208SGuixin Liu u16 nvmet_parse_pr_cmd(struct nvmet_req *req)
877*5a47c208SGuixin Liu {
878*5a47c208SGuixin Liu 	struct nvme_command *cmd = req->cmd;
879*5a47c208SGuixin Liu 
880*5a47c208SGuixin Liu 	switch (cmd->common.opcode) {
881*5a47c208SGuixin Liu 	case nvme_cmd_resv_register:
882*5a47c208SGuixin Liu 		req->execute = nvmet_execute_pr_register;
883*5a47c208SGuixin Liu 		break;
884*5a47c208SGuixin Liu 	case nvme_cmd_resv_acquire:
885*5a47c208SGuixin Liu 		req->execute = nvmet_execute_pr_acquire;
886*5a47c208SGuixin Liu 		break;
887*5a47c208SGuixin Liu 	case nvme_cmd_resv_release:
888*5a47c208SGuixin Liu 		req->execute = nvmet_execute_pr_release;
889*5a47c208SGuixin Liu 		break;
890*5a47c208SGuixin Liu 	case nvme_cmd_resv_report:
891*5a47c208SGuixin Liu 		req->execute = nvmet_execute_pr_report;
892*5a47c208SGuixin Liu 		break;
893*5a47c208SGuixin Liu 	default:
894*5a47c208SGuixin Liu 		return 1;
895*5a47c208SGuixin Liu 	}
896*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
897*5a47c208SGuixin Liu }
898*5a47c208SGuixin Liu 
899*5a47c208SGuixin Liu static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req)
900*5a47c208SGuixin Liu {
901*5a47c208SGuixin Liu 	u8 opcode = req->cmd->common.opcode;
902*5a47c208SGuixin Liu 
903*5a47c208SGuixin Liu 	if (req->sq->qid) {
904*5a47c208SGuixin Liu 		switch (opcode) {
905*5a47c208SGuixin Liu 		case nvme_cmd_flush:
906*5a47c208SGuixin Liu 		case nvme_cmd_write:
907*5a47c208SGuixin Liu 		case nvme_cmd_write_zeroes:
908*5a47c208SGuixin Liu 		case nvme_cmd_dsm:
909*5a47c208SGuixin Liu 		case nvme_cmd_zone_append:
910*5a47c208SGuixin Liu 		case nvme_cmd_zone_mgmt_send:
911*5a47c208SGuixin Liu 			return true;
912*5a47c208SGuixin Liu 		default:
913*5a47c208SGuixin Liu 			return false;
914*5a47c208SGuixin Liu 		}
915*5a47c208SGuixin Liu 	}
916*5a47c208SGuixin Liu 	return false;
917*5a47c208SGuixin Liu }
918*5a47c208SGuixin Liu 
919*5a47c208SGuixin Liu static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req)
920*5a47c208SGuixin Liu {
921*5a47c208SGuixin Liu 	u8 opcode = req->cmd->common.opcode;
922*5a47c208SGuixin Liu 
923*5a47c208SGuixin Liu 	if (req->sq->qid) {
924*5a47c208SGuixin Liu 		switch (opcode) {
925*5a47c208SGuixin Liu 		case nvme_cmd_read:
926*5a47c208SGuixin Liu 		case nvme_cmd_zone_mgmt_recv:
927*5a47c208SGuixin Liu 			return true;
928*5a47c208SGuixin Liu 		default:
929*5a47c208SGuixin Liu 			return false;
930*5a47c208SGuixin Liu 		}
931*5a47c208SGuixin Liu 	}
932*5a47c208SGuixin Liu 	return false;
933*5a47c208SGuixin Liu }
934*5a47c208SGuixin Liu 
935*5a47c208SGuixin Liu u16 nvmet_pr_check_cmd_access(struct nvmet_req *req)
936*5a47c208SGuixin Liu {
937*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
938*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *holder;
939*5a47c208SGuixin Liu 	struct nvmet_ns *ns = req->ns;
940*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &ns->pr;
941*5a47c208SGuixin Liu 	u16 status = NVME_SC_SUCCESS;
942*5a47c208SGuixin Liu 
943*5a47c208SGuixin Liu 	rcu_read_lock();
944*5a47c208SGuixin Liu 	holder = rcu_dereference(pr->holder);
945*5a47c208SGuixin Liu 	if (!holder)
946*5a47c208SGuixin Liu 		goto unlock;
947*5a47c208SGuixin Liu 	if (uuid_equal(&ctrl->hostid, &holder->hostid))
948*5a47c208SGuixin Liu 		goto unlock;
949*5a47c208SGuixin Liu 
950*5a47c208SGuixin Liu 	/*
951*5a47c208SGuixin Liu 	 * The Reservation command group is checked when the command
952*5a47c208SGuixin Liu 	 * executes, so allow it here.
953*5a47c208SGuixin Liu 	 */
954*5a47c208SGuixin Liu 	switch (holder->rtype) {
955*5a47c208SGuixin Liu 	case NVME_PR_WRITE_EXCLUSIVE:
956*5a47c208SGuixin Liu 		if (nvmet_is_req_write_cmd_group(req))
957*5a47c208SGuixin Liu 			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
958*5a47c208SGuixin Liu 		break;
959*5a47c208SGuixin Liu 	case NVME_PR_EXCLUSIVE_ACCESS:
960*5a47c208SGuixin Liu 		if (nvmet_is_req_read_cmd_group(req) ||
961*5a47c208SGuixin Liu 		    nvmet_is_req_write_cmd_group(req))
962*5a47c208SGuixin Liu 			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
963*5a47c208SGuixin Liu 		break;
964*5a47c208SGuixin Liu 	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
965*5a47c208SGuixin Liu 	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
966*5a47c208SGuixin Liu 		if ((nvmet_is_req_write_cmd_group(req)) &&
967*5a47c208SGuixin Liu 		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
968*5a47c208SGuixin Liu 			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
969*5a47c208SGuixin Liu 		break;
970*5a47c208SGuixin Liu 	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
971*5a47c208SGuixin Liu 	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
972*5a47c208SGuixin Liu 		if ((nvmet_is_req_read_cmd_group(req) ||
973*5a47c208SGuixin Liu 		    nvmet_is_req_write_cmd_group(req)) &&
974*5a47c208SGuixin Liu 		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
975*5a47c208SGuixin Liu 			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
976*5a47c208SGuixin Liu 		break;
977*5a47c208SGuixin Liu 	default:
978*5a47c208SGuixin Liu 		pr_warn("the reservation type is set incorrectly, type:%d\n",
979*5a47c208SGuixin Liu 			holder->rtype);
980*5a47c208SGuixin Liu 		break;
981*5a47c208SGuixin Liu 	}
982*5a47c208SGuixin Liu 
983*5a47c208SGuixin Liu unlock:
984*5a47c208SGuixin Liu 	rcu_read_unlock();
985*5a47c208SGuixin Liu 	if (status)
986*5a47c208SGuixin Liu 		req->error_loc = offsetof(struct nvme_common_command, opcode);
987*5a47c208SGuixin Liu 	return status;
988*5a47c208SGuixin Liu }
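/*
 * Example of the checks above (illustrative): with a Write Exclusive
 * reservation held by host A, a write, flush or dsm from any other host
 * fails with Reservation Conflict while reads still succeed; with
 * Exclusive Access - Registrants Only, an unregistered host is blocked for
 * both command groups, but a registered non-holder keeps full access.
 */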
989*5a47c208SGuixin Liu 
990*5a47c208SGuixin Liu u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
991*5a47c208SGuixin Liu {
992*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
993*5a47c208SGuixin Liu 
994*5a47c208SGuixin Liu 	pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
995*5a47c208SGuixin Liu 			req->sq->ctrl->cntlid);
996*5a47c208SGuixin Liu 	if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
997*5a47c208SGuixin Liu 		return NVME_SC_INTERNAL;
998*5a47c208SGuixin Liu 	req->pc_ref = pc_ref;
999*5a47c208SGuixin Liu 	return NVME_SC_SUCCESS;
1000*5a47c208SGuixin Liu }
1001*5a47c208SGuixin Liu 
1002*5a47c208SGuixin Liu static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
1003*5a47c208SGuixin Liu {
1004*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref =
1005*5a47c208SGuixin Liu 		container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
1006*5a47c208SGuixin Liu 
1007*5a47c208SGuixin Liu 	complete(&pc_ref->free_done);
1008*5a47c208SGuixin Liu }
1009*5a47c208SGuixin Liu 
1010*5a47c208SGuixin Liu static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
1011*5a47c208SGuixin Liu 					    unsigned long idx,
1012*5a47c208SGuixin Liu 					    uuid_t *hostid)
1013*5a47c208SGuixin Liu {
1014*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
1015*5a47c208SGuixin Liu 	int ret;
1016*5a47c208SGuixin Liu 
1017*5a47c208SGuixin Liu 	pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
1018*5a47c208SGuixin Liu 	if (!pc_ref)
1019*5a47c208SGuixin Liu 		return -ENOMEM;
1020*5a47c208SGuixin Liu 
1021*5a47c208SGuixin Liu 	ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
1022*5a47c208SGuixin Liu 			PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
1023*5a47c208SGuixin Liu 	if (ret)
1024*5a47c208SGuixin Liu 		goto free;
1025*5a47c208SGuixin Liu 
1026*5a47c208SGuixin Liu 	init_completion(&pc_ref->free_done);
1027*5a47c208SGuixin Liu 	init_completion(&pc_ref->confirm_done);
1028*5a47c208SGuixin Liu 	uuid_copy(&pc_ref->hostid, hostid);
1029*5a47c208SGuixin Liu 
1030*5a47c208SGuixin Liu 	ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
1031*5a47c208SGuixin Liu 	if (ret)
1032*5a47c208SGuixin Liu 		goto exit;
1033*5a47c208SGuixin Liu 	return ret;
1034*5a47c208SGuixin Liu exit:
1035*5a47c208SGuixin Liu 	percpu_ref_exit(&pc_ref->ref);
1036*5a47c208SGuixin Liu free:
1037*5a47c208SGuixin Liu 	kfree(pc_ref);
1038*5a47c208SGuixin Liu 	return ret;
1039*5a47c208SGuixin Liu }
1040*5a47c208SGuixin Liu 
1041*5a47c208SGuixin Liu int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
1042*5a47c208SGuixin Liu {
1043*5a47c208SGuixin Liu 	struct nvmet_subsys *subsys = ctrl->subsys;
1044*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
1045*5a47c208SGuixin Liu 	struct nvmet_ns *ns = NULL;
1046*5a47c208SGuixin Liu 	unsigned long idx;
1047*5a47c208SGuixin Liu 	int ret;
1048*5a47c208SGuixin Liu 
1049*5a47c208SGuixin Liu 	ctrl->pr_log_mgr.counter = 0;
1050*5a47c208SGuixin Liu 	ctrl->pr_log_mgr.lost_count = 0;
1051*5a47c208SGuixin Liu 	mutex_init(&ctrl->pr_log_mgr.lock);
1052*5a47c208SGuixin Liu 	INIT_KFIFO(ctrl->pr_log_mgr.log_queue);
1053*5a47c208SGuixin Liu 
1054*5a47c208SGuixin Liu 	/*
1055*5a47c208SGuixin Liu 	 * We hold the subsys lock here, so any ns that is not yet in
1056*5a47c208SGuixin Liu 	 * subsys->namespaces cannot be enabled and nvmet_pr_init_ns()
1057*5a47c208SGuixin Liu 	 * has not been called for it; see nvmet_ns_enable() for details.
1058*5a47c208SGuixin Liu 	 * So just check ns->pr.enable.
1059*5a47c208SGuixin Liu 	 */
1060*5a47c208SGuixin Liu 	xa_for_each(&subsys->namespaces, idx, ns) {
1061*5a47c208SGuixin Liu 		if (ns->pr.enable) {
1062*5a47c208SGuixin Liu 			ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
1063*5a47c208SGuixin Liu 							&ctrl->hostid);
1064*5a47c208SGuixin Liu 			if (ret)
1065*5a47c208SGuixin Liu 				goto free_per_ctrl_refs;
1066*5a47c208SGuixin Liu 		}
1067*5a47c208SGuixin Liu 	}
1068*5a47c208SGuixin Liu 	return 0;
1069*5a47c208SGuixin Liu 
1070*5a47c208SGuixin Liu free_per_ctrl_refs:
1071*5a47c208SGuixin Liu 	xa_for_each(&subsys->namespaces, idx, ns) {
1072*5a47c208SGuixin Liu 		if (ns->pr.enable) {
1073*5a47c208SGuixin Liu 			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
1074*5a47c208SGuixin Liu 			if (pc_ref)
1075*5a47c208SGuixin Liu 				percpu_ref_exit(&pc_ref->ref);
1076*5a47c208SGuixin Liu 			kfree(pc_ref);
1077*5a47c208SGuixin Liu 		}
1078*5a47c208SGuixin Liu 	}
1079*5a47c208SGuixin Liu 	return ret;
1080*5a47c208SGuixin Liu }
1081*5a47c208SGuixin Liu 
1082*5a47c208SGuixin Liu void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
1083*5a47c208SGuixin Liu {
1084*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
1085*5a47c208SGuixin Liu 	struct nvmet_ns *ns;
1086*5a47c208SGuixin Liu 	unsigned long idx;
1087*5a47c208SGuixin Liu 
1088*5a47c208SGuixin Liu 	kfifo_free(&ctrl->pr_log_mgr.log_queue);
1089*5a47c208SGuixin Liu 	mutex_destroy(&ctrl->pr_log_mgr.lock);
1090*5a47c208SGuixin Liu 
1091*5a47c208SGuixin Liu 	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
1092*5a47c208SGuixin Liu 		if (ns->pr.enable) {
1093*5a47c208SGuixin Liu 			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
1094*5a47c208SGuixin Liu 			if (pc_ref)
1095*5a47c208SGuixin Liu 				percpu_ref_exit(&pc_ref->ref);
1096*5a47c208SGuixin Liu 			kfree(pc_ref);
1097*5a47c208SGuixin Liu 		}
1098*5a47c208SGuixin Liu 	}
1099*5a47c208SGuixin Liu }
1100*5a47c208SGuixin Liu 
1101*5a47c208SGuixin Liu int nvmet_pr_init_ns(struct nvmet_ns *ns)
1102*5a47c208SGuixin Liu {
1103*5a47c208SGuixin Liu 	struct nvmet_subsys *subsys = ns->subsys;
1104*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
1105*5a47c208SGuixin Liu 	struct nvmet_ctrl *ctrl = NULL;
1106*5a47c208SGuixin Liu 	unsigned long idx;
1107*5a47c208SGuixin Liu 	int ret;
1108*5a47c208SGuixin Liu 
1109*5a47c208SGuixin Liu 	ns->pr.holder = NULL;
1110*5a47c208SGuixin Liu 	atomic_set(&ns->pr.generation, 0);
1111*5a47c208SGuixin Liu 	sema_init(&ns->pr.pr_sem, 1);
1112*5a47c208SGuixin Liu 	INIT_LIST_HEAD(&ns->pr.registrant_list);
1113*5a47c208SGuixin Liu 	ns->pr.notify_mask = 0;
1114*5a47c208SGuixin Liu 
1115*5a47c208SGuixin Liu 	xa_init(&ns->pr_per_ctrl_refs);
1116*5a47c208SGuixin Liu 
1117*5a47c208SGuixin Liu 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1118*5a47c208SGuixin Liu 		ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
1119*5a47c208SGuixin Liu 						&ctrl->hostid);
1120*5a47c208SGuixin Liu 		if (ret)
1121*5a47c208SGuixin Liu 			goto free_per_ctrl_refs;
1122*5a47c208SGuixin Liu 	}
1123*5a47c208SGuixin Liu 	return 0;
1124*5a47c208SGuixin Liu 
1125*5a47c208SGuixin Liu free_per_ctrl_refs:
1126*5a47c208SGuixin Liu 	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
1127*5a47c208SGuixin Liu 		xa_erase(&ns->pr_per_ctrl_refs, idx);
1128*5a47c208SGuixin Liu 		percpu_ref_exit(&pc_ref->ref);
1129*5a47c208SGuixin Liu 		kfree(pc_ref);
1130*5a47c208SGuixin Liu 	}
1131*5a47c208SGuixin Liu 	return ret;
1132*5a47c208SGuixin Liu }
1133*5a47c208SGuixin Liu 
1134*5a47c208SGuixin Liu void nvmet_pr_exit_ns(struct nvmet_ns *ns)
1135*5a47c208SGuixin Liu {
1136*5a47c208SGuixin Liu 	struct nvmet_pr_registrant *reg, *tmp;
1137*5a47c208SGuixin Liu 	struct nvmet_pr_per_ctrl_ref *pc_ref;
1138*5a47c208SGuixin Liu 	struct nvmet_pr *pr = &ns->pr;
1139*5a47c208SGuixin Liu 	unsigned long idx;
1140*5a47c208SGuixin Liu 
1141*5a47c208SGuixin Liu 	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
1142*5a47c208SGuixin Liu 		list_del(&reg->entry);
1143*5a47c208SGuixin Liu 		kfree(reg);
1144*5a47c208SGuixin Liu 	}
1145*5a47c208SGuixin Liu 
1146*5a47c208SGuixin Liu 	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
1147*5a47c208SGuixin Liu 		/*
1148*5a47c208SGuixin Liu 		 * No commands are outstanding on this ns, so we can safely free pc_ref.
1149*5a47c208SGuixin Liu 		 */
1150*5a47c208SGuixin Liu 		pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx);
1151*5a47c208SGuixin Liu 		percpu_ref_exit(&pc_ref->ref);
1152*5a47c208SGuixin Liu 		kfree(pc_ref);
1153*5a47c208SGuixin Liu 	}
1154*5a47c208SGuixin Liu 
1155*5a47c208SGuixin Liu 	xa_destroy(&ns->pr_per_ctrl_refs);
1156*5a47c208SGuixin Liu }
1157