15a47c208SGuixin Liu // SPDX-License-Identifier: GPL-2.0
25a47c208SGuixin Liu /*
35a47c208SGuixin Liu * NVMe over Fabrics Persist Reservation.
45a47c208SGuixin Liu * Copyright (c) 2024 Guixin Liu, Alibaba Group.
55a47c208SGuixin Liu * All rights reserved.
65a47c208SGuixin Liu */
75a47c208SGuixin Liu #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
85a47c208SGuixin Liu #include <linux/unaligned.h>
95a47c208SGuixin Liu #include "nvmet.h"
105a47c208SGuixin Liu
115a47c208SGuixin Liu #define NVMET_PR_NOTIFI_MASK_ALL \
125a47c208SGuixin Liu (1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \
135a47c208SGuixin Liu 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \
145a47c208SGuixin Liu 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED)
155a47c208SGuixin Liu
nvmet_pr_parse_ignore_key(u32 cdw10)165a47c208SGuixin Liu static inline bool nvmet_pr_parse_ignore_key(u32 cdw10)
175a47c208SGuixin Liu {
185a47c208SGuixin Liu /* Ignore existing key, bit 03. */
195a47c208SGuixin Liu return (cdw10 >> 3) & 1;
205a47c208SGuixin Liu }
215a47c208SGuixin Liu
nvmet_pr_to_ns(struct nvmet_pr * pr)225a47c208SGuixin Liu static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr)
235a47c208SGuixin Liu {
245a47c208SGuixin Liu return container_of(pr, struct nvmet_ns, pr);
255a47c208SGuixin Liu }
265a47c208SGuixin Liu
275a47c208SGuixin Liu static struct nvmet_pr_registrant *
nvmet_pr_find_registrant(struct nvmet_pr * pr,uuid_t * hostid)285a47c208SGuixin Liu nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid)
295a47c208SGuixin Liu {
305a47c208SGuixin Liu struct nvmet_pr_registrant *reg;
315a47c208SGuixin Liu
325a47c208SGuixin Liu list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
335a47c208SGuixin Liu if (uuid_equal(®->hostid, hostid))
345a47c208SGuixin Liu return reg;
355a47c208SGuixin Liu }
365a47c208SGuixin Liu return NULL;
375a47c208SGuixin Liu }
385a47c208SGuixin Liu
nvmet_set_feat_resv_notif_mask(struct nvmet_req * req,u32 mask)395a47c208SGuixin Liu u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
405a47c208SGuixin Liu {
415a47c208SGuixin Liu u32 nsid = le32_to_cpu(req->cmd->common.nsid);
425a47c208SGuixin Liu struct nvmet_ctrl *ctrl = req->sq->ctrl;
435a47c208SGuixin Liu struct nvmet_ns *ns;
445a47c208SGuixin Liu unsigned long idx;
455a47c208SGuixin Liu u16 status;
465a47c208SGuixin Liu
475a47c208SGuixin Liu if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) {
485a47c208SGuixin Liu req->error_loc = offsetof(struct nvme_common_command, cdw11);
495a47c208SGuixin Liu return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
505a47c208SGuixin Liu }
515a47c208SGuixin Liu
525a47c208SGuixin Liu if (nsid != U32_MAX) {
535a47c208SGuixin Liu status = nvmet_req_find_ns(req);
545a47c208SGuixin Liu if (status)
555a47c208SGuixin Liu return status;
565a47c208SGuixin Liu if (!req->ns->pr.enable)
575a47c208SGuixin Liu return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
585a47c208SGuixin Liu
595a47c208SGuixin Liu WRITE_ONCE(req->ns->pr.notify_mask, mask);
605a47c208SGuixin Liu goto success;
615a47c208SGuixin Liu }
625a47c208SGuixin Liu
635a47c208SGuixin Liu xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
645a47c208SGuixin Liu if (ns->pr.enable)
655a47c208SGuixin Liu WRITE_ONCE(ns->pr.notify_mask, mask);
665a47c208SGuixin Liu }
675a47c208SGuixin Liu
685a47c208SGuixin Liu success:
695a47c208SGuixin Liu nvmet_set_result(req, mask);
705a47c208SGuixin Liu return NVME_SC_SUCCESS;
715a47c208SGuixin Liu }
725a47c208SGuixin Liu
nvmet_get_feat_resv_notif_mask(struct nvmet_req * req)735a47c208SGuixin Liu u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req)
745a47c208SGuixin Liu {
755a47c208SGuixin Liu u16 status;
765a47c208SGuixin Liu
775a47c208SGuixin Liu status = nvmet_req_find_ns(req);
785a47c208SGuixin Liu if (status)
795a47c208SGuixin Liu return status;
805a47c208SGuixin Liu
815a47c208SGuixin Liu if (!req->ns->pr.enable)
825a47c208SGuixin Liu return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
835a47c208SGuixin Liu
845a47c208SGuixin Liu nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask));
855a47c208SGuixin Liu return status;
865a47c208SGuixin Liu }
875a47c208SGuixin Liu
nvmet_execute_get_log_page_resv(struct nvmet_req * req)885a47c208SGuixin Liu void nvmet_execute_get_log_page_resv(struct nvmet_req *req)
895a47c208SGuixin Liu {
905a47c208SGuixin Liu struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr;
915a47c208SGuixin Liu struct nvme_pr_log next_log = {0};
925a47c208SGuixin Liu struct nvme_pr_log log = {0};
935a47c208SGuixin Liu u16 status = NVME_SC_SUCCESS;
945a47c208SGuixin Liu u64 lost_count;
955a47c208SGuixin Liu u64 cur_count;
965a47c208SGuixin Liu u64 next_count;
975a47c208SGuixin Liu
985a47c208SGuixin Liu mutex_lock(&log_mgr->lock);
995a47c208SGuixin Liu if (!kfifo_get(&log_mgr->log_queue, &log))
1005a47c208SGuixin Liu goto out;
1015a47c208SGuixin Liu
1025a47c208SGuixin Liu /*
1035a47c208SGuixin Liu * We can't get the last in kfifo.
1045a47c208SGuixin Liu * Utilize the current count and the count from the next log to
1055a47c208SGuixin Liu * calculate the number of lost logs, while also addressing cases
1065a47c208SGuixin Liu * of overflow. If there is no subsequent log, the number of lost
1075a47c208SGuixin Liu * logs is equal to the lost_count within the nvmet_pr_log_mgr.
1085a47c208SGuixin Liu */
1095a47c208SGuixin Liu cur_count = le64_to_cpu(log.count);
1105a47c208SGuixin Liu if (kfifo_peek(&log_mgr->log_queue, &next_log)) {
1115a47c208SGuixin Liu next_count = le64_to_cpu(next_log.count);
1125a47c208SGuixin Liu if (next_count > cur_count)
1135a47c208SGuixin Liu lost_count = next_count - cur_count - 1;
1145a47c208SGuixin Liu else
1155a47c208SGuixin Liu lost_count = U64_MAX - cur_count + next_count - 1;
1165a47c208SGuixin Liu } else {
1175a47c208SGuixin Liu lost_count = log_mgr->lost_count;
1185a47c208SGuixin Liu }
1195a47c208SGuixin Liu
1205a47c208SGuixin Liu log.count = cpu_to_le64((cur_count + lost_count) == 0 ?
1215a47c208SGuixin Liu 1 : (cur_count + lost_count));
1225a47c208SGuixin Liu log_mgr->lost_count -= lost_count;
1235a47c208SGuixin Liu
1245a47c208SGuixin Liu log.nr_pages = kfifo_len(&log_mgr->log_queue);
1255a47c208SGuixin Liu
1265a47c208SGuixin Liu out:
1275a47c208SGuixin Liu status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log));
1285a47c208SGuixin Liu mutex_unlock(&log_mgr->lock);
1295a47c208SGuixin Liu nvmet_req_complete(req, status);
1305a47c208SGuixin Liu }
1315a47c208SGuixin Liu
nvmet_pr_add_resv_log(struct nvmet_ctrl * ctrl,u8 log_type,u32 nsid)1325a47c208SGuixin Liu static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type,
1335a47c208SGuixin Liu u32 nsid)
1345a47c208SGuixin Liu {
1355a47c208SGuixin Liu struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr;
1365a47c208SGuixin Liu struct nvme_pr_log log = {0};
1375a47c208SGuixin Liu
1385a47c208SGuixin Liu mutex_lock(&log_mgr->lock);
1395a47c208SGuixin Liu log_mgr->counter++;
1405a47c208SGuixin Liu if (log_mgr->counter == 0)
1415a47c208SGuixin Liu log_mgr->counter = 1;
1425a47c208SGuixin Liu
1435a47c208SGuixin Liu log.count = cpu_to_le64(log_mgr->counter);
1445a47c208SGuixin Liu log.type = log_type;
1455a47c208SGuixin Liu log.nsid = cpu_to_le32(nsid);
1465a47c208SGuixin Liu
1475a47c208SGuixin Liu if (!kfifo_put(&log_mgr->log_queue, log)) {
1485a47c208SGuixin Liu pr_info("a reservation log lost, cntlid:%d, log_type:%d, nsid:%d\n",
1495a47c208SGuixin Liu ctrl->cntlid, log_type, nsid);
1505a47c208SGuixin Liu log_mgr->lost_count++;
1515a47c208SGuixin Liu }
1525a47c208SGuixin Liu
1535a47c208SGuixin Liu mutex_unlock(&log_mgr->lock);
1545a47c208SGuixin Liu }
1555a47c208SGuixin Liu
nvmet_pr_resv_released(struct nvmet_pr * pr,uuid_t * hostid)1565a47c208SGuixin Liu static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid)
1575a47c208SGuixin Liu {
1585a47c208SGuixin Liu struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
1595a47c208SGuixin Liu struct nvmet_subsys *subsys = ns->subsys;
1605a47c208SGuixin Liu struct nvmet_ctrl *ctrl;
1615a47c208SGuixin Liu
1625a47c208SGuixin Liu if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask))
1635a47c208SGuixin Liu return;
1645a47c208SGuixin Liu
1655a47c208SGuixin Liu mutex_lock(&subsys->lock);
1665a47c208SGuixin Liu list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1675a47c208SGuixin Liu if (!uuid_equal(&ctrl->hostid, hostid) &&
1685a47c208SGuixin Liu nvmet_pr_find_registrant(pr, &ctrl->hostid)) {
1695a47c208SGuixin Liu nvmet_pr_add_resv_log(ctrl,
1705a47c208SGuixin Liu NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid);
1715a47c208SGuixin Liu nvmet_add_async_event(ctrl, NVME_AER_CSS,
1725a47c208SGuixin Liu NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
1735a47c208SGuixin Liu NVME_LOG_RESERVATION);
1745a47c208SGuixin Liu }
1755a47c208SGuixin Liu }
1765a47c208SGuixin Liu mutex_unlock(&subsys->lock);
1775a47c208SGuixin Liu }
1785a47c208SGuixin Liu
nvmet_pr_send_event_to_host(struct nvmet_pr * pr,uuid_t * hostid,u8 log_type)1795a47c208SGuixin Liu static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid,
1805a47c208SGuixin Liu u8 log_type)
1815a47c208SGuixin Liu {
1825a47c208SGuixin Liu struct nvmet_ns *ns = nvmet_pr_to_ns(pr);
1835a47c208SGuixin Liu struct nvmet_subsys *subsys = ns->subsys;
1845a47c208SGuixin Liu struct nvmet_ctrl *ctrl;
1855a47c208SGuixin Liu
1865a47c208SGuixin Liu mutex_lock(&subsys->lock);
1875a47c208SGuixin Liu list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1885a47c208SGuixin Liu if (uuid_equal(hostid, &ctrl->hostid)) {
1895a47c208SGuixin Liu nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid);
1905a47c208SGuixin Liu nvmet_add_async_event(ctrl, NVME_AER_CSS,
1915a47c208SGuixin Liu NVME_AEN_RESV_LOG_PAGE_AVALIABLE,
1925a47c208SGuixin Liu NVME_LOG_RESERVATION);
1935a47c208SGuixin Liu }
1945a47c208SGuixin Liu }
1955a47c208SGuixin Liu mutex_unlock(&subsys->lock);
1965a47c208SGuixin Liu }
1975a47c208SGuixin Liu
nvmet_pr_resv_preempted(struct nvmet_pr * pr,uuid_t * hostid)1985a47c208SGuixin Liu static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid)
1995a47c208SGuixin Liu {
2005a47c208SGuixin Liu if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask))
2015a47c208SGuixin Liu return;
2025a47c208SGuixin Liu
2035a47c208SGuixin Liu nvmet_pr_send_event_to_host(pr, hostid,
2045a47c208SGuixin Liu NVME_PR_LOG_RESERVATOIN_PREEMPTED);
2055a47c208SGuixin Liu }
2065a47c208SGuixin Liu
nvmet_pr_registration_preempted(struct nvmet_pr * pr,uuid_t * hostid)2075a47c208SGuixin Liu static void nvmet_pr_registration_preempted(struct nvmet_pr *pr,
2085a47c208SGuixin Liu uuid_t *hostid)
2095a47c208SGuixin Liu {
2105a47c208SGuixin Liu if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask))
2115a47c208SGuixin Liu return;
2125a47c208SGuixin Liu
2135a47c208SGuixin Liu nvmet_pr_send_event_to_host(pr, hostid,
2145a47c208SGuixin Liu NVME_PR_LOG_REGISTRATION_PREEMPTED);
2155a47c208SGuixin Liu }
2165a47c208SGuixin Liu
nvmet_pr_set_new_holder(struct nvmet_pr * pr,u8 new_rtype,struct nvmet_pr_registrant * reg)2175a47c208SGuixin Liu static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype,
2185a47c208SGuixin Liu struct nvmet_pr_registrant *reg)
2195a47c208SGuixin Liu {
2205a47c208SGuixin Liu reg->rtype = new_rtype;
2215a47c208SGuixin Liu rcu_assign_pointer(pr->holder, reg);
2225a47c208SGuixin Liu }
2235a47c208SGuixin Liu
nvmet_pr_register(struct nvmet_req * req,struct nvmet_pr_register_data * d)2245a47c208SGuixin Liu static u16 nvmet_pr_register(struct nvmet_req *req,
2255a47c208SGuixin Liu struct nvmet_pr_register_data *d)
2265a47c208SGuixin Liu {
2275a47c208SGuixin Liu struct nvmet_ctrl *ctrl = req->sq->ctrl;
2285a47c208SGuixin Liu struct nvmet_pr_registrant *new, *reg;
2295a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
2305a47c208SGuixin Liu u16 status = NVME_SC_SUCCESS;
2315a47c208SGuixin Liu u64 nrkey = le64_to_cpu(d->nrkey);
2325a47c208SGuixin Liu
2335a47c208SGuixin Liu new = kmalloc(sizeof(*new), GFP_KERNEL);
2345a47c208SGuixin Liu if (!new)
2355a47c208SGuixin Liu return NVME_SC_INTERNAL;
2365a47c208SGuixin Liu
2375a47c208SGuixin Liu down(&pr->pr_sem);
2385a47c208SGuixin Liu reg = nvmet_pr_find_registrant(pr, &ctrl->hostid);
2395a47c208SGuixin Liu if (reg) {
2405a47c208SGuixin Liu if (reg->rkey != nrkey)
2415a47c208SGuixin Liu status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
2425a47c208SGuixin Liu kfree(new);
2435a47c208SGuixin Liu goto out;
2445a47c208SGuixin Liu }
2455a47c208SGuixin Liu
2465a47c208SGuixin Liu memset(new, 0, sizeof(*new));
2475a47c208SGuixin Liu INIT_LIST_HEAD(&new->entry);
2485a47c208SGuixin Liu new->rkey = nrkey;
2495a47c208SGuixin Liu uuid_copy(&new->hostid, &ctrl->hostid);
2505a47c208SGuixin Liu list_add_tail_rcu(&new->entry, &pr->registrant_list);
2515a47c208SGuixin Liu
2525a47c208SGuixin Liu out:
2535a47c208SGuixin Liu up(&pr->pr_sem);
2545a47c208SGuixin Liu return status;
2555a47c208SGuixin Liu }
2565a47c208SGuixin Liu
nvmet_pr_unregister_one(struct nvmet_pr * pr,struct nvmet_pr_registrant * reg)2575a47c208SGuixin Liu static void nvmet_pr_unregister_one(struct nvmet_pr *pr,
2585a47c208SGuixin Liu struct nvmet_pr_registrant *reg)
2595a47c208SGuixin Liu {
2605a47c208SGuixin Liu struct nvmet_pr_registrant *first_reg;
2615a47c208SGuixin Liu struct nvmet_pr_registrant *holder;
2625a47c208SGuixin Liu u8 original_rtype;
2635a47c208SGuixin Liu
2645a47c208SGuixin Liu list_del_rcu(®->entry);
2655a47c208SGuixin Liu
2665a47c208SGuixin Liu holder = rcu_dereference_protected(pr->holder, 1);
2675a47c208SGuixin Liu if (reg != holder)
2685a47c208SGuixin Liu goto out;
2695a47c208SGuixin Liu
2705a47c208SGuixin Liu original_rtype = holder->rtype;
2715a47c208SGuixin Liu if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
2725a47c208SGuixin Liu original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
2735a47c208SGuixin Liu first_reg = list_first_or_null_rcu(&pr->registrant_list,
2745a47c208SGuixin Liu struct nvmet_pr_registrant, entry);
2755a47c208SGuixin Liu if (first_reg)
2765a47c208SGuixin Liu first_reg->rtype = original_rtype;
2775a47c208SGuixin Liu rcu_assign_pointer(pr->holder, first_reg);
2785a47c208SGuixin Liu } else {
2795a47c208SGuixin Liu rcu_assign_pointer(pr->holder, NULL);
2805a47c208SGuixin Liu
2815a47c208SGuixin Liu if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY ||
2825a47c208SGuixin Liu original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY)
2835a47c208SGuixin Liu nvmet_pr_resv_released(pr, ®->hostid);
2845a47c208SGuixin Liu }
2855a47c208SGuixin Liu out:
2865a47c208SGuixin Liu kfree_rcu(reg, rcu);
2875a47c208SGuixin Liu }
2885a47c208SGuixin Liu
nvmet_pr_unregister(struct nvmet_req * req,struct nvmet_pr_register_data * d,bool ignore_key)2895a47c208SGuixin Liu static u16 nvmet_pr_unregister(struct nvmet_req *req,
2905a47c208SGuixin Liu struct nvmet_pr_register_data *d,
2915a47c208SGuixin Liu bool ignore_key)
2925a47c208SGuixin Liu {
2935a47c208SGuixin Liu u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
2945a47c208SGuixin Liu struct nvmet_ctrl *ctrl = req->sq->ctrl;
2955a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
2965a47c208SGuixin Liu struct nvmet_pr_registrant *reg;
2975a47c208SGuixin Liu
2985a47c208SGuixin Liu down(&pr->pr_sem);
2995a47c208SGuixin Liu list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
3005a47c208SGuixin Liu if (uuid_equal(®->hostid, &ctrl->hostid)) {
3015a47c208SGuixin Liu if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) {
3025a47c208SGuixin Liu status = NVME_SC_SUCCESS;
3035a47c208SGuixin Liu nvmet_pr_unregister_one(pr, reg);
3045a47c208SGuixin Liu }
3055a47c208SGuixin Liu break;
3065a47c208SGuixin Liu }
3075a47c208SGuixin Liu }
3085a47c208SGuixin Liu up(&pr->pr_sem);
3095a47c208SGuixin Liu
3105a47c208SGuixin Liu return status;
3115a47c208SGuixin Liu }
3125a47c208SGuixin Liu
nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant * reg,void * attr)3135a47c208SGuixin Liu static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg,
3145a47c208SGuixin Liu void *attr)
3155a47c208SGuixin Liu {
3165a47c208SGuixin Liu reg->rkey = *(u64 *)attr;
3175a47c208SGuixin Liu }
3185a47c208SGuixin Liu
nvmet_pr_update_reg_attr(struct nvmet_pr * pr,struct nvmet_pr_registrant * reg,void (* change_attr)(struct nvmet_pr_registrant * reg,void * attr),void * attr)3195a47c208SGuixin Liu static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr,
3205a47c208SGuixin Liu struct nvmet_pr_registrant *reg,
3215a47c208SGuixin Liu void (*change_attr)(struct nvmet_pr_registrant *reg,
3225a47c208SGuixin Liu void *attr),
3235a47c208SGuixin Liu void *attr)
3245a47c208SGuixin Liu {
3255a47c208SGuixin Liu struct nvmet_pr_registrant *holder;
3265a47c208SGuixin Liu struct nvmet_pr_registrant *new;
3275a47c208SGuixin Liu
3285a47c208SGuixin Liu holder = rcu_dereference_protected(pr->holder, 1);
3295a47c208SGuixin Liu if (reg != holder) {
3305a47c208SGuixin Liu change_attr(reg, attr);
3315a47c208SGuixin Liu return NVME_SC_SUCCESS;
3325a47c208SGuixin Liu }
3335a47c208SGuixin Liu
3345a47c208SGuixin Liu new = kmalloc(sizeof(*new), GFP_ATOMIC);
3355a47c208SGuixin Liu if (!new)
3365a47c208SGuixin Liu return NVME_SC_INTERNAL;
3375a47c208SGuixin Liu
3385a47c208SGuixin Liu new->rkey = holder->rkey;
3395a47c208SGuixin Liu new->rtype = holder->rtype;
3405a47c208SGuixin Liu uuid_copy(&new->hostid, &holder->hostid);
3415a47c208SGuixin Liu INIT_LIST_HEAD(&new->entry);
3425a47c208SGuixin Liu
3435a47c208SGuixin Liu change_attr(new, attr);
3445a47c208SGuixin Liu list_replace_rcu(&holder->entry, &new->entry);
3455a47c208SGuixin Liu rcu_assign_pointer(pr->holder, new);
3465a47c208SGuixin Liu kfree_rcu(holder, rcu);
3475a47c208SGuixin Liu
3485a47c208SGuixin Liu return NVME_SC_SUCCESS;
3495a47c208SGuixin Liu }
3505a47c208SGuixin Liu
nvmet_pr_replace(struct nvmet_req * req,struct nvmet_pr_register_data * d,bool ignore_key)3515a47c208SGuixin Liu static u16 nvmet_pr_replace(struct nvmet_req *req,
3525a47c208SGuixin Liu struct nvmet_pr_register_data *d,
3535a47c208SGuixin Liu bool ignore_key)
3545a47c208SGuixin Liu {
3555a47c208SGuixin Liu u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
3565a47c208SGuixin Liu struct nvmet_ctrl *ctrl = req->sq->ctrl;
3575a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
3585a47c208SGuixin Liu struct nvmet_pr_registrant *reg;
3595a47c208SGuixin Liu u64 nrkey = le64_to_cpu(d->nrkey);
3605a47c208SGuixin Liu
3615a47c208SGuixin Liu down(&pr->pr_sem);
3625a47c208SGuixin Liu list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
3635a47c208SGuixin Liu if (uuid_equal(®->hostid, &ctrl->hostid)) {
3645a47c208SGuixin Liu if (ignore_key || reg->rkey == le64_to_cpu(d->crkey))
3655a47c208SGuixin Liu status = nvmet_pr_update_reg_attr(pr, reg,
3665a47c208SGuixin Liu nvmet_pr_update_reg_rkey,
3675a47c208SGuixin Liu &nrkey);
3685a47c208SGuixin Liu break;
3695a47c208SGuixin Liu }
3705a47c208SGuixin Liu }
3715a47c208SGuixin Liu up(&pr->pr_sem);
3725a47c208SGuixin Liu return status;
3735a47c208SGuixin Liu }
3745a47c208SGuixin Liu
nvmet_execute_pr_register(struct nvmet_req * req)3755a47c208SGuixin Liu static void nvmet_execute_pr_register(struct nvmet_req *req)
3765a47c208SGuixin Liu {
3775a47c208SGuixin Liu u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
3785a47c208SGuixin Liu bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
3795a47c208SGuixin Liu struct nvmet_pr_register_data *d;
3805a47c208SGuixin Liu u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bit 02:00 */
3815a47c208SGuixin Liu u16 status;
3825a47c208SGuixin Liu
3835a47c208SGuixin Liu d = kmalloc(sizeof(*d), GFP_KERNEL);
3845a47c208SGuixin Liu if (!d) {
3855a47c208SGuixin Liu status = NVME_SC_INTERNAL;
3865a47c208SGuixin Liu goto out;
3875a47c208SGuixin Liu }
3885a47c208SGuixin Liu
3895a47c208SGuixin Liu status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
3905a47c208SGuixin Liu if (status)
3915a47c208SGuixin Liu goto free_data;
3925a47c208SGuixin Liu
3935a47c208SGuixin Liu switch (reg_act) {
3945a47c208SGuixin Liu case NVME_PR_REGISTER_ACT_REG:
3955a47c208SGuixin Liu status = nvmet_pr_register(req, d);
3965a47c208SGuixin Liu break;
3975a47c208SGuixin Liu case NVME_PR_REGISTER_ACT_UNREG:
3985a47c208SGuixin Liu status = nvmet_pr_unregister(req, d, ignore_key);
3995a47c208SGuixin Liu break;
4005a47c208SGuixin Liu case NVME_PR_REGISTER_ACT_REPLACE:
4015a47c208SGuixin Liu status = nvmet_pr_replace(req, d, ignore_key);
4025a47c208SGuixin Liu break;
4035a47c208SGuixin Liu default:
4045a47c208SGuixin Liu req->error_loc = offsetof(struct nvme_common_command, cdw10);
4055a47c208SGuixin Liu status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
4065a47c208SGuixin Liu break;
4075a47c208SGuixin Liu }
4085a47c208SGuixin Liu free_data:
4095a47c208SGuixin Liu kfree(d);
4105a47c208SGuixin Liu out:
4115a47c208SGuixin Liu if (!status)
4125a47c208SGuixin Liu atomic_inc(&req->ns->pr.generation);
4135a47c208SGuixin Liu nvmet_req_complete(req, status);
4145a47c208SGuixin Liu }
4155a47c208SGuixin Liu
nvmet_pr_acquire(struct nvmet_req * req,struct nvmet_pr_registrant * reg,u8 rtype)4165a47c208SGuixin Liu static u16 nvmet_pr_acquire(struct nvmet_req *req,
4175a47c208SGuixin Liu struct nvmet_pr_registrant *reg,
4185a47c208SGuixin Liu u8 rtype)
4195a47c208SGuixin Liu {
4205a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
4215a47c208SGuixin Liu struct nvmet_pr_registrant *holder;
4225a47c208SGuixin Liu
4235a47c208SGuixin Liu holder = rcu_dereference_protected(pr->holder, 1);
4245a47c208SGuixin Liu if (holder && reg != holder)
4255a47c208SGuixin Liu return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
4265a47c208SGuixin Liu if (holder && reg == holder) {
4275a47c208SGuixin Liu if (holder->rtype == rtype)
4285a47c208SGuixin Liu return NVME_SC_SUCCESS;
4295a47c208SGuixin Liu return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
4305a47c208SGuixin Liu }
4315a47c208SGuixin Liu
4325a47c208SGuixin Liu nvmet_pr_set_new_holder(pr, rtype, reg);
4335a47c208SGuixin Liu return NVME_SC_SUCCESS;
4345a47c208SGuixin Liu }
4355a47c208SGuixin Liu
nvmet_pr_confirm_ns_pc_ref(struct percpu_ref * ref)4365a47c208SGuixin Liu static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref)
4375a47c208SGuixin Liu {
4385a47c208SGuixin Liu struct nvmet_pr_per_ctrl_ref *pc_ref =
4395a47c208SGuixin Liu container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
4405a47c208SGuixin Liu
4415a47c208SGuixin Liu complete(&pc_ref->confirm_done);
4425a47c208SGuixin Liu }
4435a47c208SGuixin Liu
nvmet_pr_set_ctrl_to_abort(struct nvmet_req * req,uuid_t * hostid)4445a47c208SGuixin Liu static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid)
4455a47c208SGuixin Liu {
4465a47c208SGuixin Liu struct nvmet_pr_per_ctrl_ref *pc_ref;
4475a47c208SGuixin Liu struct nvmet_ns *ns = req->ns;
4485a47c208SGuixin Liu unsigned long idx;
4495a47c208SGuixin Liu
4505a47c208SGuixin Liu xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
4515a47c208SGuixin Liu if (uuid_equal(&pc_ref->hostid, hostid)) {
4525a47c208SGuixin Liu percpu_ref_kill_and_confirm(&pc_ref->ref,
4535a47c208SGuixin Liu nvmet_pr_confirm_ns_pc_ref);
4545a47c208SGuixin Liu wait_for_completion(&pc_ref->confirm_done);
4555a47c208SGuixin Liu }
4565a47c208SGuixin Liu }
4575a47c208SGuixin Liu }
4585a47c208SGuixin Liu
nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req * req,u64 prkey,uuid_t * send_hostid,bool abort)4595a47c208SGuixin Liu static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey,
4605a47c208SGuixin Liu uuid_t *send_hostid,
4615a47c208SGuixin Liu bool abort)
4625a47c208SGuixin Liu {
4635a47c208SGuixin Liu u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
4645a47c208SGuixin Liu struct nvmet_pr_registrant *reg, *tmp;
4655a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
4665a47c208SGuixin Liu uuid_t hostid;
4675a47c208SGuixin Liu
4685a47c208SGuixin Liu list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
4695a47c208SGuixin Liu if (reg->rkey == prkey) {
4705a47c208SGuixin Liu status = NVME_SC_SUCCESS;
4715a47c208SGuixin Liu uuid_copy(&hostid, ®->hostid);
4725a47c208SGuixin Liu if (abort)
4735a47c208SGuixin Liu nvmet_pr_set_ctrl_to_abort(req, &hostid);
4745a47c208SGuixin Liu nvmet_pr_unregister_one(pr, reg);
4755a47c208SGuixin Liu if (!uuid_equal(&hostid, send_hostid))
4765a47c208SGuixin Liu nvmet_pr_registration_preempted(pr, &hostid);
4775a47c208SGuixin Liu }
4785a47c208SGuixin Liu }
4795a47c208SGuixin Liu return status;
4805a47c208SGuixin Liu }
4815a47c208SGuixin Liu
nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req * req,u64 prkey,uuid_t * send_hostid,bool abort)4825a47c208SGuixin Liu static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req,
4835a47c208SGuixin Liu u64 prkey,
4845a47c208SGuixin Liu uuid_t *send_hostid,
4855a47c208SGuixin Liu bool abort)
4865a47c208SGuixin Liu {
4875a47c208SGuixin Liu struct nvmet_pr_registrant *reg, *tmp;
4885a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
4895a47c208SGuixin Liu uuid_t hostid;
4905a47c208SGuixin Liu
4915a47c208SGuixin Liu list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
4925a47c208SGuixin Liu if (reg->rkey == prkey &&
4935a47c208SGuixin Liu !uuid_equal(®->hostid, send_hostid)) {
4945a47c208SGuixin Liu uuid_copy(&hostid, ®->hostid);
4955a47c208SGuixin Liu if (abort)
4965a47c208SGuixin Liu nvmet_pr_set_ctrl_to_abort(req, &hostid);
4975a47c208SGuixin Liu nvmet_pr_unregister_one(pr, reg);
4985a47c208SGuixin Liu nvmet_pr_registration_preempted(pr, &hostid);
4995a47c208SGuixin Liu }
5005a47c208SGuixin Liu }
5015a47c208SGuixin Liu }
5025a47c208SGuixin Liu
nvmet_pr_unreg_all_others(struct nvmet_req * req,uuid_t * send_hostid,bool abort)5035a47c208SGuixin Liu static void nvmet_pr_unreg_all_others(struct nvmet_req *req,
5045a47c208SGuixin Liu uuid_t *send_hostid,
5055a47c208SGuixin Liu bool abort)
5065a47c208SGuixin Liu {
5075a47c208SGuixin Liu struct nvmet_pr_registrant *reg, *tmp;
5085a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
5095a47c208SGuixin Liu uuid_t hostid;
5105a47c208SGuixin Liu
5115a47c208SGuixin Liu list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
5125a47c208SGuixin Liu if (!uuid_equal(®->hostid, send_hostid)) {
5135a47c208SGuixin Liu uuid_copy(&hostid, ®->hostid);
5145a47c208SGuixin Liu if (abort)
5155a47c208SGuixin Liu nvmet_pr_set_ctrl_to_abort(req, &hostid);
5165a47c208SGuixin Liu nvmet_pr_unregister_one(pr, reg);
5175a47c208SGuixin Liu nvmet_pr_registration_preempted(pr, &hostid);
5185a47c208SGuixin Liu }
5195a47c208SGuixin Liu }
5205a47c208SGuixin Liu }
5215a47c208SGuixin Liu
nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant * reg,void * attr)5225a47c208SGuixin Liu static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg,
5235a47c208SGuixin Liu void *attr)
5245a47c208SGuixin Liu {
5255a47c208SGuixin Liu u8 new_rtype = *(u8 *)attr;
5265a47c208SGuixin Liu
5275a47c208SGuixin Liu reg->rtype = new_rtype;
5285a47c208SGuixin Liu }
5295a47c208SGuixin Liu
nvmet_pr_preempt(struct nvmet_req * req,struct nvmet_pr_registrant * reg,u8 rtype,struct nvmet_pr_acquire_data * d,bool abort)5305a47c208SGuixin Liu static u16 nvmet_pr_preempt(struct nvmet_req *req,
5315a47c208SGuixin Liu struct nvmet_pr_registrant *reg,
5325a47c208SGuixin Liu u8 rtype,
5335a47c208SGuixin Liu struct nvmet_pr_acquire_data *d,
5345a47c208SGuixin Liu bool abort)
5355a47c208SGuixin Liu {
5365a47c208SGuixin Liu struct nvmet_ctrl *ctrl = req->sq->ctrl;
5375a47c208SGuixin Liu struct nvmet_pr *pr = &req->ns->pr;
5385a47c208SGuixin Liu struct nvmet_pr_registrant *holder;
5395a47c208SGuixin Liu enum nvme_pr_type original_rtype;
5405a47c208SGuixin Liu u64 prkey = le64_to_cpu(d->prkey);
5415a47c208SGuixin Liu u16 status;
5425a47c208SGuixin Liu
5435a47c208SGuixin Liu holder = rcu_dereference_protected(pr->holder, 1);
5445a47c208SGuixin Liu if (!holder)
5455a47c208SGuixin Liu return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
5465a47c208SGuixin Liu &ctrl->hostid, abort);
5475a47c208SGuixin Liu
5485a47c208SGuixin Liu original_rtype = holder->rtype;
5495a47c208SGuixin Liu if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
5505a47c208SGuixin Liu original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
5515a47c208SGuixin Liu if (!prkey) {
5525a47c208SGuixin Liu /*
5535a47c208SGuixin Liu * To prevent possible access from other hosts, and
5545a47c208SGuixin Liu * avoid terminate the holder, set the new holder
5555a47c208SGuixin Liu * first before unregistering.
5565a47c208SGuixin Liu */
5575a47c208SGuixin Liu nvmet_pr_set_new_holder(pr, rtype, reg);
5585a47c208SGuixin Liu nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort);
5595a47c208SGuixin Liu return NVME_SC_SUCCESS;
5605a47c208SGuixin Liu }
5615a47c208SGuixin Liu return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
5625a47c208SGuixin Liu &ctrl->hostid, abort);
5635a47c208SGuixin Liu }
5645a47c208SGuixin Liu
5655a47c208SGuixin Liu if (holder == reg) {
5665a47c208SGuixin Liu status = nvmet_pr_update_reg_attr(pr, holder,
5675a47c208SGuixin Liu nvmet_pr_update_holder_rtype, &rtype);
5685a47c208SGuixin Liu if (!status && original_rtype != rtype)
5695a47c208SGuixin Liu nvmet_pr_resv_released(pr, ®->hostid);
5705a47c208SGuixin Liu return status;
5715a47c208SGuixin Liu }
5725a47c208SGuixin Liu
5735a47c208SGuixin Liu if (prkey == holder->rkey) {
5745a47c208SGuixin Liu /*
5755a47c208SGuixin Liu * Same as before, set the new holder first.
5765a47c208SGuixin Liu */
5775a47c208SGuixin Liu nvmet_pr_set_new_holder(pr, rtype, reg);
5785a47c208SGuixin Liu nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid,
5795a47c208SGuixin Liu abort);
5805a47c208SGuixin Liu if (original_rtype != rtype)
5815a47c208SGuixin Liu nvmet_pr_resv_released(pr, ®->hostid);
5825a47c208SGuixin Liu return NVME_SC_SUCCESS;
5835a47c208SGuixin Liu }
5845a47c208SGuixin Liu
5855a47c208SGuixin Liu if (prkey)
5865a47c208SGuixin Liu return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
5875a47c208SGuixin Liu &ctrl->hostid, abort);
5885a47c208SGuixin Liu return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
5895a47c208SGuixin Liu }
5905a47c208SGuixin Liu
nvmet_pr_do_abort(struct work_struct * w)5915a47c208SGuixin Liu static void nvmet_pr_do_abort(struct work_struct *w)
5925a47c208SGuixin Liu {
5935a47c208SGuixin Liu struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work);
5945a47c208SGuixin Liu struct nvmet_pr_per_ctrl_ref *pc_ref;
5955a47c208SGuixin Liu struct nvmet_ns *ns = req->ns;
5965a47c208SGuixin Liu unsigned long idx;
5975a47c208SGuixin Liu
5985a47c208SGuixin Liu /*
5995a47c208SGuixin Liu * The target does not support abort, just wait per-controller ref to 0.
6005a47c208SGuixin Liu */
6015a47c208SGuixin Liu xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
6025a47c208SGuixin Liu if (percpu_ref_is_dying(&pc_ref->ref)) {
6035a47c208SGuixin Liu wait_for_completion(&pc_ref->free_done);
6045a47c208SGuixin Liu reinit_completion(&pc_ref->confirm_done);
6055a47c208SGuixin Liu reinit_completion(&pc_ref->free_done);
6065a47c208SGuixin Liu percpu_ref_resurrect(&pc_ref->ref);
6075a47c208SGuixin Liu }
6085a47c208SGuixin Liu }
6095a47c208SGuixin Liu
6105a47c208SGuixin Liu up(&ns->pr.pr_sem);
6115a47c208SGuixin Liu nvmet_req_complete(req, NVME_SC_SUCCESS);
6125a47c208SGuixin Liu }
6135a47c208SGuixin Liu
__nvmet_execute_pr_acquire(struct nvmet_req * req,struct nvmet_pr_registrant * reg,u8 acquire_act,u8 rtype,struct nvmet_pr_acquire_data * d)6145a47c208SGuixin Liu static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req,
6155a47c208SGuixin Liu struct nvmet_pr_registrant *reg,
6165a47c208SGuixin Liu u8 acquire_act,
6175a47c208SGuixin Liu u8 rtype,
6185a47c208SGuixin Liu struct nvmet_pr_acquire_data *d)
6195a47c208SGuixin Liu {
6205a47c208SGuixin Liu u16 status;
6215a47c208SGuixin Liu
6225a47c208SGuixin Liu switch (acquire_act) {
6235a47c208SGuixin Liu case NVME_PR_ACQUIRE_ACT_ACQUIRE:
6245a47c208SGuixin Liu status = nvmet_pr_acquire(req, reg, rtype);
6255a47c208SGuixin Liu goto out;
6265a47c208SGuixin Liu case NVME_PR_ACQUIRE_ACT_PREEMPT:
6275a47c208SGuixin Liu status = nvmet_pr_preempt(req, reg, rtype, d, false);
6285a47c208SGuixin Liu goto inc_gen;
6295a47c208SGuixin Liu case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT:
6305a47c208SGuixin Liu status = nvmet_pr_preempt(req, reg, rtype, d, true);
6315a47c208SGuixin Liu goto inc_gen;
6325a47c208SGuixin Liu default:
6335a47c208SGuixin Liu req->error_loc = offsetof(struct nvme_common_command, cdw10);
6345a47c208SGuixin Liu status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
6355a47c208SGuixin Liu goto out;
6365a47c208SGuixin Liu }
6375a47c208SGuixin Liu inc_gen:
6385a47c208SGuixin Liu if (!status)
6395a47c208SGuixin Liu atomic_inc(&req->ns->pr.generation);
6405a47c208SGuixin Liu out:
6415a47c208SGuixin Liu return status;
6425a47c208SGuixin Liu }
6435a47c208SGuixin Liu
/*
 * Handle the Reservation Acquire command.  On a successful Preempt and
 * Abort the request is completed asynchronously by nvmet_pr_do_abort(),
 * which also releases pr_sem; every other path completes here.
 */
static void nvmet_execute_pr_acquire(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
	/* Reservation type, bit 15:08 */
	u8 rtype = (u8)((cdw10 >> 8) & 0xff);
	/* Reservation acquire action, bit 02:00 */
	u8 acquire_act = cdw10 & 0x07;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_pr_acquire_data *d = NULL;
	struct nvmet_pr *pr = &req->ns->pr;
	struct nvmet_pr_registrant *reg;
	u16 status = NVME_SC_SUCCESS;

	/* IEKEY must be clear and the reservation type must be valid. */
	if (ignore_key ||
	    rtype < NVME_PR_WRITE_EXCLUSIVE ||
	    rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* Fetch the acquire data structure (CRKEY/PRKEY) from the host. */
	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto free_data;

	/* Default to conflict unless a matching registrant is found. */
	status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
	down(&pr->pr_sem);
	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
		if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
		    reg->rkey == le64_to_cpu(d->crkey)) {
			status = __nvmet_execute_pr_acquire(req, reg,
					acquire_act, rtype, d);
			break;
		}
	}

	if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) {
		/*
		 * Completion (and up(&pr->pr_sem)) is deferred to the abort
		 * work; free d now because we return without reaching
		 * free_data below.
		 */
		kfree(d);
		INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort);
		queue_work(nvmet_wq, &req->r.abort_work);
		return;
	}

	up(&pr->pr_sem);

free_data:
	kfree(d);
out:
	nvmet_req_complete(req, status);
}
7005a47c208SGuixin Liu
/*
 * Release the reservation if @reg is the current holder.  The caller
 * holds pr_sem, which is what makes rcu_dereference_protected() safe.
 */
static u16 nvmet_pr_release(struct nvmet_req *req,
			    struct nvmet_pr_registrant *reg,
			    u8 rtype)
{
	struct nvmet_pr *pr = &req->ns->pr;
	struct nvmet_pr_registrant *holder;
	u8 original_rtype;

	/* Releasing when not the holder is a successful no-op. */
	holder = rcu_dereference_protected(pr->holder, 1);
	if (!holder || reg != holder)
		return NVME_SC_SUCCESS;

	/* The release must name the reservation type actually held. */
	original_rtype = holder->rtype;
	if (original_rtype != rtype)
		return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;

	rcu_assign_pointer(pr->holder, NULL);

	/*
	 * Only non-write/access-exclusive types notify the other
	 * registrants of the "reservation released" event.
	 */
	if (original_rtype != NVME_PR_WRITE_EXCLUSIVE &&
	    original_rtype != NVME_PR_EXCLUSIVE_ACCESS)
		nvmet_pr_resv_released(pr, &reg->hostid);

	return NVME_SC_SUCCESS;
}
7255a47c208SGuixin Liu
/*
 * Clear action: drop the reservation and remove every registrant.
 * Registrants belonging to other hosts get a "reservation preempted"
 * event.  Caller holds pr_sem.
 */
static void nvmet_pr_clear(struct nvmet_req *req)
{
	struct nvmet_pr_registrant *reg, *tmp;
	struct nvmet_pr *pr = &req->ns->pr;

	rcu_assign_pointer(pr->holder, NULL);

	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
		list_del_rcu(&reg->entry);
		if (!uuid_equal(&req->sq->ctrl->hostid, &reg->hostid))
			nvmet_pr_resv_preempted(pr, &reg->hostid);
		kfree_rcu(reg, rcu);
	}

	/* A clear is a reservation generation event. */
	atomic_inc(&pr->generation);
}
7425a47c208SGuixin Liu
__nvmet_execute_pr_release(struct nvmet_req * req,struct nvmet_pr_registrant * reg,u8 release_act,u8 rtype)7435a47c208SGuixin Liu static u16 __nvmet_execute_pr_release(struct nvmet_req *req,
7445a47c208SGuixin Liu struct nvmet_pr_registrant *reg,
7455a47c208SGuixin Liu u8 release_act, u8 rtype)
7465a47c208SGuixin Liu {
7475a47c208SGuixin Liu switch (release_act) {
7485a47c208SGuixin Liu case NVME_PR_RELEASE_ACT_RELEASE:
7495a47c208SGuixin Liu return nvmet_pr_release(req, reg, rtype);
7505a47c208SGuixin Liu case NVME_PR_RELEASE_ACT_CLEAR:
7515a47c208SGuixin Liu nvmet_pr_clear(req);
7525a47c208SGuixin Liu return NVME_SC_SUCCESS;
7535a47c208SGuixin Liu default:
7545a47c208SGuixin Liu req->error_loc = offsetof(struct nvme_common_command, cdw10);
7555a47c208SGuixin Liu return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
7565a47c208SGuixin Liu }
7575a47c208SGuixin Liu }
7585a47c208SGuixin Liu
/*
 * Handle the Reservation Release command.  The acting host must be
 * registered with a matching CRKEY; otherwise it is a conflict.
 */
static void nvmet_execute_pr_release(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	bool ignore_key = nvmet_pr_parse_ignore_key(cdw10);
	u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bit 15:08 */
	u8 release_act = cdw10 & 0x07; /* Reservation release action, bit 02:00 */
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_pr *pr = &req->ns->pr;
	struct nvmet_pr_release_data *d;
	struct nvmet_pr_registrant *reg;
	u16 status;

	/* IEKEY is not supported for Release. */
	if (ignore_key) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	d = kmalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* Fetch the release data structure (CRKEY) from the host. */
	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
	if (status)
		goto free_data;

	/* Default to conflict unless a matching registrant is found. */
	status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
	down(&pr->pr_sem);
	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
		if (uuid_equal(&reg->hostid, &ctrl->hostid) &&
		    reg->rkey == le64_to_cpu(d->crkey)) {
			status = __nvmet_execute_pr_release(req, reg,
					release_act, rtype);
			break;
		}
	}
	up(&pr->pr_sem);
free_data:
	kfree(d);
out:
	nvmet_req_complete(req, status);
}
8025a47c208SGuixin Liu
/*
 * Handle the Reservation Report command.  Only the extended data
 * structure (EDS) format is supported because nvmet host IDs are
 * 128-bit UUIDs.
 */
static void nvmet_execute_pr_report(struct nvmet_req *req)
{
	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is number of dwords */
	u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */
	struct nvme_registered_ctrl_ext *ctrl_eds;
	struct nvme_reservation_status_ext *data;
	struct nvmet_pr *pr = &req->ns->pr;
	struct nvmet_pr_registrant *holder;
	struct nvmet_pr_registrant *reg;
	u16 num_ctrls = 0;
	u16 status;
	u8 rtype;

	/* nvmet hostid(uuid_t) is 128 bit. */
	if (!eds) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR;
		goto out;
	}

	/* The host buffer must at least fit the fixed status header. */
	if (num_bytes < sizeof(struct nvme_reservation_status_ext)) {
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		goto out;
	}

	/* Zeroed so any unfilled tail bytes leak nothing to the host. */
	data = kzalloc(num_bytes, GFP_KERNEL);
	if (!data) {
		status = NVME_SC_INTERNAL;
		goto out;
	}
	data->gen = cpu_to_le32(atomic_read(&pr->generation));
	data->ptpls = 0; /* persist-through-power-loss not supported */
	ctrl_eds = data->regctl_eds;

	rcu_read_lock();
	holder = rcu_dereference(pr->holder);
	rtype = holder ? holder->rtype : 0;
	data->rtype = rtype;

	list_for_each_entry_rcu(reg, &pr->registrant_list, entry) {
		num_ctrls++;
		/*
		 * continue to get the number of all registrans.
		 */
		if (((void *)ctrl_eds + sizeof(*ctrl_eds)) >
		    ((void *)data + num_bytes))
			continue;
		/*
		 * Dynamic controller, set cntlid to 0xffff.
		 */
		ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
		/* With an all-registrants type, every registrant holds it. */
		if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
		    rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS)
			ctrl_eds->rcsts = 1;
		if (reg == holder)
			ctrl_eds->rcsts = 1;
		uuid_copy((uuid_t *)&ctrl_eds->hostid, &reg->hostid);
		ctrl_eds->rkey = cpu_to_le64(reg->rkey);
		ctrl_eds++;
	}
	rcu_read_unlock();

	/* Report the total registrant count even if some were truncated. */
	put_unaligned_le16(num_ctrls, data->regctl);
	status = nvmet_copy_to_sgl(req, 0, data, num_bytes);
	kfree(data);
out:
	nvmet_req_complete(req, status);
}
8745a47c208SGuixin Liu
nvmet_parse_pr_cmd(struct nvmet_req * req)8755a47c208SGuixin Liu u16 nvmet_parse_pr_cmd(struct nvmet_req *req)
8765a47c208SGuixin Liu {
8775a47c208SGuixin Liu struct nvme_command *cmd = req->cmd;
8785a47c208SGuixin Liu
8795a47c208SGuixin Liu switch (cmd->common.opcode) {
8805a47c208SGuixin Liu case nvme_cmd_resv_register:
8815a47c208SGuixin Liu req->execute = nvmet_execute_pr_register;
8825a47c208SGuixin Liu break;
8835a47c208SGuixin Liu case nvme_cmd_resv_acquire:
8845a47c208SGuixin Liu req->execute = nvmet_execute_pr_acquire;
8855a47c208SGuixin Liu break;
8865a47c208SGuixin Liu case nvme_cmd_resv_release:
8875a47c208SGuixin Liu req->execute = nvmet_execute_pr_release;
8885a47c208SGuixin Liu break;
8895a47c208SGuixin Liu case nvme_cmd_resv_report:
8905a47c208SGuixin Liu req->execute = nvmet_execute_pr_report;
8915a47c208SGuixin Liu break;
8925a47c208SGuixin Liu default:
8935a47c208SGuixin Liu return 1;
8945a47c208SGuixin Liu }
8955a47c208SGuixin Liu return NVME_SC_SUCCESS;
8965a47c208SGuixin Liu }
8975a47c208SGuixin Liu
nvmet_is_req_write_cmd_group(struct nvmet_req * req)8985a47c208SGuixin Liu static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req)
8995a47c208SGuixin Liu {
9005a47c208SGuixin Liu u8 opcode = req->cmd->common.opcode;
9015a47c208SGuixin Liu
9025a47c208SGuixin Liu if (req->sq->qid) {
9035a47c208SGuixin Liu switch (opcode) {
9045a47c208SGuixin Liu case nvme_cmd_flush:
9055a47c208SGuixin Liu case nvme_cmd_write:
9065a47c208SGuixin Liu case nvme_cmd_write_zeroes:
9075a47c208SGuixin Liu case nvme_cmd_dsm:
9085a47c208SGuixin Liu case nvme_cmd_zone_append:
9095a47c208SGuixin Liu case nvme_cmd_zone_mgmt_send:
9105a47c208SGuixin Liu return true;
9115a47c208SGuixin Liu default:
9125a47c208SGuixin Liu return false;
9135a47c208SGuixin Liu }
9145a47c208SGuixin Liu }
9155a47c208SGuixin Liu return false;
9165a47c208SGuixin Liu }
9175a47c208SGuixin Liu
nvmet_is_req_read_cmd_group(struct nvmet_req * req)9185a47c208SGuixin Liu static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req)
9195a47c208SGuixin Liu {
9205a47c208SGuixin Liu u8 opcode = req->cmd->common.opcode;
9215a47c208SGuixin Liu
9225a47c208SGuixin Liu if (req->sq->qid) {
9235a47c208SGuixin Liu switch (opcode) {
9245a47c208SGuixin Liu case nvme_cmd_read:
9255a47c208SGuixin Liu case nvme_cmd_zone_mgmt_recv:
9265a47c208SGuixin Liu return true;
9275a47c208SGuixin Liu default:
9285a47c208SGuixin Liu return false;
9295a47c208SGuixin Liu }
9305a47c208SGuixin Liu }
9315a47c208SGuixin Liu return false;
9325a47c208SGuixin Liu }
9335a47c208SGuixin Liu
/*
 * Enforce reservation access rules for an I/O command.  Returns
 * NVME_SC_SUCCESS when access is allowed, or a reservation conflict.
 * Runs under RCU; the holder and registrant list may change
 * concurrently, which is acceptable for an access check.
 */
u16 nvmet_pr_check_cmd_access(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_pr_registrant *holder;
	struct nvmet_ns *ns = req->ns;
	struct nvmet_pr *pr = &ns->pr;
	u16 status = NVME_SC_SUCCESS;

	rcu_read_lock();
	holder = rcu_dereference(pr->holder);
	/* No reservation, or we are the holder: everything is allowed. */
	if (!holder)
		goto unlock;
	if (uuid_equal(&ctrl->hostid, &holder->hostid))
		goto unlock;

	/*
	 * The Reservation command group is checked in executing,
	 * allow it here.
	 */
	switch (holder->rtype) {
	case NVME_PR_WRITE_EXCLUSIVE:
		if (nvmet_is_req_write_cmd_group(req))
			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
		break;
	case NVME_PR_EXCLUSIVE_ACCESS:
		if (nvmet_is_req_read_cmd_group(req) ||
		    nvmet_is_req_write_cmd_group(req))
			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
		break;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		/* Registered hosts other than the holder may still write. */
		if ((nvmet_is_req_write_cmd_group(req)) &&
		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
		break;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		/* Registered hosts other than the holder retain full access. */
		if ((nvmet_is_req_read_cmd_group(req) ||
		    nvmet_is_req_write_cmd_group(req)) &&
		    !nvmet_pr_find_registrant(pr, &ctrl->hostid))
			status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR;
		break;
	default:
		pr_warn("the reservation type is set wrong, type:%d\n",
			holder->rtype);
		break;
	}

unlock:
	rcu_read_unlock();
	if (status)
		req->error_loc = offsetof(struct nvme_common_command, opcode);
	return status;
}
9885a47c208SGuixin Liu
/*
 * Take this controller's per-namespace ref so that a concurrent
 * preempt-and-abort can wait for the command to finish.
 * NOTE(review): the xa_load() result is dereferenced unchecked;
 * presumably an entry always exists for an enabled PR namespace
 * (inserted in nvmet_ctrl_init_pr()/nvmet_pr_init_ns()) — confirm.
 */
u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
{
	struct nvmet_pr_per_ctrl_ref *pc_ref;

	pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
			req->sq->ctrl->cntlid);
	/* tryget fails while the ref is dying during a preempt-and-abort. */
	if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
		return NVME_SC_INTERNAL;
	req->pc_ref = pc_ref;
	return NVME_SC_SUCCESS;
}
10005a47c208SGuixin Liu
nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref * ref)10015a47c208SGuixin Liu static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
10025a47c208SGuixin Liu {
10035a47c208SGuixin Liu struct nvmet_pr_per_ctrl_ref *pc_ref =
10045a47c208SGuixin Liu container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
10055a47c208SGuixin Liu
10065a47c208SGuixin Liu complete(&pc_ref->free_done);
10075a47c208SGuixin Liu }
10085a47c208SGuixin Liu
/*
 * Allocate a per-controller ref for @hostid and store it in the
 * namespace's xarray at @idx (the cntlid).  Returns 0 or a negative
 * errno; on failure nothing is left allocated.
 */
static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
					    unsigned long idx,
					    uuid_t *hostid)
{
	struct nvmet_pr_per_ctrl_ref *pc_ref;
	int ret;

	pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
	if (!pc_ref)
		return -ENOMEM;

	/* ALLOW_REINIT: the abort path resurrects the ref after a kill. */
	ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
			      PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (ret)
		goto free_pc_ref;

	init_completion(&pc_ref->free_done);
	init_completion(&pc_ref->confirm_done);
	uuid_copy(&pc_ref->hostid, hostid);

	ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
	if (ret)
		goto exit_ref;
	return 0;

exit_ref:
	percpu_ref_exit(&pc_ref->ref);
free_pc_ref:
	kfree(pc_ref);
	return ret;
}
10395a47c208SGuixin Liu
/*
 * Controller-init hook for persistent reservations: set up the PR event
 * log state and allocate a per-controller ref on every PR-enabled
 * namespace of the subsystem.  Returns 0 or a negative errno.
 */
int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
{
	struct nvmet_subsys *subsys = ctrl->subsys;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
	struct nvmet_ns *ns = NULL;
	unsigned long idx;
	int ret;

	ctrl->pr_log_mgr.counter = 0;
	ctrl->pr_log_mgr.lost_count = 0;
	mutex_init(&ctrl->pr_log_mgr.lock);
	INIT_KFIFO(ctrl->pr_log_mgr.log_queue);

	/*
	 * Here we are under subsys lock, if an ns not in subsys->namespaces,
	 * we can make sure that ns is not enabled, and not call
	 * nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
	 * So just check ns->pr.enable.
	 */
	xa_for_each(&subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
							&ctrl->hostid);
			if (ret)
				goto free_per_ctrl_refs;
		}
	}
	return 0;

	/* Unwind: drop any refs already inserted for this controller. */
free_per_ctrl_refs:
	xa_for_each(&subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
			if (pc_ref)
				percpu_ref_exit(&pc_ref->ref);
			kfree(pc_ref);
		}
	}
	return ret;
}
10805a47c208SGuixin Liu
/*
 * Controller-teardown hook: free the PR event log and drop this
 * controller's per-namespace refs on every PR-enabled namespace.
 */
void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
{
	struct nvmet_pr_per_ctrl_ref *pc_ref;
	struct nvmet_ns *ns;
	unsigned long idx;

	kfifo_free(&ctrl->pr_log_mgr.log_queue);
	mutex_destroy(&ctrl->pr_log_mgr.lock);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->pr.enable) {
			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
			if (pc_ref)
				percpu_ref_exit(&pc_ref->ref);
			/* kfree(NULL) is a no-op when no entry existed. */
			kfree(pc_ref);
		}
	}
}
10995a47c208SGuixin Liu
/*
 * Initialize per-namespace PR state when the namespace is enabled, and
 * allocate a per-controller ref for every controller already connected
 * to the subsystem.  Returns 0 or a negative errno.
 */
int nvmet_pr_init_ns(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
	struct nvmet_ctrl *ctrl = NULL;
	unsigned long idx;
	int ret;

	ns->pr.holder = NULL;
	atomic_set(&ns->pr.generation, 0);
	sema_init(&ns->pr.pr_sem, 1);
	INIT_LIST_HEAD(&ns->pr.registrant_list);
	ns->pr.notify_mask = 0;

	xa_init(&ns->pr_per_ctrl_refs);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
						&ctrl->hostid);
		if (ret)
			goto free_per_ctrl_refs;
	}
	return 0;

	/* Unwind: free every ref inserted before the failure. */
free_per_ctrl_refs:
	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
		xa_erase(&ns->pr_per_ctrl_refs, idx);
		percpu_ref_exit(&pc_ref->ref);
		kfree(pc_ref);
	}
	return ret;
}
11325a47c208SGuixin Liu
/*
 * Tear down per-namespace PR state when the namespace is disabled:
 * free all registrants and all per-controller refs.
 */
void nvmet_pr_exit_ns(struct nvmet_ns *ns)
{
	struct nvmet_pr_registrant *reg, *tmp;
	struct nvmet_pr_per_ctrl_ref *pc_ref;
	struct nvmet_pr *pr = &ns->pr;
	unsigned long idx;

	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
		list_del(&reg->entry);
		kfree(reg);
	}

	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
		/*
		 * No command on ns here, we can safely free pc_ref.
		 */
		pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx);
		percpu_ref_exit(&pc_ref->ref);
		kfree(pc_ref);
	}

	xa_destroy(&ns->pr_per_ctrl_refs);
}
1156