1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
4  * Copyright (C) 2014 Red Hat, Inc.
5  * Copyright (C) 2015 Arrikto, Inc.
6  * Copyright (C) 2017 Chinamobile, Inc.
7  */
8 
9 #include <linux/spinlock.h>
10 #include <linux/module.h>
11 #include <linux/idr.h>
12 #include <linux/kernel.h>
13 #include <linux/timer.h>
14 #include <linux/parser.h>
15 #include <linux/vmalloc.h>
16 #include <linux/uio_driver.h>
17 #include <linux/radix-tree.h>
18 #include <linux/stringify.h>
19 #include <linux/bitops.h>
20 #include <linux/highmem.h>
21 #include <linux/configfs.h>
22 #include <linux/mutex.h>
23 #include <linux/workqueue.h>
24 #include <net/genetlink.h>
25 #include <scsi/scsi_common.h>
26 #include <scsi/scsi_proto.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/target_core_backend.h>
30 
31 #include <linux/target_core_user.h>
32 
33 /**
34  * DOC: Userspace I/O
35  * Userspace I/O
36  * -------------
37  *
38  * Define a shared-memory interface for LIO to pass SCSI commands and
39  * data to userspace for processing. This allows backends that are
40  * too complex to support in the kernel.
41  *
42  * It uses the UIO framework to do a lot of the device-creation and
43  * introspection work for us.
44  *
45  * See the .h file for how the ring is laid out. Note that while the
46  * command ring is defined, the particulars of the data area are
47  * not. Offset values in the command entry point to other locations
48  * internal to the mmap-ed area. There is separate space outside the
49  * command ring for data buffers. This leaves maximum flexibility for
50  * moving buffer allocations, or even page flipping or other
51  * allocation techniques, without altering the command ring layout.
52  *
53  * SECURITY:
54  * The user process must be assumed to be malicious. There's no way to
55  * prevent it breaking the command ring protocol if it wants, but in
56  * order to prevent other issues we must only ever read *data* from
57  * the shared memory area, not offsets or sizes. This applies to
58  * command ring entries as well as the mailbox. Extra code needed for
59  * this may have a 'UAM' comment.
60  */
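/*
 * Illustrative sketch (not part of this driver) of how a userspace
 * consumer is expected to walk the ring described above; "map" and
 * "handle_cmd" are hypothetical names, while the mailbox and entry
 * layouts come from include/uapi/linux/target_core_user.h:
 *
 *	struct tcmu_mailbox *mb = map;	// start of the mmap-ed region
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		struct tcmu_cmd_entry *ent =
 *			map + mb->cmdr_off + mb->cmd_tail;
 *
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *			handle_cmd(ent);	// PAD entries are skipped
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) %
 *			       mb->cmdr_size;
 *	}
 */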
61 
62 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
63 
64 /* For the cmd area, the size is fixed at 8MB */
65 #define CMDR_SIZE (8 * 1024 * 1024)
66 
67 /*
68  * For the data area, the block size is PAGE_SIZE and
69  * the default total size is 256K * PAGE_SIZE.
70  */
71 #define DATA_BLOCK_SIZE PAGE_SIZE
72 #define DATA_BLOCK_SHIFT PAGE_SHIFT
73 #define DATA_BLOCK_BITS_DEF (256 * 1024)
74 
75 #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
76 #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
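/*
 * Worked example of the conversions above, assuming 4K pages
 * (DATA_BLOCK_SHIFT == 12): TCMU_MBS_TO_BLOCKS(1024) == 1024 << 8 ==
 * 262144 blocks, i.e. a 1GB data area is 256K page-sized blocks, and
 * TCMU_BLOCKS_TO_MBS(262144) == 1024 converts back.
 */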
77 
78 /*
79  * Default global maximum of data blocks (512K * PAGE_SIZE bytes)
80  * above which the unmap work will be started.
81  */
82 #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
83 
84 static u8 tcmu_kern_cmd_reply_supported;
85 static u8 tcmu_netlink_blocked;
86 
87 static struct device *tcmu_root_device;
88 
89 struct tcmu_hba {
90 	u32 host_id;
91 };
92 
93 #define TCMU_CONFIG_LEN 256
94 
95 static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
96 static LIST_HEAD(tcmu_nl_cmd_list);
97 
98 struct tcmu_dev;
99 
100 struct tcmu_nl_cmd {
101 	/* wake up thread waiting for reply */
102 	struct completion complete;
103 	struct list_head nl_list;
104 	struct tcmu_dev *udev;
105 	int cmd;
106 	int status;
107 };
108 
109 struct tcmu_dev {
110 	struct list_head node;
111 	struct kref kref;
112 
113 	struct se_device se_dev;
114 
115 	char *name;
116 	struct se_hba *hba;
117 
118 #define TCMU_DEV_BIT_OPEN 0
119 #define TCMU_DEV_BIT_BROKEN 1
120 #define TCMU_DEV_BIT_BLOCKED 2
121 	unsigned long flags;
122 
123 	struct uio_info uio_info;
124 
125 	struct inode *inode;
126 
127 	struct tcmu_mailbox *mb_addr;
128 	uint64_t dev_size;
129 	u32 cmdr_size;
130 	u32 cmdr_last_cleaned;
131 	/* Offset of data area from start of mb */
132 	/* Must add data_off and mb_addr to get the address */
133 	size_t data_off;
134 	size_t data_size;
135 	uint32_t max_blocks;
136 	size_t ring_size;
137 
138 	struct mutex cmdr_lock;
139 	struct list_head qfull_queue;
140 
141 	uint32_t dbi_max;
142 	uint32_t dbi_thresh;
143 	unsigned long *data_bitmap;
144 	struct radix_tree_root data_blocks;
145 
146 	struct idr commands;
147 
148 	struct timer_list cmd_timer;
149 	unsigned int cmd_time_out;
150 	struct list_head inflight_queue;
151 
152 	struct timer_list qfull_timer;
153 	int qfull_time_out;
154 
155 	struct list_head timedout_entry;
156 
157 	struct tcmu_nl_cmd curr_nl_cmd;
158 
159 	char dev_config[TCMU_CONFIG_LEN];
160 
161 	int nl_reply_supported;
162 };
163 
164 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
165 
166 #define CMDR_OFF sizeof(struct tcmu_mailbox)
167 
168 struct tcmu_cmd {
169 	struct se_cmd *se_cmd;
170 	struct tcmu_dev *tcmu_dev;
171 	struct list_head queue_entry;
172 
173 	uint16_t cmd_id;
174 
175 	/* Can't use se_cmd when cleaning up expired cmds, because if
176 	   cmd has been completed then accessing se_cmd is off limits */
177 	uint32_t dbi_cnt;
178 	uint32_t dbi_cur;
179 	uint32_t *dbi;
180 
181 	unsigned long deadline;
182 
183 #define TCMU_CMD_BIT_EXPIRED 0
184 	unsigned long flags;
185 };
186 /*
187  * To avoid deadlock, the mutex lock order should always be:
188  *
189  * mutex_lock(&root_udev_mutex);
190  * ...
191  * mutex_lock(&tcmu_dev->cmdr_lock);
192  * mutex_unlock(&tcmu_dev->cmdr_lock);
193  * ...
194  * mutex_unlock(&root_udev_mutex);
195  */
196 static DEFINE_MUTEX(root_udev_mutex);
197 static LIST_HEAD(root_udev);
198 
199 static DEFINE_SPINLOCK(timed_out_udevs_lock);
200 static LIST_HEAD(timed_out_udevs);
201 
202 static struct kmem_cache *tcmu_cmd_cache;
203 
204 static atomic_t global_db_count = ATOMIC_INIT(0);
205 static struct delayed_work tcmu_unmap_work;
206 static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
207 
208 static int tcmu_set_global_max_data_area(const char *str,
209 					 const struct kernel_param *kp)
210 {
211 	int ret, max_area_mb;
212 
213 	ret = kstrtoint(str, 10, &max_area_mb);
214 	if (ret)
215 		return -EINVAL;
216 
217 	if (max_area_mb <= 0) {
218 		pr_err("global_max_data_area must be larger than 0.\n");
219 		return -EINVAL;
220 	}
221 
222 	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
223 	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
224 		schedule_delayed_work(&tcmu_unmap_work, 0);
225 	else
226 		cancel_delayed_work_sync(&tcmu_unmap_work);
227 
228 	return 0;
229 }
230 
231 static int tcmu_get_global_max_data_area(char *buffer,
232 					 const struct kernel_param *kp)
233 {
234 	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
235 }
236 
237 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
238 	.set = tcmu_set_global_max_data_area,
239 	.get = tcmu_get_global_max_data_area,
240 };
241 
242 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
243 		S_IWUSR | S_IRUGO);
244 MODULE_PARM_DESC(global_max_data_area_mb,
245 		 "Max MBs allowed to be allocated to all the tcmu device's "
246 		 "data areas.");
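/*
 * Usage note: like any module_param_cb() parameter this one appears
 * under /sys/module/target_core_user/parameters/, so the cap can be
 * raised at runtime, e.g. (the value here is illustrative):
 *
 *	echo 4096 > /sys/module/target_core_user/parameters/global_max_data_area_mb
 */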
247 
248 static int tcmu_get_block_netlink(char *buffer,
249 				  const struct kernel_param *kp)
250 {
251 	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
252 		       "blocked" : "unblocked");
253 }
254 
255 static int tcmu_set_block_netlink(const char *str,
256 				  const struct kernel_param *kp)
257 {
258 	int ret;
259 	u8 val;
260 
261 	ret = kstrtou8(str, 0, &val);
262 	if (ret < 0)
263 		return ret;
264 
265 	if (val > 1) {
266 		pr_err("Invalid block netlink value %u\n", val);
267 		return -EINVAL;
268 	}
269 
270 	tcmu_netlink_blocked = val;
271 	return 0;
272 }
273 
274 static const struct kernel_param_ops tcmu_block_netlink_op = {
275 	.set = tcmu_set_block_netlink,
276 	.get = tcmu_get_block_netlink,
277 };
278 
279 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
280 MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
281 
282 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
283 {
284 	struct tcmu_dev *udev = nl_cmd->udev;
285 
286 	if (!tcmu_netlink_blocked) {
287 		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
288 		return -EBUSY;
289 	}
290 
291 	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
292 		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
293 		nl_cmd->status = -EINTR;
294 		list_del(&nl_cmd->nl_list);
295 		complete(&nl_cmd->complete);
296 	}
297 	return 0;
298 }
299 
300 static int tcmu_set_reset_netlink(const char *str,
301 				  const struct kernel_param *kp)
302 {
303 	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
304 	int ret;
305 	u8 val;
306 
307 	ret = kstrtou8(str, 0, &val);
308 	if (ret < 0)
309 		return ret;
310 
311 	if (val != 1) {
312 		pr_err("Invalid reset netlink value %u\n", val);
313 		return -EINVAL;
314 	}
315 
316 	mutex_lock(&tcmu_nl_cmd_mutex);
317 	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
318 		ret = tcmu_fail_netlink_cmd(nl_cmd);
319 		if (ret)
320 			break;
321 	}
322 	mutex_unlock(&tcmu_nl_cmd_mutex);
323 
324 	return ret;
325 }
326 
327 static const struct kernel_param_ops tcmu_reset_netlink_op = {
328 	.set = tcmu_set_reset_netlink,
329 };
330 
331 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
332 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
333 
334 /* multicast group */
335 enum tcmu_multicast_groups {
336 	TCMU_MCGRP_CONFIG,
337 };
338 
339 static const struct genl_multicast_group tcmu_mcgrps[] = {
340 	[TCMU_MCGRP_CONFIG] = { .name = "config", },
341 };
342 
343 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
344 	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
345 	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
346 	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
347 	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
348 	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
349 };
350 
351 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
352 {
353 	struct tcmu_dev *udev = NULL;
354 	struct tcmu_nl_cmd *nl_cmd;
355 	int dev_id, rc, ret = 0;
356 
357 	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
358 	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
359 		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
360 		return -EINVAL;
361 	}
362 
363 	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
364 	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
365 
366 	mutex_lock(&tcmu_nl_cmd_mutex);
367 	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
368 		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
369 			udev = nl_cmd->udev;
370 			break;
371 		}
372 	}
373 
374 	if (!udev) {
375 		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
376 		       completed_cmd, rc, dev_id);
377 		ret = -ENODEV;
378 		goto unlock;
379 	}
380 	list_del(&nl_cmd->nl_list);
381 
382 	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
383 		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
384 		 nl_cmd->status);
385 
386 	if (nl_cmd->cmd != completed_cmd) {
387 		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
388 		       udev->name, completed_cmd, nl_cmd->cmd);
389 		ret = -EINVAL;
390 		goto unlock;
391 	}
392 
393 	nl_cmd->status = rc;
394 	complete(&nl_cmd->complete);
395 unlock:
396 	mutex_unlock(&tcmu_nl_cmd_mutex);
397 	return ret;
398 }
399 
400 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
401 {
402 	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
403 }
404 
405 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
406 {
407 	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
408 }
409 
410 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
411 				       struct genl_info *info)
412 {
413 	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
414 }
415 
416 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
417 {
418 	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
419 		tcmu_kern_cmd_reply_supported  =
420 			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
421 		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
422 		       tcmu_kern_cmd_reply_supported);
423 	}
424 
425 	return 0;
426 }
427 
428 static const struct genl_ops tcmu_genl_ops[] = {
429 	{
430 		.cmd	= TCMU_CMD_SET_FEATURES,
431 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
432 		.flags	= GENL_ADMIN_PERM,
433 		.doit	= tcmu_genl_set_features,
434 	},
435 	{
436 		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
437 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
438 		.flags	= GENL_ADMIN_PERM,
439 		.doit	= tcmu_genl_add_dev_done,
440 	},
441 	{
442 		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
443 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
444 		.flags	= GENL_ADMIN_PERM,
445 		.doit	= tcmu_genl_rm_dev_done,
446 	},
447 	{
448 		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
449 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
450 		.flags	= GENL_ADMIN_PERM,
451 		.doit	= tcmu_genl_reconfig_dev_done,
452 	},
453 };
454 
455 /* Our generic netlink family */
456 static struct genl_family tcmu_genl_family __ro_after_init = {
457 	.module = THIS_MODULE,
458 	.hdrsize = 0,
459 	.name = "TCM-USER",
460 	.version = 2,
461 	.maxattr = TCMU_ATTR_MAX,
462 	.policy = tcmu_attr_policy,
463 	.mcgrps = tcmu_mcgrps,
464 	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
465 	.netnsok = true,
466 	.ops = tcmu_genl_ops,
467 	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
468 };
469 
470 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
471 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
472 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
473 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
474 
475 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
476 {
477 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
478 	uint32_t i;
479 
480 	for (i = 0; i < len; i++)
481 		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
482 }
483 
484 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
485 					struct tcmu_cmd *tcmu_cmd)
486 {
487 	struct page *page;
488 	int ret, dbi;
489 
490 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
491 	if (dbi == udev->dbi_thresh)
492 		return false;
493 
494 	page = radix_tree_lookup(&udev->data_blocks, dbi);
495 	if (!page) {
496 		if (atomic_add_return(1, &global_db_count) >
497 				      tcmu_global_max_blocks)
498 			schedule_delayed_work(&tcmu_unmap_work, 0);
499 
500 		/* try to get a new page from the mm */
501 		page = alloc_page(GFP_NOIO);
502 		if (!page)
503 			goto err_alloc;
504 
505 		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
506 		if (ret)
507 			goto err_insert;
508 	}
509 
510 	if (dbi > udev->dbi_max)
511 		udev->dbi_max = dbi;
512 
513 	set_bit(dbi, udev->data_bitmap);
514 	tcmu_cmd_set_dbi(tcmu_cmd, dbi);
515 
516 	return true;
517 err_insert:
518 	__free_page(page);
519 err_alloc:
520 	atomic_dec(&global_db_count);
521 	return false;
522 }
523 
524 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
525 				  struct tcmu_cmd *tcmu_cmd)
526 {
527 	int i;
528 
529 	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
530 		if (!tcmu_get_empty_block(udev, tcmu_cmd))
531 			return false;
532 	}
533 	return true;
534 }
535 
536 static inline struct page *
537 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
538 {
539 	return radix_tree_lookup(&udev->data_blocks, dbi);
540 }
541 
542 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
543 {
544 	kfree(tcmu_cmd->dbi);
545 	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
546 }
547 
548 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
549 {
550 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
551 	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
552 
553 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
554 		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
555 		data_length += round_up(se_cmd->t_bidi_data_sg->length,
556 				DATA_BLOCK_SIZE);
557 	}
558 
559 	return data_length;
560 }
561 
562 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
563 {
564 	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
565 
566 	return data_length / DATA_BLOCK_SIZE;
567 }
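/*
 * Worked example for the two helpers above, assuming 4K pages: a
 * non-BIDI command with se_cmd->data_length == 6000 rounds up to
 * 8192 bytes, so tcmu_cmd_get_block_cnt() returns 2 data blocks.
 */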
568 
569 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
570 {
571 	struct se_device *se_dev = se_cmd->se_dev;
572 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
573 	struct tcmu_cmd *tcmu_cmd;
574 
575 	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
576 	if (!tcmu_cmd)
577 		return NULL;
578 
579 	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
580 	tcmu_cmd->se_cmd = se_cmd;
581 	tcmu_cmd->tcmu_dev = udev;
582 
583 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
584 	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
585 	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
586 				GFP_NOIO);
587 	if (!tcmu_cmd->dbi) {
588 		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
589 		return NULL;
590 	}
591 
592 	return tcmu_cmd;
593 }
594 
595 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
596 {
597 	unsigned long offset = offset_in_page(vaddr);
598 	void *start = vaddr - offset;
599 
600 	size = round_up(size+offset, PAGE_SIZE);
601 
602 	while (size) {
603 		flush_dcache_page(vmalloc_to_page(start));
604 		start += PAGE_SIZE;
605 		size -= PAGE_SIZE;
606 	}
607 }
608 
609 /*
610  * Some ring helper functions. We don't assume size is a power of 2 so
611  * we can't use circ_buf.h.
612  */
613 static inline size_t spc_used(size_t head, size_t tail, size_t size)
614 {
615 	int diff = head - tail;
616 
617 	if (diff >= 0)
618 		return diff;
619 	else
620 		return size + diff;
621 }
622 
623 static inline size_t spc_free(size_t head, size_t tail, size_t size)
624 {
625 	/* Keep 1 byte unused or we can't tell full from empty */
626 	return (size - spc_used(head, tail, size) - 1);
627 }
628 
629 static inline size_t head_to_end(size_t head, size_t size)
630 {
631 	return size - head;
632 }
633 
634 static inline void new_iov(struct iovec **iov, int *iov_cnt)
635 {
636 	struct iovec *iovec;
637 
638 	if (*iov_cnt != 0)
639 		(*iov)++;
640 	(*iov_cnt)++;
641 
642 	iovec = *iov;
643 	memset(iovec, 0, sizeof(struct iovec));
644 }
645 
646 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
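/*
 * Worked example of the ring accounting above, with size == 4096:
 * head == 100 and tail == 4000 gives spc_used() == 100 - 4000 + 4096
 * == 196, and spc_free() == 4096 - 196 - 1 == 3899 (one byte is kept
 * unused to tell full from empty). UPDATE_HEAD(head, 200, 4096) with
 * head == 4000 stores (4000 + 200) % 4096 == 104, i.e. it wraps.
 */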
647 
648 /* offset is relative to mb_addr */
649 static inline size_t get_block_offset_user(struct tcmu_dev *dev,
650 		int dbi, int remaining)
651 {
652 	return dev->data_off + dbi * DATA_BLOCK_SIZE +
653 		DATA_BLOCK_SIZE - remaining;
654 }
655 
656 static inline size_t iov_tail(struct iovec *iov)
657 {
658 	return (size_t)iov->iov_base + iov->iov_len;
659 }
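/*
 * Worked example, assuming 4K blocks and data_off == 8MB: for
 * dbi == 3 and block_remaining == 4096 (a fresh block),
 * get_block_offset_user() returns 8MB + 3 * 4096, the start of block
 * 3 relative to mb_addr. iov_tail() of {.iov_base = that offset,
 * .iov_len = 4096} is then exactly the start of block 4, which is
 * how scatter_data_area() below detects that adjacent blocks can be
 * merged into a single iovec.
 */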
660 
661 static void scatter_data_area(struct tcmu_dev *udev,
662 	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
663 	unsigned int data_nents, struct iovec **iov,
664 	int *iov_cnt, bool copy_data)
665 {
666 	int i, dbi;
667 	int block_remaining = 0;
668 	void *from, *to = NULL;
669 	size_t copy_bytes, to_offset, offset;
670 	struct scatterlist *sg;
671 	struct page *page;
672 
673 	for_each_sg(data_sg, sg, data_nents, i) {
674 		int sg_remaining = sg->length;
675 		from = kmap_atomic(sg_page(sg)) + sg->offset;
676 		while (sg_remaining > 0) {
677 			if (block_remaining == 0) {
678 				if (to) {
679 					flush_dcache_page(page);
680 					kunmap_atomic(to);
681 				}
682 
683 				block_remaining = DATA_BLOCK_SIZE;
684 				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
685 				page = tcmu_get_block_page(udev, dbi);
686 				to = kmap_atomic(page);
687 			}
688 
689 			/*
690 			 * Convert to the virtual offset of the ring data area.
691 			 */
692 			to_offset = get_block_offset_user(udev, dbi,
693 					block_remaining);
694 
695 			/*
696 			 * The following code will gather and map the blocks
697 			 * to the same iovec when the blocks are all next to
698 			 * each other.
699 			 */
700 			copy_bytes = min_t(size_t, sg_remaining,
701 					block_remaining);
702 			if (*iov_cnt != 0 &&
703 			    to_offset == iov_tail(*iov)) {
704 				/*
705 				 * Will append to the current iovec, because
706 				 * the current block page is next to the
707 				 * previous one.
708 				 */
709 				(*iov)->iov_len += copy_bytes;
710 			} else {
711 				/*
712 				 * Will allocate a new iovec because this is
713 				 * the first time here or the current block
714 				 * page is not adjacent to the previous one.
715 				 */
716 				new_iov(iov, iov_cnt);
717 				(*iov)->iov_base = (void __user *)to_offset;
718 				(*iov)->iov_len = copy_bytes;
719 			}
720 
721 			if (copy_data) {
722 				offset = DATA_BLOCK_SIZE - block_remaining;
723 				memcpy(to + offset,
724 				       from + sg->length - sg_remaining,
725 				       copy_bytes);
726 			}
727 
728 			sg_remaining -= copy_bytes;
729 			block_remaining -= copy_bytes;
730 		}
731 		kunmap_atomic(from - sg->offset);
732 	}
733 
734 	if (to) {
735 		flush_dcache_page(page);
736 		kunmap_atomic(to);
737 	}
738 }
739 
740 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
741 			     bool bidi, uint32_t read_len)
742 {
743 	struct se_cmd *se_cmd = cmd->se_cmd;
744 	int i, dbi;
745 	int block_remaining = 0;
746 	void *from = NULL, *to;
747 	size_t copy_bytes, offset;
748 	struct scatterlist *sg, *data_sg;
749 	struct page *page;
750 	unsigned int data_nents;
751 	uint32_t count = 0;
752 
753 	if (!bidi) {
754 		data_sg = se_cmd->t_data_sg;
755 		data_nents = se_cmd->t_data_nents;
756 	} else {
757 
758 		/*
759 		 * In the bidi case, the first 'count' blocks hold the
760 		 * Data-Out buffer, so they must be skipped before
761 		 * gathering the Data-In buffer.
762 		 */
763 		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
764 
765 		data_sg = se_cmd->t_bidi_data_sg;
766 		data_nents = se_cmd->t_bidi_data_nents;
767 	}
768 
769 	tcmu_cmd_set_dbi_cur(cmd, count);
770 
771 	for_each_sg(data_sg, sg, data_nents, i) {
772 		int sg_remaining = sg->length;
773 		to = kmap_atomic(sg_page(sg)) + sg->offset;
774 		while (sg_remaining > 0 && read_len > 0) {
775 			if (block_remaining == 0) {
776 				if (from)
777 					kunmap_atomic(from);
778 
779 				block_remaining = DATA_BLOCK_SIZE;
780 				dbi = tcmu_cmd_get_dbi(cmd);
781 				page = tcmu_get_block_page(udev, dbi);
782 				from = kmap_atomic(page);
783 				flush_dcache_page(page);
784 			}
785 			copy_bytes = min_t(size_t, sg_remaining,
786 					block_remaining);
787 			if (read_len < copy_bytes)
788 				copy_bytes = read_len;
789 			offset = DATA_BLOCK_SIZE - block_remaining;
790 			memcpy(to + sg->length - sg_remaining, from + offset,
791 					copy_bytes);
792 
793 			sg_remaining -= copy_bytes;
794 			block_remaining -= copy_bytes;
795 			read_len -= copy_bytes;
796 		}
797 		kunmap_atomic(to - sg->offset);
798 		if (read_len == 0)
799 			break;
800 	}
801 	if (from)
802 		kunmap_atomic(from);
803 }
804 
805 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
806 {
807 	return thresh - bitmap_weight(bitmap, thresh);
808 }
809 
810 /*
811  * We can't queue a command until we have space available on the cmd ring *and*
812  * space available on the data area.
813  *
814  * Called with ring lock held.
815  */
816 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
817 		size_t cmd_size, size_t data_needed)
818 {
819 	struct tcmu_mailbox *mb = udev->mb_addr;
820 	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
821 				/ DATA_BLOCK_SIZE;
822 	size_t space, cmd_needed;
823 	u32 cmd_head;
824 
825 	tcmu_flush_dcache_range(mb, sizeof(*mb));
826 
827 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
828 
829 	/*
830 	 * If cmd end-of-ring space is too small then we need space for a PAD
831 	 * entry plus the original cmd - cmds are internally contiguous.
832 	 */
833 	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
834 		cmd_needed = cmd_size;
835 	else
836 		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
837 
838 	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
839 	if (space < cmd_needed) {
840 		pr_debug("no cmd space: %u %u %u\n", cmd_head,
841 		       udev->cmdr_last_cleaned, udev->cmdr_size);
842 		return false;
843 	}
844 
845 	/* try to check and get the data blocks as needed */
846 	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
847 	if ((space * DATA_BLOCK_SIZE) < data_needed) {
848 		unsigned long blocks_left =
849 				(udev->max_blocks - udev->dbi_thresh) + space;
850 
851 		if (blocks_left < blocks_needed) {
852 			pr_debug("no data space: only %lu available, but ask for %zu\n",
853 					blocks_left * DATA_BLOCK_SIZE,
854 					data_needed);
855 			return false;
856 		}
857 
858 		udev->dbi_thresh += blocks_needed;
859 		if (udev->dbi_thresh > udev->max_blocks)
860 			udev->dbi_thresh = udev->max_blocks;
861 	}
862 
863 	return tcmu_get_empty_blocks(udev, cmd);
864 }
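/*
 * Example of the dbi_thresh growth above: with dbi_thresh == 64, of
 * which 10 blocks are free, a request needing 30 blocks sees
 * space == 10 and blocks_left == (max_blocks - 64) + 10; if that is
 * enough, dbi_thresh grows by 30 (capped at max_blocks) so
 * tcmu_get_empty_blocks() can use bits beyond the old threshold.
 */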
865 
866 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
867 {
868 	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
869 			sizeof(struct tcmu_cmd_entry));
870 }
871 
872 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
873 					   size_t base_command_size)
874 {
875 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
876 	size_t command_size;
877 
878 	command_size = base_command_size +
879 		round_up(scsi_command_size(se_cmd->t_task_cdb),
880 				TCMU_OP_ALIGN_SIZE);
881 
882 	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
883 
884 	return command_size;
885 }
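/*
 * Worked example: tcmu_cmd_get_base_cmd_size(8) is
 * offsetof(struct tcmu_cmd_entry, req.iov[8]); adding e.g. a 10-byte
 * CDB rounded up to TCMU_OP_ALIGN_SIZE yields the final entry length
 * stored in hdr.len_op, which the WARN_ON above checks stays
 * TCMU_OP_ALIGN_SIZE-aligned.
 */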
886 
887 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
888 				 struct timer_list *timer)
889 {
890 	if (!tmo)
891 		return;
892 
893 	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
894 	if (!timer_pending(timer))
895 		mod_timer(timer, tcmu_cmd->deadline);
896 
897 	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
898 		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
899 }
900 
901 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
902 {
903 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
904 	unsigned int tmo;
905 
906 	/*
907 	 * A qfull_time_out of 0 means never queue. For backwards compat,
908 	 * a negative value falls back to cmd_time_out or the default.
909 	 */
910 	if (!udev->qfull_time_out)
911 		return -ETIMEDOUT;
912 	else if (udev->qfull_time_out > 0)
913 		tmo = udev->qfull_time_out;
914 	else if (udev->cmd_time_out)
915 		tmo = udev->cmd_time_out;
916 	else
917 		tmo = TCMU_TIME_OUT;
918 
919 	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
920 
921 	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
922 	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
923 		 tcmu_cmd, udev->name);
924 	return 0;
925 }
926 
927 /**
928  * queue_cmd_ring - queue cmd to ring or internally
929  * @tcmu_cmd: cmd to queue
930  * @scsi_err: TCM error code, set when failure (-1) is returned.
931  *
932  * Returns:
933  * -1 we cannot queue internally or to the ring.
934  *  0 success
935  *  1 internally queued to wait for ring memory to free.
936  */
937 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
938 {
939 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
940 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
941 	size_t base_command_size, command_size;
942 	struct tcmu_mailbox *mb;
943 	struct tcmu_cmd_entry *entry;
944 	struct iovec *iov;
945 	int iov_cnt, cmd_id;
946 	uint32_t cmd_head;
947 	uint64_t cdb_off;
948 	bool copy_to_data_area;
949 	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
950 
951 	*scsi_err = TCM_NO_SENSE;
952 
953 	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
954 		*scsi_err = TCM_LUN_BUSY;
955 		return -1;
956 	}
957 
958 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
959 		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
960 		return -1;
961 	}
962 
963 	/*
964 	 * Must be a certain minimum size for response sense info, but
965 	 * also may be larger if the iov array is large.
966 	 *
967 	 * We prepare as many iovs as possible for potential uses here,
968 	 * because it's expensive to tell how many regions are free in
969 	 * the bitmap & global data pool, and the size calculated here
970 	 * is only used for the space checks.
971 	 *
972 	 * The size will be recalculated later, as actually needed, to
973 	 * save cmd area memory.
974 	 */
975 	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
976 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
977 
978 	if (!list_empty(&udev->qfull_queue))
979 		goto queue;
980 
981 	mb = udev->mb_addr;
982 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
983 	if ((command_size > (udev->cmdr_size / 2)) ||
984 	    data_length > udev->data_size) {
985 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
986 			"cmd ring/data area\n", command_size, data_length,
987 			udev->cmdr_size, udev->data_size);
988 		*scsi_err = TCM_INVALID_CDB_FIELD;
989 		return -1;
990 	}
991 
992 	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
993 		/*
994 		 * Don't leave commands partially setup because the unmap
995 		 * thread might need the blocks to make forward progress.
996 		 */
997 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
998 		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
999 		goto queue;
1000 	}
1001 
1002 	/* Insert a PAD if end-of-ring space is too small */
1003 	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
1004 		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
1005 
1006 		entry = (void *) mb + CMDR_OFF + cmd_head;
1007 		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
1008 		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
1009 		entry->hdr.cmd_id = 0; /* not used for PAD */
1010 		entry->hdr.kflags = 0;
1011 		entry->hdr.uflags = 0;
1012 		tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
1013 
1014 		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
1015 		tcmu_flush_dcache_range(mb, sizeof(*mb));
1016 
1017 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1018 		WARN_ON(cmd_head != 0);
1019 	}
1020 
1021 	entry = (void *) mb + CMDR_OFF + cmd_head;
1022 	memset(entry, 0, command_size);
1023 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1024 
1025 	/* Handle allocating space from the data area */
1026 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1027 	iov = &entry->req.iov[0];
1028 	iov_cnt = 0;
1029 	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
1030 		|| se_cmd->se_cmd_flags & SCF_BIDI);
1031 	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
1032 			  se_cmd->t_data_nents, &iov, &iov_cnt,
1033 			  copy_to_data_area);
1034 	entry->req.iov_cnt = iov_cnt;
1035 
1036 	/* Handle BIDI commands */
1037 	iov_cnt = 0;
1038 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
1039 		iov++;
1040 		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
1041 				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
1042 				  false);
1043 	}
1044 	entry->req.iov_bidi_cnt = iov_cnt;
1045 
1046 	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
1047 	if (cmd_id < 0) {
1048 		pr_err("tcmu: Could not allocate cmd id.\n");
1049 
1050 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1051 		*scsi_err = TCM_OUT_OF_RESOURCES;
1052 		return -1;
1053 	}
1054 	tcmu_cmd->cmd_id = cmd_id;
1055 
1056 	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1057 		 tcmu_cmd, udev->name);
1058 
1059 	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1060 
1061 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1062 
1063 	/*
1064 	 * Recalculate the command's base size and total size according
1065 	 * to the actual needs.
1066 	 */
1067 	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
1068 						       entry->req.iov_bidi_cnt);
1069 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1070 
1071 	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1072 
1073 	/* All offsets relative to mb_addr, not start of entry! */
1074 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
1075 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1076 	entry->req.cdb_off = cdb_off;
1077 	tcmu_flush_dcache_range(entry, command_size);
1078 
1079 	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1080 	tcmu_flush_dcache_range(mb, sizeof(*mb));
1081 
1082 	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1083 
1084 	/* TODO: only if FLUSH and FUA? */
1085 	uio_event_notify(&udev->uio_info);
1086 
1087 	return 0;
1088 
1089 queue:
1090 	if (add_to_qfull_queue(tcmu_cmd)) {
1091 		*scsi_err = TCM_OUT_OF_RESOURCES;
1092 		return -1;
1093 	}
1094 
1095 	return 1;
1096 }
1097 
1098 static sense_reason_t
1099 tcmu_queue_cmd(struct se_cmd *se_cmd)
1100 {
1101 	struct se_device *se_dev = se_cmd->se_dev;
1102 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
1103 	struct tcmu_cmd *tcmu_cmd;
1104 	sense_reason_t scsi_ret;
1105 	int ret;
1106 
1107 	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1108 	if (!tcmu_cmd)
1109 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1110 
1111 	mutex_lock(&udev->cmdr_lock);
1112 	ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1113 	mutex_unlock(&udev->cmdr_lock);
1114 	if (ret < 0)
1115 		tcmu_free_cmd(tcmu_cmd);
1116 	return scsi_ret;
1117 }
1118 
1119 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1120 {
1121 	struct se_cmd *se_cmd = cmd->se_cmd;
1122 	struct tcmu_dev *udev = cmd->tcmu_dev;
1123 	bool read_len_valid = false;
1124 	uint32_t read_len;
1125 
1126 	/*
1127 	 * cmd has been completed already from timeout, just reclaim
1128 	 * data area space and free cmd
1129 	 */
1130 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1131 		WARN_ON_ONCE(se_cmd);
1132 		goto out;
1133 	}
1134 
1135 	list_del_init(&cmd->queue_entry);
1136 
1137 	tcmu_cmd_reset_dbi_cur(cmd);
1138 
1139 	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1140 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1141 			cmd->se_cmd);
1142 		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1143 		goto done;
1144 	}
1145 
1146 	read_len = se_cmd->data_length;
1147 	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1148 	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1149 		read_len_valid = true;
1150 		if (entry->rsp.read_len < read_len)
1151 			read_len = entry->rsp.read_len;
1152 	}
1153 
1154 	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1155 		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1156 		if (!read_len_valid )
1157 			goto done;
1158 		else
1159 			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1160 	}
1161 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
1162 		/* Get Data-In buffer before clean up */
1163 		gather_data_area(udev, cmd, true, read_len);
1164 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1165 		gather_data_area(udev, cmd, false, read_len);
1166 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1167 		/* TODO: */
1168 	} else if (se_cmd->data_direction != DMA_NONE) {
1169 		pr_warn("TCMU: data direction was %d!\n",
1170 			se_cmd->data_direction);
1171 	}
1172 
1173 done:
1174 	if (read_len_valid) {
1175 		pr_debug("read_len = %d\n", read_len);
1176 		target_complete_cmd_with_length(cmd->se_cmd,
1177 					entry->rsp.scsi_status, read_len);
1178 	} else
1179 		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1180 
1181 out:
1182 	cmd->se_cmd = NULL;
1183 	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1184 	tcmu_free_cmd(cmd);
1185 }
1186 
1187 static void tcmu_set_next_deadline(struct list_head *queue,
1188 				   struct timer_list *timer)
1189 {
1190 	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1191 	unsigned long deadline = 0;
1192 
1193 	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1194 		if (!time_after(jiffies, tcmu_cmd->deadline)) {
1195 			deadline = tcmu_cmd->deadline;
1196 			break;
1197 		}
1198 	}
1199 
1200 	if (deadline)
1201 		mod_timer(timer, deadline);
1202 	else
1203 		del_timer(timer);
1204 }
1205 
1206 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1207 {
1208 	struct tcmu_mailbox *mb;
1209 	struct tcmu_cmd *cmd;
1210 	int handled = 0;
1211 
1212 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1213 		pr_err("ring broken, not handling completions\n");
1214 		return 0;
1215 	}
1216 
1217 	mb = udev->mb_addr;
1218 	tcmu_flush_dcache_range(mb, sizeof(*mb));
1219 
1220 	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1221 
1222 		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1223 
1224 		/*
1225 		 * Flush at most up to the end of the cmd ring, since the
1226 		 * current entry might be a padding shorter than sizeof(*entry).
1227 		 */
1228 		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
1229 					       udev->cmdr_size);
1230 		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
1231 					ring_left : sizeof(*entry));
1232 
1233 		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1234 			UPDATE_HEAD(udev->cmdr_last_cleaned,
1235 				    tcmu_hdr_get_len(entry->hdr.len_op),
1236 				    udev->cmdr_size);
1237 			continue;
1238 		}
1239 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1240 
1241 		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1242 		if (!cmd) {
1243 			pr_err("cmd_id %u not found, ring is broken\n",
1244 			       entry->hdr.cmd_id);
1245 			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1246 			break;
1247 		}
1248 
1249 		tcmu_handle_completion(cmd, entry);
1250 
1251 		UPDATE_HEAD(udev->cmdr_last_cleaned,
1252 			    tcmu_hdr_get_len(entry->hdr.len_op),
1253 			    udev->cmdr_size);
1254 
1255 		handled++;
1256 	}
1257 
1258 	if (mb->cmd_tail == mb->cmd_head) {
1259 		/* no more pending commands */
1260 		del_timer(&udev->cmd_timer);
1261 
1262 		if (list_empty(&udev->qfull_queue)) {
1263 			/*
1264 			 * no more pending or waiting commands so try to
1265 			 * reclaim blocks if needed.
1266 			 */
1267 			if (atomic_read(&global_db_count) >
1268 			    tcmu_global_max_blocks)
1269 				schedule_delayed_work(&tcmu_unmap_work, 0);
1270 		}
1271 	} else if (udev->cmd_time_out) {
1272 		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1273 	}
1274 
1275 	return handled;
1276 }
1277 
1278 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
1279 {
1280 	struct se_cmd *se_cmd;
1281 
1282 	if (!time_after(jiffies, cmd->deadline))
1283 		return;
1284 
1285 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1286 	list_del_init(&cmd->queue_entry);
1287 	se_cmd = cmd->se_cmd;
1288 	cmd->se_cmd = NULL;
1289 
1290 	pr_debug("Timing out inflight cmd %u on dev %s.\n",
1291 		 cmd->cmd_id, cmd->tcmu_dev->name);
1292 
1293 	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1294 }
1295 
1296 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1297 {
1298 	struct se_cmd *se_cmd;
1299 
1300 	if (!time_after(jiffies, cmd->deadline))
1301 		return;
1302 
1303 	pr_debug("Timing out queued cmd %p on dev %s.\n",
1304 		  cmd, cmd->tcmu_dev->name);
1305 
1306 	list_del_init(&cmd->queue_entry);
1307 	se_cmd = cmd->se_cmd;
1308 	tcmu_free_cmd(cmd);
1309 
1310 	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
1311 }
1312 
1313 static void tcmu_device_timedout(struct tcmu_dev *udev)
1314 {
1315 	spin_lock(&timed_out_udevs_lock);
1316 	if (list_empty(&udev->timedout_entry))
1317 		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1318 	spin_unlock(&timed_out_udevs_lock);
1319 
1320 	schedule_delayed_work(&tcmu_unmap_work, 0);
1321 }
1322 
1323 static void tcmu_cmd_timedout(struct timer_list *t)
1324 {
1325 	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1326 
1327 	pr_debug("%s cmd timeout has expired\n", udev->name);
1328 	tcmu_device_timedout(udev);
1329 }
1330 
1331 static void tcmu_qfull_timedout(struct timer_list *t)
1332 {
1333 	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1334 
1335 	pr_debug("%s qfull timeout has expired\n", udev->name);
1336 	tcmu_device_timedout(udev);
1337 }
1338 
1339 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1340 {
1341 	struct tcmu_hba *tcmu_hba;
1342 
1343 	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1344 	if (!tcmu_hba)
1345 		return -ENOMEM;
1346 
1347 	tcmu_hba->host_id = host_id;
1348 	hba->hba_ptr = tcmu_hba;
1349 
1350 	return 0;
1351 }
1352 
1353 static void tcmu_detach_hba(struct se_hba *hba)
1354 {
1355 	kfree(hba->hba_ptr);
1356 	hba->hba_ptr = NULL;
1357 }
1358 
1359 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1360 {
1361 	struct tcmu_dev *udev;
1362 
1363 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1364 	if (!udev)
1365 		return NULL;
1366 	kref_init(&udev->kref);
1367 
1368 	udev->name = kstrdup(name, GFP_KERNEL);
1369 	if (!udev->name) {
1370 		kfree(udev);
1371 		return NULL;
1372 	}
1373 
1374 	udev->hba = hba;
1375 	udev->cmd_time_out = TCMU_TIME_OUT;
1376 	udev->qfull_time_out = -1;
1377 
1378 	udev->max_blocks = DATA_BLOCK_BITS_DEF;
1379 	mutex_init(&udev->cmdr_lock);
1380 
1381 	INIT_LIST_HEAD(&udev->node);
1382 	INIT_LIST_HEAD(&udev->timedout_entry);
1383 	INIT_LIST_HEAD(&udev->qfull_queue);
1384 	INIT_LIST_HEAD(&udev->inflight_queue);
1385 	idr_init(&udev->commands);
1386 
1387 	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1388 	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1389 
1390 	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1391 
1392 	return &udev->se_dev;
1393 }
1394 
1395 static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
1396 {
1397 	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1398 	LIST_HEAD(cmds);
1399 	sense_reason_t scsi_ret;
1400 	int ret;
1401 
1402 	if (list_empty(&udev->qfull_queue))
1403 		return;
1404 
1405 	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1406 
1407 	list_splice_init(&udev->qfull_queue, &cmds);
1408 
1409 	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1410 		list_del_init(&tcmu_cmd->queue_entry);
1411 
1412 		pr_debug("removing cmd %p on dev %s from queue\n",
1413 			 tcmu_cmd, udev->name);
1414 
1415 		if (fail) {
1416 			/*
1417 			 * We were not able to even start the command, so
1418 			 * fail with busy to allow a retry in case runner
1419 			 * was only temporarily down. If the device is being
1420 			 * removed then LIO core will do the right thing and
1421 			 * fail the retry.
1422 			 */
1423 			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1424 			tcmu_free_cmd(tcmu_cmd);
1425 			continue;
1426 		}
1427 
1428 		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1429 		if (ret < 0) {
1430 			pr_debug("cmd %p on dev %s failed with %u\n",
1431 				 tcmu_cmd, udev->name, scsi_ret);
1432 			/*
1433 			 * Ignore scsi_ret for now. target_complete_cmd
1434 			 * drops it.
1435 			 */
1436 			target_complete_cmd(tcmu_cmd->se_cmd,
1437 					    SAM_STAT_CHECK_CONDITION);
1438 			tcmu_free_cmd(tcmu_cmd);
1439 		} else if (ret > 0) {
1440 			pr_debug("ran out of space during cmdr queue run\n");
1441 			/*
1442 			 * cmd was requeued, so just put all cmds back in
1443 			 * the queue
1444 			 */
1445 			list_splice_tail(&cmds, &udev->qfull_queue);
1446 			break;
1447 		}
1448 	}
1449 
1450 	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1451 }
1452 
1453 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1454 {
1455 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1456 
1457 	mutex_lock(&udev->cmdr_lock);
1458 	tcmu_handle_completions(udev);
1459 	run_qfull_queue(udev, false);
1460 	mutex_unlock(&udev->cmdr_lock);
1461 
1462 	return 0;
1463 }
1464 
1465 /*
1466  * mmap code from uio.c. Copied here because we want to hook mmap()
1467  * and this stuff must come along.
1468  */
1469 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1470 {
1471 	struct tcmu_dev *udev = vma->vm_private_data;
1472 	struct uio_info *info = &udev->uio_info;
1473 
1474 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
1475 		if (info->mem[vma->vm_pgoff].size == 0)
1476 			return -1;
1477 		return (int)vma->vm_pgoff;
1478 	}
1479 	return -1;
1480 }
1481 
1482 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1483 {
1484 	struct page *page;
1485 
1486 	mutex_lock(&udev->cmdr_lock);
1487 	page = tcmu_get_block_page(udev, dbi);
1488 	if (likely(page)) {
1489 		mutex_unlock(&udev->cmdr_lock);
1490 		return page;
1491 	}
1492 
1493 	/*
1494 	 * Userspace messed up and passed in an address not in the
1495 	 * data iov passed to it.
1496 	 */
1497 	pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1498 	       dbi, udev->name);
1499 	page = NULL;
1500 	mutex_unlock(&udev->cmdr_lock);
1501 
1502 	return page;
1503 }
1504 
1505 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1506 {
1507 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
1508 	struct uio_info *info = &udev->uio_info;
1509 	struct page *page;
1510 	unsigned long offset;
1511 	void *addr;
1512 
1513 	int mi = tcmu_find_mem_index(vmf->vma);
1514 	if (mi < 0)
1515 		return VM_FAULT_SIGBUS;
1516 
1517 	/*
1518 	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1519 	 * to use mem[N].
1520 	 */
1521 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1522 
1523 	if (offset < udev->data_off) {
1524 		/* For the vmalloc()ed cmd area pages */
1525 		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1526 		page = vmalloc_to_page(addr);
1527 	} else {
1528 		uint32_t dbi;
1529 
1530 		/* For the dynamically growing data area pages */
1531 		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1532 		page = tcmu_try_get_block_page(udev, dbi);
1533 		if (!page)
1534 			return VM_FAULT_SIGBUS;
1535 	}
1536 
1537 	get_page(page);
1538 	vmf->page = page;
1539 	return 0;
1540 }
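/*
 * Example of the fault mapping above, assuming 4K pages and
 * data_off == 8MB: a fault at offset 8MB + 5 * 4096 is past the
 * vmalloc'ed cmd area and resolves to data block dbi == 5, while a
 * fault below 8MB is served from vmalloc_to_page() of the cmd ring
 * itself.
 */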
1541 
1542 static const struct vm_operations_struct tcmu_vm_ops = {
1543 	.fault = tcmu_vma_fault,
1544 };
1545 
1546 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1547 {
1548 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1549 
1550 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1551 	vma->vm_ops = &tcmu_vm_ops;
1552 
1553 	vma->vm_private_data = udev;
1554 
1555 	/* Ensure the mmap is exactly the right size */
1556 	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1557 		return -EINVAL;
1558 
1559 	return 0;
1560 }
1561 
1562 static int tcmu_open(struct uio_info *info, struct inode *inode)
1563 {
1564 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1565 
1566 	/* O_EXCL not supported for char devs, so fake it? */
1567 	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1568 		return -EBUSY;
1569 
1570 	udev->inode = inode;
1571 	kref_get(&udev->kref);
1572 
1573 	pr_debug("open\n");
1574 
1575 	return 0;
1576 }
1577 
1578 static void tcmu_dev_call_rcu(struct rcu_head *p)
1579 {
1580 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
1581 	struct tcmu_dev *udev = TCMU_DEV(dev);
1582 
1583 	kfree(udev->uio_info.name);
1584 	kfree(udev->name);
1585 	kfree(udev);
1586 }
1587 
1588 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1589 {
1590 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1591 		kmem_cache_free(tcmu_cmd_cache, cmd);
1592 		return 0;
1593 	}
1594 	return -EINVAL;
1595 }
1596 
1597 static void tcmu_blocks_release(struct radix_tree_root *blocks,
1598 				int start, int end)
1599 {
1600 	int i;
1601 	struct page *page;
1602 
1603 	for (i = start; i < end; i++) {
1604 		page = radix_tree_delete(blocks, i);
1605 		if (page) {
1606 			__free_page(page);
1607 			atomic_dec(&global_db_count);
1608 		}
1609 	}
1610 }
1611 
1612 static void tcmu_dev_kref_release(struct kref *kref)
1613 {
1614 	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1615 	struct se_device *dev = &udev->se_dev;
1616 	struct tcmu_cmd *cmd;
1617 	bool all_expired = true;
1618 	int i;
1619 
1620 	vfree(udev->mb_addr);
1621 	udev->mb_addr = NULL;
1622 
1623 	spin_lock_bh(&timed_out_udevs_lock);
1624 	if (!list_empty(&udev->timedout_entry))
1625 		list_del(&udev->timedout_entry);
1626 	spin_unlock_bh(&timed_out_udevs_lock);
1627 
1628 	/* Upper layer should drain all requests before calling this */
1629 	mutex_lock(&udev->cmdr_lock);
1630 	idr_for_each_entry(&udev->commands, cmd, i) {
1631 		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1632 			all_expired = false;
1633 	}
1634 	if (!list_empty(&udev->qfull_queue))
1635 		all_expired = false;
1636 	idr_destroy(&udev->commands);
1637 	WARN_ON(!all_expired);
1638 
1639 	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1640 	bitmap_free(udev->data_bitmap);
1641 	mutex_unlock(&udev->cmdr_lock);
1642 
1643 	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1644 }
1645 
1646 static int tcmu_release(struct uio_info *info, struct inode *inode)
1647 {
1648 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1649 
1650 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1651 
1652 	pr_debug("close\n");
1653 	/* release ref from open */
1654 	kref_put(&udev->kref, tcmu_dev_kref_release);
1655 	return 0;
1656 }
1657 
1658 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1659 {
1660 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1661 
1662 	if (!tcmu_kern_cmd_reply_supported)
1663 		return 0;
1664 
1665 	if (udev->nl_reply_supported <= 0)
1666 		return 0;
1667 
1668 	mutex_lock(&tcmu_nl_cmd_mutex);
1669 
1670 	if (tcmu_netlink_blocked) {
1671 		mutex_unlock(&tcmu_nl_cmd_mutex);
1672 		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1673 			udev->name);
1674 		return -EAGAIN;
1675 	}
1676 
1677 	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1678 		mutex_unlock(&tcmu_nl_cmd_mutex);
1679 		pr_warn("netlink cmd %d already executing on %s\n",
1680 			 nl_cmd->cmd, udev->name);
1681 		return -EBUSY;
1682 	}
1683 
1684 	memset(nl_cmd, 0, sizeof(*nl_cmd));
1685 	nl_cmd->cmd = cmd;
1686 	nl_cmd->udev = udev;
1687 	init_completion(&nl_cmd->complete);
1688 	INIT_LIST_HEAD(&nl_cmd->nl_list);
1689 
1690 	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1691 
1692 	mutex_unlock(&tcmu_nl_cmd_mutex);
1693 	return 0;
1694 }
1695 
1696 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1697 {
1698 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1699 
1700 	if (!tcmu_kern_cmd_reply_supported)
1701 		return;
1702 
1703 	if (udev->nl_reply_supported <= 0)
1704 		return;
1705 
1706 	mutex_lock(&tcmu_nl_cmd_mutex);
1707 
1708 	list_del(&nl_cmd->nl_list);
1709 	memset(nl_cmd, 0, sizeof(*nl_cmd));
1710 
1711 	mutex_unlock(&tcmu_nl_cmd_mutex);
1712 }
1713 
1714 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1715 {
1716 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1717 	int ret;
1718 
1719 	if (!tcmu_kern_cmd_reply_supported)
1720 		return 0;
1721 
1722 	if (udev->nl_reply_supported <= 0)
1723 		return 0;
1724 
1725 	pr_debug("sleeping for nl reply\n");
1726 	wait_for_completion(&nl_cmd->complete);
1727 
1728 	mutex_lock(&tcmu_nl_cmd_mutex);
1729 	nl_cmd->cmd = TCMU_CMD_UNSPEC;
1730 	ret = nl_cmd->status;
1731 	mutex_unlock(&tcmu_nl_cmd_mutex);
1732 
1733 	return ret;
1734 }
1735 
1736 static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1737 				   enum tcmu_genl_cmd cmd,
1738 				   struct sk_buff **buf, void **hdr)
1739 {
1740 	struct sk_buff *skb;
1741 	void *msg_header;
1742 	int ret = -ENOMEM;
1743 
1744 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1745 	if (!skb)
1746 		return ret;
1747 
1748 	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1749 	if (!msg_header)
1750 		goto free_skb;
1751 
1752 	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1753 	if (ret < 0)
1754 		goto free_skb;
1755 
1756 	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1757 	if (ret < 0)
1758 		goto free_skb;
1759 
1760 	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1761 	if (ret < 0)
1762 		goto free_skb;
1763 
1764 	*buf = skb;
1765 	*hdr = msg_header;
1766 	return ret;
1767 
1768 free_skb:
1769 	nlmsg_free(skb);
1770 	return ret;
1771 }
1772 
1773 static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1774 				   enum tcmu_genl_cmd cmd,
1775 				   struct sk_buff *skb, void *msg_header)
1776 {
1777 	int ret;
1778 
1779 	genlmsg_end(skb, msg_header);
1780 
1781 	ret = tcmu_init_genl_cmd_reply(udev, cmd);
1782 	if (ret) {
1783 		nlmsg_free(skb);
1784 		return ret;
1785 	}
1786 
1787 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1788 				      TCMU_MCGRP_CONFIG, GFP_KERNEL);
1789 
1790 	/* Wait during an add as the listener may not be up yet */
1791 	if (ret == 0 ||
1792 	   (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1793 		return tcmu_wait_genl_cmd_reply(udev);
1794 	else
1795 		tcmu_destroy_genl_cmd_reply(udev);
1796 
1797 	return ret;
1798 }
1799 
1800 static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1801 {
1802 	struct sk_buff *skb = NULL;
1803 	void *msg_header = NULL;
1804 	int ret = 0;
1805 
1806 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1807 				      &msg_header);
1808 	if (ret < 0)
1809 		return ret;
1810 	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
1811 				       msg_header);
1812 }
1813 
1814 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
1815 {
1816 	struct sk_buff *skb = NULL;
1817 	void *msg_header = NULL;
1818 	int ret = 0;
1819 
1820 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
1821 				      &skb, &msg_header);
1822 	if (ret < 0)
1823 		return ret;
1824 	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
1825 				       skb, msg_header);
1826 }
1827 
1828 static int tcmu_update_uio_info(struct tcmu_dev *udev)
1829 {
1830 	struct tcmu_hba *hba = udev->hba->hba_ptr;
1831 	struct uio_info *info;
1832 	char *str;
1833 
1834 	info = &udev->uio_info;
1835 
1836 	if (udev->dev_config[0])
1837 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
1838 				udev->name, udev->dev_config);
1839 	else
1840 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
1841 				udev->name);
1842 	if (!str)
1843 		return -ENOMEM;
1844 
1845 	/* If the old string exists, free it */
1846 	kfree(info->name);
1847 	info->name = str;
1848 
1849 	return 0;
1850 }
1851 
1852 static int tcmu_configure_device(struct se_device *dev)
1853 {
1854 	struct tcmu_dev *udev = TCMU_DEV(dev);
1855 	struct uio_info *info;
1856 	struct tcmu_mailbox *mb;
1857 	int ret = 0;
1858 
1859 	ret = tcmu_update_uio_info(udev);
1860 	if (ret)
1861 		return ret;
1862 
1863 	info = &udev->uio_info;
1864 
1865 	mutex_lock(&udev->cmdr_lock);
1866 	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1867 	mutex_unlock(&udev->cmdr_lock);
1868 	if (!udev->data_bitmap) {
1869 		ret = -ENOMEM;
1870 		goto err_bitmap_alloc;
1871 	}
1872 
1873 	udev->mb_addr = vzalloc(CMDR_SIZE);
1874 	if (!udev->mb_addr) {
1875 		ret = -ENOMEM;
1876 		goto err_vzalloc;
1877 	}
1878 
1879 	/* mailbox fits in first part of CMDR space */
1880 	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1881 	udev->data_off = CMDR_SIZE;
1882 	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
1883 	udev->dbi_thresh = 0; /* Default in Idle state */
1884 
1885 	/* Initialise the mailbox of the ring buffer */
1886 	mb = udev->mb_addr;
1887 	mb->version = TCMU_MAILBOX_VERSION;
1888 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1889 	mb->cmdr_off = CMDR_OFF;
1890 	mb->cmdr_size = udev->cmdr_size;
1891 
1892 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
1893 	WARN_ON(udev->data_size % PAGE_SIZE);
1894 	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1895 
1896 	info->version = __stringify(TCMU_MAILBOX_VERSION);
1897 
1898 	info->mem[0].name = "tcm-user command & data buffer";
1899 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1900 	info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1901 	info->mem[0].memtype = UIO_MEM_NONE;
1902 
1903 	info->irqcontrol = tcmu_irqcontrol;
1904 	info->irq = UIO_IRQ_CUSTOM;
1905 
1906 	info->mmap = tcmu_mmap;
1907 	info->open = tcmu_open;
1908 	info->release = tcmu_release;
1909 
1910 	ret = uio_register_device(tcmu_root_device, info);
1911 	if (ret)
1912 		goto err_register;
1913 
1914 	/* User can set hw_block_size before enabling the device */
1915 	if (dev->dev_attrib.hw_block_size == 0)
1916 		dev->dev_attrib.hw_block_size = 512;
1917 	/* Other attributes can be configured in userspace */
1918 	if (!dev->dev_attrib.hw_max_sectors)
1919 		dev->dev_attrib.hw_max_sectors = 128;
1920 	if (!dev->dev_attrib.emulate_write_cache)
1921 		dev->dev_attrib.emulate_write_cache = 0;
1922 	dev->dev_attrib.hw_queue_depth = 128;
1923 
1924 	/* If the user didn't explicitly disable netlink reply support,
1925 	 * use the module-scope setting.
1926 	 */
1927 	if (udev->nl_reply_supported >= 0)
1928 		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1929 
1930 	/*
1931 	 * Get a ref in case userspace does a close on the uio device before
1932 	 * LIO has initiated tcmu_free_device.
1933 	 */
1934 	kref_get(&udev->kref);
1935 
1936 	ret = tcmu_send_dev_add_event(udev);
1937 	if (ret)
1938 		goto err_netlink;
1939 
1940 	mutex_lock(&root_udev_mutex);
1941 	list_add(&udev->node, &root_udev);
1942 	mutex_unlock(&root_udev_mutex);
1943 
1944 	return 0;
1945 
1946 err_netlink:
1947 	kref_put(&udev->kref, tcmu_dev_kref_release);
1948 	uio_unregister_device(&udev->uio_info);
1949 err_register:
1950 	vfree(udev->mb_addr);
1951 	udev->mb_addr = NULL;
1952 err_vzalloc:
1953 	bitmap_free(udev->data_bitmap);
1954 	udev->data_bitmap = NULL;
1955 err_bitmap_alloc:
1956 	kfree(info->name);
1957 	info->name = NULL;
1958 
1959 	return ret;
1960 }
1961 
1962 static void tcmu_free_device(struct se_device *dev)
1963 {
1964 	struct tcmu_dev *udev = TCMU_DEV(dev);
1965 
1966 	/* release ref from init */
1967 	kref_put(&udev->kref, tcmu_dev_kref_release);
1968 }
1969 
1970 static void tcmu_destroy_device(struct se_device *dev)
1971 {
1972 	struct tcmu_dev *udev = TCMU_DEV(dev);
1973 
1974 	del_timer_sync(&udev->cmd_timer);
1975 	del_timer_sync(&udev->qfull_timer);
1976 
1977 	mutex_lock(&root_udev_mutex);
1978 	list_del(&udev->node);
1979 	mutex_unlock(&root_udev_mutex);
1980 
1981 	tcmu_send_dev_remove_event(udev);
1982 
1983 	uio_unregister_device(&udev->uio_info);
1984 
1985 	/* release ref from configure */
1986 	kref_put(&udev->kref, tcmu_dev_kref_release);
1987 }
1988 
1989 static void tcmu_unblock_dev(struct tcmu_dev *udev)
1990 {
1991 	mutex_lock(&udev->cmdr_lock);
1992 	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
1993 	mutex_unlock(&udev->cmdr_lock);
1994 }
1995 
1996 static void tcmu_block_dev(struct tcmu_dev *udev)
1997 {
1998 	mutex_lock(&udev->cmdr_lock);
1999 
2000 	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2001 		goto unlock;
2002 
2003 	/* complete IO that has executed successfully */
2004 	tcmu_handle_completions(udev);
2005 	/* fail IO waiting to be queued */
2006 	run_qfull_queue(udev, true);
2007 
2008 unlock:
2009 	mutex_unlock(&udev->cmdr_lock);
2010 }
2011 
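/*
 * Reset the ring after a userspace crash or hang: err_level 1 completes
 * unexpired commands with SAM_STAT_BUSY (retryable), any other value
 * (2 via the reset_ring configfs attribute below) fails them with
 * SAM_STAT_CHECK_CONDITION.
 */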
2012 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2013 {
2014 	struct tcmu_mailbox *mb;
2015 	struct tcmu_cmd *cmd;
2016 	int i;
2017 
2018 	mutex_lock(&udev->cmdr_lock);
2019 
2020 	idr_for_each_entry(&udev->commands, cmd, i) {
2021 		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2022 			  cmd->cmd_id, udev->name,
2023 			  test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2024 
2025 		idr_remove(&udev->commands, i);
2026 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2027 			WARN_ON(!cmd->se_cmd);
2028 			list_del_init(&cmd->queue_entry);
2029 			if (err_level == 1) {
2030 				/*
2031 				 * Userspace was not able to start the
2032 				 * command or it is retryable.
2033 				 */
2034 				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2035 			} else {
2036 				/* hard failure */
2037 				target_complete_cmd(cmd->se_cmd,
2038 						    SAM_STAT_CHECK_CONDITION);
2039 			}
2040 		}
2041 		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2042 		tcmu_free_cmd(cmd);
2043 	}
2044 
2045 	mb = udev->mb_addr;
2046 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2047 	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2048 		 mb->cmd_tail, mb->cmd_head);
2049 
2050 	udev->cmdr_last_cleaned = 0;
2051 	mb->cmd_tail = 0;
2052 	mb->cmd_head = 0;
2053 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2054 	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2055 
2056 	del_timer(&udev->cmd_timer);
2057 
2058 	run_qfull_queue(udev, false);
2059 
2060 	mutex_unlock(&udev->cmdr_lock);
2061 }
2062 
2063 enum {
2064 	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2065 	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2066 };
2067 
2068 static match_table_t tokens = {
2069 	{Opt_dev_config, "dev_config=%s"},
2070 	{Opt_dev_size, "dev_size=%s"},
2071 	{Opt_hw_block_size, "hw_block_size=%d"},
2072 	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
2073 	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
2074 	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
2075 	{Opt_err, NULL}
2076 };
2077 
2078 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2079 {
2080 	int val, ret;
2081 
2082 	ret = match_int(arg, &val);
2083 	if (ret < 0) {
2084 		pr_err("match_int() failed for dev attrib. Error %d.\n",
2085 		       ret);
2086 		return ret;
2087 	}
2088 
2089 	if (val <= 0) {
2090 		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2091 		       val);
2092 		return -EINVAL;
2093 	}
2094 	*dev_attrib = val;
2095 	return 0;
2096 }
2097 
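/*
 * Illustrative arithmetic, assuming 4 KiB pages (one data block per
 * page): max_data_area_mb=64 converts to 64 * 256 = 16384 data blocks.
 */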
2098 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2099 {
2100 	int val, ret;
2101 
2102 	ret = match_int(arg, &val);
2103 	if (ret < 0) {
2104 		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2105 		       ret);
2106 		return ret;
2107 	}
2108 
2109 	if (val <= 0) {
2110 		pr_err("Invalid max_data_area %d.\n", val);
2111 		return -EINVAL;
2112 	}
2113 
2114 	mutex_lock(&udev->cmdr_lock);
2115 	if (udev->data_bitmap) {
2116 		pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
2117 		ret = -EINVAL;
2118 		goto unlock;
2119 	}
2120 
2121 	udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2122 	if (udev->max_blocks > tcmu_global_max_blocks) {
2123 		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2124 		       val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2125 		udev->max_blocks = tcmu_global_max_blocks;
2126 	}
2127 
2128 unlock:
2129 	mutex_unlock(&udev->cmdr_lock);
2130 	return ret;
2131 }
2132 
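/*
 * Options arrive as one comma- or newline-separated string, typically
 * written to the device's configfs "control" file, e.g. (hypothetical
 * values) "dev_config=foo/bar,dev_size=1073741824,max_data_area_mb=64".
 */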
2133 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2134 		const char *page, ssize_t count)
2135 {
2136 	struct tcmu_dev *udev = TCMU_DEV(dev);
2137 	char *orig, *ptr, *opts;
2138 	substring_t args[MAX_OPT_ARGS];
2139 	int ret = 0, token;
2140 
2141 	opts = kstrdup(page, GFP_KERNEL);
2142 	if (!opts)
2143 		return -ENOMEM;
2144 
2145 	orig = opts;
2146 
2147 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
2148 		if (!*ptr)
2149 			continue;
2150 
2151 		token = match_token(ptr, tokens, args);
2152 		switch (token) {
2153 		case Opt_dev_config:
2154 			if (match_strlcpy(udev->dev_config, &args[0],
2155 					  TCMU_CONFIG_LEN) == 0) {
2156 				ret = -EINVAL;
2157 				break;
2158 			}
2159 			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2160 			break;
2161 		case Opt_dev_size:
2162 			ret = match_u64(&args[0], &udev->dev_size);
2163 			if (ret < 0)
2164 				pr_err("match_u64() failed for dev_size=. Error %d.\n",
2165 				       ret);
2166 			break;
2167 		case Opt_hw_block_size:
2168 			ret = tcmu_set_dev_attrib(&args[0],
2169 					&(dev->dev_attrib.hw_block_size));
2170 			break;
2171 		case Opt_hw_max_sectors:
2172 			ret = tcmu_set_dev_attrib(&args[0],
2173 					&(dev->dev_attrib.hw_max_sectors));
2174 			break;
2175 		case Opt_nl_reply_supported:
2176 			ret = match_int(&args[0], &udev->nl_reply_supported);
2177 			if (ret < 0)
2178 				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2179 				       ret);
2180 			break;
2181 		case Opt_max_data_area_mb:
2182 			ret = tcmu_set_max_blocks_param(udev, &args[0]);
2183 			break;
2184 		default:
2185 			break;
2186 		}
2187 
2188 		if (ret)
2189 			break;
2190 	}
2191 
2192 	kfree(orig);
2193 	return (!ret) ? count : ret;
2194 }
2195 
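/* Example output (illustrative values): "Config: foo/bar Size: 1073741824 MaxDataAreaMB: 64" */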
2196 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2197 {
2198 	struct tcmu_dev *udev = TCMU_DEV(dev);
2199 	ssize_t bl = 0;
2200 
2201 	bl += sprintf(b + bl, "Config: %s ",
2202 		     udev->dev_config[0] ? udev->dev_config : "NULL");
2203 	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2204 	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2205 		      TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2206 
2207 	return bl;
2208 }
2209 
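/*
 * Returns the last addressable LBA rather than the block count, e.g.
 * (illustrative) a 1 GiB dev_size with 512-byte blocks yields 2097151.
 */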
2210 static sector_t tcmu_get_blocks(struct se_device *dev)
2211 {
2212 	struct tcmu_dev *udev = TCMU_DEV(dev);
2213 
2214 	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2215 		       dev->dev_attrib.block_size);
2216 }
2217 
2218 static sense_reason_t
2219 tcmu_parse_cdb(struct se_cmd *cmd)
2220 {
2221 	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2222 }
2223 
2224 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2225 {
2226 	struct se_dev_attrib *da = container_of(to_config_group(item),
2227 					struct se_dev_attrib, da_group);
2228 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2229 
2230 	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2231 }
2232 
2233 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2234 				       size_t count)
2235 {
2236 	struct se_dev_attrib *da = container_of(to_config_group(item),
2237 					struct se_dev_attrib, da_group);
2238 	struct tcmu_dev *udev = container_of(da->da_dev,
2239 					struct tcmu_dev, se_dev);
2240 	u32 val;
2241 	int ret;
2242 
2243 	if (da->da_dev->export_count) {
2244 		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2245 		return -EINVAL;
2246 	}
2247 
2248 	ret = kstrtou32(page, 0, &val);
2249 	if (ret < 0)
2250 		return ret;
2251 
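	/* The configfs value is in seconds; store it in milliseconds. */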
2252 	udev->cmd_time_out = val * MSEC_PER_SEC;
2253 	return count;
2254 }
2255 CONFIGFS_ATTR(tcmu_, cmd_time_out);
2256 
2257 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2258 {
2259 	struct se_dev_attrib *da = container_of(to_config_group(item),
2260 						struct se_dev_attrib, da_group);
2261 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2262 
2263 	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2264 			udev->qfull_time_out :
2265 			udev->qfull_time_out / MSEC_PER_SEC);
2266 }
2267 
2268 static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2269 					 const char *page, size_t count)
2270 {
2271 	struct se_dev_attrib *da = container_of(to_config_group(item),
2272 					struct se_dev_attrib, da_group);
2273 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2274 	s32 val;
2275 	int ret;
2276 
2277 	ret = kstrtos32(page, 0, &val);
2278 	if (ret < 0)
2279 		return ret;
2280 
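	/*
	 * val >= 0 is a timeout in seconds; -1 is a sentinel stored
	 * unconverted (shown as-is by the _show handler above).
	 */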
2281 	if (val >= 0) {
2282 		udev->qfull_time_out = val * MSEC_PER_SEC;
2283 	} else if (val == -1) {
2284 		udev->qfull_time_out = val;
2285 	} else {
2286 		pr_err("Invalid qfull timeout value %d\n", val);
2287 		return -EINVAL;
2288 	}
2289 	return count;
2290 }
2291 CONFIGFS_ATTR(tcmu_, qfull_time_out);
2292 
2293 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2294 {
2295 	struct se_dev_attrib *da = container_of(to_config_group(item),
2296 						struct se_dev_attrib, da_group);
2297 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2298 
2299 	return snprintf(page, PAGE_SIZE, "%u\n",
2300 			TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2301 }
2302 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2303 
2304 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2305 {
2306 	struct se_dev_attrib *da = container_of(to_config_group(item),
2307 						struct se_dev_attrib, da_group);
2308 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2309 
2310 	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2311 }
2312 
2313 static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2314 				      const char *reconfig_data)
2315 {
2316 	struct sk_buff *skb = NULL;
2317 	void *msg_header = NULL;
2318 	int ret = 0;
2319 
2320 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2321 				      &skb, &msg_header);
2322 	if (ret < 0)
2323 		return ret;
2324 	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2325 	if (ret < 0) {
2326 		nlmsg_free(skb);
2327 		return ret;
2328 	}
2329 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2330 				       skb, msg_header);
2331 }
2332 
2334 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2335 				     size_t count)
2336 {
2337 	struct se_dev_attrib *da = container_of(to_config_group(item),
2338 						struct se_dev_attrib, da_group);
2339 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2340 	int ret, len;
2341 
2342 	len = strlen(page);
2343 	if (!len || len > TCMU_CONFIG_LEN - 1)
2344 		return -EINVAL;
2345 
2346 	/* Check if the device has already been configured */
2347 	if (target_dev_configured(&udev->se_dev)) {
2348 		ret = tcmu_send_dev_config_event(udev, page);
2349 		if (ret) {
2350 			pr_err("Unable to reconfigure device\n");
2351 			return ret;
2352 		}
2353 		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2354 
2355 		ret = tcmu_update_uio_info(udev);
2356 		if (ret)
2357 			return ret;
2358 		return count;
2359 	}
2360 	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2361 
2362 	return count;
2363 }
2364 CONFIGFS_ATTR(tcmu_, dev_config);
2365 
2366 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2367 {
2368 	struct se_dev_attrib *da = container_of(to_config_group(item),
2369 						struct se_dev_attrib, da_group);
2370 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2371 
2372 	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2373 }
2374 
2375 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2376 {
2377 	struct sk_buff *skb = NULL;
2378 	void *msg_header = NULL;
2379 	int ret = 0;
2380 
2381 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2382 				      &skb, &msg_header);
2383 	if (ret < 0)
2384 		return ret;
2385 	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2386 				size, TCMU_ATTR_PAD);
2387 	if (ret < 0) {
2388 		nlmsg_free(skb);
2389 		return ret;
2390 	}
2391 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2392 				       skb, msg_header);
2393 }
2394 
2395 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2396 				   size_t count)
2397 {
2398 	struct se_dev_attrib *da = container_of(to_config_group(item),
2399 						struct se_dev_attrib, da_group);
2400 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2401 	u64 val;
2402 	int ret;
2403 
2404 	ret = kstrtou64(page, 0, &val);
2405 	if (ret < 0)
2406 		return ret;
2407 
2408 	/* Check if the device has already been configured */
2409 	if (target_dev_configured(&udev->se_dev)) {
2410 		ret = tcmu_send_dev_size_event(udev, val);
2411 		if (ret) {
2412 			pr_err("Unable to reconfigure device\n");
2413 			return ret;
2414 		}
2415 	}
2416 	udev->dev_size = val;
2417 	return count;
2418 }
2419 CONFIGFS_ATTR(tcmu_, dev_size);
2420 
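/*
 * A negative value written here pins netlink replies off for this
 * device; a value >= 0 is overwritten with the module-wide
 * tcmu_kern_cmd_reply_supported setting when the device is configured
 * (see tcmu_configure_device()).
 */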
2421 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2422 		char *page)
2423 {
2424 	struct se_dev_attrib *da = container_of(to_config_group(item),
2425 						struct se_dev_attrib, da_group);
2426 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2427 
2428 	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2429 }
2430 
2431 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2432 		const char *page, size_t count)
2433 {
2434 	struct se_dev_attrib *da = container_of(to_config_group(item),
2435 						struct se_dev_attrib, da_group);
2436 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2437 	s8 val;
2438 	int ret;
2439 
2440 	ret = kstrtos8(page, 0, &val);
2441 	if (ret < 0)
2442 		return ret;
2443 
2444 	udev->nl_reply_supported = val;
2445 	return count;
2446 }
2447 CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2448 
2449 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2450 					     char *page)
2451 {
2452 	struct se_dev_attrib *da = container_of(to_config_group(item),
2453 					struct se_dev_attrib, da_group);
2454 
2455 	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2456 }
2457 
2458 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2459 {
2460 	struct sk_buff *skb = NULL;
2461 	void *msg_header = NULL;
2462 	int ret = 0;
2463 
2464 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2465 				      &skb, &msg_header);
2466 	if (ret < 0)
2467 		return ret;
2468 	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2469 	if (ret < 0) {
2470 		nlmsg_free(skb);
2471 		return ret;
2472 	}
2473 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2474 				       skb, msg_header);
2475 }
2476 
2477 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2478 					      const char *page, size_t count)
2479 {
2480 	struct se_dev_attrib *da = container_of(to_config_group(item),
2481 					struct se_dev_attrib, da_group);
2482 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2483 	u8 val;
2484 	int ret;
2485 
2486 	ret = kstrtou8(page, 0, &val);
2487 	if (ret < 0)
2488 		return ret;
2489 
2490 	/* Check if the device has already been configured */
2491 	if (target_dev_configured(&udev->se_dev)) {
2492 		ret = tcmu_send_emulate_write_cache(udev, val);
2493 		if (ret) {
2494 			pr_err("Unable to reconfigure device\n");
2495 			return ret;
2496 		}
2497 	}
2498 
2499 	da->emulate_write_cache = val;
2500 	return count;
2501 }
2502 CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2503 
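/*
 * block_dev: writing 1 blocks the device, completing finished ring
 * entries and failing queued I/O; writing 0 unblocks it. The device
 * must already be configured.
 */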
2504 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2505 {
2506 	struct se_device *se_dev = container_of(to_config_group(item),
2507 						struct se_device,
2508 						dev_action_group);
2509 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2510 
2511 	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2512 		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2513 	else
2514 		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2515 }
2516 
2517 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2518 				    size_t count)
2519 {
2520 	struct se_device *se_dev = container_of(to_config_group(item),
2521 						struct se_device,
2522 						dev_action_group);
2523 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2524 	u8 val;
2525 	int ret;
2526 
2527 	if (!target_dev_configured(&udev->se_dev)) {
2528 		pr_err("Device is not configured.\n");
2529 		return -EINVAL;
2530 	}
2531 
2532 	ret = kstrtou8(page, 0, &val);
2533 	if (ret < 0)
2534 		return ret;
2535 
2536 	if (val > 1) {
2537 		pr_err("Invalid block value %d\n", val);
2538 		return -EINVAL;
2539 	}
2540 
2541 	if (!val)
2542 		tcmu_unblock_dev(udev);
2543 	else
2544 		tcmu_block_dev(udev);
2545 	return count;
2546 }
2547 CONFIGFS_ATTR(tcmu_, block_dev);
2548 
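/*
 * reset_ring: writing 1 completes unexpired inflight commands with
 * BUSY (retryable), writing 2 fails them hard; typically used together
 * with block_dev after a userspace handler crash (usage note, not from
 * the original source).
 */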
2549 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2550 				     size_t count)
2551 {
2552 	struct se_device *se_dev = container_of(to_config_group(item),
2553 						struct se_device,
2554 						dev_action_group);
2555 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2556 	u8 val;
2557 	int ret;
2558 
2559 	if (!target_dev_configured(&udev->se_dev)) {
2560 		pr_err("Device is not configured.\n");
2561 		return -EINVAL;
2562 	}
2563 
2564 	ret = kstrtou8(page, 0, &val);
2565 	if (ret < 0)
2566 		return ret;
2567 
2568 	if (val != 1 && val != 2) {
2569 		pr_err("Invalid reset ring value %d\n", val);
2570 		return -EINVAL;
2571 	}
2572 
2573 	tcmu_reset_ring(udev, val);
2574 	return count;
2575 }
2576 CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2577 
2578 static struct configfs_attribute *tcmu_attrib_attrs[] = {
2579 	&tcmu_attr_cmd_time_out,
2580 	&tcmu_attr_qfull_time_out,
2581 	&tcmu_attr_max_data_area_mb,
2582 	&tcmu_attr_dev_config,
2583 	&tcmu_attr_dev_size,
2584 	&tcmu_attr_emulate_write_cache,
2585 	&tcmu_attr_nl_reply_supported,
2586 	NULL,
2587 };
2588 
2589 static struct configfs_attribute **tcmu_attrs;
2590 
2591 static struct configfs_attribute *tcmu_action_attrs[] = {
2592 	&tcmu_attr_block_dev,
2593 	&tcmu_attr_reset_ring,
2594 	NULL,
2595 };
2596 
2597 static struct target_backend_ops tcmu_ops = {
2598 	.name			= "user",
2599 	.owner			= THIS_MODULE,
2600 	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
2601 	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
2602 				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
2603 	.attach_hba		= tcmu_attach_hba,
2604 	.detach_hba		= tcmu_detach_hba,
2605 	.alloc_device		= tcmu_alloc_device,
2606 	.configure_device	= tcmu_configure_device,
2607 	.destroy_device		= tcmu_destroy_device,
2608 	.free_device		= tcmu_free_device,
2609 	.parse_cdb		= tcmu_parse_cdb,
2610 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
2611 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
2612 	.get_device_type	= sbc_get_device_type,
2613 	.get_blocks		= tcmu_get_blocks,
2614 	.tb_dev_action_attrs	= tcmu_action_attrs,
2615 };
2616 
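/*
 * Reclaim data-area blocks once the global block count exceeds
 * tcmu_global_max_blocks: shrink each configured, non-idle device's
 * data area down to its last used block, and reschedule in 5 seconds
 * if we are still over the limit.
 */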
2617 static void find_free_blocks(void)
2618 {
2619 	struct tcmu_dev *udev;
2620 	loff_t off;
2621 	u32 start, end, block, total_freed = 0;
2622 
2623 	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2624 		return;
2625 
2626 	mutex_lock(&root_udev_mutex);
2627 	list_for_each_entry(udev, &root_udev, node) {
2628 		mutex_lock(&udev->cmdr_lock);
2629 
2630 		if (!target_dev_configured(&udev->se_dev)) {
2631 			mutex_unlock(&udev->cmdr_lock);
2632 			continue;
2633 		}
2634 
2635 		/* Try to complete the finished commands first */
2636 		tcmu_handle_completions(udev);
2637 
2638 		/* Skip udevs that are idle */
2639 		if (!udev->dbi_thresh) {
2640 			mutex_unlock(&udev->cmdr_lock);
2641 			continue;
2642 		}
2643 
2644 		end = udev->dbi_max + 1;
2645 		block = find_last_bit(udev->data_bitmap, end);
2646 		if (block == udev->dbi_max) {
2647 			/*
2648 			 * The last set bit is dbi_max, so it is not
2649 			 * possible to reclaim any blocks.
2650 			 */
2651 			mutex_unlock(&udev->cmdr_lock);
2652 			continue;
2653 		} else if (block == end) {
2654 			/* The current udev will go to the idle state */
2655 			udev->dbi_thresh = start = 0;
2656 			udev->dbi_max = 0;
2657 		} else {
2658 			udev->dbi_thresh = start = block + 1;
2659 			udev->dbi_max = block;
2660 		}
2661 
2662 		/* Truncate the data area starting at offset off */
2663 		off = udev->data_off + start * DATA_BLOCK_SIZE;
2664 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2665 
2666 		/* Release the block pages */
2667 		tcmu_blocks_release(&udev->data_blocks, start, end);
2668 		mutex_unlock(&udev->cmdr_lock);
2669 
2670 		total_freed += end - start;
2671 		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2672 			 total_freed, udev->name);
2673 	}
2674 	mutex_unlock(&root_udev_mutex);
2675 
2676 	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2677 		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2678 }
2679 
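/*
 * Walk devices whose timers fired: expire inflight ring commands (only
 * when cmd_time_out is enabled) and qfull-queued commands, then re-arm
 * the timers.
 */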
2680 static void check_timedout_devices(void)
2681 {
2682 	struct tcmu_dev *udev, *tmp_dev;
2683 	struct tcmu_cmd *cmd, *tmp_cmd;
2684 	LIST_HEAD(devs);
2685 
2686 	spin_lock_bh(&timed_out_udevs_lock);
2687 	list_splice_init(&timed_out_udevs, &devs);
2688 
2689 	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2690 		list_del_init(&udev->timedout_entry);
2691 		spin_unlock_bh(&timed_out_udevs_lock);
2692 
2693 		mutex_lock(&udev->cmdr_lock);
2694 
2695 		/*
2696 		 * If cmd_time_out is disabled but the qfull timeout is set,
2697 		 * the deadline only reflects the qfull timeout, so ignore it.
2698 		 */
2699 		if (udev->cmd_time_out) {
2700 			list_for_each_entry_safe(cmd, tmp_cmd,
2701 						 &udev->inflight_queue,
2702 						 queue_entry) {
2703 				tcmu_check_expired_ring_cmd(cmd);
2704 			}
2705 			tcmu_set_next_deadline(&udev->inflight_queue,
2706 					       &udev->cmd_timer);
2707 		}
2708 		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
2709 					 queue_entry) {
2710 			tcmu_check_expired_queue_cmd(cmd);
2711 		}
2712 		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2713 
2714 		mutex_unlock(&udev->cmdr_lock);
2715 
2716 		spin_lock_bh(&timed_out_udevs_lock);
2717 	}
2718 
2719 	spin_unlock_bh(&timed_out_udevs_lock);
2720 }
2721 
2722 static void tcmu_unmap_work_fn(struct work_struct *work)
2723 {
2724 	check_timedout_devices();
2725 	find_free_blocks();
2726 }
2727 
2728 static int __init tcmu_module_init(void)
2729 {
2730 	int ret, i, k, len = 0;
2731 
2732 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2733 
2734 	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2735 
2736 	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2737 				sizeof(struct tcmu_cmd),
2738 				__alignof__(struct tcmu_cmd),
2739 				0, NULL);
2740 	if (!tcmu_cmd_cache)
2741 		return -ENOMEM;
2742 
2743 	tcmu_root_device = root_device_register("tcm_user");
2744 	if (IS_ERR(tcmu_root_device)) {
2745 		ret = PTR_ERR(tcmu_root_device);
2746 		goto out_free_cache;
2747 	}
2748 
2749 	ret = genl_register_family(&tcmu_genl_family);
2750 	if (ret < 0)
2751 		goto out_unreg_device;
2753 
2754 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
2755 		len += sizeof(struct configfs_attribute *);
2756 	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
2757 		len += sizeof(struct configfs_attribute *);
2758 	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
2759 		len += sizeof(struct configfs_attribute *);
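	/* one extra slot for the terminating NULL entry (kzalloc leaves it zeroed) */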
2760 	len += sizeof(struct configfs_attribute *);
2761 
2762 	tcmu_attrs = kzalloc(len, GFP_KERNEL);
2763 	if (!tcmu_attrs) {
2764 		ret = -ENOMEM;
2765 		goto out_unreg_genl;
2766 	}
2767 
2768 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
2769 		tcmu_attrs[i] = passthrough_attrib_attrs[i];
2770 	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
2771 		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
2772 	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
2773 		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
2774 	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
2775 
2776 	ret = transport_backend_register(&tcmu_ops);
2777 	if (ret)
2778 		goto out_attrs;
2779 
2780 	return 0;
2781 
2782 out_attrs:
2783 	kfree(tcmu_attrs);
2784 out_unreg_genl:
2785 	genl_unregister_family(&tcmu_genl_family);
2786 out_unreg_device:
2787 	root_device_unregister(tcmu_root_device);
2788 out_free_cache:
2789 	kmem_cache_destroy(tcmu_cmd_cache);
2790 
2791 	return ret;
2792 }
2793 
2794 static void __exit tcmu_module_exit(void)
2795 {
2796 	cancel_delayed_work_sync(&tcmu_unmap_work);
2797 	target_backend_unregister(&tcmu_ops);
2798 	kfree(tcmu_attrs);
2799 	genl_unregister_family(&tcmu_genl_family);
2800 	root_device_unregister(tcmu_root_device);
2801 	kmem_cache_destroy(tcmu_cmd_cache);
2802 }
2803 
2804 MODULE_DESCRIPTION("TCM USER subsystem plugin");
2805 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
2806 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
2807 MODULE_LICENSE("GPL");
2808 
2809 module_init(tcmu_module_init);
2810 module_exit(tcmu_module_exit);
2811