1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3 * Filename: target_core_configfs.c
4 *
5 * This file contains ConfigFS logic for the Generic Target Engine project.
6 *
7 * (c) Copyright 2008-2013 Datera, Inc.
8 *
9 * Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * based on configfs Copyright (C) 2005 Oracle. All rights reserved.
12 *
13 ****************************************************************************/
14
15 #include <linux/kstrtox.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <generated/utsrelease.h>
19 #include <linux/utsname.h>
20 #include <linux/init.h>
21 #include <linux/fs.h>
22 #include <linux/namei.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/delay.h>
26 #include <linux/unistd.h>
27 #include <linux/string.h>
28 #include <linux/parser.h>
29 #include <linux/syscalls.h>
30 #include <linux/configfs.h>
31 #include <linux/spinlock.h>
32
33 #include <target/target_core_base.h>
34 #include <target/target_core_backend.h>
35 #include <target/target_core_fabric.h>
36
37 #include "target_core_internal.h"
38 #include "target_core_alua.h"
39 #include "target_core_pr.h"
40 #include "target_core_rd.h"
41 #include "target_core_xcopy.h"
42
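/*
 * TB_CIT_SETUP()/TB_CIT_SETUP_DRV() below generate small per-backend helpers
 * that fill in a struct config_item_type with the given item/group ops and
 * attribute array; the _DRV variant pulls the attribute array from the
 * backend's target_backend_ops instead of taking it as a macro argument.
 */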
43 #define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
44 static void target_core_setup_##_name##_cit(struct target_backend *tb) \
45 { \
46 struct config_item_type *cit = &tb->tb_##_name##_cit; \
47 \
48 cit->ct_item_ops = _item_ops; \
49 cit->ct_group_ops = _group_ops; \
50 cit->ct_attrs = _attrs; \
51 cit->ct_owner = tb->ops->owner; \
52 pr_debug("Setup generic %s\n", __stringify(_name)); \
53 }
54
55 #define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
56 static void target_core_setup_##_name##_cit(struct target_backend *tb) \
57 { \
58 struct config_item_type *cit = &tb->tb_##_name##_cit; \
59 \
60 cit->ct_item_ops = _item_ops; \
61 cit->ct_group_ops = _group_ops; \
62 cit->ct_attrs = tb->ops->tb_##_name##_attrs; \
63 cit->ct_owner = tb->ops->owner; \
64 pr_debug("Setup generic %s\n", __stringify(_name)); \
65 }
66
67 extern struct t10_alua_lu_gp *default_lu_gp;
68
69 static LIST_HEAD(g_tf_list);
70 static DEFINE_MUTEX(g_tf_lock);
71
72 static struct config_group target_core_hbagroup;
73 static struct config_group alua_group;
74 static struct config_group alua_lu_gps_group;
75
76 static unsigned int target_devices;
77 static DEFINE_MUTEX(target_devices_lock);
78
79 static inline struct se_hba *
80 item_to_hba(struct config_item *item)
81 {
82 return container_of(to_config_group(item), struct se_hba, hba_group);
83 }
84
85 /*
86 * Attributes for /sys/kernel/config/target/
87 */
88 static ssize_t target_core_item_version_show(struct config_item *item,
89 char *page)
90 {
91 return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
92 " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
93 utsname()->sysname, utsname()->machine);
94 }
95
96 CONFIGFS_ATTR_RO(target_core_item_, version);
97
98 char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
99 static char db_root_stage[DB_ROOT_LEN];
100
101 static ssize_t target_core_item_dbroot_show(struct config_item *item,
102 char *page)
103 {
104 return sprintf(page, "%s\n", db_root);
105 }
106
107 static ssize_t target_core_item_dbroot_store(struct config_item *item,
108 const char *page, size_t count)
109 {
110 ssize_t read_bytes;
111 ssize_t r = -EINVAL;
112 struct path path = {};
113
114 mutex_lock(&target_devices_lock);
115 if (target_devices) {
116 pr_err("db_root: cannot be changed because it's in use\n");
117 goto unlock;
118 }
119
120 if (count > (DB_ROOT_LEN - 1)) {
121 pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
122 (int)count, DB_ROOT_LEN - 1);
123 goto unlock;
124 }
125
126 read_bytes = scnprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
127 if (!read_bytes)
128 goto unlock;
129
130 if (db_root_stage[read_bytes - 1] == '\n')
131 db_root_stage[read_bytes - 1] = '\0';
132
133 /* validate new db root before accepting it */
134 r = kern_path(db_root_stage, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
135 if (r) {
136 pr_err("db_root: cannot open: %s\n", db_root_stage);
137 if (r == -ENOTDIR)
138 pr_err("db_root: not a directory: %s\n", db_root_stage);
139 goto unlock;
140 }
141 path_put(&path);
142
143 strscpy(db_root, db_root_stage);
144 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
145
146 r = read_bytes;
147
148 unlock:
149 mutex_unlock(&target_devices_lock);
150 return r;
151 }
152
153 CONFIGFS_ATTR(target_core_item_, dbroot);
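/*
 * Usage sketch (illustrative path): the metadata root defaults to
 * DB_ROOT_DEFAULT and may only be redirected while no target devices
 * have been configured, e.g.:
 *
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 *
 * The new value must name an existing directory.
 */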
154
155 static struct target_fabric_configfs *target_core_get_fabric(
156 const char *name)
157 {
158 struct target_fabric_configfs *tf;
159
160 if (!name)
161 return NULL;
162
163 mutex_lock(&g_tf_lock);
164 list_for_each_entry(tf, &g_tf_list, tf_list) {
165 const char *cmp_name = tf->tf_ops->fabric_alias;
166 if (!cmp_name)
167 cmp_name = tf->tf_ops->fabric_name;
168 if (!strcmp(cmp_name, name)) {
169 atomic_inc(&tf->tf_access_cnt);
170 mutex_unlock(&g_tf_lock);
171 return tf;
172 }
173 }
174 mutex_unlock(&g_tf_lock);
175
176 return NULL;
177 }
178
179 /*
180 * Called from struct target_core_group_ops->make_group()
181 */
182 static struct config_group *target_core_register_fabric(
183 struct config_group *group,
184 const char *name)
185 {
186 struct target_fabric_configfs *tf;
187 int ret;
188
189 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
190 " %s\n", group, name);
191
192 tf = target_core_get_fabric(name);
193 if (!tf) {
194 pr_debug("target_core_register_fabric() trying autoload for %s\n",
195 name);
196
197 /*
198 * Below are some hardcoded request_module() calls to automatically
199 * load fabric modules when the following is called:
200 *
201 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
202 *
203 * Note that this does not limit which TCM fabric module can be
204 * registered, but simply provides auto loading logic for mkdir(2)
205 * system calls with known TCM fabric module names.
206 */
207
208 if (!strncmp(name, "iscsi", 5)) {
209 /*
210 * Automatically load the LIO Target fabric module when the
211 * following is called:
212 *
213 * mkdir -p $CONFIGFS/target/iscsi
214 */
215 ret = request_module("iscsi_target_mod");
216 if (ret < 0) {
217 pr_debug("request_module() failed for"
218 " iscsi_target_mod.ko: %d\n", ret);
219 return ERR_PTR(-EINVAL);
220 }
221 } else if (!strncmp(name, "loopback", 8)) {
222 /*
223 * Automatically load the tcm_loop fabric module when the
224 * following is called:
225 *
226 * mkdir -p $CONFIGFS/target/loopback
227 */
228 ret = request_module("tcm_loop");
229 if (ret < 0) {
230 pr_debug("request_module() failed for"
231 " tcm_loop.ko: %d\n", ret);
232 return ERR_PTR(-EINVAL);
233 }
234 }
235
236 tf = target_core_get_fabric(name);
237 }
238
239 if (!tf) {
240 pr_debug("target_core_get_fabric() failed for %s\n",
241 name);
242 return ERR_PTR(-EINVAL);
243 }
244 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
245 " %s\n", tf->tf_ops->fabric_name);
246 /*
247 * On a successful target_core_get_fabric() lookup, the returned
248 * struct target_fabric_configfs *tf will contain a usage reference.
249 */
250 pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
251 &tf->tf_wwn_cit);
252
253 config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
254
255 config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
256 &tf->tf_discovery_cit);
257 configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
258
259 pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
260 config_item_name(&tf->tf_group.cg_item));
261 return &tf->tf_group;
262 }
263
264 /*
265 * Called from struct target_core_group_ops->drop_item()
266 */
267 static void target_core_deregister_fabric(
268 struct config_group *group,
269 struct config_item *item)
270 {
271 struct target_fabric_configfs *tf = container_of(
272 to_config_group(item), struct target_fabric_configfs, tf_group);
273
274 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
275 " tf list\n", config_item_name(item));
276
277 pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
278 " %s\n", tf->tf_ops->fabric_name);
279 atomic_dec(&tf->tf_access_cnt);
280
281 pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
282 " %s\n", config_item_name(item));
283
284 configfs_remove_default_groups(&tf->tf_group);
285 config_item_put(item);
286 }
287
288 static const struct configfs_group_operations target_core_fabric_group_ops = {
289 .make_group = &target_core_register_fabric,
290 .drop_item = &target_core_deregister_fabric,
291 };
292
293 /*
294 * All item attributes appearing in /sys/kernel/config/target/ appear here.
295 */
296 static struct configfs_attribute *target_core_fabric_item_attrs[] = {
297 &target_core_item_attr_version,
298 &target_core_item_attr_dbroot,
299 NULL,
300 };
301
302 /*
303 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
304 */
305 static const struct config_item_type target_core_fabrics_item = {
306 .ct_group_ops = &target_core_fabric_group_ops,
307 .ct_attrs = target_core_fabric_item_attrs,
308 .ct_owner = THIS_MODULE,
309 };
310
311 static struct configfs_subsystem target_core_fabrics = {
312 .su_group = {
313 .cg_item = {
314 .ci_namebuf = "target",
315 .ci_type = &target_core_fabrics_item,
316 },
317 },
318 };
319
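/*
 * target_depend_item()/target_undepend_item() allow other kernel code to
 * pin a configfs item (for example a LUN or device group referenced
 * externally) so that userspace cannot rmdir it while the dependency is
 * held.
 */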
320 int target_depend_item(struct config_item *item)
321 {
322 return configfs_depend_item(&target_core_fabrics, item);
323 }
324 EXPORT_SYMBOL(target_depend_item);
325
326 void target_undepend_item(struct config_item *item)
327 {
328 return configfs_undepend_item(item);
329 }
330 EXPORT_SYMBOL(target_undepend_item);
331
332 /*##############################################################################
333 // Start functions called by external Target Fabrics Modules
334 //############################################################################*/
335 static int target_disable_feature(struct se_portal_group *se_tpg)
336 {
337 return 0;
338 }
339
340 static u32 target_default_get_inst_index(struct se_portal_group *se_tpg)
341 {
342 return 1;
343 }
344
345 static u32 target_default_sess_get_index(struct se_session *se_sess)
346 {
347 return 0;
348 }
349
350 static void target_set_default_node_attributes(struct se_node_acl *se_acl)
351 {
352 }
353
354 static int target_default_get_cmd_state(struct se_cmd *se_cmd)
355 {
356 return 0;
357 }
358
359 static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
360 {
361 if (tfo->fabric_alias) {
362 if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
363 pr_err("Passed alias: %s exceeds "
364 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
365 return -EINVAL;
366 }
367 }
368 if (!tfo->fabric_name) {
369 pr_err("Missing tfo->fabric_name\n");
370 return -EINVAL;
371 }
372 if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
373 pr_err("Passed name: %s exceeds "
374 "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
375 return -EINVAL;
376 }
377 if (!tfo->tpg_get_wwn) {
378 pr_err("Missing tfo->tpg_get_wwn()\n");
379 return -EINVAL;
380 }
381 if (!tfo->tpg_get_tag) {
382 pr_err("Missing tfo->tpg_get_tag()\n");
383 return -EINVAL;
384 }
385 if (!tfo->release_cmd) {
386 pr_err("Missing tfo->release_cmd()\n");
387 return -EINVAL;
388 }
389 if (!tfo->write_pending) {
390 pr_err("Missing tfo->write_pending()\n");
391 return -EINVAL;
392 }
393 if (!tfo->queue_data_in) {
394 pr_err("Missing tfo->queue_data_in()\n");
395 return -EINVAL;
396 }
397 if (!tfo->queue_status) {
398 pr_err("Missing tfo->queue_status()\n");
399 return -EINVAL;
400 }
401 if (!tfo->queue_tm_rsp) {
402 pr_err("Missing tfo->queue_tm_rsp()\n");
403 return -EINVAL;
404 }
405 if (!tfo->aborted_task) {
406 pr_err("Missing tfo->aborted_task()\n");
407 return -EINVAL;
408 }
409 if (!tfo->check_stop_free) {
410 pr_err("Missing tfo->check_stop_free()\n");
411 return -EINVAL;
412 }
413 /*
414 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
415 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
416 * target_core_fabric_configfs.c WWN+TPG group context code.
417 */
418 if (!tfo->fabric_make_wwn) {
419 pr_err("Missing tfo->fabric_make_wwn()\n");
420 return -EINVAL;
421 }
422 if (!tfo->fabric_drop_wwn) {
423 pr_err("Missing tfo->fabric_drop_wwn()\n");
424 return -EINVAL;
425 }
426 if (!tfo->fabric_make_tpg) {
427 pr_err("Missing tfo->fabric_make_tpg()\n");
428 return -EINVAL;
429 }
430 if (!tfo->fabric_drop_tpg) {
431 pr_err("Missing tfo->fabric_drop_tpg()\n");
432 return -EINVAL;
433 }
434
435 return 0;
436 }
437
438 static void target_set_default_ops(struct target_core_fabric_ops *tfo)
439 {
440 if (!tfo->tpg_check_demo_mode)
441 tfo->tpg_check_demo_mode = target_disable_feature;
442
443 if (!tfo->tpg_check_demo_mode_cache)
444 tfo->tpg_check_demo_mode_cache = target_disable_feature;
445
446 if (!tfo->tpg_check_demo_mode_write_protect)
447 tfo->tpg_check_demo_mode_write_protect = target_disable_feature;
448
449 if (!tfo->tpg_check_prod_mode_write_protect)
450 tfo->tpg_check_prod_mode_write_protect = target_disable_feature;
451
452 if (!tfo->tpg_get_inst_index)
453 tfo->tpg_get_inst_index = target_default_get_inst_index;
454
455 if (!tfo->sess_get_index)
456 tfo->sess_get_index = target_default_sess_get_index;
457
458 if (!tfo->set_default_node_attributes)
459 tfo->set_default_node_attributes = target_set_default_node_attributes;
460
461 if (!tfo->get_cmd_state)
462 tfo->get_cmd_state = target_default_get_cmd_state;
463 }
464
465 int target_register_template(const struct target_core_fabric_ops *fo)
466 {
467 struct target_core_fabric_ops *tfo;
468 struct target_fabric_configfs *tf;
469 int ret;
470
471 ret = target_fabric_tf_ops_check(fo);
472 if (ret)
473 return ret;
474
475 tf = kzalloc_obj(struct target_fabric_configfs);
476 if (!tf) {
477 pr_err("%s: could not allocate memory!\n", __func__);
478 return -ENOMEM;
479 }
480 tfo = kzalloc_obj(struct target_core_fabric_ops);
481 if (!tfo) {
482 kfree(tf);
483 pr_err("%s: could not allocate memory!\n", __func__);
484 return -ENOMEM;
485 }
486 memcpy(tfo, fo, sizeof(*tfo));
487 target_set_default_ops(tfo);
488
489 INIT_LIST_HEAD(&tf->tf_list);
490 atomic_set(&tf->tf_access_cnt, 0);
491 tf->tf_ops = tfo;
492 target_fabric_setup_cits(tf);
493
494 mutex_lock(&g_tf_lock);
495 list_add_tail(&tf->tf_list, &g_tf_list);
496 mutex_unlock(&g_tf_lock);
497
498 return 0;
499 }
500 EXPORT_SYMBOL(target_register_template);
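/*
 * Registration sketch for a fabric driver (names hypothetical); the ops
 * template is copied into a private struct target_core_fabric_ops, so a
 * static const initializer is sufficient:
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.module		= THIS_MODULE,
 *		.fabric_name	= "myfabric",
 *		...
 *	};
 *
 *	ret = target_register_template(&my_fabric_ops);
 */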
501
502 void target_unregister_template(const struct target_core_fabric_ops *fo)
503 {
504 struct target_fabric_configfs *t;
505
506 mutex_lock(&g_tf_lock);
507 list_for_each_entry(t, &g_tf_list, tf_list) {
508 if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
509 BUG_ON(atomic_read(&t->tf_access_cnt));
510 list_del(&t->tf_list);
511 mutex_unlock(&g_tf_lock);
512 /*
513 * Wait for any outstanding fabric se_deve_entry->rcu_head
514 * callbacks to complete post kfree_rcu(), before allowing
515 * fabric driver unload of TFO->module to proceed.
516 */
517 rcu_barrier();
518 kfree(t->tf_tpg_base_cit.ct_attrs);
519 kfree(t->tf_ops);
520 kfree(t);
521 return;
522 }
523 }
524 mutex_unlock(&g_tf_lock);
525 }
526 EXPORT_SYMBOL(target_unregister_template);
527
528 /*##############################################################################
529 // Stop functions called by external Target Fabrics Modules
530 //############################################################################*/
531
532 static inline struct se_dev_attrib *to_attrib(struct config_item *item)
533 {
534 return container_of(to_config_group(item), struct se_dev_attrib,
535 da_group);
536 }
537
538 /* Start functions for struct config_item_type tb_dev_attrib_cit */
539 #define DEF_CONFIGFS_ATTRIB_SHOW(_name) \
540 static ssize_t _name##_show(struct config_item *item, char *page) \
541 { \
542 return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
543 }
544
545 DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
546 DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
547 DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
548 DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
549 DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
550 DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
551 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
552 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
553 DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
554 DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
555 DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
556 DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
557 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
558 DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
559 DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
560 DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
561 DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
562 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
563 DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
564 DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
565 DEF_CONFIGFS_ATTRIB_SHOW(block_size);
566 DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
567 DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
568 DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
569 DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
570 DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
571 DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
572 DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
573 DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
574 DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
575 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
576 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
577 DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
578 DEF_CONFIGFS_ATTRIB_SHOW(complete_type);
579 DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
580 DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
581 DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
582 DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
583 DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
584
585 #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
586 static ssize_t _name##_store(struct config_item *item, const char *page,\
587 size_t count) \
588 { \
589 struct se_dev_attrib *da = to_attrib(item); \
590 u32 val; \
591 int ret; \
592 \
593 ret = kstrtou32(page, 0, &val); \
594 if (ret < 0) \
595 return ret; \
596 da->_name = val; \
597 return count; \
598 }
599
600 DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
601 DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
602 DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
603 DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
604 DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
605
606 #define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \
607 static ssize_t _name##_store(struct config_item *item, const char *page, \
608 size_t count) \
609 { \
610 struct se_dev_attrib *da = to_attrib(item); \
611 bool flag; \
612 int ret; \
613 \
614 ret = kstrtobool(page, &flag); \
615 if (ret < 0) \
616 return ret; \
617 da->_name = flag; \
618 return count; \
619 }
620
621 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
622 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
623 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
624 DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
625 DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
626 DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
627
628 #define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \
629 static ssize_t _name##_store(struct config_item *item, const char *page,\
630 size_t count) \
631 { \
632 printk_once(KERN_WARNING \
633 "ignoring deprecated %s attribute\n", \
634 __stringify(_name)); \
635 return count; \
636 }
637
638 DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
639 DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
640
641 static void dev_set_t10_wwn_model_alias(struct se_device *dev)
642 {
643 const char *configname;
644
645 configname = config_item_name(&dev->dev_group.cg_item);
646 if (strlen(configname) >= INQUIRY_MODEL_LEN) {
647 pr_warn("dev[%p]: Backstore name '%s' is too long for "
648 "INQUIRY_MODEL, truncating to 15 characters\n", dev,
649 configname);
650 }
651 /*
652 * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
653 * here without potentially breaking existing setups, so continue to
654 * truncate one byte shorter than what can be carried in INQUIRY.
655 */
656 strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
657 }
658
659 static ssize_t emulate_model_alias_store(struct config_item *item,
660 const char *page, size_t count)
661 {
662 struct se_dev_attrib *da = to_attrib(item);
663 struct se_device *dev = da->da_dev;
664 bool flag;
665 int ret;
666
667 if (dev->export_count) {
668 pr_err("dev[%p]: Unable to change model alias"
669 " while export_count is %d\n",
670 dev, dev->export_count);
671 return -EINVAL;
672 }
673
674 ret = kstrtobool(page, &flag);
675 if (ret < 0)
676 return ret;
677
678 BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
679 if (flag)
680 dev_set_t10_wwn_model_alias(dev);
681 else
682 strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod);
683 da->emulate_model_alias = flag;
684 return count;
685 }
686
687 static ssize_t emulate_write_cache_store(struct config_item *item,
688 const char *page, size_t count)
689 {
690 struct se_dev_attrib *da = to_attrib(item);
691 bool flag;
692 int ret;
693
694 ret = kstrtobool(page, &flag);
695 if (ret < 0)
696 return ret;
697
698 if (flag && da->da_dev->transport->get_write_cache) {
699 pr_err("emulate_write_cache not supported for this device\n");
700 return -EINVAL;
701 }
702
703 da->emulate_write_cache = flag;
704 pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
705 da->da_dev, flag);
706 return count;
707 }
708
709 static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
710 const char *page, size_t count)
711 {
712 struct se_dev_attrib *da = to_attrib(item);
713 u32 val;
714 int ret;
715
716 ret = kstrtou32(page, 0, &val);
717 if (ret < 0)
718 return ret;
719
720 if (val != TARGET_UA_INTLCK_CTRL_CLEAR
721 && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
722 && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
723 pr_err("Illegal value %d\n", val);
724 return -EINVAL;
725 }
726
727 if (da->da_dev->export_count) {
728 pr_err("dev[%p]: Unable to change SE Device"
729 " UA_INTRLCK_CTRL while export_count is %d\n",
730 da->da_dev, da->da_dev->export_count);
731 return -EINVAL;
732 }
733 da->emulate_ua_intlck_ctrl = val;
734 pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
735 da->da_dev, val);
736 return count;
737 }
738
739 static ssize_t emulate_tas_store(struct config_item *item,
740 const char *page, size_t count)
741 {
742 struct se_dev_attrib *da = to_attrib(item);
743 bool flag;
744 int ret;
745
746 ret = kstrtobool(page, &flag);
747 if (ret < 0)
748 return ret;
749
750 if (da->da_dev->export_count) {
751 pr_err("dev[%p]: Unable to change SE Device TAS while"
752 " export_count is %d\n",
753 da->da_dev, da->da_dev->export_count);
754 return -EINVAL;
755 }
756 da->emulate_tas = flag;
757 pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
758 da->da_dev, flag ? "Enabled" : "Disabled");
759
760 return count;
761 }
762
763 static int target_try_configure_unmap(struct se_device *dev,
764 const char *config_opt)
765 {
766 if (!dev->transport->configure_unmap) {
767 pr_err("Generic Block Discard not supported\n");
768 return -ENOSYS;
769 }
770
771 if (!target_dev_configured(dev)) {
772 pr_err("Generic Block Discard setup for %s requires device to be configured\n",
773 config_opt);
774 return -ENODEV;
775 }
776
777 if (!dev->transport->configure_unmap(dev)) {
778 pr_err("Generic Block Discard setup for %s failed\n",
779 config_opt);
780 return -ENOSYS;
781 }
782
783 return 0;
784 }
785
786 static ssize_t emulate_tpu_store(struct config_item *item,
787 const char *page, size_t count)
788 {
789 struct se_dev_attrib *da = to_attrib(item);
790 struct se_device *dev = da->da_dev;
791 bool flag;
792 int ret;
793
794 ret = kstrtobool(page, &flag);
795 if (ret < 0)
796 return ret;
797
798 /*
799 * We expect this value to be non-zero when generic Block Layer
800 * Discard support is detected in iblock_create_virtdevice().
801 */
802 if (flag && !da->max_unmap_block_desc_count) {
803 ret = target_try_configure_unmap(dev, "emulate_tpu");
804 if (ret)
805 return ret;
806 }
807
808 da->emulate_tpu = flag;
809 pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
810 da->da_dev, flag);
811 return count;
812 }
813
814 static ssize_t emulate_tpws_store(struct config_item *item,
815 const char *page, size_t count)
816 {
817 struct se_dev_attrib *da = to_attrib(item);
818 struct se_device *dev = da->da_dev;
819 bool flag;
820 int ret;
821
822 ret = kstrtobool(page, &flag);
823 if (ret < 0)
824 return ret;
825
826 /*
827 * We expect this value to be non-zero when generic Block Layer
828 * Discard support is detected in iblock_create_virtdevice().
829 */
830 if (flag && !da->max_unmap_block_desc_count) {
831 ret = target_try_configure_unmap(dev, "emulate_tpws");
832 if (ret)
833 return ret;
834 }
835
836 da->emulate_tpws = flag;
837 pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
838 da->da_dev, flag);
839 return count;
840 }
841
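/*
 * pi_prot_type selects the T10 Protection Information (DIF) type emulated
 * for the device: 0 disables protection, 1 and 3 select DIF TYPE1/TYPE3,
 * and TYPE2 is rejected as unsupported.  Writes are ignored when the
 * underlying hardware already provides protection (hw_pi_prot_type).
 */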
842 static ssize_t pi_prot_type_store(struct config_item *item,
843 const char *page, size_t count)
844 {
845 struct se_dev_attrib *da = to_attrib(item);
846 int old_prot = da->pi_prot_type, ret;
847 struct se_device *dev = da->da_dev;
848 u32 flag;
849
850 ret = kstrtou32(page, 0, &flag);
851 if (ret < 0)
852 return ret;
853
854 if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
855 pr_err("Illegal value %d for pi_prot_type\n", flag);
856 return -EINVAL;
857 }
858 if (flag == 2) {
859 pr_err("DIF TYPE2 protection currently not supported\n");
860 return -ENOSYS;
861 }
862 if (da->hw_pi_prot_type) {
863 pr_warn("DIF protection enabled on underlying hardware,"
864 " ignoring\n");
865 return count;
866 }
867 if (!dev->transport->init_prot || !dev->transport->free_prot) {
868 /* 0 is the only allowed value for non-supporting backends */
869 if (flag == 0)
870 return count;
871
872 pr_err("DIF protection not supported by backend: %s\n",
873 dev->transport->name);
874 return -ENOSYS;
875 }
876 if (!target_dev_configured(dev)) {
877 pr_err("DIF protection requires device to be configured\n");
878 return -ENODEV;
879 }
880 if (dev->export_count) {
881 pr_err("dev[%p]: Unable to change SE Device PROT type while"
882 " export_count is %d\n", dev, dev->export_count);
883 return -EINVAL;
884 }
885
886 da->pi_prot_type = flag;
887
888 if (flag && !old_prot) {
889 ret = dev->transport->init_prot(dev);
890 if (ret) {
891 da->pi_prot_type = old_prot;
892 da->pi_prot_verify = (bool) da->pi_prot_type;
893 return ret;
894 }
895
896 } else if (!flag && old_prot) {
897 dev->transport->free_prot(dev);
898 }
899
900 da->pi_prot_verify = (bool) da->pi_prot_type;
901 pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
902 return count;
903 }
904
905 /* always zero, but attr needs to remain RW to avoid userspace breakage */
906 static ssize_t pi_prot_format_show(struct config_item *item, char *page)
907 {
908 return snprintf(page, PAGE_SIZE, "0\n");
909 }
910
911 static ssize_t pi_prot_format_store(struct config_item *item,
912 const char *page, size_t count)
913 {
914 struct se_dev_attrib *da = to_attrib(item);
915 struct se_device *dev = da->da_dev;
916 bool flag;
917 int ret;
918
919 ret = kstrtobool(page, &flag);
920 if (ret < 0)
921 return ret;
922
923 if (!flag)
924 return count;
925
926 if (!dev->transport->format_prot) {
927 pr_err("DIF protection format not supported by backend %s\n",
928 dev->transport->name);
929 return -ENOSYS;
930 }
931 if (!target_dev_configured(dev)) {
932 pr_err("DIF protection format requires device to be configured\n");
933 return -ENODEV;
934 }
935 if (dev->export_count) {
936 pr_err("dev[%p]: Unable to format SE Device PROT type while"
937 " export_count is %d\n", dev, dev->export_count);
938 return -EINVAL;
939 }
940
941 ret = dev->transport->format_prot(dev);
942 if (ret)
943 return ret;
944
945 pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
946 return count;
947 }
948
949 static ssize_t pi_prot_verify_store(struct config_item *item,
950 const char *page, size_t count)
951 {
952 struct se_dev_attrib *da = to_attrib(item);
953 bool flag;
954 int ret;
955
956 ret = kstrtobool(page, &flag);
957 if (ret < 0)
958 return ret;
959
960 if (!flag) {
961 da->pi_prot_verify = flag;
962 return count;
963 }
964 if (da->hw_pi_prot_type) {
965 pr_warn("DIF protection enabled on underlying hardware,"
966 " ignoring\n");
967 return count;
968 }
969 if (!da->pi_prot_type) {
970 pr_warn("DIF protection not supported by backend, ignoring\n");
971 return count;
972 }
973 da->pi_prot_verify = flag;
974
975 return count;
976 }
977
978 static ssize_t force_pr_aptpl_store(struct config_item *item,
979 const char *page, size_t count)
980 {
981 struct se_dev_attrib *da = to_attrib(item);
982 bool flag;
983 int ret;
984
985 ret = kstrtobool(page, &flag);
986 if (ret < 0)
987 return ret;
988 if (da->da_dev->export_count) {
989 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
990 " export_count is %d\n",
991 da->da_dev, da->da_dev->export_count);
992 return -EINVAL;
993 }
994
995 da->force_pr_aptpl = flag;
996 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
997 return count;
998 }
999
1000 static ssize_t emulate_rest_reord_store(struct config_item *item,
1001 const char *page, size_t count)
1002 {
1003 struct se_dev_attrib *da = to_attrib(item);
1004 bool flag;
1005 int ret;
1006
1007 ret = kstrtobool(page, &flag);
1008 if (ret < 0)
1009 return ret;
1010
1011 if (flag != 0) {
1012 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
1013 " reordering not implemented\n", da->da_dev);
1014 return -ENOSYS;
1015 }
1016 da->emulate_rest_reord = flag;
1017 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
1018 da->da_dev, flag);
1019 return count;
1020 }
1021
1022 static ssize_t unmap_zeroes_data_store(struct config_item *item,
1023 const char *page, size_t count)
1024 {
1025 struct se_dev_attrib *da = to_attrib(item);
1026 struct se_device *dev = da->da_dev;
1027 bool flag;
1028 int ret;
1029
1030 ret = kstrtobool(page, &flag);
1031 if (ret < 0)
1032 return ret;
1033
1034 if (da->da_dev->export_count) {
1035 pr_err("dev[%p]: Unable to change SE Device"
1036 " unmap_zeroes_data while export_count is %d\n",
1037 da->da_dev, da->da_dev->export_count);
1038 return -EINVAL;
1039 }
1040 /*
1041 * We expect this value to be non-zero when generic Block Layer
1042 * Discard support is detected in iblock_configure_device().
1043 */
1044 if (flag && !da->max_unmap_block_desc_count) {
1045 ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
1046 if (ret)
1047 return ret;
1048 }
1049 da->unmap_zeroes_data = flag;
1050 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
1051 da->da_dev, flag);
1052 return count;
1053 }
1054
1055 /*
1056 * Note, this can only be called on unexported SE Device Object.
1057 */
1058 static ssize_t queue_depth_store(struct config_item *item,
1059 const char *page, size_t count)
1060 {
1061 struct se_dev_attrib *da = to_attrib(item);
1062 struct se_device *dev = da->da_dev;
1063 u32 val;
1064 int ret;
1065
1066 ret = kstrtou32(page, 0, &val);
1067 if (ret < 0)
1068 return ret;
1069
1070 if (dev->export_count) {
1071 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1072 " export_count is %d\n",
1073 dev, dev->export_count);
1074 return -EINVAL;
1075 }
1076 if (!val) {
1077 pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
1078 return -EINVAL;
1079 }
1080
1081 if (val > dev->dev_attrib.queue_depth) {
1082 if (val > dev->dev_attrib.hw_queue_depth) {
1083 pr_err("dev[%p]: Passed queue_depth:"
1084 " %u exceeds TCM/SE_Device MAX"
1085 " TCQ: %u\n", dev, val,
1086 dev->dev_attrib.hw_queue_depth);
1087 return -EINVAL;
1088 }
1089 }
1090 da->queue_depth = dev->queue_depth = val;
1091 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
1092 return count;
1093 }
1094
1095 static ssize_t optimal_sectors_store(struct config_item *item,
1096 const char *page, size_t count)
1097 {
1098 struct se_dev_attrib *da = to_attrib(item);
1099 u32 val;
1100 int ret;
1101
1102 ret = kstrtou32(page, 0, &val);
1103 if (ret < 0)
1104 return ret;
1105
1106 if (da->da_dev->export_count) {
1107 pr_err("dev[%p]: Unable to change SE Device"
1108 " optimal_sectors while export_count is %d\n",
1109 da->da_dev, da->da_dev->export_count);
1110 return -EINVAL;
1111 }
1112 if (val > da->hw_max_sectors) {
1113 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1114 " greater than hw_max_sectors: %u\n",
1115 da->da_dev, val, da->hw_max_sectors);
1116 return -EINVAL;
1117 }
1118
1119 da->optimal_sectors = val;
1120 pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1121 da->da_dev, val);
1122 return count;
1123 }
1124
1125 static ssize_t block_size_store(struct config_item *item,
1126 const char *page, size_t count)
1127 {
1128 struct se_dev_attrib *da = to_attrib(item);
1129 u32 val;
1130 int ret;
1131
1132 ret = kstrtou32(page, 0, &val);
1133 if (ret < 0)
1134 return ret;
1135
1136 if (da->da_dev->export_count) {
1137 pr_err("dev[%p]: Unable to change SE Device block_size"
1138 " while export_count is %d\n",
1139 da->da_dev, da->da_dev->export_count);
1140 return -EINVAL;
1141 }
1142
1143 if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
1144 pr_err("dev[%p]: Illegal value for block_device: %u"
1145 " for SE device, must be 512, 1024, 2048 or 4096\n",
1146 da->da_dev, val);
1147 return -EINVAL;
1148 }
1149
1150 da->block_size = val;
1151
1152 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1153 da->da_dev, val);
1154 return count;
1155 }
1156
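/*
 * alua_support/pgr_support report the inverse of the PASSTHROUGH_ALUA/
 * PASSTHROUGH_PGR transport flags: 1 means the core emulates ALUA/PR for
 * this device, 0 means those commands are passed through to the backend.
 * They are only writable for backends that advertise the corresponding
 * flag in transport_flags_changeable.
 */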
1157 static ssize_t alua_support_show(struct config_item *item, char *page)
1158 {
1159 struct se_dev_attrib *da = to_attrib(item);
1160 u8 flags = da->da_dev->transport_flags;
1161
1162 return snprintf(page, PAGE_SIZE, "%d\n",
1163 flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
1164 }
1165
1166 static ssize_t alua_support_store(struct config_item *item,
1167 const char *page, size_t count)
1168 {
1169 struct se_dev_attrib *da = to_attrib(item);
1170 struct se_device *dev = da->da_dev;
1171 bool flag, oldflag;
1172 int ret;
1173
1174 ret = kstrtobool(page, &flag);
1175 if (ret < 0)
1176 return ret;
1177
1178 oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
1179 if (flag == oldflag)
1180 return count;
1181
1182 if (!(dev->transport->transport_flags_changeable &
1183 TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
1184 pr_err("dev[%p]: Unable to change SE Device alua_support:"
1185 " alua_support has fixed value\n", dev);
1186 return -ENOSYS;
1187 }
1188
1189 if (flag)
1190 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
1191 else
1192 dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
1193 return count;
1194 }
1195
1196 static ssize_t pgr_support_show(struct config_item *item, char *page)
1197 {
1198 struct se_dev_attrib *da = to_attrib(item);
1199 u8 flags = da->da_dev->transport_flags;
1200
1201 return snprintf(page, PAGE_SIZE, "%d\n",
1202 flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
1203 }
1204
1205 static ssize_t pgr_support_store(struct config_item *item,
1206 const char *page, size_t count)
1207 {
1208 struct se_dev_attrib *da = to_attrib(item);
1209 struct se_device *dev = da->da_dev;
1210 bool flag, oldflag;
1211 int ret;
1212
1213 ret = kstrtobool(page, &flag);
1214 if (ret < 0)
1215 return ret;
1216
1217 oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
1218 if (flag == oldflag)
1219 return count;
1220
1221 if (!(dev->transport->transport_flags_changeable &
1222 TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
1223 pr_err("dev[%p]: Unable to change SE Device pgr_support:"
1224 " pgr_support has fixed value\n", dev);
1225 return -ENOSYS;
1226 }
1227
1228 if (flag)
1229 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
1230 else
1231 dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
1232 return count;
1233 }
1234
1235 static ssize_t emulate_rsoc_store(struct config_item *item,
1236 const char *page, size_t count)
1237 {
1238 struct se_dev_attrib *da = to_attrib(item);
1239 bool flag;
1240 int ret;
1241
1242 ret = kstrtobool(page, &flag);
1243 if (ret < 0)
1244 return ret;
1245
1246 da->emulate_rsoc = flag;
1247 pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
1248 da->da_dev, flag);
1249 return count;
1250 }
1251
1252 static ssize_t submit_type_store(struct config_item *item, const char *page,
1253 size_t count)
1254 {
1255 struct se_dev_attrib *da = to_attrib(item);
1256 int ret;
1257 u8 val;
1258
1259 ret = kstrtou8(page, 0, &val);
1260 if (ret < 0)
1261 return ret;
1262
1263 if (val > TARGET_QUEUE_SUBMIT)
1264 return -EINVAL;
1265
1266 da->submit_type = val;
1267 return count;
1268 }
1269
1270 static ssize_t complete_type_store(struct config_item *item, const char *page,
1271 size_t count)
1272 {
1273 struct se_dev_attrib *da = to_attrib(item);
1274 int ret;
1275 u8 val;
1276
1277 ret = kstrtou8(page, 0, &val);
1278 if (ret < 0)
1279 return ret;
1280
1281 if (val > TARGET_QUEUE_COMPL)
1282 return -EINVAL;
1283
1284 da->complete_type = val;
1285 return count;
1286 }
1287
1288 CONFIGFS_ATTR(, emulate_model_alias);
1289 CONFIGFS_ATTR(, emulate_dpo);
1290 CONFIGFS_ATTR(, emulate_fua_write);
1291 CONFIGFS_ATTR(, emulate_fua_read);
1292 CONFIGFS_ATTR(, emulate_write_cache);
1293 CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
1294 CONFIGFS_ATTR(, emulate_tas);
1295 CONFIGFS_ATTR(, emulate_tpu);
1296 CONFIGFS_ATTR(, emulate_tpws);
1297 CONFIGFS_ATTR(, emulate_caw);
1298 CONFIGFS_ATTR(, emulate_3pc);
1299 CONFIGFS_ATTR(, emulate_pr);
1300 CONFIGFS_ATTR(, emulate_rsoc);
1301 CONFIGFS_ATTR(, pi_prot_type);
1302 CONFIGFS_ATTR_RO(, hw_pi_prot_type);
1303 CONFIGFS_ATTR(, pi_prot_format);
1304 CONFIGFS_ATTR(, pi_prot_verify);
1305 CONFIGFS_ATTR(, enforce_pr_isids);
1306 CONFIGFS_ATTR(, is_nonrot);
1307 CONFIGFS_ATTR(, emulate_rest_reord);
1308 CONFIGFS_ATTR(, force_pr_aptpl);
1309 CONFIGFS_ATTR_RO(, hw_block_size);
1310 CONFIGFS_ATTR(, block_size);
1311 CONFIGFS_ATTR_RO(, hw_max_sectors);
1312 CONFIGFS_ATTR(, optimal_sectors);
1313 CONFIGFS_ATTR_RO(, hw_queue_depth);
1314 CONFIGFS_ATTR(, queue_depth);
1315 CONFIGFS_ATTR(, max_unmap_lba_count);
1316 CONFIGFS_ATTR(, max_unmap_block_desc_count);
1317 CONFIGFS_ATTR(, unmap_granularity);
1318 CONFIGFS_ATTR(, unmap_granularity_alignment);
1319 CONFIGFS_ATTR(, unmap_zeroes_data);
1320 CONFIGFS_ATTR(, max_write_same_len);
1321 CONFIGFS_ATTR(, alua_support);
1322 CONFIGFS_ATTR(, pgr_support);
1323 CONFIGFS_ATTR(, submit_type);
1324 CONFIGFS_ATTR(, complete_type);
1325 CONFIGFS_ATTR_RO(, atomic_max_len);
1326 CONFIGFS_ATTR_RO(, atomic_alignment);
1327 CONFIGFS_ATTR_RO(, atomic_granularity);
1328 CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
1329 CONFIGFS_ATTR_RO(, atomic_max_boundary);
1330
1331 /*
1332 * dev_attrib attributes for devices using the target core SBC/SPC
1333 * interpreter. Any backend using spc_parse_cdb should be using
1334 * these.
1335 */
1336 struct configfs_attribute *sbc_attrib_attrs[] = {
1337 &attr_emulate_model_alias,
1338 &attr_emulate_dpo,
1339 &attr_emulate_fua_write,
1340 &attr_emulate_fua_read,
1341 &attr_emulate_write_cache,
1342 &attr_emulate_ua_intlck_ctrl,
1343 &attr_emulate_tas,
1344 &attr_emulate_tpu,
1345 &attr_emulate_tpws,
1346 &attr_emulate_caw,
1347 &attr_emulate_3pc,
1348 &attr_emulate_pr,
1349 &attr_pi_prot_type,
1350 &attr_hw_pi_prot_type,
1351 &attr_pi_prot_format,
1352 &attr_pi_prot_verify,
1353 &attr_enforce_pr_isids,
1354 &attr_is_nonrot,
1355 &attr_emulate_rest_reord,
1356 &attr_force_pr_aptpl,
1357 &attr_hw_block_size,
1358 &attr_block_size,
1359 &attr_hw_max_sectors,
1360 &attr_optimal_sectors,
1361 &attr_hw_queue_depth,
1362 &attr_queue_depth,
1363 &attr_max_unmap_lba_count,
1364 &attr_max_unmap_block_desc_count,
1365 &attr_unmap_granularity,
1366 &attr_unmap_granularity_alignment,
1367 &attr_unmap_zeroes_data,
1368 &attr_max_write_same_len,
1369 &attr_alua_support,
1370 &attr_pgr_support,
1371 &attr_emulate_rsoc,
1372 &attr_submit_type,
1373 &attr_complete_type,
1374 &attr_atomic_alignment,
1375 &attr_atomic_max_len,
1376 &attr_atomic_granularity,
1377 &attr_atomic_max_with_boundary,
1378 &attr_atomic_max_boundary,
1379 NULL,
1380 };
1381 EXPORT_SYMBOL(sbc_attrib_attrs);
1382
1383 /*
1384 * Minimal dev_attrib attributes for devices passing through CDBs.
1385 * In this case we only provide a few read-only attributes for
1386 * backwards compatibility.
1387 */
1388 struct configfs_attribute *passthrough_attrib_attrs[] = {
1389 &attr_hw_pi_prot_type,
1390 &attr_hw_block_size,
1391 &attr_hw_max_sectors,
1392 &attr_hw_queue_depth,
1393 &attr_emulate_pr,
1394 &attr_alua_support,
1395 &attr_pgr_support,
1396 &attr_submit_type,
1397 &attr_complete_type,
1398 NULL,
1399 };
1400 EXPORT_SYMBOL(passthrough_attrib_attrs);
1401
1402 /*
1403 * pr related dev_attrib attributes for devices passing through CDBs,
1404 * but allowing in core pr emulation.
1405 */
1406 struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
1407 &attr_enforce_pr_isids,
1408 &attr_force_pr_aptpl,
1409 NULL,
1410 };
1411 EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
1412
1413 TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
1414 TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
1415
1416 /* End functions for struct config_item_type tb_dev_attrib_cit */
1417
1418 /* Start functions for struct config_item_type tb_dev_wwn_cit */
1419
1420 static struct t10_wwn *to_t10_wwn(struct config_item *item)
1421 {
1422 return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
1423 }
1424
1425 static ssize_t target_check_inquiry_data(char *buf)
1426 {
1427 size_t len;
1428 int i;
1429
1430 len = strlen(buf);
1431
1432 /*
1433 * SPC 4.3.1:
1434 * ASCII data fields shall contain only ASCII printable characters
1435 * (i.e., code values 20h to 7Eh) and may be terminated with one or
1436 * more ASCII null (00h) characters.
1437 */
1438 for (i = 0; i < len; i++) {
1439 if (buf[i] < 0x20 || buf[i] > 0x7E) {
1440 pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
1441 return -EINVAL;
1442 }
1443 }
1444
1445 return len;
1446 }
1447
1448 /*
1449 * STANDARD and VPD page 0x83 T10 Vendor Identification
1450 */
1451 static ssize_t target_wwn_vendor_id_show(struct config_item *item,
1452 char *page)
1453 {
1454 return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
1455 }
1456
1457 static ssize_t target_wwn_vendor_id_store(struct config_item *item,
1458 const char *page, size_t count)
1459 {
1460 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1461 struct se_device *dev = t10_wwn->t10_dev;
1462 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1463 unsigned char buf[INQUIRY_VENDOR_LEN + 2];
1464 char *stripped = NULL;
1465 ssize_t len;
1466 ssize_t ret;
1467
1468 len = strscpy(buf, page);
1469 if (len > 0) {
1470 /* Strip any newline added from userspace. */
1471 stripped = strstrip(buf);
1472 len = strlen(stripped);
1473 }
1474 if (len < 0 || len > INQUIRY_VENDOR_LEN) {
1475 pr_err("Emulated T10 Vendor Identification exceeds"
1476 " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
1477 "\n");
1478 return -EOVERFLOW;
1479 }
1480
1481 ret = target_check_inquiry_data(stripped);
1482
1483 if (ret < 0)
1484 return ret;
1485
1486 /*
1487 * Check to see if any active exports exist. If they do exist, fail
1488 * here as changing this information on the fly (underneath the
1489 * initiator side OS dependent multipath code) could cause negative
1490 * effects.
1491 */
1492 if (dev->export_count) {
1493 pr_err("Unable to set T10 Vendor Identification while"
1494 " active %d exports exist\n", dev->export_count);
1495 return -EINVAL;
1496 }
1497
1498 BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
1499 strscpy(dev->t10_wwn.vendor, stripped);
1500
1501 pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
1502 " %s\n", dev->t10_wwn.vendor);
1503
1504 return count;
1505 }
1506
1507 static ssize_t target_wwn_product_id_show(struct config_item *item,
1508 char *page)
1509 {
1510 return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
1511 }
1512
1513 static ssize_t target_wwn_product_id_store(struct config_item *item,
1514 const char *page, size_t count)
1515 {
1516 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1517 struct se_device *dev = t10_wwn->t10_dev;
1518 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1519 unsigned char buf[INQUIRY_MODEL_LEN + 2];
1520 char *stripped = NULL;
1521 ssize_t len;
1522 ssize_t ret;
1523
1524 len = strscpy(buf, page);
1525 if (len > 0) {
1526 /* Strip any newline added from userspace. */
1527 stripped = strstrip(buf);
1528 len = strlen(stripped);
1529 }
1530 if (len < 0 || len > INQUIRY_MODEL_LEN) {
1531 pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
1532 __stringify(INQUIRY_MODEL_LEN)
1533 "\n");
1534 return -EOVERFLOW;
1535 }
1536
1537 ret = target_check_inquiry_data(stripped);
1538
1539 if (ret < 0)
1540 return ret;
1541
1542 /*
1543 * Check to see if any active exports exist. If they do exist, fail
1544 * here as changing this information on the fly (underneath the
1545 * initiator side OS dependent multipath code) could cause negative
1546 * effects.
1547 */
1548 if (dev->export_count) {
1549 pr_err("Unable to set T10 Model while active %d exports exist\n",
1550 dev->export_count);
1551 return -EINVAL;
1552 }
1553
1554 BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
1555 strscpy(dev->t10_wwn.model, stripped);
1556
1557 pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
1558 dev->t10_wwn.model);
1559
1560 return count;
1561 }
1562
1563 static ssize_t target_wwn_revision_show(struct config_item *item,
1564 char *page)
1565 {
1566 return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
1567 }
1568
1569 static ssize_t target_wwn_revision_store(struct config_item *item,
1570 const char *page, size_t count)
1571 {
1572 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1573 struct se_device *dev = t10_wwn->t10_dev;
1574 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1575 unsigned char buf[INQUIRY_REVISION_LEN + 2];
1576 char *stripped = NULL;
1577 ssize_t len;
1578 ssize_t ret;
1579
1580 len = strscpy(buf, page);
1581 if (len > 0) {
1582 /* Strip any newline added from userspace. */
1583 stripped = strstrip(buf);
1584 len = strlen(stripped);
1585 }
1586 if (len < 0 || len > INQUIRY_REVISION_LEN) {
1587 pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
1588 __stringify(INQUIRY_REVISION_LEN)
1589 "\n");
1590 return -EOVERFLOW;
1591 }
1592
1593 ret = target_check_inquiry_data(stripped);
1594
1595 if (ret < 0)
1596 return ret;
1597
1598 /*
1599 * Check to see if any active exports exist. If they do exist, fail
1600 * here as changing this information on the fly (underneath the
1601 * initiator side OS dependent multipath code) could cause negative
1602 * effects.
1603 */
1604 if (dev->export_count) {
1605 pr_err("Unable to set T10 Revision while active %d exports exist\n",
1606 dev->export_count);
1607 return -EINVAL;
1608 }
1609
1610 BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
1611 strscpy(dev->t10_wwn.revision, stripped);
1612
1613 pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
1614 dev->t10_wwn.revision);
1615
1616 return count;
1617 }
1618
1619 static ssize_t
1620 target_wwn_company_id_show(struct config_item *item,
1621 char *page)
1622 {
1623 return snprintf(page, PAGE_SIZE, "%#08x\n",
1624 to_t10_wwn(item)->company_id);
1625 }
1626
1627 static ssize_t
1628 target_wwn_company_id_store(struct config_item *item,
1629 const char *page, size_t count)
1630 {
1631 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1632 struct se_device *dev = t10_wwn->t10_dev;
1633 u32 val;
1634 int ret;
1635
1636 /*
1637 * The IEEE COMPANY_ID field should contain a 24-bit canonical
1638 * form OUI assigned by the IEEE.
1639 */
1640 ret = kstrtou32(page, 0, &val);
1641 if (ret < 0)
1642 return ret;
1643
1644 if (val >= 0x1000000)
1645 return -EOVERFLOW;
1646
1647 /*
1648 * Check to see if any active exports exist. If they do exist, fail
1649 * here as changing this information on the fly (underneath the
1650 * initiator side OS dependent multipath code) could cause negative
1651 * effects.
1652 */
1653 if (dev->export_count) {
1654 pr_err("Unable to set Company ID while %u exports exist\n",
1655 dev->export_count);
1656 return -EINVAL;
1657 }
1658
1659 t10_wwn->company_id = val;
1660
1661 pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
1662 t10_wwn->company_id);
1663
1664 return count;
1665 }
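/*
 * Illustrative example (value is only an example OUI): override the 24-bit
 * IEEE company id used when building NAA designators, before the device is
 * exported:
 *
 *	echo 0x001405 > $TARGET/$HBA/$STORAGE_OBJECT/wwn/company_id
 */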
1666
1667 /*
1668 * VPD page 0x80 Unit serial
1669 */
1670 static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
1671 char *page)
1672 {
1673 return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
1674 &to_t10_wwn(item)->unit_serial[0]);
1675 }
1676
1677 static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
1678 const char *page, size_t count)
1679 {
1680 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1681 struct se_device *dev = t10_wwn->t10_dev;
1682 unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
1683
1684 /*
1685 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
1686 * from the struct scsi_device level firmware, do not allow
1687 * VPD Unit Serial to be emulated.
1688 *
1689 * Note this struct scsi_device could also be emulating VPD
1690 * information from its drivers/scsi LLD. But for now we assume
1691 * it is doing 'the right thing' wrt a world wide unique
1692 * VPD Unit Serial Number that OS dependent multipath can depend on.
1693 */
1694 if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
1695 pr_err("Underlying SCSI device firmware provided VPD"
1696 " Unit Serial, ignoring request\n");
1697 return -EOPNOTSUPP;
1698 }
1699
1700 if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
1701 pr_err("Emulated VPD Unit Serial exceeds"
1702 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
1703 return -EOVERFLOW;
1704 }
1705 /*
1706 * Check to see if any active $FABRIC_MOD exports exist. If they
1707 * do exist, fail here as changing this information on the fly
1708 * (underneath the initiator side OS dependent multipath code)
1709 * could cause negative effects.
1710 */
1711 if (dev->export_count) {
1712 pr_err("Unable to set VPD Unit Serial while"
1713 " active %d $FABRIC_MOD exports exist\n",
1714 dev->export_count);
1715 return -EINVAL;
1716 }
1717
1718 /*
1719 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
1720 *
1721 * Also, strip any newline added from the userspace
1722 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
1723 */
1724 snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
1725 snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
1726 "%s", strstrip(buf));
1727 dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
1728
1729 pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
1730 " %s\n", dev->t10_wwn.unit_serial);
1731
1732 return count;
1733 }
1734
1735 /*
1736 * VPD page 0x83 Protocol Identifier
1737 */
1738 static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
1739 char *page)
1740 {
1741 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1742 struct t10_vpd *vpd;
1743 unsigned char buf[VPD_TMP_BUF_SIZE] = { };
1744 ssize_t len = 0;
1745
1746 spin_lock(&t10_wwn->t10_vpd_lock);
1747 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
1748 if (!vpd->protocol_identifier_set)
1749 continue;
1750
1751 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
1752
1753 if (len + strlen(buf) >= PAGE_SIZE)
1754 break;
1755
1756 len += sprintf(page+len, "%s", buf);
1757 }
1758 spin_unlock(&t10_wwn->t10_vpd_lock);
1759
1760 return len;
1761 }
1762
1763 static ssize_t target_wwn_pd_text_id_info_show(struct config_item *item,
1764 char *page)
1765 {
1766 return sysfs_emit(page, "%s\n", &to_t10_wwn(item)->pd_text_id_info[0]);
1767 }
1768
1769 static ssize_t target_wwn_pd_text_id_info_store(struct config_item *item,
1770 const char *page, size_t count)
1771 {
1772 struct t10_wwn *t10_wwn = to_t10_wwn(item);
1773 struct se_device *dev = t10_wwn->t10_dev;
1774
1775 /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
1776 unsigned char buf[PD_TEXT_ID_INFO_LEN + 2];
1777 char *stripped;
1778
1779 /*
1780 * Check to see if any active exports exist. If they do exist, fail
1781 * here as changing this information on the fly (underneath the
1782 * initiator side OS dependent multipath code) could cause negative
1783 * effects.
1784 */
1785 if (dev->export_count) {
1786 pr_err("Unable to set the peripheral device text id info while active %d exports exist\n",
1787 dev->export_count);
1788 return -EINVAL;
1789 }
1790
1791 if (strscpy(buf, page, sizeof(buf)) < 0)
1792 return -EOVERFLOW;
1793
1794 /* Strip any newline added from userspace. */
1795 stripped = strstrip(buf);
1796 if (strlen(stripped) >= PD_TEXT_ID_INFO_LEN) {
1797 pr_err("Emulated peripheral device text id info exceeds PD_TEXT_ID_INFO_LEN: " __stringify(PD_TEXT_ID_INFO_LEN) "\n");
1798 return -EOVERFLOW;
1799 }
1800
1801 BUILD_BUG_ON(sizeof(dev->t10_wwn.pd_text_id_info) != PD_TEXT_ID_INFO_LEN);
1802 strscpy(dev->t10_wwn.pd_text_id_info, stripped,
1803 sizeof(dev->t10_wwn.pd_text_id_info));
1804
1805 pr_debug("Target_Core_ConfigFS: Set emulated peripheral dev text id info:"
1806 " %s\n", dev->t10_wwn.pd_text_id_info);
1807
1808 return count;
1809 }
1810
1811 /*
1812 * Generic wrapper for dumping VPD identifiers by association.
1813 */
1814 #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
1815 static ssize_t target_wwn_##_name##_show(struct config_item *item, \
1816 char *page) \
1817 { \
1818 struct t10_wwn *t10_wwn = to_t10_wwn(item); \
1819 struct t10_vpd *vpd; \
1820 unsigned char buf[VPD_TMP_BUF_SIZE]; \
1821 ssize_t len = 0; \
1822 \
1823 spin_lock(&t10_wwn->t10_vpd_lock); \
1824 list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
1825 if (vpd->association != _assoc) \
1826 continue; \
1827 \
1828 memset(buf, 0, VPD_TMP_BUF_SIZE); \
1829 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
1830 if (len + strlen(buf) >= PAGE_SIZE) \
1831 break; \
1832 len += sprintf(page+len, "%s", buf); \
1833 \
1834 memset(buf, 0, VPD_TMP_BUF_SIZE); \
1835 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
1836 if (len + strlen(buf) >= PAGE_SIZE) \
1837 break; \
1838 len += sprintf(page+len, "%s", buf); \
1839 \
1840 memset(buf, 0, VPD_TMP_BUF_SIZE); \
1841 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
1842 if (len + strlen(buf) >= PAGE_SIZE) \
1843 break; \
1844 len += sprintf(page+len, "%s", buf); \
1845 } \
1846 spin_unlock(&t10_wwn->t10_vpd_lock); \
1847 \
1848 return len; \
1849 }
1850
1851 /* VPD page 0x83 Association: Logical Unit */
1852 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
1853 /* VPD page 0x83 Association: Target Port */
1854 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
1855 /* VPD page 0x83 Association: SCSI Target Device */
1856 DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
1857
1858 CONFIGFS_ATTR(target_wwn_, vendor_id);
1859 CONFIGFS_ATTR(target_wwn_, product_id);
1860 CONFIGFS_ATTR(target_wwn_, revision);
1861 CONFIGFS_ATTR(target_wwn_, company_id);
1862 CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
1863 CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
1864 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
1865 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
1866 CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
1867 CONFIGFS_ATTR(target_wwn_, pd_text_id_info);
1868
1869 static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
1870 &target_wwn_attr_vendor_id,
1871 &target_wwn_attr_product_id,
1872 &target_wwn_attr_revision,
1873 &target_wwn_attr_company_id,
1874 &target_wwn_attr_vpd_unit_serial,
1875 &target_wwn_attr_vpd_protocol_identifier,
1876 &target_wwn_attr_vpd_assoc_logical_unit,
1877 &target_wwn_attr_vpd_assoc_target_port,
1878 &target_wwn_attr_vpd_assoc_scsi_target_device,
1879 &target_wwn_attr_pd_text_id_info,
1880 NULL,
1881 };
1882
1883 TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
1884
1885 /* End functions for struct config_item_type tb_dev_wwn_cit */
1886
1887 /* Start functions for struct config_item_type tb_dev_pr_cit */
1888
1889 static struct se_device *pr_to_dev(struct config_item *item)
1890 {
1891 return container_of(to_config_group(item), struct se_device,
1892 dev_pr_group);
1893 }
1894
1895 static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
1896 char *page)
1897 {
1898 struct se_node_acl *se_nacl;
1899 struct t10_pr_registration *pr_reg;
1900 char i_buf[PR_REG_ISID_ID_LEN] = { };
1901
1902 pr_reg = dev->dev_pr_res_holder;
1903 if (!pr_reg)
1904 return sprintf(page, "No SPC-3 Reservation holder\n");
1905
1906 se_nacl = pr_reg->pr_reg_nacl;
1907 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
1908
1909 return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
1910 se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1911 se_nacl->initiatorname, i_buf);
1912 }
1913
1914 static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
1915 char *page)
1916 {
1917 struct se_session *sess = dev->reservation_holder;
1918 struct se_node_acl *se_nacl;
1919 ssize_t len;
1920
1921 if (sess) {
1922 se_nacl = sess->se_node_acl;
1923 len = sprintf(page,
1924 "SPC-2 Reservation: %s Initiator: %s\n",
1925 se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1926 se_nacl->initiatorname);
1927 } else {
1928 len = sprintf(page, "No SPC-2 Reservation holder\n");
1929 }
1930 return len;
1931 }
1932
1933 static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
1934 {
1935 struct se_device *dev = pr_to_dev(item);
1936 int ret;
1937
1938 if (!dev->dev_attrib.emulate_pr)
1939 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1940
1941 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1942 return sprintf(page, "Passthrough\n");
1943
1944 spin_lock(&dev->dev_reservation_lock);
1945 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1946 ret = target_core_dev_pr_show_spc2_res(dev, page);
1947 else
1948 ret = target_core_dev_pr_show_spc3_res(dev, page);
1949 spin_unlock(&dev->dev_reservation_lock);
1950 return ret;
1951 }
1952
1953 static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
1954 char *page)
1955 {
1956 struct se_device *dev = pr_to_dev(item);
1957 ssize_t len = 0;
1958
1959 spin_lock(&dev->dev_reservation_lock);
1960 if (!dev->dev_pr_res_holder) {
1961 len = sprintf(page, "No SPC-3 Reservation holder\n");
1962 } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
1963 len = sprintf(page, "SPC-3 Reservation: All Target"
1964 " Ports registration\n");
1965 } else {
1966 len = sprintf(page, "SPC-3 Reservation: Single"
1967 " Target Port registration\n");
1968 }
1969
1970 spin_unlock(&dev->dev_reservation_lock);
1971 return len;
1972 }
1973
1974 static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
1975 char *page)
1976 {
1977 return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
1978 }
1979
1980
1981 static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
1982 char *page)
1983 {
1984 struct se_device *dev = pr_to_dev(item);
1985 struct se_node_acl *se_nacl;
1986 struct se_portal_group *se_tpg;
1987 struct t10_pr_registration *pr_reg;
1988 const struct target_core_fabric_ops *tfo;
1989 ssize_t len = 0;
1990
1991 spin_lock(&dev->dev_reservation_lock);
1992 pr_reg = dev->dev_pr_res_holder;
1993 if (!pr_reg) {
1994 len = sprintf(page, "No SPC-3 Reservation holder\n");
1995 goto out_unlock;
1996 }
1997
1998 se_nacl = pr_reg->pr_reg_nacl;
1999 se_tpg = se_nacl->se_tpg;
2000 tfo = se_tpg->se_tpg_tfo;
2001
2002 len += sprintf(page+len, "SPC-3 Reservation: %s"
2003 " Target Node Endpoint: %s\n", tfo->fabric_name,
2004 tfo->tpg_get_wwn(se_tpg));
2005 len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
2006 " Identifier Tag: %hu %s Portal Group Tag: %hu"
2007 " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
2008 tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
2009 tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
2010
2011 out_unlock:
2012 spin_unlock(&dev->dev_reservation_lock);
2013 return len;
2014 }
2015
2016
2017 static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
2018 char *page)
2019 {
2020 struct se_device *dev = pr_to_dev(item);
2021 const struct target_core_fabric_ops *tfo;
2022 struct t10_pr_registration *pr_reg;
2023 unsigned char buf[384];
2024 char i_buf[PR_REG_ISID_ID_LEN];
2025 ssize_t len = 0;
2026 int reg_count = 0;
2027
2028 len += sprintf(page+len, "SPC-3 PR Registrations:\n");
2029
2030 spin_lock(&dev->t10_pr.registration_lock);
2031 list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
2032 pr_reg_list) {
2033
2034 memset(buf, 0, 384);
2035 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2036 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
2037 core_pr_dump_initiator_port(pr_reg, i_buf,
2038 PR_REG_ISID_ID_LEN);
2039 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
2040 tfo->fabric_name,
2041 pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
2042 pr_reg->pr_res_generation);
2043
2044 if (len + strlen(buf) >= PAGE_SIZE)
2045 break;
2046
2047 len += sprintf(page+len, "%s", buf);
2048 reg_count++;
2049 }
2050 spin_unlock(&dev->t10_pr.registration_lock);
2051
2052 if (!reg_count)
2053 len += sprintf(page+len, "None\n");
2054
2055 return len;
2056 }
2057
2058 static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
2059 {
2060 struct se_device *dev = pr_to_dev(item);
2061 struct t10_pr_registration *pr_reg;
2062 ssize_t len = 0;
2063
2064 spin_lock(&dev->dev_reservation_lock);
2065 pr_reg = dev->dev_pr_res_holder;
2066 if (pr_reg) {
2067 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
2068 core_scsi3_pr_dump_type(pr_reg->pr_res_type));
2069 } else {
2070 len = sprintf(page, "No SPC-3 Reservation holder\n");
2071 }
2072
2073 spin_unlock(&dev->dev_reservation_lock);
2074 return len;
2075 }
2076
2077 static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
2078 {
2079 struct se_device *dev = pr_to_dev(item);
2080
2081 if (!dev->dev_attrib.emulate_pr)
2082 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
2083 if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
2084 return sprintf(page, "SPC_PASSTHROUGH\n");
2085 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
2086 return sprintf(page, "SPC2_RESERVATIONS\n");
2087
2088 return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
2089 }
2090
2091 static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
2092 char *page)
2093 {
2094 struct se_device *dev = pr_to_dev(item);
2095
2096 if (!dev->dev_attrib.emulate_pr ||
2097 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2098 return 0;
2099
2100 return sprintf(page, "APTPL Bit Status: %s\n",
2101 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
2102 }
2103
2104 static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
2105 char *page)
2106 {
2107 struct se_device *dev = pr_to_dev(item);
2108
2109 if (!dev->dev_attrib.emulate_pr ||
2110 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2111 return 0;
2112
2113 return sprintf(page, "Ready to process PR APTPL metadata..\n");
2114 }
2115
2116 enum {
2117 Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
2118 Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
2119 Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
2120 Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
2121 };
2122
2123 static match_table_t tokens = {
2124 {Opt_initiator_fabric, "initiator_fabric=%s"},
2125 {Opt_initiator_node, "initiator_node=%s"},
2126 {Opt_initiator_sid, "initiator_sid=%s"},
2127 {Opt_sa_res_key, "sa_res_key=%s"},
2128 {Opt_res_holder, "res_holder=%d"},
2129 {Opt_res_type, "res_type=%d"},
2130 {Opt_res_scope, "res_scope=%d"},
2131 {Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
2132 {Opt_mapped_lun, "mapped_lun=%u"},
2133 {Opt_target_fabric, "target_fabric=%s"},
2134 {Opt_target_node, "target_node=%s"},
2135 {Opt_tpgt, "tpgt=%d"},
2136 {Opt_port_rtpi, "port_rtpi=%d"},
2137 {Opt_target_lun, "target_lun=%u"},
2138 {Opt_err, NULL}
2139 };
2140
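/*
 * A minimal illustrative write for restoring one APTPL registration; the
 * WWNs and key below are hypothetical, the accepted keys are exactly those
 * in tokens[] above, and initiator_node=, target_node= and a non-zero
 * sa_res_key= are mandatory (the string is wrapped here for readability):
 *
 *   echo "initiator_fabric=iSCSI,initiator_node=iqn.1993-08.org.debian:01:abcd,
 *   sa_res_key=0x1234,res_holder=1,res_type=3,mapped_lun=0,
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.linux-iscsi:tgt1,tpgt=1,
 *   target_lun=0" > $TARGET/$HBA/$STORAGE_OBJECT/pr/res_aptpl_metadata
 */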
2141 static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
2142 const char *page, size_t count)
2143 {
2144 struct se_device *dev = pr_to_dev(item);
2145 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
2146 unsigned char *t_fabric = NULL, *t_port = NULL;
2147 char *orig, *ptr, *opts;
2148 substring_t args[MAX_OPT_ARGS];
2149 unsigned long long tmp_ll;
2150 u64 sa_res_key = 0;
2151 u64 mapped_lun = 0, target_lun = 0;
2152 int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
2153 u16 tpgt = 0;
2154 u8 type = 0;
2155
2156 if (!dev->dev_attrib.emulate_pr ||
2157 (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
2158 return count;
2159 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
2160 return count;
2161
2162 if (dev->export_count) {
2163 pr_debug("Unable to process APTPL metadata while"
2164 " active fabric exports exist\n");
2165 return -EINVAL;
2166 }
2167
2168 opts = kstrdup(page, GFP_KERNEL);
2169 if (!opts)
2170 return -ENOMEM;
2171
2172 orig = opts;
2173 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2174 if (!*ptr)
2175 continue;
2176
2177 token = match_token(ptr, tokens, args);
2178 switch (token) {
2179 case Opt_initiator_fabric:
2180 i_fabric = match_strdup(args);
2181 if (!i_fabric) {
2182 ret = -ENOMEM;
2183 goto out;
2184 }
2185 break;
2186 case Opt_initiator_node:
2187 i_port = match_strdup(args);
2188 if (!i_port) {
2189 ret = -ENOMEM;
2190 goto out;
2191 }
2192 if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
2193 pr_err("APTPL metadata initiator_node="
2194 " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
2195 PR_APTPL_MAX_IPORT_LEN);
2196 ret = -EINVAL;
2197 break;
2198 }
2199 break;
2200 case Opt_initiator_sid:
2201 isid = match_strdup(args);
2202 if (!isid) {
2203 ret = -ENOMEM;
2204 goto out;
2205 }
2206 if (strlen(isid) >= PR_REG_ISID_LEN) {
2207 pr_err("APTPL metadata initiator_sid="
2208 " exceeds PR_REG_ISID_LEN: %d\n",
2209 PR_REG_ISID_LEN);
2210 ret = -EINVAL;
2211 break;
2212 }
2213 break;
2214 case Opt_sa_res_key:
2215 ret = match_u64(args, &tmp_ll);
2216 if (ret < 0) {
2217 pr_err("match_u64() failed for sa_res_key=\n");
2218 goto out;
2219 }
2220 sa_res_key = (u64)tmp_ll;
2221 break;
2222 /*
2223 * PR APTPL Metadata for Reservation
2224 */
2225 case Opt_res_holder:
2226 ret = match_int(args, &arg);
2227 if (ret)
2228 goto out;
2229 res_holder = arg;
2230 break;
2231 case Opt_res_type:
2232 ret = match_int(args, &arg);
2233 if (ret)
2234 goto out;
2235 type = (u8)arg;
2236 break;
2237 case Opt_res_scope:
2238 ret = match_int(args, &arg);
2239 if (ret)
2240 goto out;
2241 break;
2242 case Opt_res_all_tg_pt:
2243 ret = match_int(args, &arg);
2244 if (ret)
2245 goto out;
2246 all_tg_pt = (int)arg;
2247 break;
2248 case Opt_mapped_lun:
2249 ret = match_u64(args, &tmp_ll);
2250 if (ret)
2251 goto out;
2252 mapped_lun = (u64)tmp_ll;
2253 break;
2254 /*
2255 * PR APTPL Metadata for Target Port
2256 */
2257 case Opt_target_fabric:
2258 t_fabric = match_strdup(args);
2259 if (!t_fabric) {
2260 ret = -ENOMEM;
2261 goto out;
2262 }
2263 break;
2264 case Opt_target_node:
2265 t_port = match_strdup(args);
2266 if (!t_port) {
2267 ret = -ENOMEM;
2268 goto out;
2269 }
2270 if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
2271 pr_err("APTPL metadata target_node="
2272 " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
2273 PR_APTPL_MAX_TPORT_LEN);
2274 ret = -EINVAL;
2275 break;
2276 }
2277 break;
2278 case Opt_tpgt:
2279 ret = match_int(args, &arg);
2280 if (ret)
2281 goto out;
2282 tpgt = (u16)arg;
2283 break;
2284 case Opt_port_rtpi:
2285 ret = match_int(args, &arg);
2286 if (ret)
2287 goto out;
2288 break;
2289 case Opt_target_lun:
2290 ret = match_u64(args, &tmp_ll);
2291 if (ret)
2292 goto out;
2293 target_lun = (u64)tmp_ll;
2294 break;
2295 default:
2296 break;
2297 }
2298 }
2299
2300 if (!i_port || !t_port || !sa_res_key) {
2301 pr_err("Illegal parameters for APTPL registration\n");
2302 ret = -EINVAL;
2303 goto out;
2304 }
2305
2306 if (res_holder && !(type)) {
2307 pr_err("Illegal PR type: 0x%02x for reservation"
2308 " holder\n", type);
2309 ret = -EINVAL;
2310 goto out;
2311 }
2312
2313 ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
2314 i_port, isid, mapped_lun, t_port, tpgt, target_lun,
2315 res_holder, all_tg_pt, type);
2316 out:
2317 kfree(i_fabric);
2318 kfree(i_port);
2319 kfree(isid);
2320 kfree(t_fabric);
2321 kfree(t_port);
2322 kfree(orig);
2323 return (ret == 0) ? count : ret;
2324 }
2325
2326
2327 CONFIGFS_ATTR_RO(target_pr_, res_holder);
2328 CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
2329 CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
2330 CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
2331 CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
2332 CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
2333 CONFIGFS_ATTR_RO(target_pr_, res_type);
2334 CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
2335 CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
2336
2337 static struct configfs_attribute *target_core_dev_pr_attrs[] = {
2338 &target_pr_attr_res_holder,
2339 &target_pr_attr_res_pr_all_tgt_pts,
2340 &target_pr_attr_res_pr_generation,
2341 &target_pr_attr_res_pr_holder_tg_port,
2342 &target_pr_attr_res_pr_registered_i_pts,
2343 &target_pr_attr_res_pr_type,
2344 &target_pr_attr_res_type,
2345 &target_pr_attr_res_aptpl_active,
2346 &target_pr_attr_res_aptpl_metadata,
2347 NULL,
2348 };
2349
2350 TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
2351
2352 /* End functions for struct config_item_type tb_dev_pr_cit */
2353
2354 /* Start functions for struct config_item_type tb_dev_cit */
2355
2356 static inline struct se_device *to_device(struct config_item *item)
2357 {
2358 return container_of(to_config_group(item), struct se_device, dev_group);
2359 }
2360
2361 static ssize_t target_dev_info_show(struct config_item *item, char *page)
2362 {
2363 struct se_device *dev = to_device(item);
2364 int bl = 0;
2365 ssize_t read_bytes = 0;
2366
2367 transport_dump_dev_state(dev, page, &bl);
2368 read_bytes += bl;
2369 read_bytes += dev->transport->show_configfs_dev_params(dev,
2370 page+read_bytes);
2371 return read_bytes;
2372 }
2373
2374 static ssize_t target_dev_control_store(struct config_item *item,
2375 const char *page, size_t count)
2376 {
2377 struct se_device *dev = to_device(item);
2378
2379 return dev->transport->set_configfs_dev_params(dev, page, count);
2380 }
2381
2382 static ssize_t target_dev_alias_show(struct config_item *item, char *page)
2383 {
2384 struct se_device *dev = to_device(item);
2385
2386 if (!(dev->dev_flags & DF_USING_ALIAS))
2387 return 0;
2388
2389 return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
2390 }
2391
2392 static ssize_t target_dev_alias_store(struct config_item *item,
2393 const char *page, size_t count)
2394 {
2395 struct se_device *dev = to_device(item);
2396 struct se_hba *hba = dev->se_hba;
2397 ssize_t read_bytes;
2398
2399 if (count > (SE_DEV_ALIAS_LEN-1)) {
2400 pr_err("alias count: %d exceeds"
2401 " SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
2402 SE_DEV_ALIAS_LEN-1);
2403 return -EINVAL;
2404 }
2405
2406 read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
2407 if (!read_bytes)
2408 return -EINVAL;
2409 if (dev->dev_alias[read_bytes - 1] == '\n')
2410 dev->dev_alias[read_bytes - 1] = '\0';
2411
2412 dev->dev_flags |= DF_USING_ALIAS;
2413
2414 pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
2415 config_item_name(&hba->hba_group.cg_item),
2416 config_item_name(&dev->dev_group.cg_item),
2417 dev->dev_alias);
2418
2419 return read_bytes;
2420 }
2421
2422 static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
2423 {
2424 struct se_device *dev = to_device(item);
2425
2426 if (!(dev->dev_flags & DF_USING_UDEV_PATH))
2427 return 0;
2428
2429 return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
2430 }
2431
2432 static ssize_t target_dev_udev_path_store(struct config_item *item,
2433 const char *page, size_t count)
2434 {
2435 struct se_device *dev = to_device(item);
2436 struct se_hba *hba = dev->se_hba;
2437 ssize_t read_bytes;
2438
2439 if (count > (SE_UDEV_PATH_LEN-1)) {
2440 pr_err("udev_path count: %d exceeds"
2441 " SE_UDEV_PATH_LEN-1: %u\n", (int)count,
2442 SE_UDEV_PATH_LEN-1);
2443 return -EINVAL;
2444 }
2445
2446 read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
2447 "%s", page);
2448 if (!read_bytes)
2449 return -EINVAL;
2450 if (dev->udev_path[read_bytes - 1] == '\n')
2451 dev->udev_path[read_bytes - 1] = '\0';
2452
2453 dev->dev_flags |= DF_USING_UDEV_PATH;
2454
2455 pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
2456 config_item_name(&hba->hba_group.cg_item),
2457 config_item_name(&dev->dev_group.cg_item),
2458 dev->udev_path);
2459
2460 return read_bytes;
2461 }
2462
2463 static ssize_t target_dev_enable_show(struct config_item *item, char *page)
2464 {
2465 struct se_device *dev = to_device(item);
2466
2467 return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
2468 }
2469
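/*
 * Writing "1" is the only accepted value and triggers backend device
 * configuration, e.g.: echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/enable
 */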
2470 static ssize_t target_dev_enable_store(struct config_item *item,
2471 const char *page, size_t count)
2472 {
2473 struct se_device *dev = to_device(item);
2474 char *ptr;
2475 int ret;
2476
2477 ptr = strstr(page, "1");
2478 if (!ptr) {
2479 pr_err("For dev_enable ops, only valid value"
2480 " is \"1\"\n");
2481 return -EINVAL;
2482 }
2483
2484 ret = target_configure_device(dev);
2485 if (ret)
2486 return ret;
2487 return count;
2488 }
2489
2490 static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
2491 {
2492 struct se_device *dev = to_device(item);
2493 struct config_item *lu_ci;
2494 struct t10_alua_lu_gp *lu_gp;
2495 struct t10_alua_lu_gp_member *lu_gp_mem;
2496 ssize_t len = 0;
2497
2498 lu_gp_mem = dev->dev_alua_lu_gp_mem;
2499 if (!lu_gp_mem)
2500 return 0;
2501
2502 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2503 lu_gp = lu_gp_mem->lu_gp;
2504 if (lu_gp) {
2505 lu_ci = &lu_gp->lu_gp_group.cg_item;
2506 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
2507 config_item_name(lu_ci), lu_gp->lu_gp_id);
2508 }
2509 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2510
2511 return len;
2512 }
2513
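/*
 * Illustrative usage (the group alias is hypothetical): writing an existing
 * ALUA LU group alias associates this device with core/alua/lu_gps/$ALIAS,
 * while writing the literal string "NULL" drops the current association:
 *
 *   echo some_lu_gp > $TARGET/$HBA/$STORAGE_OBJECT/alua_lu_gp
 *   echo NULL > $TARGET/$HBA/$STORAGE_OBJECT/alua_lu_gp
 */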
2514 static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
2515 const char *page, size_t count)
2516 {
2517 struct se_device *dev = to_device(item);
2518 struct se_hba *hba = dev->se_hba;
2519 struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
2520 struct t10_alua_lu_gp_member *lu_gp_mem;
2521 unsigned char buf[LU_GROUP_NAME_BUF] = { };
2522 int move = 0;
2523
2524 lu_gp_mem = dev->dev_alua_lu_gp_mem;
2525 if (!lu_gp_mem)
2526 return count;
2527
2528 if (count >= LU_GROUP_NAME_BUF) {
2529 pr_err("ALUA LU Group Alias too large!\n");
2530 return -EINVAL;
2531 }
2532 memcpy(buf, page, count);
2533 /*
2534 * Any ALUA logical unit alias besides "NULL" means we will be
2535 * making a new group association.
2536 */
2537 if (strcmp(strstrip(buf), "NULL")) {
2538 /*
2539 * core_alua_get_lu_gp_by_name() will increment reference to
2540 * struct t10_alua_lu_gp. This reference is released with
2541 * core_alua_put_lu_gp_from_name() below.
2542 */
2543 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
2544 if (!lu_gp_new)
2545 return -ENODEV;
2546 }
2547
2548 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2549 lu_gp = lu_gp_mem->lu_gp;
2550 if (lu_gp) {
2551 /*
2552 * Clearing an existing lu_gp association, and replacing
2553 * with NULL
2554 */
2555 if (!lu_gp_new) {
2556 pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
2557 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
2558 " %hu\n",
2559 config_item_name(&hba->hba_group.cg_item),
2560 config_item_name(&dev->dev_group.cg_item),
2561 config_item_name(&lu_gp->lu_gp_group.cg_item),
2562 lu_gp->lu_gp_id);
2563
2564 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2565 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2566
2567 return count;
2568 }
2569 /*
2570 * Removing existing association of lu_gp_mem with lu_gp
2571 */
2572 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2573 move = 1;
2574 }
2575 /*
2576 * Associate lu_gp_mem with lu_gp_new.
2577 */
2578 __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
2579 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2580
2581 pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
2582 " core/alua/lu_gps/%s, ID: %hu\n",
2583 (move) ? "Moving" : "Adding",
2584 config_item_name(&hba->hba_group.cg_item),
2585 config_item_name(&dev->dev_group.cg_item),
2586 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
2587 lu_gp_new->lu_gp_id);
2588
2589 core_alua_put_lu_gp_from_name(lu_gp_new);
2590 return count;
2591 }
2592
2593 static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
2594 {
2595 struct se_device *dev = to_device(item);
2596 struct t10_alua_lba_map *map;
2597 struct t10_alua_lba_map_member *mem;
2598 char *b = page;
2599 int bl = 0;
2600 char state;
2601
2602 spin_lock(&dev->t10_alua.lba_map_lock);
2603 if (!list_empty(&dev->t10_alua.lba_map_list))
2604 bl += sprintf(b + bl, "%u %u\n",
2605 dev->t10_alua.lba_map_segment_size,
2606 dev->t10_alua.lba_map_segment_multiplier);
2607 list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
2608 bl += sprintf(b + bl, "%llu %llu",
2609 map->lba_map_first_lba, map->lba_map_last_lba);
2610 list_for_each_entry(mem, &map->lba_map_mem_list,
2611 lba_map_mem_list) {
2612 switch (mem->lba_map_mem_alua_state) {
2613 case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
2614 state = 'O';
2615 break;
2616 case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
2617 state = 'A';
2618 break;
2619 case ALUA_ACCESS_STATE_STANDBY:
2620 state = 'S';
2621 break;
2622 case ALUA_ACCESS_STATE_UNAVAILABLE:
2623 state = 'U';
2624 break;
2625 default:
2626 state = '.';
2627 break;
2628 }
2629 bl += sprintf(b + bl, " %d:%c",
2630 mem->lba_map_mem_alua_pg_id, state);
2631 }
2632 bl += sprintf(b + bl, "\n");
2633 }
2634 spin_unlock(&dev->t10_alua.lba_map_lock);
2635 return bl;
2636 }
2637
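/*
 * The store below expects one header line followed by one line per LBA
 * segment, mirroring the format emitted by the show routine above. A
 * hypothetical two-segment map for two port groups might look like:
 *
 *   64 1
 *   0 1023 0:O 1:A
 *   1024 2047 0:A 1:O
 *
 * i.e. "<segment_size> <segment_multiplier>" followed by
 * "<first_lba> <last_lba> <pg_id>:<state> ..." with the states
 * O/A/S/U decoded exactly as in the parser below.
 */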
2638 static ssize_t target_dev_lba_map_store(struct config_item *item,
2639 const char *page, size_t count)
2640 {
2641 struct se_device *dev = to_device(item);
2642 struct t10_alua_lba_map *lba_map = NULL;
2643 struct list_head lba_list;
2644 char *map_entries, *orig, *ptr;
2645 char state;
2646 int pg_num = -1, pg;
2647 int ret = 0, num = 0, pg_id, alua_state;
2648 unsigned long start_lba = -1, end_lba = -1;
2649 unsigned long segment_size = -1, segment_mult = -1;
2650
2651 orig = map_entries = kstrdup(page, GFP_KERNEL);
2652 if (!map_entries)
2653 return -ENOMEM;
2654
2655 INIT_LIST_HEAD(&lba_list);
2656 while ((ptr = strsep(&map_entries, "\n")) != NULL) {
2657 if (!*ptr)
2658 continue;
2659
2660 if (num == 0) {
2661 if (sscanf(ptr, "%lu %lu\n",
2662 &segment_size, &segment_mult) != 2) {
2663 pr_err("Invalid line %d\n", num);
2664 ret = -EINVAL;
2665 break;
2666 }
2667 num++;
2668 continue;
2669 }
2670 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
2671 pr_err("Invalid line %d\n", num);
2672 ret = -EINVAL;
2673 break;
2674 }
2675 ptr = strchr(ptr, ' ');
2676 if (!ptr) {
2677 pr_err("Invalid line %d, missing end lba\n", num);
2678 ret = -EINVAL;
2679 break;
2680 }
2681 ptr++;
2682 ptr = strchr(ptr, ' ');
2683 if (!ptr) {
2684 pr_err("Invalid line %d, missing state definitions\n",
2685 num);
2686 ret = -EINVAL;
2687 break;
2688 }
2689 ptr++;
2690 lba_map = core_alua_allocate_lba_map(&lba_list,
2691 start_lba, end_lba);
2692 if (IS_ERR(lba_map)) {
2693 ret = PTR_ERR(lba_map);
2694 break;
2695 }
2696 pg = 0;
2697 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
2698 switch (state) {
2699 case 'O':
2700 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
2701 break;
2702 case 'A':
2703 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
2704 break;
2705 case 'S':
2706 alua_state = ALUA_ACCESS_STATE_STANDBY;
2707 break;
2708 case 'U':
2709 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
2710 break;
2711 default:
2712 pr_err("Invalid ALUA state '%c'\n", state);
2713 ret = -EINVAL;
2714 goto out;
2715 }
2716
2717 ret = core_alua_allocate_lba_map_mem(lba_map,
2718 pg_id, alua_state);
2719 if (ret) {
2720 pr_err("Invalid target descriptor %d:%c "
2721 "at line %d\n",
2722 pg_id, state, num);
2723 break;
2724 }
2725 pg++;
2726 ptr = strchr(ptr, ' ');
2727 if (ptr)
2728 ptr++;
2729 else
2730 break;
2731 }
2732 if (pg_num == -1)
2733 pg_num = pg;
2734 else if (pg != pg_num) {
2735 pr_err("Only %d from %d port groups definitions "
2736 "at line %d\n", pg, pg_num, num);
2737 ret = -EINVAL;
2738 break;
2739 }
2740 num++;
2741 }
2742 out:
2743 if (ret) {
2744 core_alua_free_lba_map(&lba_list);
2745 count = ret;
2746 } else
2747 core_alua_set_lba_map(dev, &lba_list,
2748 segment_size, segment_mult);
2749 kfree(orig);
2750 return count;
2751 }
2752
2753 CONFIGFS_ATTR_RO(target_dev_, info);
2754 CONFIGFS_ATTR_WO(target_dev_, control);
2755 CONFIGFS_ATTR(target_dev_, alias);
2756 CONFIGFS_ATTR(target_dev_, udev_path);
2757 CONFIGFS_ATTR(target_dev_, enable);
2758 CONFIGFS_ATTR(target_dev_, alua_lu_gp);
2759 CONFIGFS_ATTR(target_dev_, lba_map);
2760
2761 static struct configfs_attribute *target_core_dev_attrs[] = {
2762 &target_dev_attr_info,
2763 &target_dev_attr_control,
2764 &target_dev_attr_alias,
2765 &target_dev_attr_udev_path,
2766 &target_dev_attr_enable,
2767 &target_dev_attr_alua_lu_gp,
2768 &target_dev_attr_lba_map,
2769 NULL,
2770 };
2771
2772 static void target_core_dev_release(struct config_item *item)
2773 {
2774 struct config_group *dev_cg = to_config_group(item);
2775 struct se_device *dev =
2776 container_of(dev_cg, struct se_device, dev_group);
2777
2778 target_free_device(dev);
2779 }
2780
2781 /*
2782 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
2783 * within target_fabric_port_link()
2784 */
2785 struct configfs_item_operations target_core_dev_item_ops = {
2786 .release = target_core_dev_release,
2787 };
2788
2789 TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
2790
2791 /* End functions for struct config_item_type tb_dev_cit */
2792
2793 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2794
2795 static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
2796 {
2797 return container_of(to_config_group(item), struct t10_alua_lu_gp,
2798 lu_gp_group);
2799 }
2800
2801 static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
2802 {
2803 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2804
2805 if (!lu_gp->lu_gp_valid_id)
2806 return 0;
2807 return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2808 }
2809
2810 static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
2811 const char *page, size_t count)
2812 {
2813 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2814 struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
2815 unsigned long lu_gp_id;
2816 int ret;
2817
2818 ret = kstrtoul(page, 0, &lu_gp_id);
2819 if (ret < 0) {
2820 pr_err("kstrtoul() returned %d for"
2821 " lu_gp_id\n", ret);
2822 return ret;
2823 }
2824 if (lu_gp_id > 0x0000ffff) {
2825 pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
2826 " 0x0000ffff\n", lu_gp_id);
2827 return -EINVAL;
2828 }
2829
2830 ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
2831 if (ret < 0)
2832 return -EINVAL;
2833
2834 pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
2835 " Group: core/alua/lu_gps/%s to ID: %hu\n",
2836 config_item_name(&alua_lu_gp_cg->cg_item),
2837 lu_gp->lu_gp_id);
2838
2839 return count;
2840 }
2841
2842 static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
2843 {
2844 struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2845 struct t10_alua_lu_gp_member *lu_gp_mem;
2846 const char *const end = page + PAGE_SIZE;
2847 char *cur = page;
2848
2849 spin_lock(&lu_gp->lu_gp_lock);
2850 list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
2851 struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
2852 struct se_hba *hba = dev->se_hba;
2853
2854 cur += scnprintf(cur, end - cur, "%s/%s\n",
2855 config_item_name(&hba->hba_group.cg_item),
2856 config_item_name(&dev->dev_group.cg_item));
2857 if (WARN_ON_ONCE(cur >= end))
2858 break;
2859 }
2860 spin_unlock(&lu_gp->lu_gp_lock);
2861
2862 return cur - page;
2863 }
2864
2865 CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
2866 CONFIGFS_ATTR_RO(target_lu_gp_, members);
2867
2868 static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2869 &target_lu_gp_attr_lu_gp_id,
2870 &target_lu_gp_attr_members,
2871 NULL,
2872 };
2873
2874 static void target_core_alua_lu_gp_release(struct config_item *item)
2875 {
2876 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2877 struct t10_alua_lu_gp, lu_gp_group);
2878
2879 core_alua_free_lu_gp(lu_gp);
2880 }
2881
2882 static const struct configfs_item_operations target_core_alua_lu_gp_ops = {
2883 .release = target_core_alua_lu_gp_release,
2884 };
2885
2886 static const struct config_item_type target_core_alua_lu_gp_cit = {
2887 .ct_item_ops = &target_core_alua_lu_gp_ops,
2888 .ct_attrs = target_core_alua_lu_gp_attrs,
2889 .ct_owner = THIS_MODULE,
2890 };
2891
2892 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2893
2894 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2895
2896 static struct config_group *target_core_alua_create_lu_gp(
2897 struct config_group *group,
2898 const char *name)
2899 {
2900 struct t10_alua_lu_gp *lu_gp;
2901 struct config_group *alua_lu_gp_cg = NULL;
2902 struct config_item *alua_lu_gp_ci = NULL;
2903
2904 lu_gp = core_alua_allocate_lu_gp(name, 0);
2905 if (IS_ERR(lu_gp))
2906 return NULL;
2907
2908 alua_lu_gp_cg = &lu_gp->lu_gp_group;
2909 alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2910
2911 config_group_init_type_name(alua_lu_gp_cg, name,
2912 &target_core_alua_lu_gp_cit);
2913
2914 pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2915 " Group: core/alua/lu_gps/%s\n",
2916 config_item_name(alua_lu_gp_ci));
2917
2918 return alua_lu_gp_cg;
2919
2920 }
2921
2922 static void target_core_alua_drop_lu_gp(
2923 struct config_group *group,
2924 struct config_item *item)
2925 {
2926 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2927 struct t10_alua_lu_gp, lu_gp_group);
2928
2929 pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2930 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2931 config_item_name(item), lu_gp->lu_gp_id);
2932 /*
2933 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2934 * -> target_core_alua_lu_gp_release()
2935 */
2936 config_item_put(item);
2937 }
2938
2939 static const struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
2940 .make_group = &target_core_alua_create_lu_gp,
2941 .drop_item = &target_core_alua_drop_lu_gp,
2942 };
2943
2944 static const struct config_item_type target_core_alua_lu_gps_cit = {
2945 .ct_item_ops = NULL,
2946 .ct_group_ops = &target_core_alua_lu_gps_group_ops,
2947 .ct_owner = THIS_MODULE,
2948 };
2949
2950 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2951
2952 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2953
2954 static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
2955 {
2956 return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
2957 tg_pt_gp_group);
2958 }
2959
2960 static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2961 char *page)
2962 {
2963 return sprintf(page, "%d\n",
2964 to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2965 }
2966
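/*
 * The state is written as the numeric asymmetric access state code used by
 * the ALUA_ACCESS_STATE_* constants (per SPC: 0x0 active/optimized,
 * 0x1 active/non-optimized, 0x2 standby, 0x3 unavailable, 0x4 LBA
 * dependent). Example, assuming implicit ALUA is enabled on the group:
 *
 *   echo 2 > $TARGET/$HBA/$STORAGE_OBJECT/alua/$TG_PT_GP/alua_access_state
 */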
2967 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
2968 const char *page, size_t count)
2969 {
2970 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2971 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
2972 unsigned long tmp;
2973 int new_state, ret;
2974
2975 if (!tg_pt_gp->tg_pt_gp_valid_id) {
2976 pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
2977 return -EINVAL;
2978 }
2979 if (!target_dev_configured(dev)) {
2980 pr_err("Unable to set alua_access_state while device is"
2981 " not configured\n");
2982 return -ENODEV;
2983 }
2984
2985 ret = kstrtoul(page, 0, &tmp);
2986 if (ret < 0) {
2987 pr_err("Unable to extract new ALUA access state from"
2988 " %s\n", page);
2989 return ret;
2990 }
2991 new_state = (int)tmp;
2992
2993 if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
2994 pr_err("Unable to process implicit configfs ALUA"
2995 " transition while TPGS_IMPLICIT_ALUA is disabled\n");
2996 return -EINVAL;
2997 }
2998 if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
2999 new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
3000 /* LBA DEPENDENT is only allowed with implicit ALUA */
3001 pr_err("Unable to process implicit configfs ALUA transition"
3002 " while explicit ALUA management is enabled\n");
3003 return -EINVAL;
3004 }
3005
3006 ret = core_alua_do_port_transition(tg_pt_gp, dev,
3007 NULL, NULL, new_state, 0);
3008 return (!ret) ? count : -EINVAL;
3009 }
3010
3011 static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
3012 char *page)
3013 {
3014 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3015 return sprintf(page, "%s\n",
3016 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
3017 }
3018
3019 static ssize_t target_tg_pt_gp_alua_access_status_store(
3020 struct config_item *item, const char *page, size_t count)
3021 {
3022 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3023 unsigned long tmp;
3024 int new_status, ret;
3025
3026 if (!tg_pt_gp->tg_pt_gp_valid_id) {
3027 pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
3028 return -EINVAL;
3029 }
3030
3031 ret = kstrtoul(page, 0, &tmp);
3032 if (ret < 0) {
3033 pr_err("Unable to extract new ALUA access status"
3034 " from %s\n", page);
3035 return ret;
3036 }
3037 new_status = (int)tmp;
3038
3039 if ((new_status != ALUA_STATUS_NONE) &&
3040 (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
3041 (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
3042 pr_err("Illegal ALUA access status: 0x%02x\n",
3043 new_status);
3044 return -EINVAL;
3045 }
3046
3047 tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
3048 return count;
3049 }
3050
3051 static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
3052 char *page)
3053 {
3054 return core_alua_show_access_type(to_tg_pt_gp(item), page);
3055 }
3056
3057 static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
3058 const char *page, size_t count)
3059 {
3060 return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
3061 }
3062
3063 #define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \
3064 static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \
3065 struct config_item *item, char *p) \
3066 { \
3067 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
3068 return sprintf(p, "%d\n", \
3069 !!(t->tg_pt_gp_alua_supported_states & _bit)); \
3070 } \
3071 \
3072 static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
3073 struct config_item *item, const char *p, size_t c) \
3074 { \
3075 struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
3076 unsigned long tmp; \
3077 int ret; \
3078 \
3079 if (!t->tg_pt_gp_valid_id) { \
3080 pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
3081 return -EINVAL; \
3082 } \
3083 \
3084 ret = kstrtoul(p, 0, &tmp); \
3085 if (ret < 0) { \
3086 pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
3087 return -EINVAL; \
3088 } \
3089 if (tmp > 1) { \
3090 pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
3091 return -EINVAL; \
3092 } \
3093 if (tmp) \
3094 t->tg_pt_gp_alua_supported_states |= _bit; \
3095 else \
3096 t->tg_pt_gp_alua_supported_states &= ~_bit; \
3097 \
3098 return c; \
3099 }
3100
3101 ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
3102 ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
3103 ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
3104 ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
3105 ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
3106 ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
3107 ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
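
/*
 * Each invocation above generates an alua_support_<state> attribute pair
 * that accepts only "0" or "1", e.g. (illustrative):
 *
 *   echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/alua/$TG_PT_GP/alua_support_standby
 */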
3108
3109 static ssize_t target_tg_pt_gp_alua_write_metadata_show(
3110 struct config_item *item, char *page)
3111 {
3112 return sprintf(page, "%d\n",
3113 to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
3114 }
3115
3116 static ssize_t target_tg_pt_gp_alua_write_metadata_store(
3117 struct config_item *item, const char *page, size_t count)
3118 {
3119 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3120 unsigned long tmp;
3121 int ret;
3122
3123 ret = kstrtoul(page, 0, &tmp);
3124 if (ret < 0) {
3125 pr_err("Unable to extract alua_write_metadata\n");
3126 return ret;
3127 }
3128
3129 if ((tmp != 0) && (tmp != 1)) {
3130 pr_err("Illegal value for alua_write_metadata:"
3131 " %lu\n", tmp);
3132 return -EINVAL;
3133 }
3134 tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
3135
3136 return count;
3137 }
3138
3139 static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
3140 char *page)
3141 {
3142 return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
3143 }
3144
3145 static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
3146 const char *page, size_t count)
3147 {
3148 return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
3149 count);
3150 }
3151
3152 static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
3153 char *page)
3154 {
3155 return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
3156 }
3157
3158 static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
3159 const char *page, size_t count)
3160 {
3161 return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
3162 count);
3163 }
3164
3165 static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
3166 struct config_item *item, char *page)
3167 {
3168 return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
3169 }
3170
3171 static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
3172 struct config_item *item, const char *page, size_t count)
3173 {
3174 return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
3175 count);
3176 }
3177
3178 static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
3179 char *page)
3180 {
3181 return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
3182 }
3183
3184 static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
3185 const char *page, size_t count)
3186 {
3187 return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
3188 }
3189
3190 static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
3191 char *page)
3192 {
3193 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3194
3195 if (!tg_pt_gp->tg_pt_gp_valid_id)
3196 return 0;
3197 return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
3198 }
3199
3200 static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
3201 const char *page, size_t count)
3202 {
3203 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3204 struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
3205 unsigned long tg_pt_gp_id;
3206 int ret;
3207
3208 ret = kstrtoul(page, 0, &tg_pt_gp_id);
3209 if (ret < 0) {
3210 pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
3211 page);
3212 return ret;
3213 }
3214 if (tg_pt_gp_id > 0x0000ffff) {
3215 pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
3216 tg_pt_gp_id);
3217 return -EINVAL;
3218 }
3219
3220 ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
3221 if (ret < 0)
3222 return -EINVAL;
3223
3224 pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
3225 "core/alua/tg_pt_gps/%s to ID: %hu\n",
3226 config_item_name(&alua_tg_pt_gp_cg->cg_item),
3227 tg_pt_gp->tg_pt_gp_id);
3228
3229 return count;
3230 }
3231
3232 static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
3233 char *page)
3234 {
3235 struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
3236 struct se_lun *lun;
3237 ssize_t len = 0, cur_len;
3238 unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
3239
3240 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
3241 list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
3242 lun_tg_pt_gp_link) {
3243 struct se_portal_group *tpg = lun->lun_tpg;
3244
3245 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
3246 "/%s\n", tpg->se_tpg_tfo->fabric_name,
3247 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
3248 tpg->se_tpg_tfo->tpg_get_tag(tpg),
3249 config_item_name(&lun->lun_group.cg_item));
3250 cur_len++; /* Extra byte for NULL terminator */
3251
3252 if ((cur_len + len) > PAGE_SIZE) {
3253 pr_warn("Ran out of tg_pt_gp_members"
3254 " buffer\n");
3255 break;
3256 }
3257 memcpy(page+len, buf, cur_len);
3258 len += cur_len;
3259 }
3260 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
3261
3262 return len;
3263 }
3264
3265 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
3266 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
3267 CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
3268 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
3269 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
3270 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
3271 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
3272 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
3273 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
3274 CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
3275 CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
3276 CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
3277 CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
3278 CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
3279 CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
3280 CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
3281 CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
3282
3283 static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
3284 &target_tg_pt_gp_attr_alua_access_state,
3285 &target_tg_pt_gp_attr_alua_access_status,
3286 &target_tg_pt_gp_attr_alua_access_type,
3287 &target_tg_pt_gp_attr_alua_support_transitioning,
3288 &target_tg_pt_gp_attr_alua_support_offline,
3289 &target_tg_pt_gp_attr_alua_support_lba_dependent,
3290 &target_tg_pt_gp_attr_alua_support_unavailable,
3291 &target_tg_pt_gp_attr_alua_support_standby,
3292 &target_tg_pt_gp_attr_alua_support_active_nonoptimized,
3293 &target_tg_pt_gp_attr_alua_support_active_optimized,
3294 &target_tg_pt_gp_attr_alua_write_metadata,
3295 &target_tg_pt_gp_attr_nonop_delay_msecs,
3296 &target_tg_pt_gp_attr_trans_delay_msecs,
3297 &target_tg_pt_gp_attr_implicit_trans_secs,
3298 &target_tg_pt_gp_attr_preferred,
3299 &target_tg_pt_gp_attr_tg_pt_gp_id,
3300 &target_tg_pt_gp_attr_members,
3301 NULL,
3302 };
3303
3304 static void target_core_alua_tg_pt_gp_release(struct config_item *item)
3305 {
3306 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
3307 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
3308
3309 core_alua_free_tg_pt_gp(tg_pt_gp);
3310 }
3311
3312 static const struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
3313 .release = target_core_alua_tg_pt_gp_release,
3314 };
3315
3316 static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
3317 .ct_item_ops = &target_core_alua_tg_pt_gp_ops,
3318 .ct_attrs = target_core_alua_tg_pt_gp_attrs,
3319 .ct_owner = THIS_MODULE,
3320 };
3321
3322 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
3323
3324 /* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
3325
3326 static struct config_group *target_core_alua_create_tg_pt_gp(
3327 struct config_group *group,
3328 const char *name)
3329 {
3330 struct t10_alua *alua = container_of(group, struct t10_alua,
3331 alua_tg_pt_gps_group);
3332 struct t10_alua_tg_pt_gp *tg_pt_gp;
3333 struct config_group *alua_tg_pt_gp_cg = NULL;
3334 struct config_item *alua_tg_pt_gp_ci = NULL;
3335
3336 tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
3337 if (!tg_pt_gp)
3338 return NULL;
3339
3340 alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
3341 alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
3342
3343 config_group_init_type_name(alua_tg_pt_gp_cg, name,
3344 &target_core_alua_tg_pt_gp_cit);
3345
3346 pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
3347 " Group: alua/tg_pt_gps/%s\n",
3348 config_item_name(alua_tg_pt_gp_ci));
3349
3350 return alua_tg_pt_gp_cg;
3351 }
3352
3353 static void target_core_alua_drop_tg_pt_gp(
3354 struct config_group *group,
3355 struct config_item *item)
3356 {
3357 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
3358 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
3359
3360 pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
3361 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
3362 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
3363 /*
3364 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
3365 * -> target_core_alua_tg_pt_gp_release().
3366 */
3367 config_item_put(item);
3368 }
3369
3370 static const struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
3371 .make_group = &target_core_alua_create_tg_pt_gp,
3372 .drop_item = &target_core_alua_drop_tg_pt_gp,
3373 };
3374
3375 TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
3376
3377 /* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
3378
3379 /* Start functions for struct config_item_type target_core_alua_cit */
3380
3381 /*
3382 * target_core_alua_cit is a ConfigFS group that lives under
3383 * /sys/kernel/config/target/core/alua. There are default groups
3384 * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
3385 * target_core_alua_cit in target_core_init_configfs() below.
3386 */
3387 static const struct config_item_type target_core_alua_cit = {
3388 .ct_item_ops = NULL,
3389 .ct_attrs = NULL,
3390 .ct_owner = THIS_MODULE,
3391 };
3392
3393 /* End functions for struct config_item_type target_core_alua_cit */
3394
3395 /* Start functions for struct config_item_type tb_dev_stat_cit */
3396
3397 static struct config_group *target_core_stat_mkdir(
3398 struct config_group *group,
3399 const char *name)
3400 {
3401 return ERR_PTR(-ENOSYS);
3402 }
3403
3404 static void target_core_stat_rmdir(
3405 struct config_group *group,
3406 struct config_item *item)
3407 {
3408 return;
3409 }
3410
3411 static const struct configfs_group_operations target_core_stat_group_ops = {
3412 .make_group = &target_core_stat_mkdir,
3413 .drop_item = &target_core_stat_rmdir,
3414 };
3415
3416 TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
3417
3418 /* End functions for struct config_item_type tb_dev_stat_cit */
3419
3420 /* Start functions for struct config_item_type target_core_hba_cit */
3421
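/*
 * Creating core/$HBA/$DEV instantiates the default groups wired up below
 * (a sketch of the resulting layout, derived from the calls that follow):
 *
 *   $DEV/action  $DEV/attrib  $DEV/pr  $DEV/wwn
 *   $DEV/alua/default_tg_pt_gp  $DEV/statistics
 */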
3422 static struct config_group *target_core_make_subdev(
3423 struct config_group *group,
3424 const char *name)
3425 {
3426 struct t10_alua_tg_pt_gp *tg_pt_gp;
3427 struct config_item *hba_ci = &group->cg_item;
3428 struct se_hba *hba = item_to_hba(hba_ci);
3429 struct target_backend *tb = hba->backend;
3430 struct se_device *dev;
3431 int errno = -ENOMEM, ret;
3432
3433 ret = mutex_lock_interruptible(&hba->hba_access_mutex);
3434 if (ret)
3435 return ERR_PTR(ret);
3436
3437 dev = target_alloc_device(hba, name);
3438 if (!dev)
3439 goto out_unlock;
3440
3441 config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
3442
3443 config_group_init_type_name(&dev->dev_action_group, "action",
3444 &tb->tb_dev_action_cit);
3445 configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
3446
3447 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
3448 &tb->tb_dev_attrib_cit);
3449 configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
3450
3451 config_group_init_type_name(&dev->dev_pr_group, "pr",
3452 &tb->tb_dev_pr_cit);
3453 configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
3454
3455 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
3456 &tb->tb_dev_wwn_cit);
3457 configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
3458 &dev->dev_group);
3459
3460 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
3461 "alua", &tb->tb_dev_alua_tg_pt_gps_cit);
3462 configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
3463 &dev->dev_group);
3464
3465 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
3466 "statistics", &tb->tb_dev_stat_cit);
3467 configfs_add_default_group(&dev->dev_stat_grps.stat_group,
3468 &dev->dev_group);
3469
3470 /*
3471 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
3472 */
3473 tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
3474 if (!tg_pt_gp)
3475 goto out_free_device;
3476 dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
3477
3478 config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
3479 "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
3480 configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
3481 &dev->t10_alua.alua_tg_pt_gps_group);
3482
3483 /*
3484 * Add core/$HBA/$DEV/statistics/ default groups
3485 */
3486 target_stat_setup_dev_default_groups(dev);
3487
3488 mutex_lock(&target_devices_lock);
3489 target_devices++;
3490 mutex_unlock(&target_devices_lock);
3491
3492 mutex_unlock(&hba->hba_access_mutex);
3493 return &dev->dev_group;
3494
3495 out_free_device:
3496 target_free_device(dev);
3497 out_unlock:
3498 mutex_unlock(&hba->hba_access_mutex);
3499 return ERR_PTR(errno);
3500 }
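/*
 * Illustrative usage (not part of the original source): creating a device
 * directory under an HBA invokes the callback above, e.g. (backend and
 * device names are examples only):
 *
 *   # mkdir /sys/kernel/config/target/core/iblock_0/my_dev
 *
 * The new directory then contains the default groups registered above
 * (action, alua, attrib, pr, statistics, wwn) plus the backend's attribute
 * files, with alua/default_tg_pt_gp created as part of the same mkdir.
 */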
3501
3502 static void target_core_drop_subdev(
3503 struct config_group *group,
3504 struct config_item *item)
3505 {
3506 struct config_group *dev_cg = to_config_group(item);
3507 struct se_device *dev =
3508 container_of(dev_cg, struct se_device, dev_group);
3509 struct se_hba *hba;
3510
3511 hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
3512
3513 mutex_lock(&hba->hba_access_mutex);
3514
3515 configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
3516 configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
3517
3518 /*
3519 * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
3520 * directly from target_core_alua_tg_pt_gp_release().
3521 */
3522 dev->t10_alua.default_tg_pt_gp = NULL;
3523
3524 configfs_remove_default_groups(dev_cg);
3525
3526 /*
3527 * se_dev is released from target_core_dev_item_ops->release()
3528 */
3529 config_item_put(item);
3530
3531 mutex_lock(&target_devices_lock);
3532 target_devices--;
3533 mutex_unlock(&target_devices_lock);
3534
3535 mutex_unlock(&hba->hba_access_mutex);
3536 }
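/*
 * Illustrative note (not part of the original source): removal mirrors
 * creation in reverse; the per-device default groups are dropped first,
 * then the final config_item_put() lets target_core_dev_item_ops->release()
 * free the se_device once its last reference goes away, e.g.:
 *
 *   # rmdir /sys/kernel/config/target/core/iblock_0/my_dev
 */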
3537
3538 static const struct configfs_group_operations target_core_hba_group_ops = {
3539 .make_group = target_core_make_subdev,
3540 .drop_item = target_core_drop_subdev,
3541 };
3542
3543
3544 static inline struct se_hba *to_hba(struct config_item *item)
3545 {
3546 return container_of(to_config_group(item), struct se_hba, hba_group);
3547 }
3548
3549 static ssize_t target_hba_info_show(struct config_item *item, char *page)
3550 {
3551 struct se_hba *hba = to_hba(item);
3552
3553 return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
3554 hba->hba_id, hba->backend->ops->name,
3555 TARGET_CORE_VERSION);
3556 }
3557
3558 static ssize_t target_hba_mode_show(struct config_item *item, char *page)
3559 {
3560 struct se_hba *hba = to_hba(item);
3561 int hba_mode = 0;
3562
3563 if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
3564 hba_mode = 1;
3565
3566 return sprintf(page, "%d\n", hba_mode);
3567 }
3568
3569 static ssize_t target_hba_mode_store(struct config_item *item,
3570 const char *page, size_t count)
3571 {
3572 struct se_hba *hba = to_hba(item);
3573 unsigned long mode_flag;
3574 int ret;
3575
3576 if (hba->backend->ops->pmode_enable_hba == NULL)
3577 return -EINVAL;
3578
3579 ret = kstrtoul(page, 0, &mode_flag);
3580 if (ret < 0) {
3581 pr_err("Unable to extract hba mode flag: %d\n", ret);
3582 return ret;
3583 }
3584
3585 if (hba->dev_count) {
3586 pr_err("Unable to set hba_mode with active devices\n");
3587 return -EINVAL;
3588 }
3589
3590 ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
3591 if (ret < 0)
3592 return -EINVAL;
3593 if (ret > 0)
3594 hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
3595 else if (ret == 0)
3596 hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
3597
3598 return count;
3599 }
3600
3601 CONFIGFS_ATTR_RO(target_, hba_info);
3602 CONFIGFS_ATTR(target_, hba_mode);
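/*
 * Illustrative usage (not part of the original source): the attributes
 * above appear as files in each HBA directory; hba_mode only accepts a
 * write when the backend provides ->pmode_enable_hba() and the HBA has no
 * active devices, e.g. (HBA name is an example only):
 *
 *   # cat /sys/kernel/config/target/core/pscsi_0/hba_info
 *   # echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode
 */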
3603
3604 static void target_core_hba_release(struct config_item *item)
3605 {
3606 struct se_hba *hba = container_of(to_config_group(item),
3607 struct se_hba, hba_group);
3608 core_delete_hba(hba);
3609 }
3610
3611 static struct configfs_attribute *target_core_hba_attrs[] = {
3612 &target_attr_hba_info,
3613 &target_attr_hba_mode,
3614 NULL,
3615 };
3616
3617 static const struct configfs_item_operations target_core_hba_item_ops = {
3618 .release = target_core_hba_release,
3619 };
3620
3621 static const struct config_item_type target_core_hba_cit = {
3622 .ct_item_ops = &target_core_hba_item_ops,
3623 .ct_group_ops = &target_core_hba_group_ops,
3624 .ct_attrs = target_core_hba_attrs,
3625 .ct_owner = THIS_MODULE,
3626 };
3627
3628 static struct config_group *target_core_call_addhbatotarget(
3629 struct config_group *group,
3630 const char *name)
3631 {
3632 char *se_plugin_str, *str, *str2;
3633 struct se_hba *hba;
3634 char buf[TARGET_CORE_NAME_MAX_LEN] = { };
3635 unsigned long plugin_dep_id = 0;
3636 int ret;
3637
3638 if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3639 pr_err("Passed *name strlen(): %d exceeds"
3640 " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3641 TARGET_CORE_NAME_MAX_LEN);
3642 return ERR_PTR(-ENAMETOOLONG);
3643 }
3644 snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3645
3646 str = strstr(buf, "_");
3647 if (!str) {
3648 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3649 return ERR_PTR(-EINVAL);
3650 }
3651 se_plugin_str = buf;
3652 /*
3653 * Special case for subsystem plugins that have "_" in their names.
3654 * Namely rd_direct and rd_mcp.
3655 */
3656 str2 = strstr(str+1, "_");
3657 if (str2) {
3658 *str2 = '\0'; /* Terminate for *se_plugin_str */
3659 str2++; /* Skip to start of plugin dependent ID */
3660 str = str2;
3661 } else {
3662 *str = '\0'; /* Terminate for *se_plugin_str */
3663 str++; /* Skip to start of plugin dependent ID */
3664 }
3665
3666 ret = kstrtoul(str, 0, &plugin_dep_id);
3667 if (ret < 0) {
3668 pr_err("kstrtoul() returned %d for"
3669 " plugin_dep_id\n", ret);
3670 return ERR_PTR(ret);
3671 }
3672 /*
3673 * Load up TCM subsystem plugins if they have not already been loaded.
3674 */
3675 transport_subsystem_check_init();
3676
3677 hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3678 if (IS_ERR(hba))
3679 return ERR_CAST(hba);
3680
3681 config_group_init_type_name(&hba->hba_group, name,
3682 &target_core_hba_cit);
3683
3684 return &hba->hba_group;
3685 }
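/*
 * Worked example (illustrative, not part of the original source) of the
 * $SUBSYSTEM_PLUGIN_$HOST_ID parsing above:
 *
 *   mkdir core/iblock_0  -> se_plugin_str = "iblock", plugin_dep_id = 0
 *   mkdir core/rd_mcp_3  -> se_plugin_str = "rd_mcp", plugin_dep_id = 3
 *
 * i.e. when the plugin name itself contains an underscore, only the text
 * after the second '_' is treated as the numeric ID.
 */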
3686
3687 static void target_core_call_delhbafromtarget(
3688 struct config_group *group,
3689 struct config_item *item)
3690 {
3691 /*
3692 * core_delete_hba() is called from target_core_hba_item_ops->release()
3693 * -> target_core_hba_release()
3694 */
3695 config_item_put(item);
3696 }
3697
3698 static const struct configfs_group_operations target_core_group_ops = {
3699 .make_group = target_core_call_addhbatotarget,
3700 .drop_item = target_core_call_delhbafromtarget,
3701 };
3702
3703 static const struct config_item_type target_core_cit = {
3704 .ct_item_ops = NULL,
3705 .ct_group_ops = &target_core_group_ops,
3706 .ct_attrs = NULL,
3707 .ct_owner = THIS_MODULE,
3708 };
3709
3710 /* End functions for struct config_item_type target_core_hba_cit */
3711
3712 void target_setup_backend_cits(struct target_backend *tb)
3713 {
3714 target_core_setup_dev_cit(tb);
3715 target_core_setup_dev_action_cit(tb);
3716 target_core_setup_dev_attrib_cit(tb);
3717 target_core_setup_dev_pr_cit(tb);
3718 target_core_setup_dev_wwn_cit(tb);
3719 target_core_setup_dev_alua_tg_pt_gps_cit(tb);
3720 target_core_setup_dev_stat_cit(tb);
3721 }
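/*
 * Note (illustrative, not part of the original source): each helper above
 * wires one config_item_type used for a sub-directory of a device created
 * under core/$HBA/$DEV/, roughly:
 *
 *   dev_cit                 -> the $DEV directory itself
 *   dev_action_cit          -> $DEV/action
 *   dev_attrib_cit          -> $DEV/attrib
 *   dev_pr_cit              -> $DEV/pr
 *   dev_wwn_cit             -> $DEV/wwn
 *   dev_alua_tg_pt_gps_cit  -> $DEV/alua
 *   dev_stat_cit            -> $DEV/statistics
 */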
3722
3723 static void target_init_dbroot(void)
3724 {
3725 struct file *fp;
3726
3727 snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
3728 fp = filp_open(db_root_stage, O_RDONLY, 0);
3729 if (IS_ERR(fp)) {
3730 pr_err("db_root: cannot open: %s\n", db_root_stage);
3731 return;
3732 }
3733 if (!S_ISDIR(file_inode(fp)->i_mode)) {
3734 filp_close(fp, NULL);
3735 pr_err("db_root: not a valid directory: %s\n", db_root_stage);
3736 return;
3737 }
3738 filp_close(fp, NULL);
3739
3740 strscpy(db_root, db_root_stage);
3741 pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
3742 }
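/*
 * Illustrative note (not part of the original source): at module load the
 * helper above only switches db_root to DB_ROOT_PREFERRED when that path
 * already exists and is a directory; otherwise the DB_ROOT_DEFAULT value
 * is kept. The active value can be read back through the top-level
 * attribute, e.g.:
 *
 *   # cat /sys/kernel/config/target/dbroot
 */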
3743
3744 static int __init target_core_init_configfs(void)
3745 {
3746 struct configfs_subsystem *subsys = &target_core_fabrics;
3747 struct t10_alua_lu_gp *lu_gp;
3748 int ret;
3749
3750 pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
3751 " Engine: %s on %s/%s on "UTS_RELEASE"\n",
3752 TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
3753
3754 config_group_init(&subsys->su_group);
3755 mutex_init(&subsys->su_mutex);
3756
3757 ret = init_se_kmem_caches();
3758 if (ret < 0)
3759 return ret;
3760 /*
3761 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
3762 * and ALUA Logical Unit Group and Target Port Group infrastructure.
3763 */
3764 config_group_init_type_name(&target_core_hbagroup, "core",
3765 &target_core_cit);
3766 configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
3767
3768 /*
3769 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
3770 */
3771 config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
3772 configfs_add_default_group(&alua_group, &target_core_hbagroup);
3773
3774 /*
3775 * Add the ALUA Logical Unit Group ConfigFS group under
3776 * /sys/kernel/config/target/core/alua/
3777 */
3778 config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
3779 &target_core_alua_lu_gps_cit);
3780 configfs_add_default_group(&alua_lu_gps_group, &alua_group);
3781
3782 /*
3783 * Add core/alua/lu_gps/default_lu_gp
3784 */
3785 lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
3786 if (IS_ERR(lu_gp)) {
3787 ret = -ENOMEM;
3788 goto out_global;
3789 }
3790
3791 config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
3792 &target_core_alua_lu_gp_cit);
3793 configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
3794
3795 default_lu_gp = lu_gp;
3796
3797 /*
3798 * Register the target_core_mod subsystem with configfs.
3799 */
3800 ret = configfs_register_subsystem(subsys);
3801 if (ret < 0) {
3802 pr_err("Error %d while registering subsystem %s\n",
3803 ret, subsys->su_group.cg_item.ci_namebuf);
3804 goto out_global;
3805 }
3806 pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
3807 " Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
3808 " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
3809 /*
3810 * Register built-in RAMDISK subsystem logic for virtual LUN 0
3811 */
3812 ret = rd_module_init();
3813 if (ret < 0)
3814 goto out;
3815
3816 ret = core_dev_setup_virtual_lun0();
3817 if (ret < 0)
3818 goto out;
3819
3820 ret = target_xcopy_setup_pt();
3821 if (ret < 0)
3822 goto out;
3823
3824 scoped_with_kernel_creds()
3825 target_init_dbroot();
3826
3827 return 0;
3828
3829 out:
3830 target_xcopy_release_pt();
3831 configfs_unregister_subsystem(subsys);
3832 core_dev_release_virtual_lun0();
3833 rd_module_exit();
3834 out_global:
3835 if (default_lu_gp) {
3836 core_alua_free_lu_gp(default_lu_gp);
3837 default_lu_gp = NULL;
3838 }
3839 release_se_kmem_caches();
3840 return ret;
3841 }
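/*
 * Illustrative sketch (not part of the original source): after a
 * successful init the ConfigFS tree looks roughly like
 *
 *   /sys/kernel/config/target/
 *       version
 *       dbroot
 *       core/
 *           alua/
 *               lu_gps/
 *                   default_lu_gp/
 *
 * with HBA/device directories and fabric module groups added underneath
 * at runtime by userspace (e.g. targetcli) via mkdir.
 */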
3842
3843 static void __exit target_core_exit_configfs(void)
3844 {
3845 configfs_remove_default_groups(&alua_lu_gps_group);
3846 configfs_remove_default_groups(&alua_group);
3847 configfs_remove_default_groups(&target_core_hbagroup);
3848
3849 /*
3850 * We expect subsys->su_group.default_groups to be released
3851 * by configfs subsystem provider logic.
3852 */
3853 configfs_unregister_subsystem(&target_core_fabrics);
3854
3855 core_alua_free_lu_gp(default_lu_gp);
3856 default_lu_gp = NULL;
3857
3858 pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
3859 " Infrastructure\n");
3860
3861 core_dev_release_virtual_lun0();
3862 rd_module_exit();
3863 target_xcopy_release_pt();
3864 release_se_kmem_caches();
3865 }
3866
3867 MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
3868 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3869 MODULE_LICENSE("GPL");
3870
3871 module_init(target_core_init_configfs);
3872 module_exit(target_core_exit_configfs);
3873