/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

static struct kmem_cache *_dm_event_cache;

struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}
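/*
 * Illustrative sketch (not part of this file): a path-grouping target
 * such as dm-mpath reports path state changes through dm_path_uevent()
 * below rather than calling dm_build_path_uevent() directly.  The
 * identifiers `pgpath` and `m` here are hypothetical stand-ins for the
 * caller's own per-target state:
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, m->nr_valid_paths);
 *
 * The resulting event is queued on the mapped device and later flushed
 * to userspace by dm_send_uevents().
 */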
/**
 * dm_send_uevents - send uevents for given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating event
 *
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);

/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type:		path event type enum
 * @ti:			pointer to a dm_target
 * @path:		string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 *
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);

int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}
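/*
 * For reference, a PATH_FAILED event assembled above reaches userspace
 * as a KOBJ_CHANGE uevent on the mapped device's kobject, carrying an
 * environment of roughly this shape (the values shown are illustrative,
 * not taken from a real system):
 *
 *	ACTION=change
 *	DM_TARGET=multipath
 *	DM_ACTION=PATH_FAILED
 *	DM_SEQNUM=3
 *	DM_PATH=8:32
 *	DM_NR_VALID_PATHS=1
 *	DM_NAME=mpatha
 *	DM_UUID=mpath-3600508b400105e210000900000490000
 *
 * Userspace (e.g. a udev rule or multipathd) can match on these DM_*
 * variables to react to path failures and reinstatements.
 */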