xref: /linux/drivers/w1/w1_netlink.c (revision e0bf6c5ca2d3281f231c5f0c9bf145e9513644de)
1 /*
2  * w1_netlink.c
3  *
4  * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
5  *
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20  */
21 
22 #include <linux/slab.h>
23 #include <linux/skbuff.h>
24 #include <linux/netlink.h>
25 #include <linux/connector.h>
26 
27 #include "w1.h"
28 #include "w1_log.h"
29 #include "w1_netlink.h"
30 
31 #if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))
32 
33 #define MIN(a, b)                   (((a) < (b)) ? (a) : (b))
34 
35 /* Bundle together everything required to process a request in one memory
36  * allocation.
37  */
38 struct w1_cb_block {
39 	atomic_t refcnt;
40 	u32 portid; /* Sending process port ID */
41 	/* maximum value for first_cn->len */
42 	u16 maxlen;
43 	/* pointers used to build up the reply message */
44 	struct cn_msg *first_cn; /* fixed once the structure is populated */
45 	struct cn_msg *cn; /* advances as each cn_msg is appended */
46 	struct w1_netlink_msg *msg; /* advances as each w1_netlink_msg is appended */
47 	struct w1_netlink_cmd *cmd; /* advances as cmds are appended */
48 	struct w1_netlink_msg *cur_msg; /* message currently being processed */
49 	/* copy of the original request follows */
50 	struct cn_msg request_cn;
51 	/* followed by variable length:
52 	 * cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
53 	 * one or more struct w1_cb_node
54 	 * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
55 	 */
56 };
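/* A rough sketch of the single allocation built in w1_cn_callback() (the
 * authoritative sizing lives there; this is only an illustration):
 *
 *   [struct w1_cb_block][request data][w1_cb_node * N][first_cn reply area]
 *
 * request_cn is the last named member, so the copy of the request, the
 * per-command nodes and the reply buffer all follow it inside the same
 * kzalloc'd block.
 */
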
57 struct w1_cb_node {
58 	struct w1_async_cmd async;
59 	/* pointers within w1_cb_block and cn data */
60 	struct w1_cb_block *block;
61 	struct w1_netlink_msg *msg;
62 	struct w1_slave *sl;
63 	struct w1_master *dev;
64 };
65 
66 /**
67  * w1_reply_len() - calculate current reply length, compare to maxlen
68  * @block: block to calculate the reply length of
69  *
70  * Calculates the current message length, including possibly multiple
71  * cn_msg and data, excluding the first sizeof(struct cn_msg).  Directly
72  * comparable to maxlen and usable as the length when sending the message.
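 * For example (an illustration, not from the original source): with two
 * bundled cn_msg replies, block->cn points at the second one, so the
 * value is sizeof(struct cn_msg) + first_cn->len + cn->len, i.e.
 * everything after the first cn_msg header.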
73  */
74 static u16 w1_reply_len(struct w1_cb_block *block)
75 {
76 	if (!block->cn)
77 		return 0;
78 	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
79 }
80 
81 static void w1_unref_block(struct w1_cb_block *block)
82 {
83 	if (atomic_sub_return(1, &block->refcnt) == 0) {
84 		u16 len = w1_reply_len(block);
85 		if (len) {
86 			cn_netlink_send_mult(block->first_cn, len,
87 				block->portid, 0, GFP_KERNEL);
88 		}
89 		kfree(block);
90 	}
91 }
92 
93 /**
94  * w1_reply_make_space() - send message if needed to make space
95  * @block: block to make space on
96  * @space: how many bytes requested
97  *
98  * Verify there is enough room left for the caller to add "space" bytes to the
99  * message; if there isn't, send the message and reset.
100  */
101 static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
102 {
103 	u16 len = w1_reply_len(block);
104 	if (len + space >= block->maxlen) {
105 		cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL);
106 		block->first_cn->len = 0;
107 		block->cn = NULL;
108 		block->msg = NULL;
109 		block->cmd = NULL;
110 	}
111 }
112 
113 /* Early send when replies aren't bundled. */
114 static void w1_netlink_check_send(struct w1_cb_block *block)
115 {
116 	if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
117 		w1_reply_make_space(block, block->maxlen);
118 }
119 
120 /**
121  * w1_netlink_setup_msg() - prepare to write block->msg
122  * @block: block to operate on
123  * @ack: determines if cn can be reused
124  *
125  * block->cn will be set up with the correct ack, advancing if needed
126  * block->cn->len does not include space for block->msg
127  * block->msg advances but remains uninitialized
128  */
129 static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
130 {
131 	if (block->cn && block->cn->ack == ack) {
132 		block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
133 	} else {
134 		/* advance or set to data */
135 		if (block->cn)
136 			block->cn = (struct cn_msg *)(block->cn->data +
137 				block->cn->len);
138 		else
139 			block->cn = block->first_cn;
140 
141 		memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
142 		block->cn->len = 0;
143 		block->cn->ack = ack;
144 		block->msg = (struct w1_netlink_msg *)block->cn->data;
145 	}
146 }
147 
148 /* Append cmd to msg, including cmd->data as well.  This is because
149  * any following data goes with the command and, in the case of a read,
150  * is the result.
151  */
152 static void w1_netlink_queue_cmd(struct w1_cb_block *block,
153 	struct w1_netlink_cmd *cmd)
154 {
155 	u32 space;
156 	w1_reply_make_space(block, sizeof(struct cn_msg) +
157 		sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);
158 
159 	/* A status message is sent after each command, so there is no point
160 	 * in trying to bundle this cmd after an existing one, because there
161 	 * won't be one to bundle with.  Allocate and copy over a new cn_msg.
162 	 */
163 	w1_netlink_setup_msg(block, block->request_cn.seq + 1);
164 	memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
165 	block->cn->len += sizeof(*block->msg);
166 	block->msg->len = 0;
167 	block->cmd = (struct w1_netlink_cmd *)(block->msg->data);
168 
169 	space = sizeof(*cmd) + cmd->len;
170 	if (block->cmd != cmd)
171 		memcpy(block->cmd, cmd, space);
172 	block->cn->len += space;
173 	block->msg->len += space;
174 }
175 
176 /* Append req_msg and req_cmd; no other commands and no data from req_cmd
177  * are copied.
178  */
179 static void w1_netlink_queue_status(struct w1_cb_block *block,
180 	struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
181 	int error)
182 {
183 	u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);
184 	w1_reply_make_space(block, space);
185 	w1_netlink_setup_msg(block, block->request_cn.ack);
186 
187 	memcpy(block->msg, req_msg, sizeof(*req_msg));
188 	block->cn->len += sizeof(*req_msg);
189 	block->msg->len = 0;
190 	block->msg->status = (u8)-error;
191 	if (req_cmd) {
192 		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
193 		memcpy(cmd, req_cmd, sizeof(*cmd));
194 		block->cn->len += sizeof(*cmd);
195 		block->msg->len += sizeof(*cmd);
196 		cmd->len = 0;
197 	}
198 	w1_netlink_check_send(block);
199 }
200 
201 /**
202  * w1_netlink_send_error() - sends the error message now
203  * @cn: original cn_msg
204  * @msg: original w1_netlink_msg
205  * @portid: where to send it
206  * @error: error status
207  *
208  * Use when a block isn't available to queue the message to, and when cn
209  * and msg might not be contiguous.
210  */
211 static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
212 	int portid, int error)
213 {
214 	struct {
215 		struct cn_msg cn;
216 		struct w1_netlink_msg msg;
217 	} packet;
218 	memcpy(&packet.cn, cn, sizeof(packet.cn));
219 	memcpy(&packet.msg, msg, sizeof(packet.msg));
220 	packet.cn.len = sizeof(packet.msg);
221 	packet.msg.len = 0;
222 	packet.msg.status = (u8)-error;
223 	cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
224 }
225 
226 /**
227  * w1_netlink_send() - sends w1 netlink notifications
228  * @dev: w1_master the event is associated with or for
229  * @msg: w1_netlink_msg message to be sent
230  *
231  * These are notifications generated by the kernel.
232  */
233 void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
234 {
235 	struct {
236 		struct cn_msg cn;
237 		struct w1_netlink_msg msg;
238 	} packet;
239 	memset(&packet, 0, sizeof(packet));
240 
241 	packet.cn.id.idx = CN_W1_IDX;
242 	packet.cn.id.val = CN_W1_VAL;
243 
244 	packet.cn.seq = dev->seq++;
245 	packet.cn.len = sizeof(*msg);
246 
247 	memcpy(&packet.msg, msg, sizeof(*msg));
248 	packet.msg.len = 0;
249 
250 	cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
251 }
252 
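/* Append one 64-bit registration number to the reply for the current
 * search/list command.  If w1_reply_make_space() flushed the packet and
 * cleared block->cmd, the command header is queued again first.
 */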
253 static void w1_send_slave(struct w1_master *dev, u64 rn)
254 {
255 	struct w1_cb_block *block = dev->priv;
256 	struct w1_netlink_cmd *cache_cmd = block->cmd;
257 	u64 *data;
258 
259 	w1_reply_make_space(block, sizeof(*data));
260 
261 	/* Add the cmd back if w1_reply_make_space() flushed the packet */
262 	if (!block->cmd) {
263 		cache_cmd->len = 0;
264 		w1_netlink_queue_cmd(block, cache_cmd);
265 	}
266 
267 	data = (u64 *)(block->cmd->data + block->cmd->len);
268 
269 	*data = rn;
270 	block->cn->len += sizeof(*data);
271 	block->msg->len += sizeof(*data);
272 	block->cmd->len += sizeof(*data);
273 }
274 
275 static void w1_found_send_slave(struct w1_master *dev, u64 rn)
276 {
277 	/* update kernel slave list */
278 	w1_slave_found(dev, rn);
279 
280 	w1_send_slave(dev, rn);
281 }
282 
283 /* Get the current slave list, or search (with or without alarm) */
284 static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
285 {
286 	struct w1_slave *sl;
287 
288 	req_cmd->len = 0;
289 	w1_netlink_queue_cmd(dev->priv, req_cmd);
290 
291 	if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
292 		u64 rn;
293 		mutex_lock(&dev->list_mutex);
294 		list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
295 			memcpy(&rn, &sl->reg_num, sizeof(rn));
296 			w1_send_slave(dev, rn);
297 		}
298 		mutex_unlock(&dev->list_mutex);
299 	} else {
300 		w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
301 			W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
302 	}
303 
304 	return 0;
305 }
306 
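/* Execute a single touch/read/write command on the bus.  Touch and read
 * results are queued back as reply data; a plain write produces none.
 */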
307 static int w1_process_command_io(struct w1_master *dev,
308 	struct w1_netlink_cmd *cmd)
309 {
310 	int err = 0;
311 
312 	switch (cmd->cmd) {
313 	case W1_CMD_TOUCH:
314 		w1_touch_block(dev, cmd->data, cmd->len);
315 		w1_netlink_queue_cmd(dev->priv, cmd);
316 		break;
317 	case W1_CMD_READ:
318 		w1_read_block(dev, cmd->data, cmd->len);
319 		w1_netlink_queue_cmd(dev->priv, cmd);
320 		break;
321 	case W1_CMD_WRITE:
322 		w1_write_block(dev, cmd->data, cmd->len);
323 		break;
324 	default:
325 		err = -EINVAL;
326 		break;
327 	}
328 
329 	return err;
330 }
331 
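/* Add or remove a slave identified by the w1_reg_num in cmd->data.  Adding
 * an id that is already present, or removing one that isn't, is -EINVAL.
 */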
332 static int w1_process_command_addremove(struct w1_master *dev,
333 	struct w1_netlink_cmd *cmd)
334 {
335 	struct w1_slave *sl;
336 	int err = 0;
337 	struct w1_reg_num *id;
338 
339 	if (cmd->len != sizeof(*id))
340 		return -EINVAL;
341 
342 	id = (struct w1_reg_num *)cmd->data;
343 
344 	sl = w1_slave_search_device(dev, id);
345 	switch (cmd->cmd) {
346 	case W1_CMD_SLAVE_ADD:
347 		if (sl)
348 			err = -EINVAL;
349 		else
350 			err = w1_attach_slave_device(dev, id);
351 		break;
352 	case W1_CMD_SLAVE_REMOVE:
353 		if (sl)
354 			w1_slave_detach(sl);
355 		else
356 			err = -EINVAL;
357 		break;
358 	default:
359 		err = -EINVAL;
360 		break;
361 	}
362 
363 	return err;
364 }
365 
366 static int w1_process_command_master(struct w1_master *dev,
367 	struct w1_netlink_cmd *req_cmd)
368 {
369 	int err = -EINVAL;
370 
371 	/* drop bus_mutex for search (it does its own locking), and for
372 	 * add/remove, which doesn't use the bus
373 	 */
374 	switch (req_cmd->cmd) {
375 	case W1_CMD_SEARCH:
376 	case W1_CMD_ALARM_SEARCH:
377 	case W1_CMD_LIST_SLAVES:
378 		mutex_unlock(&dev->bus_mutex);
379 		err = w1_get_slaves(dev, req_cmd);
380 		mutex_lock(&dev->bus_mutex);
381 		break;
382 	case W1_CMD_READ:
383 	case W1_CMD_WRITE:
384 	case W1_CMD_TOUCH:
385 		err = w1_process_command_io(dev, req_cmd);
386 		break;
387 	case W1_CMD_RESET:
388 		err = w1_reset_bus(dev);
389 		break;
390 	case W1_CMD_SLAVE_ADD:
391 	case W1_CMD_SLAVE_REMOVE:
392 		mutex_unlock(&dev->bus_mutex);
393 		mutex_lock(&dev->mutex);
394 		err = w1_process_command_addremove(dev, req_cmd);
395 		mutex_unlock(&dev->mutex);
396 		mutex_lock(&dev->bus_mutex);
397 		break;
398 	default:
399 		err = -EINVAL;
400 		break;
401 	}
402 
403 	return err;
404 }
405 
406 static int w1_process_command_slave(struct w1_slave *sl,
407 		struct w1_netlink_cmd *cmd)
408 {
409 	dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
410 		__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
411 		sl->reg_num.crc, cmd->cmd, cmd->len);
412 
413 	return w1_process_command_io(sl->master, cmd);
414 }
415 
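/* Answer W1_LIST_MASTERS directly: build a W1_LIST_MASTERS reply whose data
 * is an array of u32 master ids, sending intermediate packets whenever the
 * PAGE_SIZE buffer fills up.
 */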
416 static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
417 {
418 	struct w1_master *dev;
419 	struct cn_msg *cn;
420 	struct w1_netlink_msg *msg;
421 	u32 *id;
422 
423 	cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
424 	if (!cn)
425 		return -ENOMEM;
426 
427 	cn->id.idx = CN_W1_IDX;
428 	cn->id.val = CN_W1_VAL;
429 
430 	cn->seq = req_cn->seq;
431 	cn->ack = req_cn->seq + 1;
432 	cn->len = sizeof(struct w1_netlink_msg);
433 	msg = (struct w1_netlink_msg *)cn->data;
434 
435 	msg->type = W1_LIST_MASTERS;
436 	msg->status = 0;
437 	msg->len = 0;
438 	id = (u32 *)msg->data;
439 
440 	mutex_lock(&w1_mlock);
441 	list_for_each_entry(dev, &w1_masters, w1_master_entry) {
442 		if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
443 			cn_netlink_send(cn, portid, 0, GFP_KERNEL);
444 			cn->len = sizeof(struct w1_netlink_msg);
445 			msg->len = 0;
446 			id = (u32 *)msg->data;
447 		}
448 
449 		*id = dev->id;
450 		msg->len += sizeof(*id);
451 		cn->len += sizeof(*id);
452 		id++;
453 	}
454 	cn_netlink_send(cn, portid, 0, GFP_KERNEL);
455 	mutex_unlock(&w1_mlock);
456 
457 	kfree(cn);
458 	return 0;
459 }
460 
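/* Called from the master's thread for each queued w1_cb_node: reset/select
 * the slave if the request targeted one, execute each command in the
 * message, and queue a status reply after every command.  Drops the
 * reference taken while building the block and releases the block itself
 * via w1_unref_block() when done.
 */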
461 static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
462 {
463 	struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
464 		async);
465 	u16 mlen = node->msg->len;
466 	u16 len;
467 	int err = 0;
468 	struct w1_slave *sl = node->sl;
469 	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;
470 
471 	mutex_lock(&dev->bus_mutex);
472 	dev->priv = node->block;
473 	if (sl && w1_reset_select_slave(sl))
474 		err = -ENODEV;
475 	node->block->cur_msg = node->msg;
476 
477 	while (mlen && !err) {
478 		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
479 			err = -E2BIG;
480 			break;
481 		}
482 
483 		if (sl)
484 			err = w1_process_command_slave(sl, cmd);
485 		else
486 			err = w1_process_command_master(dev, cmd);
487 		w1_netlink_check_send(node->block);
488 
489 		w1_netlink_queue_status(node->block, node->msg, cmd, err);
490 		err = 0;
491 
492 		len = sizeof(*cmd) + cmd->len;
493 		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
494 		mlen -= len;
495 	}
496 
497 	if (!cmd || err)
498 		w1_netlink_queue_status(node->block, node->msg, cmd, err);
499 
500 	/* ref taken in w1_search_slave or w1_search_master_id when building
501 	 * the block
502 	 */
503 	if (sl)
504 		w1_unref_slave(sl);
505 	else
506 		atomic_dec(&dev->refcnt);
507 	dev->priv = NULL;
508 	mutex_unlock(&dev->bus_mutex);
509 
510 	mutex_lock(&dev->list_mutex);
511 	list_del(&async_cmd->async_entry);
512 	mutex_unlock(&dev->list_mutex);
513 
514 	w1_unref_block(node->block);
515 }
516 
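/* Count the commands inside one w1_netlink_msg and, for search/list
 * commands, grow *slave_len by an estimate of the id data the reply may
 * have to carry.
 */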
517 static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
518 	u16 *slave_len)
519 {
520 	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
521 	u16 mlen = msg->len;
522 	u16 len;
523 	int slave_list = 0;
524 	while (mlen) {
525 		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
526 			break;
527 
528 		switch (cmd->cmd) {
529 		case W1_CMD_SEARCH:
530 		case W1_CMD_ALARM_SEARCH:
531 		case W1_CMD_LIST_SLAVES:
532 			++slave_list;
533 		}
534 		++*cmd_count;
535 		len = sizeof(*cmd) + cmd->len;
536 		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
537 		mlen -= len;
538 	}
539 
540 	if (slave_list) {
541 		struct w1_master *dev = w1_search_master_id(msg->id.mst.id);
542 		if (dev) {
543 			/* Bytes, and likely an overestimate; if it isn't,
544 			 * the results can still be split between packets.
545 			 */
546 			*slave_len += sizeof(struct w1_reg_num) * slave_list *
547 				(dev->slave_count + dev->max_slave_count);
548 			/* search incremented it */
549 			atomic_dec(&dev->refcnt);
550 		}
551 	}
552 }
553 
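/* Connector callback for all w1 requests from userspace.  Validates flags
 * and message framing, allocates a single w1_cb_block holding a copy of the
 * request, one w1_cb_node per master/slave message and the reply buffer,
 * then queues the nodes on the matching masters' async lists.
 * W1_LIST_MASTERS is answered directly on this thread.
 */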
554 static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
555 {
556 	struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
557 	struct w1_slave *sl;
558 	struct w1_master *dev;
559 	u16 msg_len;
560 	u16 slave_len = 0;
561 	int err = 0;
562 	struct w1_cb_block *block = NULL;
563 	struct w1_cb_node *node = NULL;
564 	int node_count = 0;
565 	int cmd_count = 0;
566 
567 	/* If any unknown flag is set, let the application know; that way
568 	 * applications can detect the absence of features in kernels that
569 	 * don't know about them.  http://lwn.net/Articles/587527/
570 	 */
571 	if (cn->flags & ~(W1_CN_BUNDLE)) {
572 		w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
573 		return;
574 	}
575 
576 	/* Count the number of master or slave commands so that space can be
577 	 * allocated for one cb_node each.
578 	 */
579 	msg_len = cn->len;
580 	while (msg_len && !err) {
581 		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
582 			err = -E2BIG;
583 			break;
584 		}
585 
586 		/* count messages for nodes and allocate any additional space
587 		 * required for slave lists
588 		 */
589 		if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
590 			++node_count;
591 			w1_list_count_cmds(msg, &cmd_count, &slave_len);
592 		}
593 
594 		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
595 		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
596 			sizeof(struct w1_netlink_msg) + msg->len);
597 	}
598 	msg = (struct w1_netlink_msg *)(cn + 1);
599 	if (node_count) {
600 		int size;
601 		int reply_size = sizeof(*cn) + cn->len + slave_len;
602 		if (cn->flags & W1_CN_BUNDLE) {
603 			/* bundling duplicates some of the messages */
604 			reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
605 				sizeof(struct w1_netlink_msg) +
606 				sizeof(struct w1_netlink_cmd));
607 		}
608 		reply_size = MIN(CONNECTOR_MAX_MSG_SIZE, reply_size);
609 
610 		/* Allocate space for the block, a copy of the original message,
611 		 * one node per cmd to point into the original message, and
612 		 * space for replies, which is the original message size plus
613 		 * space for any slave list data and status messages.
614 		 * cn->len doesn't include the cn_msg header, which is part of the block.
615 		 */
616 		size =  /* block + original message */
617 			sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
618 			/* space for nodes */
619 			node_count * sizeof(struct w1_cb_node) +
620 			/* replies */
621 			sizeof(struct cn_msg) + reply_size;
622 		block = kzalloc(size, GFP_KERNEL);
623 		if (!block) {
624 			/* if the system is already out of memory,
625 			 * (A) will this work, and (B) would it be better
626 			 * to not try?
627 			 */
628 			w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
629 			return;
630 		}
631 		atomic_set(&block->refcnt, 1);
632 		block->portid = nsp->portid;
633 		memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
634 		node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
635 
636 		/* Sneaky: when not bundling, reply_size is the allocated space
637 		 * required for the reply.  cn_msg isn't part of maxlen, so
638 		 * it should be reply_size - sizeof(struct cn_msg); however,
639 		 * when checking if there is enough space, w1_reply_make_space
640 		 * is called with the full message size including cn_msg,
641 		 * because it isn't known at that time if an additional cn_msg
642 		 * will need to be allocated.  So an extra cn_msg is added
643 		 * above in "size".
644 		 */
645 		block->maxlen = reply_size;
646 		block->first_cn = (struct cn_msg *)(node + node_count);
647 		memset(block->first_cn, 0, sizeof(*block->first_cn));
648 	}
649 
650 	msg_len = cn->len;
651 	while (msg_len && !err) {
652 
653 		dev = NULL;
654 		sl = NULL;
655 
656 		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
657 			err = -E2BIG;
658 			break;
659 		}
660 
661 		/* execute on this thread, no need to process later */
662 		if (msg->type == W1_LIST_MASTERS) {
663 			err = w1_process_command_root(cn, nsp->portid);
664 			goto out_cont;
665 		}
666 
667 		/* All following message types require additional data;
668 		 * check here before references are taken.
669 		 */
670 		if (!msg->len) {
671 			err = -EPROTO;
672 			goto out_cont;
673 		}
674 
675 		/* both search calls take references */
676 		if (msg->type == W1_MASTER_CMD) {
677 			dev = w1_search_master_id(msg->id.mst.id);
678 		} else if (msg->type == W1_SLAVE_CMD) {
679 			sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
680 			if (sl)
681 				dev = sl->master;
682 		} else {
683 			pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
684 				__func__, cn->id.idx, cn->id.val,
685 				msg->type, msg->len);
686 			err = -EPROTO;
687 			goto out_cont;
688 		}
689 
690 		if (!dev) {
691 			err = -ENODEV;
692 			goto out_cont;
693 		}
694 
695 		err = 0;
696 
697 		atomic_inc(&block->refcnt);
698 		node->async.cb = w1_process_cb;
699 		node->block = block;
700 		node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
701 			(size_t)((u8 *)msg - (u8 *)cn));
702 		node->sl = sl;
703 		node->dev = dev;
704 
705 		mutex_lock(&dev->list_mutex);
706 		list_add_tail(&node->async.async_entry, &dev->async_list);
707 		wake_up_process(dev->thread);
708 		mutex_unlock(&dev->list_mutex);
709 		++node;
710 
711 out_cont:
712 		/* Can't queue because that modifies block and another
713 		 * thread could be processing the messages by now and
714 		 * there isn't a lock; send directly.
715 		 */
716 		if (err)
717 			w1_netlink_send_error(cn, msg, nsp->portid, err);
718 		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
719 		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
720 			sizeof(struct w1_netlink_msg) + msg->len);
721 
722 		/*
723 		 * Let's allow requests for nonexistent devices.
724 		 */
725 		if (err == -ENODEV)
726 			err = 0;
727 	}
728 	if (block)
729 		w1_unref_block(block);
730 }
731 
732 int w1_init_netlink(void)
733 {
734 	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};
735 
736 	return cn_add_callback(&w1_id, "w1", &w1_cn_callback);
737 }
738 
739 void w1_fini_netlink(void)
740 {
741 	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};
742 
743 	cn_del_callback(&w1_id);
744 }
745 #else
746 void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
747 {
748 }
749 
750 int w1_init_netlink(void)
751 {
752 	return 0;
753 }
754 
755 void w1_fini_netlink(void)
756 {
757 }
758 #endif
759