// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
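	/*
	 * Formatter and Flush Control: enable formatting and trigger
	 * insertion, flush on FLUSHIN or on a trigger event, and treat
	 * an assertion of TRIGIN as a trigger.
	 */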
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
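	/*
	 * Claim the device before touching the HW: the CLAIM tags arbitrate
	 * ownership between self-hosted agents such as this driver and an
	 * external debugger, so only proceed if the claim succeeds.
	 */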
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	__tmc_etb_enable_hw(drvdata);
	return 0;
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
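	/*
	 * Drain the trace RAM through the RRD register; the TMC returns
	 * 0xFFFFFFFF once there is no more data to read.
	 */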
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	__tmc_etf_enable_hw(drvdata);
	return 0;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the amount of trace data available in the buffer from @pos,
 * with a maximum limit of @len, and update @bufpp to point at where
 * the data can be found.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust @len to the amount of data available from @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}
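
/*
 * Example: a minimal sketch of how a character device read() handler can
 * consume the helper above (the real consumer lives in the TMC core code;
 * the function and parameter names below are illustrative only):
 *
 *	static ssize_t example_read(struct tmc_drvdata *drvdata,
 *				    char __user *to, size_t len, loff_t *ppos)
 *	{
 *		char *bufp;
 *		ssize_t actual = tmc_etb_get_sysfs_trace(drvdata, *ppos,
 *							 len, &bufp);
 *
 *		if (actual <= 0)
 *			return 0;
 *		if (copy_to_user(to, bufp, actual))
 *			return -EFAULT;
 *		*ppos += actual;
 *		return actual;
 *	}
 */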

static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory:
	 * a GFP_KERNEL allocation may sleep and so can't be made with the
	 * spinlock held.  Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory here, outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled, no memory is needed and the HW need not
	 * be touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

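/*
 * In perf mode a sink is private to a process: the first event to enable
 * the ETB/ETF records the pid of its owner, further events from the same
 * process simply take a reference, and events owned by another process
 * get -EBUSY until the sink is released.
 */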
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already being
		 * operated from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

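	/*
	 * Per-task events carry cpu == -1 and have no CPU affinity, in
	 * which case fall back to NUMA_NO_NODE rather than a specific node.
	 */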
	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}
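
/*
 * The masking above relies on the AUX buffer spanning a power of two
 * number of pages.  For example, with nr_pages = 4 and 4K pages the
 * buffer covers 16384 bytes and the mask is 0x3fff, so a handle->head
 * of 20480 wraps to 4096, giving buf->cur = 1 and buf->offset = 0.
 */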

static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}
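
	/*
	 * CIRC_CNT(head, tail, size) evaluates to (head - tail) & (size - 1),
	 * which handles the wrap around provided the TMC RAM size is a
	 * power of two.  For example, with size = 4096, write_ptr = 512 and
	 * read_ptr = 3584, there are (512 - 3584) & 4095 = 1024 bytes to read.
	 */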

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}
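
	/*
	 * For example, with size = 4096, write_ptr = 1024 and to_read
	 * trimmed to 512, read_ptr becomes (1024 + 4096) - 512 = 4608 and
	 * wraps to 512, i.e. exactly the latest 512 bytes written before
	 * write_ptr.
	 */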

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;
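	/*
	 * If the buffer wrapped around, the first words copied out are
	 * replaced with a barrier packet so that a decoder picking up the
	 * trace mid-stream can re-synchronise at the discontinuity.
	 */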

	/* Read the trace data one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space will figure out how many
	 * bytes to get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

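/*
 * An ETB configuration can only be used as a sink, while an ETF can also
 * serve as a link (a FIFO between a source and another sink), hence the
 * additional link_ops below.
 */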
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};

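/*
 * Read preparation for the /dev/xyz.{etf|etb} interface: stop the TMC and
 * dump its trace RAM to drvdata::buf so that user space reads a stable
 * snapshot; tmc_read_unprepare_etb() below restarts or tears down the
 * session once the read is done.
 */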
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}
688