xref: /linux/fs/gfs2/xattr.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/xattr.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <asm/uaccess.h>
17 
18 #include "gfs2.h"
19 #include "incore.h"
20 #include "acl.h"
21 #include "xattr.h"
22 #include "glock.h"
23 #include "inode.h"
24 #include "meta_io.h"
25 #include "quota.h"
26 #include "rgrp.h"
27 #include "trans.h"
28 #include "util.h"
29 
30 /**
31  * ea_calc_size - returns the acutal number of bytes the request will take up
32  *                (not counting any unstuffed data blocks)
33  * @sdp:
34  * @er:
35  * @size:
36  *
37  * Returns: 1 if the EA should be stuffed
38  */
39 
40 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
41 			unsigned int *size)
42 {
43 	unsigned int jbsize = sdp->sd_jbsize;
44 
45 	/* Stuffed */
46 	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
47 
48 	if (*size <= jbsize)
49 		return 1;
50 
51 	/* Unstuffed */
52 	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
53 		      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
54 
55 	return 0;
56 }
57 
58 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
59 {
60 	unsigned int size;
61 
62 	if (dsize > GFS2_EA_MAX_DATA_LEN)
63 		return -ERANGE;
64 
65 	ea_calc_size(sdp, nsize, dsize, &size);
66 
67 	/* This can only happen with 512 byte blocks */
68 	if (size > sdp->sd_jbsize)
69 		return -ERANGE;
70 
71 	return 0;
72 }
73 
74 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
75 			  struct gfs2_ea_header *ea,
76 			  struct gfs2_ea_header *prev, void *private);
77 
/*
 * ea_foreach_i - walk every EA record in a single EA block
 * @ip: the inode the block belongs to
 * @bh: buffer holding a block of metatype GFS2_METATYPE_EA
 * @ea_call: callback invoked for each record (including unused ones)
 * @data: opaque cookie passed through to @ea_call
 *
 * Each record is sanity-checked before the callback runs; any structural
 * inconsistency marks the inode inconsistent and returns -EIO.  A non-zero
 * return from @ea_call stops the walk and is propagated to the caller.
 */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* A zero record length would make this walk loop forever. */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		/* The whole record must lie within the buffer. */
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at the buffer end. */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
114 
/*
 * ea_foreach - apply @ea_call to every EA record of an inode
 *
 * Reads the inode's EA block.  When the inode uses the indirect layout
 * (GFS2_DIF_EA_INDIRECT) that block is an array of pointers to EA blocks
 * and each one is walked in turn; otherwise the block itself holds the
 * records.  Stops at the first error or the first zero pointer.
 */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	__be64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	/* Direct layout: i_eattr points straight at an EA block. */
	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	/* Indirect layout: iterate the pointer array after the header. */
	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	for (; eablk < end; eablk++) {
		u64 bn;

		/* A zero pointer marks the end of the used entries. */
		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
157 
/* Search context shared by gfs2_ea_find() and its ea_find_i() callback. */
struct ea_find {
	int type;			/* GFS2_EATYPE_... to match */
	const char *name;		/* attribute name to match */
	size_t namel;			/* strlen(name), cached */
	struct gfs2_ea_location *ef_el;	/* filled with the location on match */
};
164 
165 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
166 		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
167 		     void *private)
168 {
169 	struct ea_find *ef = private;
170 
171 	if (ea->ea_type == GFS2_EATYPE_UNUSED)
172 		return 0;
173 
174 	if (ea->ea_type == ef->type) {
175 		if (ea->ea_name_len == ef->namel &&
176 		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
177 			struct gfs2_ea_location *el = ef->ef_el;
178 			get_bh(bh);
179 			el->el_bh = bh;
180 			el->el_ea = ea;
181 			el->el_prev = prev;
182 			return 1;
183 		}
184 	}
185 
186 	return 0;
187 }
188 
189 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190 			struct gfs2_ea_location *el)
191 {
192 	struct ea_find ef;
193 	int error;
194 
195 	ef.type = type;
196 	ef.name = name;
197 	ef.namel = strlen(name);
198 	ef.ef_el = el;
199 
200 	memset(el, 0, sizeof(struct gfs2_ea_location));
201 
202 	error = ea_foreach(ip, ea_find_i, &ef);
203 	if (error > 0)
204 		return 0;
205 
206 	return error;
207 }
208 
/**
 * ea_dealloc_unstuffed - free the data blocks of an unstuffed EA
 * @ip: the inode the EA belongs to
 * @bh: buffer holding the EA block
 * @ea: the EA record whose data blocks are to be freed
 * @prev: the record preceding @ea in the block, or NULL
 * @private: when non-NULL, leave the emptied record in place instead
 *           of merging it into @prev
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG.  But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	__be64 *dataptrs;
	u64 bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	/* Stuffed EAs have no separate data blocks: nothing to free. */
	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the blocks; keep one block number so the
	   resource group can be looked up below. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	/* Second pass: zero the pointers and free the blocks, batching
	   contiguous runs into single gfs2_free_meta() calls. */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	if (prev && !leave) {
		/* Merge the dead record's space into its predecessor. */
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		/* No predecessor (or caller asked to keep the record):
		   just mark it unused. */
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	/* Reflect the changed block count / ctime in the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
319 
/*
 * ea_remove_unstuffed - quota-holding wrapper around ea_dealloc_unstuffed()
 *
 * Acquires the inode's quota data and holds quota around the dealloc.
 * When @leave is non-zero the (emptied) record is left in place rather
 * than merged into its predecessor.
 */
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_qadata *qa;
	int error;

	qa = gfs2_qadata_get(ip);
	if (!qa)
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	/* A non-NULL private cookie tells the dealloc to leave the
	   record behind; its value is never read. */
	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_qadata_put(ip);
	return error;
}
342 
/* Accumulator for gfs2_listxattr()'s walk over the EA records. */
struct ea_list {
	struct gfs2_ea_request *ei_er;	/* carries the user buffer and its size */
	unsigned int ei_size;		/* bytes of output generated so far */
};
347 
348 static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
349 {
350 	switch (ea->ea_type) {
351 	case GFS2_EATYPE_USR:
352 		return 5 + ea->ea_name_len + 1;
353 	case GFS2_EATYPE_SYS:
354 		return 7 + ea->ea_name_len + 1;
355 	case GFS2_EATYPE_SECURITY:
356 		return 9 + ea->ea_name_len + 1;
357 	default:
358 		return 0;
359 	}
360 }
361 
/*
 * ea_list_i - emit one "prefix.name\0" entry for gfs2_listxattr()
 *
 * When the caller supplied a buffer (er_data_len != 0) the full
 * "<namespace>.<name>" string plus NUL is copied into it; otherwise
 * only the required size is accumulated in ei->ei_size.
 */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		/* Would this entry overflow the user-supplied buffer? */
		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		/* Lengths must stay in sync with gfs2_ea_strlen(). */
		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		/* gfs2_ea_strlen() returned non-zero, so the type must
		   have matched one of the cases above. */
		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		/* NUL-terminate the entry. */
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
408 
/**
 * gfs2_listxattr - List gfs2 extended attributes
 * @dentry: The dentry whose inode we are interested in
 * @buffer: The buffer to write the results
 * @size: The size of the buffer (0 means just report the size needed)
 *
 * Returns: actual size of data on success, -errno on error
 */

ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
	struct gfs2_ea_request er;
	struct gfs2_holder i_gh;
	int error;

	/* With size == 0, er_data_len stays 0 and ea_list_i() only
	   accumulates the required size. */
	memset(&er, 0, sizeof(struct gfs2_ea_request));
	if (size) {
		er.er_data = buffer;
		er.er_data_len = size;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;

	/* No EA block at all means an empty list (error stays 0). */
	if (ip->i_eattr) {
		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

		error = ea_foreach(ip, ea_list_i, &ei);
		if (!error)
			error = ei.ei_size;
	}

	gfs2_glock_dq_uninit(&i_gh);

	return error;
}
447 
/**
 * ea_get_unstuffed - actually copies the unstuffed data into the
 *                    request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @data: The buffer the data is copied into (at least GFS2_EA_DATA_LEN
 *        bytes)
 *
 * Reads for all data blocks are issued first (without DIO_WAIT), then
 * each buffer is waited on, verified and copied in order.
 *
 * Returns: errno
 */

static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
			    char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error = 0;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	/* Issue reads for every data block before waiting on any. */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			/* Release the buffers already obtained. */
			while (x--)
				brelse(bh[x]);
			goto out;
		}
		dataptrs++;
	}

	/* Wait for each read, check the metatype, copy out the payload. */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* Release this and all remaining buffers. */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto out;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto out;
		}

		/* The final block may carry less than a full jbsize. */
		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	kfree(bh);
	return error;
}
511 
512 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
513 			    char *data, size_t size)
514 {
515 	int ret;
516 	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
517 	if (len > size)
518 		return -ERANGE;
519 
520 	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
521 		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
522 		return len;
523 	}
524 	ret = ea_get_unstuffed(ip, el->el_ea, data);
525 	if (ret < 0)
526 		return ret;
527 	return len;
528 }
529 
/*
 * gfs2_xattr_acl_get - read a system-namespace xattr into a fresh buffer
 * @ip: the inode
 * @name: the attribute name (system namespace)
 * @ppdata: on success set to the kmalloc'ed value; caller must kfree it
 *
 * Returns: the value length (> 0) on success, 0 when the attribute is
 * absent or empty (*ppdata untouched), or -errno on failure.
 */
int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
	struct gfs2_ea_location el;
	int error;
	int len;
	char *data;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
	if (error)
		return error;
	/* Not found, or found but zero-length: return 0 without data. */
	if (!el.el_ea)
		goto out;
	if (!GFS2_EA_DATA_LEN(el.el_ea))
		goto out;

	len = GFS2_EA_DATA_LEN(el.el_ea);
	data = kmalloc(len, GFP_NOFS);
	error = -ENOMEM;
	if (data == NULL)
		goto out;

	/* On success gfs2_ea_get_copy() returns the length copied. */
	error = gfs2_ea_get_copy(ip, &el, data, len);
	if (error < 0)
		kfree(data);
	else
		*ppdata = data;
out:
	brelse(el.el_bh);
	return error;
}
560 
561 /**
562  * gfs2_xattr_get - Get a GFS2 extended attribute
563  * @inode: The inode
564  * @name: The name of the extended attribute
565  * @buffer: The buffer to write the result into
566  * @size: The size of the buffer
567  * @type: The type of extended attribute
568  *
569  * Returns: actual size of data on success, -errno on error
570  */
571 static int gfs2_xattr_get(struct dentry *dentry, const char *name,
572 		void *buffer, size_t size, int type)
573 {
574 	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
575 	struct gfs2_ea_location el;
576 	int error;
577 
578 	if (!ip->i_eattr)
579 		return -ENODATA;
580 	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
581 		return -EINVAL;
582 
583 	error = gfs2_ea_find(ip, type, name, &el);
584 	if (error)
585 		return error;
586 	if (!el.el_ea)
587 		return -ENODATA;
588 	if (size)
589 		error = gfs2_ea_get_copy(ip, &el, buffer, size);
590 	else
591 		error = GFS2_EA_DATA_LEN(el.el_ea);
592 	brelse(el.el_bh);
593 
594 	return error;
595 }
596 
/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Allocates one block inside the current transaction, types it as an
 * EA block, and initializes it with a single unused record spanning
 * the whole journaled block size.
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_ea_header *ea;
	unsigned int n = 1;
	u64 block;
	int error;

	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
	if (error)
		return error;
	gfs2_trans_add_unrevoke(sdp, block, 1);
	*bhp = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

	/* One unused record covering the entire usable area. */
	ea = GFS2_EA_BH2FIRST(*bhp);
	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
	ea->ea_type = GFS2_EATYPE_UNUSED;
	ea->ea_flags = GFS2_EAFLAG_LAST;
	ea->ea_num_ptrs = 0;

	gfs2_add_inode_blocks(&ip->i_inode, 1);

	return 0;
}
632 
/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * returns : errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
		    struct gfs2_ea_request *er)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	ea->ea_data_len = cpu_to_be32(er->er_data_len);
	ea->ea_name_len = er->er_name_len;
	ea->ea_type = er->er_type;
	ea->__pad = 0;

	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
		/* Stuffed: the value fits inline after the name. */
		ea->ea_num_ptrs = 0;
		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
	} else {
		/* Unstuffed: allocate a data block per jbsize chunk and
		   record its address in the pointer array. */
		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
		const char *data = er->er_data;
		unsigned int data_len = er->er_data_len;
		unsigned int copy;
		unsigned int x;

		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
		for (x = 0; x < ea->ea_num_ptrs; x++) {
			struct buffer_head *bh;
			u64 block;
			int mh_size = sizeof(struct gfs2_meta_header);
			unsigned int n = 1;

			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
			if (error)
				return error;
			gfs2_trans_add_unrevoke(sdp, block, 1);
			bh = gfs2_meta_new(ip->i_gl, block);
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

			gfs2_add_inode_blocks(&ip->i_inode, 1);

			/* The last chunk may be shorter than a full block;
			   zero-fill the remainder. */
			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
							   data_len;
			memcpy(bh->b_data + mh_size, data, copy);
			if (copy < sdp->sd_jbsize)
				memset(bh->b_data + mh_size + copy, 0,
				       sdp->sd_jbsize - copy);

			*dataptr++ = cpu_to_be64(bh->b_blocknr);
			data += copy;
			data_len -= copy;

			brelse(bh);
		}

		/* Every byte of the request must have been consumed. */
		gfs2_assert_withdraw(sdp, !data_len);
	}

	return 0;
}
704 
705 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
706 				   struct gfs2_ea_request *er, void *private);
707 
/*
 * ea_alloc_skeleton - common resource bracket for EA writes that allocate
 * @ip: the inode
 * @er: the write request
 * @blks: number of blocks to reserve
 * @skeleton_call: does the actual EA work inside the transaction
 * @private: opaque cookie passed to @skeleton_call
 *
 * Acquires quota data, the quota lock, an in-place reservation and a
 * transaction; runs @skeleton_call; updates the dinode; then unwinds
 * everything in reverse order.
 */
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			     unsigned int blks,
			     ea_skeleton_call_t skeleton_call, void *private)
{
	struct gfs2_qadata *qa;
	struct buffer_head *dibh;
	int error;

	qa = gfs2_qadata_get(ip);
	if (!qa)
		return -ENOMEM;

	error = gfs2_quota_lock_check(ip);
	if (error)
		goto out;

	error = gfs2_inplace_reserve(ip, blks);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
				 blks + gfs2_rg_blocks(ip) +
				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	error = skeleton_call(ip, er, private);
	if (error)
		goto out_end_trans;

	/* Write back the updated dinode (ctime, block count, i_eattr). */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

out_end_trans:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_qadata_put(ip);
	return error;
}
756 
757 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
758 		     void *private)
759 {
760 	struct buffer_head *bh;
761 	int error;
762 
763 	error = ea_alloc_blk(ip, &bh);
764 	if (error)
765 		return error;
766 
767 	ip->i_eattr = bh->b_blocknr;
768 	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
769 
770 	brelse(bh);
771 
772 	return error;
773 }
774 
775 /**
776  * ea_init - initializes a new eattr block
777  * @ip:
778  * @er:
779  *
780  * Returns: errno
781  */
782 
783 static int ea_init(struct gfs2_inode *ip, int type, const char *name,
784 		   const void *data, size_t size)
785 {
786 	struct gfs2_ea_request er;
787 	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
788 	unsigned int blks = 1;
789 
790 	er.er_type = type;
791 	er.er_name = name;
792 	er.er_name_len = strlen(name);
793 	er.er_data = (void *)data;
794 	er.er_data_len = size;
795 
796 	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
797 		blks += DIV_ROUND_UP(er.er_data_len, jbsize);
798 
799 	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
800 }
801 
802 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
803 {
804 	u32 ea_size = GFS2_EA_SIZE(ea);
805 	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
806 				     ea_size);
807 	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
808 	int last = ea->ea_flags & GFS2_EAFLAG_LAST;
809 
810 	ea->ea_rec_len = cpu_to_be32(ea_size);
811 	ea->ea_flags ^= last;
812 
813 	new->ea_rec_len = cpu_to_be32(new_size);
814 	new->ea_flags = last;
815 
816 	return new;
817 }
818 
/*
 * ea_set_remove_stuffed - retire the old record after a successful set
 *
 * If the record has a stuffed predecessor, its space is merged into
 * that record; otherwise it is simply marked unused.  el->el_prev may
 * have been made stale by an intervening split, in which case it is
 * stepped forward once and asserted.
 */
static void ea_set_remove_stuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	u32 len;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		return;
	} else if (GFS2_EA2NEXT(prev) != ea) {
		/* prev no longer immediately precedes ea: step forward. */
		prev = GFS2_EA2NEXT(prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
	}

	/* Fold the dead record's space into its predecessor. */
	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
	prev->ea_rec_len = cpu_to_be32(len);

	if (GFS2_EA_IS_LAST(ea))
		prev->ea_flags |= GFS2_EAFLAG_LAST;
}
842 
/* State shared between ea_set_simple() and its alloc/noalloc helpers. */
struct ea_set {
	int ea_split;			/* split the chosen record before writing */

	struct gfs2_ea_request *es_er;	/* the set request */
	struct gfs2_ea_location *es_el;	/* old record to retire, if any */

	struct buffer_head *es_bh;	/* block chosen by ea_set_simple() */
	struct gfs2_ea_header *es_ea;	/* record chosen by ea_set_simple() */
};
852 
/*
 * ea_set_simple_noalloc - write a stuffed EA into already-available space
 *
 * Used when the value fits in the record without allocating data
 * blocks, so only a small transaction is required.  Optionally splits
 * the target record first and retires the old record afterwards.
 */
static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
				 struct gfs2_ea_header *ea, struct ea_set *es)
{
	struct gfs2_ea_request *er = es->es_er;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	if (es->ea_split)
		ea = ea_split_ea(ea);

	ea_write(ip, ea, er);

	if (es->es_el)
		ea_set_remove_stuffed(ip, es->es_el);

	/* Update ctime in the dinode. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;
	ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
out:
	gfs2_trans_end(GFS2_SB(&ip->i_inode));
	return error;
}
885 
886 static int ea_set_simple_alloc(struct gfs2_inode *ip,
887 			       struct gfs2_ea_request *er, void *private)
888 {
889 	struct ea_set *es = private;
890 	struct gfs2_ea_header *ea = es->es_ea;
891 	int error;
892 
893 	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
894 
895 	if (es->ea_split)
896 		ea = ea_split_ea(ea);
897 
898 	error = ea_write(ip, ea, er);
899 	if (error)
900 		return error;
901 
902 	if (es->es_el)
903 		ea_set_remove_stuffed(ip, es->es_el);
904 
905 	return 0;
906 }
907 
/*
 * ea_set_simple - ea_foreach callback looking for space for a set request
 *
 * A record can host the request either because it is unused and big
 * enough, or because its slack space suffices after a split.  Returns
 * 1 (stopping the walk) once the request has been written, 0 to keep
 * searching, or -errno on failure.
 */
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
			 void *private)
{
	struct ea_set *es = private;
	unsigned int size;
	int stuffed;
	int error;

	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
			       es->es_er->er_data_len, &size);

	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
		if (GFS2_EA_REC_LEN(ea) < size)
			return 0;
		/* Reclaim any data blocks a prior occupant left behind. */
		if (!GFS2_EA_IS_STUFFED(ea)) {
			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
			if (error)
				return error;
		}
		es->ea_split = 0;
	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
		/* Enough slack after the record's used portion. */
		es->ea_split = 1;
	else
		return 0;

	if (stuffed) {
		/* No allocation needed: small self-contained transaction. */
		error = ea_set_simple_noalloc(ip, bh, ea, es);
		if (error)
			return error;
	} else {
		/* Data blocks must be allocated: go through the skeleton. */
		unsigned int blks;

		es->es_bh = bh;
		es->es_ea = ea;
		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
					GFS2_SB(&ip->i_inode)->sd_jbsize);

		error = ea_alloc_skeleton(ip, es->es_er, blks,
					  ea_set_simple_alloc, es);
		if (error)
			return error;
	}

	return 1;
}
954 
/*
 * ea_set_block - skeleton_call writing an EA into a brand-new EA block
 *
 * If the inode is already indirect, a free slot in the pointer block
 * is used; otherwise a new indirect block is allocated and the old
 * (direct) EA block becomes its first entry.  A fresh EA block is then
 * allocated, linked in, and the request written into it.  @private, if
 * non-NULL, is the old record location to retire afterwards.
 */
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
			void *private)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *indbh, *newbh;
	__be64 *eablk;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
		__be64 *end;

		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
				       &indbh);
		if (error)
			return error;

		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
			error = -EIO;
			goto out;
		}

		/* Find the first unused slot in the pointer array. */
		eablk = (__be64 *)(indbh->b_data + mh_size);
		end = eablk + sdp->sd_inptrs;

		for (; eablk < end; eablk++)
			if (!*eablk)
				break;

		if (eablk == end) {
			error = -ENOSPC;
			goto out;
		}

		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
	} else {
		/* Convert to the indirect layout: the new pointer block's
		   first entry is the old direct EA block. */
		u64 blk;
		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
		if (error)
			return error;
		gfs2_trans_add_unrevoke(sdp, blk, 1);
		indbh = gfs2_meta_new(ip->i_gl, blk);
		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(indbh, mh_size);

		eablk = (__be64 *)(indbh->b_data + mh_size);
		*eablk = cpu_to_be64(ip->i_eattr);
		ip->i_eattr = blk;
		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
		gfs2_add_inode_blocks(&ip->i_inode, 1);

		/* The new EA block goes in the second slot. */
		eablk++;
	}

	error = ea_alloc_blk(ip, &newbh);
	if (error)
		goto out;

	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
	brelse(newbh);
	if (error)
		goto out;

	/* Retire the old record the request replaces, if any. */
	if (private)
		ea_set_remove_stuffed(ip, private);

out:
	brelse(indbh);
	return error;
}
1028 
/*
 * ea_set_i - set an EA, trying in-place placement before new blocks
 * @ip: the inode
 * @type: GFS2_EATYPE_... namespace
 * @name: attribute name
 * @value: attribute value
 * @size: length of @value
 * @el: location of an existing record to replace, or NULL
 *
 * First walks the existing EA blocks via ea_set_simple(); if no record
 * has room, falls back to ea_set_block(), which allocates a new EA
 * block (converting to the indirect layout if needed).
 */
static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
		    const void *value, size_t size, struct gfs2_ea_location *el)
{
	struct gfs2_ea_request er;
	struct ea_set es;
	unsigned int blks = 2;
	int error;

	er.er_type = type;
	er.er_name = name;
	er.er_data = (void *)value;
	er.er_name_len = strlen(name);
	er.er_data_len = size;

	memset(&es, 0, sizeof(struct ea_set));
	es.es_er = &er;
	es.es_el = el;

	/* > 0 means ea_set_simple() placed the EA in existing space. */
	error = ea_foreach(ip, ea_set_simple, &es);
	if (error > 0)
		return 0;
	if (error)
		return error;

	/* Account for the extra indirect block if we must convert. */
	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
		blks++;
	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}
1060 
/*
 * ea_set_remove_unstuffed - remove the old unstuffed record after a set
 *
 * The set may have split a record, making el->el_prev stale; step it
 * forward (and assert) before delegating to ea_remove_unstuffed().
 */
static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
				   struct gfs2_ea_location *el)
{
	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
		el->el_prev = GFS2_EA2NEXT(el->el_prev);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
	}

	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}
1072 
/*
 * ea_remove_stuffed - remove a stuffed EA record in place
 *
 * No data blocks are involved, so a small transaction suffices.  The
 * record is merged into its predecessor when one exists, otherwise it
 * is marked unused; the dinode ctime is updated in the same transaction.
 */
static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
	struct gfs2_ea_header *ea = el->el_ea;
	struct gfs2_ea_header *prev = el->el_prev;
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);

	if (prev) {
		/* Fold the record's space into its predecessor. */
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_inode.i_ctime = CURRENT_TIME;
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(GFS2_SB(&ip->i_inode));

	return error;
}
1110 
1111 /**
1112  * gfs2_xattr_remove - Remove a GFS2 extended attribute
1113  * @ip: The inode
1114  * @type: The type of the extended attribute
1115  * @name: The name of the extended attribute
1116  *
1117  * This is not called directly by the VFS since we use the (common)
1118  * scheme of making a "set with NULL data" mean a remove request. Note
1119  * that this is different from a set with zero length data.
1120  *
1121  * Returns: 0, or errno on failure
1122  */
1123 
1124 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1125 {
1126 	struct gfs2_ea_location el;
1127 	int error;
1128 
1129 	if (!ip->i_eattr)
1130 		return -ENODATA;
1131 
1132 	error = gfs2_ea_find(ip, type, name, &el);
1133 	if (error)
1134 		return error;
1135 	if (!el.el_ea)
1136 		return -ENODATA;
1137 
1138 	if (GFS2_EA_IS_STUFFED(el.el_ea))
1139 		error = ea_remove_stuffed(ip, &el);
1140 	else
1141 		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1142 
1143 	brelse(el.el_bh);
1144 
1145 	return error;
1146 }
1147 
/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 or errno on failure
 */

int __gfs2_xattr_set(struct inode *inode, const char *name,
		   const void *value, size_t size, int flags, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	unsigned int namel = strlen(name);
	int error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (namel > GFS2_EA_MAX_NAME_LEN)
		return -ERANGE;

	/* NULL value means remove, per the common xattr convention. */
	if (value == NULL)
		return gfs2_xattr_remove(ip, type, name);

	if (ea_check_size(sdp, namel, size))
		return -ERANGE;

	/* No EA block yet: create one, unless a replace was demanded. */
	if (!ip->i_eattr) {
		if (flags & XATTR_REPLACE)
			return -ENODATA;
		return ea_init(ip, type, name, value, size);
	}

	error = gfs2_ea_find(ip, type, name, &el);
	if (error)
		return error;

	if (el.el_ea) {
		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
			brelse(el.el_bh);
			return -EPERM;
		}

		/* Attribute exists: XATTR_CREATE must fail with -EEXIST. */
		error = -EEXIST;
		if (!(flags & XATTR_CREATE)) {
			/* The old record's data blocks must be freed after
			   the new value has been written successfully. */
			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
			error = ea_set_i(ip, type, name, value, size, &el);
			if (!error && unstuffed)
				ea_set_remove_unstuffed(ip, &el);
		}

		brelse(el.el_bh);
		return error;
	}

	/* Attribute absent: XATTR_REPLACE must fail with -ENODATA. */
	error = -ENODATA;
	if (!(flags & XATTR_REPLACE))
		error = ea_set_i(ip, type, name, value, size, NULL);

	return error;
}
1216 
1217 static int gfs2_xattr_set(struct dentry *dentry, const char *name,
1218 		const void *value, size_t size, int flags, int type)
1219 {
1220 	return __gfs2_xattr_set(dentry->d_inode, name, value,
1221 				size, flags, type);
1222 }
1223 
/*
 * ea_acl_chmod_unstuffed - rewrite the data blocks of an unstuffed EA
 * @ip: the inode owning the extended attribute
 * @ea: the EA header whose out-of-line data blocks are rewritten
 * @data: the new value; assumed to be GFS2_EA_DATA_LEN(ea) bytes, i.e. the
 *        same length as the existing value -- TODO confirm against callers
 *
 * Reads every data block referenced by @ea, overwrites each payload with
 * the corresponding slice of @data inside a journal transaction, and
 * releases the buffers.
 *
 * NOTE: on success the transaction is deliberately left open; the caller
 * (gfs2_xattr_acl_chmod) updates the inode and then ends it.  On any
 * failure after gfs2_trans_begin(), the transaction is ended here.
 *
 * Returns: 0 on success or a negative errno
 */
static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
				  struct gfs2_ea_header *ea, char *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head **bh;
	unsigned int amount = GFS2_EA_DATA_LEN(ea);
	/* number of journaled blocks holding the EA value */
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
	unsigned int x;
	int error;

	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
	if (!bh)
		return -ENOMEM;

	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
	if (error)
		goto out;

	/* Phase 1: issue reads for all data blocks up front */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
				       bh + x);
		if (error) {
			/* release only the buffers obtained so far */
			while (x--)
				brelse(bh[x]);
			goto fail;
		}
		dataptrs++;
	}

	/* Phase 2: wait for each read, then rewrite its payload */
	for (x = 0; x < nptrs; x++) {
		error = gfs2_meta_wait(sdp, bh[x]);
		if (error) {
			/* buffers before x were already released below */
			for (; x < nptrs; x++)
				brelse(bh[x]);
			goto fail;
		}
		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
			for (; x < nptrs; x++)
				brelse(bh[x]);
			error = -EIO;
			goto fail;
		}

		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);

		/* copy at most one journaled block's worth per iteration */
		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);

		/* amount may wrap on the final pass; it is not used after
		   the loop, so the wrap is harmless */
		amount -= sdp->sd_jbsize;
		data += sdp->sd_jbsize;

		brelse(bh[x]);
	}

out:
	/* reached on success (transaction left open for the caller) and
	   when gfs2_trans_begin() failed (no transaction to end) */
	kfree(bh);
	return error;

fail:
	gfs2_trans_end(sdp);
	kfree(bh);
	return error;
}
1288 
/*
 * gfs2_xattr_acl_chmod - update the access ACL xattr and inode attributes
 * @ip: the inode being changed
 * @attr: the new inode attributes to apply via gfs2_setattr_simple()
 * @data: the updated ACL data; assumed to match the existing EA's length
 *        (GFS2_EA_DATA_LEN) -- TODO confirm against callers
 *
 * Overwrites the GFS2_POSIX_ACL_ACCESS system EA in place, then applies
 * @attr to the inode within the same transaction.
 *
 * Returns: 0 or a negative errno
 */
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_ea_location el;
	int error;

	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
	if (error)
		return error;

	if (GFS2_EA_IS_STUFFED(el.el_ea)) {
		/* Value lives inside the EA block: one journaled overwrite */
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
		if (error == 0) {
			gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
			memcpy(GFS2_EA2DATA(el.el_ea), data,
			       GFS2_EA_DATA_LEN(el.el_ea));
		}
	} else {
		/* On success this leaves its transaction open; it is ended
		   below after the inode update.  On failure it ends the
		   transaction itself. */
		error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
	}

	brelse(el.el_bh);
	if (error)
		return error;

	/* Still inside the transaction started above */
	error = gfs2_setattr_simple(inode, attr);
	gfs2_trans_end(sdp);
	return error;
}
1319 
/*
 * ea_dealloc_indirect - free all EA blocks referenced by the indirect block
 * @ip: the inode whose i_eattr points at a GFS2_METATYPE_IN indirect block
 *
 * Two passes over the indirect block's pointer array: the first collects
 * the resource groups covering the referenced blocks (so their glocks can
 * be acquired before the transaction starts), the second actually frees
 * the blocks as runs of contiguous extents and zeroes the pointers.
 * Finally the GFS2_DIF_EA_INDIRECT flag is cleared and the dinode written.
 *
 * Returns: 0 or a negative errno
 */
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct buffer_head *indbh, *dibh;
	__be64 *eablk, *end;
	unsigned int rg_blocks = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
	if (error)
		return error;

	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + sdp->sd_inptrs;

	/* Pass 1: walk the pointers, coalescing contiguous runs, and record
	   the rgrp of each run's start block.  A zero pointer ends the list. */
	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);
			bstart = bn;
			blen = 1;
		}
		blks++;
	}
	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out;	/* no EA blocks at all: nothing to free */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	/* Sum rgrp header blocks for the transaction reservation */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	/* Lock all affected rgrps before starting the transaction */
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist_free;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
				 RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, indbh, 1);

	/* Pass 2: re-walk the same pointers, freeing each contiguous extent
	   and zeroing the on-disk pointer as we go */
	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
	bstart = 0;
	blen = 0;

	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*eablk = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	/* free the final pending extent */
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
	gfs2_rlist_free(&rlist);
out:
	brelse(indbh);
	return error;
}
1433 
/*
 * ea_dealloc_block - free the inode's (single, direct) EA block
 * @ip: the inode; i_eattr is the block to free and is cleared on success
 *
 * Locks the resource group containing the EA block, frees the block
 * inside a transaction, zeroes i_eattr and writes the updated dinode.
 *
 * Returns: 0 or a negative errno (-EIO if the block maps to no rgrp)
 */
static int ea_dealloc_block(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct buffer_head *dibh;
	struct gfs2_holder gh;
	int error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
	if (!rgd) {
		/* EA block outside any rgrp: on-disk inconsistency */
		gfs2_consist_inode(ip);
		return -EIO;
	}

	/* rgrp glock must be held exclusively before the transaction */
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
				 RES_QUOTA, 1);
	if (error)
		goto out_gunlock;

	gfs2_free_meta(ip, ip->i_eattr, 1);

	ip->i_eattr = 0;
	gfs2_add_inode_blocks(&ip->i_inode, -1);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(ip, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
1475 
1476 /**
1477  * gfs2_ea_dealloc - deallocate the extended attribute fork
1478  * @ip: the inode
1479  *
1480  * Returns: errno
1481  */
1482 
1483 int gfs2_ea_dealloc(struct gfs2_inode *ip)
1484 {
1485 	struct gfs2_qadata *qa;
1486 	int error;
1487 
1488 	qa = gfs2_qadata_get(ip);
1489 	if (!qa)
1490 		return -ENOMEM;
1491 
1492 	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1493 	if (error)
1494 		goto out_alloc;
1495 
1496 	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1497 	if (error)
1498 		goto out_quota;
1499 
1500 	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
1501 		error = ea_dealloc_indirect(ip);
1502 		if (error)
1503 			goto out_quota;
1504 	}
1505 
1506 	error = ea_dealloc_block(ip);
1507 
1508 out_quota:
1509 	gfs2_quota_unhold(ip);
1510 out_alloc:
1511 	gfs2_qadata_put(ip);
1512 	return error;
1513 }
1514 
/* Handler for "user."-prefixed xattrs, stored as type GFS2_EATYPE_USR */
static const struct xattr_handler gfs2_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = GFS2_EATYPE_USR,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1521 
/* Handler for "security."-prefixed xattrs, stored as GFS2_EATYPE_SECURITY */
static const struct xattr_handler gfs2_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags  = GFS2_EATYPE_SECURITY,
	.get    = gfs2_xattr_get,
	.set    = gfs2_xattr_set,
};
1528 
/* Dispatch table registered with the VFS; the system handler is defined
   elsewhere (declared via one of the headers included above). */
const struct xattr_handler *gfs2_xattr_handlers[] = {
	&gfs2_xattr_user_handler,
	&gfs2_xattr_security_handler,
	&gfs2_xattr_system_handler,
	NULL,
};
1535 
1536