1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 #include <linux/ceph/pagelist.h> 4 5 #include "super.h" 6 #include "mds_client.h" 7 8 #include <linux/ceph/decode.h> 9 10 #include <linux/xattr.h> 11 #include <linux/security.h> 12 #include <linux/posix_acl_xattr.h> 13 #include <linux/slab.h> 14 15 #define XATTR_CEPH_PREFIX "ceph." 16 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 17 18 static int __remove_xattr(struct ceph_inode_info *ci, 19 struct ceph_inode_xattr *xattr); 20 21 static bool ceph_is_valid_xattr(const char *name) 22 { 23 return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || 24 !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) || 25 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || 26 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); 27 } 28 29 /* 30 * These define virtual xattrs exposing the recursive directory 31 * statistics and layout metadata. 32 */ 33 struct ceph_vxattr { 34 char *name; 35 size_t name_size; /* strlen(name) + 1 (for '\0') */ 36 ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val, 37 size_t size); 38 bool (*exists_cb)(struct ceph_inode_info *ci); 39 unsigned int flags; 40 }; 41 42 #define VXATTR_FLAG_READONLY (1<<0) 43 #define VXATTR_FLAG_HIDDEN (1<<1) 44 #define VXATTR_FLAG_RSTAT (1<<2) 45 #define VXATTR_FLAG_DIRSTAT (1<<3) 46 47 /* layouts */ 48 49 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci) 50 { 51 struct ceph_file_layout *fl = &ci->i_layout; 52 return (fl->stripe_unit > 0 || fl->stripe_count > 0 || 53 fl->object_size > 0 || fl->pool_id >= 0 || 54 rcu_dereference_raw(fl->pool_ns) != NULL); 55 } 56 57 static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, 58 size_t size) 59 { 60 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); 61 struct ceph_osd_client *osdc = &fsc->client->osdc; 62 struct ceph_string *pool_ns; 63 s64 pool = ci->i_layout.pool_id; 64 const char *pool_name; 65 const char *ns_field = " pool_namespace="; 66 char buf[128]; 67 size_t len, total_len = 0; 68 ssize_t ret; 69 70 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 71 72 dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode); 73 down_read(&osdc->lock); 74 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); 75 if (pool_name) { 76 len = snprintf(buf, sizeof(buf), 77 "stripe_unit=%u stripe_count=%u object_size=%u pool=", 78 ci->i_layout.stripe_unit, ci->i_layout.stripe_count, 79 ci->i_layout.object_size); 80 total_len = len + strlen(pool_name); 81 } else { 82 len = snprintf(buf, sizeof(buf), 83 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld", 84 ci->i_layout.stripe_unit, ci->i_layout.stripe_count, 85 ci->i_layout.object_size, pool); 86 total_len = len; 87 } 88 89 if (pool_ns) 90 total_len += strlen(ns_field) + pool_ns->len; 91 92 ret = total_len; 93 if (size >= total_len) { 94 memcpy(val, buf, len); 95 ret = len; 96 if (pool_name) { 97 len = strlen(pool_name); 98 memcpy(val + ret, pool_name, len); 99 ret += len; 100 } 101 if (pool_ns) { 102 len = strlen(ns_field); 103 memcpy(val + ret, ns_field, len); 104 ret += len; 105 memcpy(val + ret, pool_ns->str, pool_ns->len); 106 ret += pool_ns->len; 107 } 108 } 109 up_read(&osdc->lock); 110 ceph_put_string(pool_ns); 111 return ret; 112 } 113 114 /* 115 * The convention with strings in xattrs is that they should not be NULL 116 * terminated, since we're returning the length with them. snprintf always 117 * NULL terminates however, so call it on a temporary buffer and then memcpy 118 * the result into place. 119 */ 120 static __printf(3, 4) 121 int ceph_fmt_xattr(char *val, size_t size, const char *fmt, ...) 122 { 123 int ret; 124 va_list args; 125 char buf[96]; /* NB: reevaluate size if new vxattrs are added */ 126 127 va_start(args, fmt); 128 ret = vsnprintf(buf, size ? sizeof(buf) : 0, fmt, args); 129 va_end(args); 130 131 /* Sanity check */ 132 if (size && ret + 1 > sizeof(buf)) { 133 WARN_ONCE(true, "Returned length too big (%d)", ret); 134 return -E2BIG; 135 } 136 137 if (ret <= size) 138 memcpy(val, buf, ret); 139 return ret; 140 } 141 142 static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci, 143 char *val, size_t size) 144 { 145 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit); 146 } 147 148 static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci, 149 char *val, size_t size) 150 { 151 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count); 152 } 153 154 static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci, 155 char *val, size_t size) 156 { 157 return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size); 158 } 159 160 static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci, 161 char *val, size_t size) 162 { 163 ssize_t ret; 164 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); 165 struct ceph_osd_client *osdc = &fsc->client->osdc; 166 s64 pool = ci->i_layout.pool_id; 167 const char *pool_name; 168 169 down_read(&osdc->lock); 170 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); 171 if (pool_name) { 172 ret = strlen(pool_name); 173 if (ret <= size) 174 memcpy(val, pool_name, ret); 175 } else { 176 ret = ceph_fmt_xattr(val, size, "%lld", pool); 177 } 178 up_read(&osdc->lock); 179 return ret; 180 } 181 182 static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci, 183 char *val, size_t size) 184 { 185 ssize_t ret = 0; 186 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns); 187 188 if (ns) { 189 ret = ns->len; 190 if (ret <= size) 191 memcpy(val, ns->str, ret); 192 ceph_put_string(ns); 193 } 194 return ret; 195 } 196 197 /* directories */ 198 199 static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val, 200 size_t size) 201 { 202 return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs); 203 } 204 205 static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val, 206 size_t size) 207 { 208 return ceph_fmt_xattr(val, size, "%lld", ci->i_files); 209 } 210 211 static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val, 212 size_t size) 213 { 214 return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs); 215 } 216 217 static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val, 218 size_t size) 219 { 220 return ceph_fmt_xattr(val, size, "%lld", 221 ci->i_rfiles + ci->i_rsubdirs); 222 } 223 224 static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val, 225 size_t size) 226 { 227 return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles); 228 } 229 230 static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val, 231 size_t size) 232 { 233 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs); 234 } 235 236 static ssize_t ceph_vxattrcb_dir_rsnaps(struct ceph_inode_info *ci, char *val, 237 size_t size) 238 { 239 return ceph_fmt_xattr(val, size, "%lld", ci->i_rsnaps); 240 } 241 242 static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val, 243 size_t size) 244 { 245 return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes); 246 } 247 248 static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val, 249 size_t size) 250 { 251 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec, 252 ci->i_rctime.tv_nsec); 253 } 254 255 /* dir pin */ 256 static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci) 257 { 258 return ci->i_dir_pin != -ENODATA; 259 } 260 261 static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val, 262 size_t size) 263 { 264 return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin); 265 } 266 267 /* quotas */ 268 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) 269 { 270 bool ret = false; 271 spin_lock(&ci->i_ceph_lock); 272 if ((ci->i_max_files || ci->i_max_bytes) && 273 ci->i_vino.snap == CEPH_NOSNAP && 274 ci->i_snap_realm && 275 ci->i_snap_realm->ino == ci->i_vino.ino) 276 ret = true; 277 spin_unlock(&ci->i_ceph_lock); 278 return ret; 279 } 280 281 static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, 282 size_t size) 283 { 284 return ceph_fmt_xattr(val, size, "max_bytes=%llu max_files=%llu", 285 ci->i_max_bytes, ci->i_max_files); 286 } 287 288 static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci, 289 char *val, size_t size) 290 { 291 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes); 292 } 293 294 static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci, 295 char *val, size_t size) 296 { 297 return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files); 298 } 299 300 /* snapshots */ 301 static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci) 302 { 303 return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0); 304 } 305 306 static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val, 307 size_t size) 308 { 309 return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec, 310 ci->i_snap_btime.tv_nsec); 311 } 312 313 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci, 314 char *val, size_t size) 315 { 316 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); 317 318 return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid); 319 } 320 321 static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci, 322 char *val, size_t size) 323 { 324 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); 325 326 return ceph_fmt_xattr(val, size, "client%lld", 327 ceph_client_gid(fsc->client)); 328 } 329 330 static ssize_t ceph_vxattrcb_caps(struct ceph_inode_info *ci, char *val, 331 size_t size) 332 { 333 int issued; 334 335 spin_lock(&ci->i_ceph_lock); 336 issued = __ceph_caps_issued(ci, NULL); 337 spin_unlock(&ci->i_ceph_lock); 338 339 return ceph_fmt_xattr(val, size, "%s/0x%x", 340 ceph_cap_string(issued), issued); 341 } 342 343 static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci, 344 char *val, size_t size) 345 { 346 int ret; 347 348 spin_lock(&ci->i_ceph_lock); 349 ret = ceph_fmt_xattr(val, size, "%d", 350 ci->i_auth_cap ? ci->i_auth_cap->session->s_mds : -1); 351 spin_unlock(&ci->i_ceph_lock); 352 return ret; 353 } 354 355 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name 356 #define CEPH_XATTR_NAME2(_type, _name, _name2) \ 357 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2 358 359 #define XATTR_NAME_CEPH(_type, _name, _flags) \ 360 { \ 361 .name = CEPH_XATTR_NAME(_type, _name), \ 362 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \ 363 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ 364 .exists_cb = NULL, \ 365 .flags = (VXATTR_FLAG_READONLY | _flags), \ 366 } 367 #define XATTR_RSTAT_FIELD(_type, _name) \ 368 XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT) 369 #define XATTR_RSTAT_FIELD_UPDATABLE(_type, _name) \ 370 { \ 371 .name = CEPH_XATTR_NAME(_type, _name), \ 372 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \ 373 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ 374 .exists_cb = NULL, \ 375 .flags = VXATTR_FLAG_RSTAT, \ 376 } 377 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \ 378 { \ 379 .name = CEPH_XATTR_NAME2(_type, _name, _field), \ 380 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \ 381 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \ 382 .exists_cb = ceph_vxattrcb_layout_exists, \ 383 .flags = VXATTR_FLAG_HIDDEN, \ 384 } 385 #define XATTR_QUOTA_FIELD(_type, _name) \ 386 { \ 387 .name = CEPH_XATTR_NAME(_type, _name), \ 388 .name_size = sizeof(CEPH_XATTR_NAME(_type, _name)), \ 389 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ 390 .exists_cb = ceph_vxattrcb_quota_exists, \ 391 .flags = VXATTR_FLAG_HIDDEN, \ 392 } 393 394 static struct ceph_vxattr ceph_dir_vxattrs[] = { 395 { 396 .name = "ceph.dir.layout", 397 .name_size = sizeof("ceph.dir.layout"), 398 .getxattr_cb = ceph_vxattrcb_layout, 399 .exists_cb = ceph_vxattrcb_layout_exists, 400 .flags = VXATTR_FLAG_HIDDEN, 401 }, 402 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit), 403 XATTR_LAYOUT_FIELD(dir, layout, stripe_count), 404 XATTR_LAYOUT_FIELD(dir, layout, object_size), 405 XATTR_LAYOUT_FIELD(dir, layout, pool), 406 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace), 407 XATTR_NAME_CEPH(dir, entries, VXATTR_FLAG_DIRSTAT), 408 XATTR_NAME_CEPH(dir, files, VXATTR_FLAG_DIRSTAT), 409 XATTR_NAME_CEPH(dir, subdirs, VXATTR_FLAG_DIRSTAT), 410 XATTR_RSTAT_FIELD(dir, rentries), 411 XATTR_RSTAT_FIELD(dir, rfiles), 412 XATTR_RSTAT_FIELD(dir, rsubdirs), 413 XATTR_RSTAT_FIELD(dir, rsnaps), 414 XATTR_RSTAT_FIELD(dir, rbytes), 415 XATTR_RSTAT_FIELD_UPDATABLE(dir, rctime), 416 { 417 .name = "ceph.dir.pin", 418 .name_size = sizeof("ceph.dir.pin"), 419 .getxattr_cb = ceph_vxattrcb_dir_pin, 420 .exists_cb = ceph_vxattrcb_dir_pin_exists, 421 .flags = VXATTR_FLAG_HIDDEN, 422 }, 423 { 424 .name = "ceph.quota", 425 .name_size = sizeof("ceph.quota"), 426 .getxattr_cb = ceph_vxattrcb_quota, 427 .exists_cb = ceph_vxattrcb_quota_exists, 428 .flags = VXATTR_FLAG_HIDDEN, 429 }, 430 XATTR_QUOTA_FIELD(quota, max_bytes), 431 XATTR_QUOTA_FIELD(quota, max_files), 432 { 433 .name = "ceph.snap.btime", 434 .name_size = sizeof("ceph.snap.btime"), 435 .getxattr_cb = ceph_vxattrcb_snap_btime, 436 .exists_cb = ceph_vxattrcb_snap_btime_exists, 437 .flags = VXATTR_FLAG_READONLY, 438 }, 439 { 440 .name = "ceph.caps", 441 .name_size = sizeof("ceph.caps"), 442 .getxattr_cb = ceph_vxattrcb_caps, 443 .exists_cb = NULL, 444 .flags = VXATTR_FLAG_HIDDEN, 445 }, 446 { .name = NULL, 0 } /* Required table terminator */ 447 }; 448 449 /* files */ 450 451 static struct ceph_vxattr ceph_file_vxattrs[] = { 452 { 453 .name = "ceph.file.layout", 454 .name_size = sizeof("ceph.file.layout"), 455 .getxattr_cb = ceph_vxattrcb_layout, 456 .exists_cb = ceph_vxattrcb_layout_exists, 457 .flags = VXATTR_FLAG_HIDDEN, 458 }, 459 XATTR_LAYOUT_FIELD(file, layout, stripe_unit), 460 XATTR_LAYOUT_FIELD(file, layout, stripe_count), 461 XATTR_LAYOUT_FIELD(file, layout, object_size), 462 XATTR_LAYOUT_FIELD(file, layout, pool), 463 XATTR_LAYOUT_FIELD(file, layout, pool_namespace), 464 { 465 .name = "ceph.snap.btime", 466 .name_size = sizeof("ceph.snap.btime"), 467 .getxattr_cb = ceph_vxattrcb_snap_btime, 468 .exists_cb = ceph_vxattrcb_snap_btime_exists, 469 .flags = VXATTR_FLAG_READONLY, 470 }, 471 { 472 .name = "ceph.caps", 473 .name_size = sizeof("ceph.caps"), 474 .getxattr_cb = ceph_vxattrcb_caps, 475 .exists_cb = NULL, 476 .flags = VXATTR_FLAG_HIDDEN, 477 }, 478 { .name = NULL, 0 } /* Required table terminator */ 479 }; 480 481 static struct ceph_vxattr ceph_common_vxattrs[] = { 482 { 483 .name = "ceph.cluster_fsid", 484 .name_size = sizeof("ceph.cluster_fsid"), 485 .getxattr_cb = ceph_vxattrcb_cluster_fsid, 486 .exists_cb = NULL, 487 .flags = VXATTR_FLAG_READONLY, 488 }, 489 { 490 .name = "ceph.client_id", 491 .name_size = sizeof("ceph.client_id"), 492 .getxattr_cb = ceph_vxattrcb_client_id, 493 .exists_cb = NULL, 494 .flags = VXATTR_FLAG_READONLY, 495 }, 496 { 497 .name = "ceph.auth_mds", 498 .name_size = sizeof("ceph.auth_mds"), 499 .getxattr_cb = ceph_vxattrcb_auth_mds, 500 .exists_cb = NULL, 501 .flags = VXATTR_FLAG_READONLY, 502 }, 503 { .name = NULL, 0 } /* Required table terminator */ 504 }; 505 506 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode) 507 { 508 if (S_ISDIR(inode->i_mode)) 509 return ceph_dir_vxattrs; 510 else if (S_ISREG(inode->i_mode)) 511 return ceph_file_vxattrs; 512 return NULL; 513 } 514 515 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode, 516 const char *name) 517 { 518 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode); 519 520 if (vxattr) { 521 while (vxattr->name) { 522 if (!strcmp(vxattr->name, name)) 523 return vxattr; 524 vxattr++; 525 } 526 } 527 528 vxattr = ceph_common_vxattrs; 529 while (vxattr->name) { 530 if (!strcmp(vxattr->name, name)) 531 return vxattr; 532 vxattr++; 533 } 534 535 return NULL; 536 } 537 538 #define MAX_XATTR_VAL_PRINT_LEN 256 539 540 static int __set_xattr(struct ceph_inode_info *ci, 541 const char *name, int name_len, 542 const char *val, int val_len, 543 int flags, int update_xattr, 544 struct ceph_inode_xattr **newxattr) 545 { 546 struct rb_node **p; 547 struct rb_node *parent = NULL; 548 struct ceph_inode_xattr *xattr = NULL; 549 int c; 550 int new = 0; 551 552 p = &ci->i_xattrs.index.rb_node; 553 while (*p) { 554 parent = *p; 555 xattr = rb_entry(parent, struct ceph_inode_xattr, node); 556 c = strncmp(name, xattr->name, min(name_len, xattr->name_len)); 557 if (c < 0) 558 p = &(*p)->rb_left; 559 else if (c > 0) 560 p = &(*p)->rb_right; 561 else { 562 if (name_len == xattr->name_len) 563 break; 564 else if (name_len < xattr->name_len) 565 p = &(*p)->rb_left; 566 else 567 p = &(*p)->rb_right; 568 } 569 xattr = NULL; 570 } 571 572 if (update_xattr) { 573 int err = 0; 574 575 if (xattr && (flags & XATTR_CREATE)) 576 err = -EEXIST; 577 else if (!xattr && (flags & XATTR_REPLACE)) 578 err = -ENODATA; 579 if (err) { 580 kfree(name); 581 kfree(val); 582 kfree(*newxattr); 583 return err; 584 } 585 if (update_xattr < 0) { 586 if (xattr) 587 __remove_xattr(ci, xattr); 588 kfree(name); 589 kfree(*newxattr); 590 return 0; 591 } 592 } 593 594 if (!xattr) { 595 new = 1; 596 xattr = *newxattr; 597 xattr->name = name; 598 xattr->name_len = name_len; 599 xattr->should_free_name = update_xattr; 600 601 ci->i_xattrs.count++; 602 dout("%s count=%d\n", __func__, ci->i_xattrs.count); 603 } else { 604 kfree(*newxattr); 605 *newxattr = NULL; 606 if (xattr->should_free_val) 607 kfree(xattr->val); 608 609 if (update_xattr) { 610 kfree(name); 611 name = xattr->name; 612 } 613 ci->i_xattrs.names_size -= xattr->name_len; 614 ci->i_xattrs.vals_size -= xattr->val_len; 615 } 616 ci->i_xattrs.names_size += name_len; 617 ci->i_xattrs.vals_size += val_len; 618 if (val) 619 xattr->val = val; 620 else 621 xattr->val = ""; 622 623 xattr->val_len = val_len; 624 xattr->dirty = update_xattr; 625 xattr->should_free_val = (val && update_xattr); 626 627 if (new) { 628 rb_link_node(&xattr->node, parent, p); 629 rb_insert_color(&xattr->node, &ci->i_xattrs.index); 630 dout("%s p=%p\n", __func__, p); 631 } 632 633 dout("%s added %llx.%llx xattr %p %.*s=%.*s%s\n", __func__, 634 ceph_vinop(&ci->netfs.inode), xattr, name_len, name, 635 min(val_len, MAX_XATTR_VAL_PRINT_LEN), val, 636 val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : ""); 637 638 return 0; 639 } 640 641 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, 642 const char *name) 643 { 644 struct rb_node **p; 645 struct rb_node *parent = NULL; 646 struct ceph_inode_xattr *xattr = NULL; 647 int name_len = strlen(name); 648 int c; 649 650 p = &ci->i_xattrs.index.rb_node; 651 while (*p) { 652 parent = *p; 653 xattr = rb_entry(parent, struct ceph_inode_xattr, node); 654 c = strncmp(name, xattr->name, xattr->name_len); 655 if (c == 0 && name_len > xattr->name_len) 656 c = 1; 657 if (c < 0) 658 p = &(*p)->rb_left; 659 else if (c > 0) 660 p = &(*p)->rb_right; 661 else { 662 int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN); 663 664 dout("%s %s: found %.*s%s\n", __func__, name, len, 665 xattr->val, xattr->val_len > len ? "..." : ""); 666 return xattr; 667 } 668 } 669 670 dout("%s %s: not found\n", __func__, name); 671 672 return NULL; 673 } 674 675 static void __free_xattr(struct ceph_inode_xattr *xattr) 676 { 677 BUG_ON(!xattr); 678 679 if (xattr->should_free_name) 680 kfree(xattr->name); 681 if (xattr->should_free_val) 682 kfree(xattr->val); 683 684 kfree(xattr); 685 } 686 687 static int __remove_xattr(struct ceph_inode_info *ci, 688 struct ceph_inode_xattr *xattr) 689 { 690 if (!xattr) 691 return -ENODATA; 692 693 rb_erase(&xattr->node, &ci->i_xattrs.index); 694 695 if (xattr->should_free_name) 696 kfree(xattr->name); 697 if (xattr->should_free_val) 698 kfree(xattr->val); 699 700 ci->i_xattrs.names_size -= xattr->name_len; 701 ci->i_xattrs.vals_size -= xattr->val_len; 702 ci->i_xattrs.count--; 703 kfree(xattr); 704 705 return 0; 706 } 707 708 static char *__copy_xattr_names(struct ceph_inode_info *ci, 709 char *dest) 710 { 711 struct rb_node *p; 712 struct ceph_inode_xattr *xattr = NULL; 713 714 p = rb_first(&ci->i_xattrs.index); 715 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count); 716 717 while (p) { 718 xattr = rb_entry(p, struct ceph_inode_xattr, node); 719 memcpy(dest, xattr->name, xattr->name_len); 720 dest[xattr->name_len] = '\0'; 721 722 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name, 723 xattr->name_len, ci->i_xattrs.names_size); 724 725 dest += xattr->name_len + 1; 726 p = rb_next(p); 727 } 728 729 return dest; 730 } 731 732 void __ceph_destroy_xattrs(struct ceph_inode_info *ci) 733 { 734 struct rb_node *p, *tmp; 735 struct ceph_inode_xattr *xattr = NULL; 736 737 p = rb_first(&ci->i_xattrs.index); 738 739 dout("__ceph_destroy_xattrs p=%p\n", p); 740 741 while (p) { 742 xattr = rb_entry(p, struct ceph_inode_xattr, node); 743 tmp = p; 744 p = rb_next(tmp); 745 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p, 746 xattr->name_len, xattr->name); 747 rb_erase(tmp, &ci->i_xattrs.index); 748 749 __free_xattr(xattr); 750 } 751 752 ci->i_xattrs.names_size = 0; 753 ci->i_xattrs.vals_size = 0; 754 ci->i_xattrs.index_version = 0; 755 ci->i_xattrs.count = 0; 756 ci->i_xattrs.index = RB_ROOT; 757 } 758 759 static int __build_xattrs(struct inode *inode) 760 __releases(ci->i_ceph_lock) 761 __acquires(ci->i_ceph_lock) 762 { 763 u32 namelen; 764 u32 numattr = 0; 765 void *p, *end; 766 u32 len; 767 const char *name, *val; 768 struct ceph_inode_info *ci = ceph_inode(inode); 769 u64 xattr_version; 770 struct ceph_inode_xattr **xattrs = NULL; 771 int err = 0; 772 int i; 773 774 dout("__build_xattrs() len=%d\n", 775 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); 776 777 if (ci->i_xattrs.index_version >= ci->i_xattrs.version) 778 return 0; /* already built */ 779 780 __ceph_destroy_xattrs(ci); 781 782 start: 783 /* updated internal xattr rb tree */ 784 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) { 785 p = ci->i_xattrs.blob->vec.iov_base; 786 end = p + ci->i_xattrs.blob->vec.iov_len; 787 ceph_decode_32_safe(&p, end, numattr, bad); 788 xattr_version = ci->i_xattrs.version; 789 spin_unlock(&ci->i_ceph_lock); 790 791 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *), 792 GFP_NOFS); 793 err = -ENOMEM; 794 if (!xattrs) 795 goto bad_lock; 796 797 for (i = 0; i < numattr; i++) { 798 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr), 799 GFP_NOFS); 800 if (!xattrs[i]) 801 goto bad_lock; 802 } 803 804 spin_lock(&ci->i_ceph_lock); 805 if (ci->i_xattrs.version != xattr_version) { 806 /* lost a race, retry */ 807 for (i = 0; i < numattr; i++) 808 kfree(xattrs[i]); 809 kfree(xattrs); 810 xattrs = NULL; 811 goto start; 812 } 813 err = -EIO; 814 while (numattr--) { 815 ceph_decode_32_safe(&p, end, len, bad); 816 namelen = len; 817 name = p; 818 p += len; 819 ceph_decode_32_safe(&p, end, len, bad); 820 val = p; 821 p += len; 822 823 err = __set_xattr(ci, name, namelen, val, len, 824 0, 0, &xattrs[numattr]); 825 826 if (err < 0) 827 goto bad; 828 } 829 kfree(xattrs); 830 } 831 ci->i_xattrs.index_version = ci->i_xattrs.version; 832 ci->i_xattrs.dirty = false; 833 834 return err; 835 bad_lock: 836 spin_lock(&ci->i_ceph_lock); 837 bad: 838 if (xattrs) { 839 for (i = 0; i < numattr; i++) 840 kfree(xattrs[i]); 841 kfree(xattrs); 842 } 843 ci->i_xattrs.names_size = 0; 844 return err; 845 } 846 847 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size, 848 int val_size) 849 { 850 /* 851 * 4 bytes for the length, and additional 4 bytes per each xattr name, 852 * 4 bytes per each value 853 */ 854 int size = 4 + ci->i_xattrs.count*(4 + 4) + 855 ci->i_xattrs.names_size + 856 ci->i_xattrs.vals_size; 857 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n", 858 ci->i_xattrs.count, ci->i_xattrs.names_size, 859 ci->i_xattrs.vals_size); 860 861 if (name_size) 862 size += 4 + 4 + name_size + val_size; 863 864 return size; 865 } 866 867 /* 868 * If there are dirty xattrs, reencode xattrs into the prealloc_blob 869 * and swap into place. It returns the old i_xattrs.blob (or NULL) so 870 * that it can be freed by the caller as the i_ceph_lock is likely to be 871 * held. 872 */ 873 struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci) 874 { 875 struct rb_node *p; 876 struct ceph_inode_xattr *xattr = NULL; 877 struct ceph_buffer *old_blob = NULL; 878 void *dest; 879 880 dout("__build_xattrs_blob %p\n", &ci->netfs.inode); 881 if (ci->i_xattrs.dirty) { 882 int need = __get_required_blob_size(ci, 0, 0); 883 884 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len); 885 886 p = rb_first(&ci->i_xattrs.index); 887 dest = ci->i_xattrs.prealloc_blob->vec.iov_base; 888 889 ceph_encode_32(&dest, ci->i_xattrs.count); 890 while (p) { 891 xattr = rb_entry(p, struct ceph_inode_xattr, node); 892 893 ceph_encode_32(&dest, xattr->name_len); 894 memcpy(dest, xattr->name, xattr->name_len); 895 dest += xattr->name_len; 896 ceph_encode_32(&dest, xattr->val_len); 897 memcpy(dest, xattr->val, xattr->val_len); 898 dest += xattr->val_len; 899 900 p = rb_next(p); 901 } 902 903 /* adjust buffer len; it may be larger than we need */ 904 ci->i_xattrs.prealloc_blob->vec.iov_len = 905 dest - ci->i_xattrs.prealloc_blob->vec.iov_base; 906 907 if (ci->i_xattrs.blob) 908 old_blob = ci->i_xattrs.blob; 909 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; 910 ci->i_xattrs.prealloc_blob = NULL; 911 ci->i_xattrs.dirty = false; 912 ci->i_xattrs.version++; 913 } 914 915 return old_blob; 916 } 917 918 static inline int __get_request_mask(struct inode *in) { 919 struct ceph_mds_request *req = current->journal_info; 920 int mask = 0; 921 if (req && req->r_target_inode == in) { 922 if (req->r_op == CEPH_MDS_OP_LOOKUP || 923 req->r_op == CEPH_MDS_OP_LOOKUPINO || 924 req->r_op == CEPH_MDS_OP_LOOKUPPARENT || 925 req->r_op == CEPH_MDS_OP_GETATTR) { 926 mask = le32_to_cpu(req->r_args.getattr.mask); 927 } else if (req->r_op == CEPH_MDS_OP_OPEN || 928 req->r_op == CEPH_MDS_OP_CREATE) { 929 mask = le32_to_cpu(req->r_args.open.mask); 930 } 931 } 932 return mask; 933 } 934 935 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, 936 size_t size) 937 { 938 struct ceph_inode_info *ci = ceph_inode(inode); 939 struct ceph_inode_xattr *xattr; 940 struct ceph_vxattr *vxattr; 941 int req_mask; 942 ssize_t err; 943 944 if (strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) 945 goto handle_non_vxattrs; 946 947 /* let's see if a virtual xattr was requested */ 948 vxattr = ceph_match_vxattr(inode, name); 949 if (vxattr) { 950 int mask = 0; 951 if (vxattr->flags & VXATTR_FLAG_RSTAT) 952 mask |= CEPH_STAT_RSTAT; 953 if (vxattr->flags & VXATTR_FLAG_DIRSTAT) 954 mask |= CEPH_CAP_FILE_SHARED; 955 err = ceph_do_getattr(inode, mask, true); 956 if (err) 957 return err; 958 err = -ENODATA; 959 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) { 960 err = vxattr->getxattr_cb(ci, value, size); 961 if (size && size < err) 962 err = -ERANGE; 963 } 964 return err; 965 } else { 966 err = ceph_do_getvxattr(inode, name, value, size); 967 /* this would happen with a new client and old server combo */ 968 if (err == -EOPNOTSUPP) 969 err = -ENODATA; 970 return err; 971 } 972 handle_non_vxattrs: 973 req_mask = __get_request_mask(inode); 974 975 spin_lock(&ci->i_ceph_lock); 976 dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name, 977 ci->i_xattrs.version, ci->i_xattrs.index_version); 978 979 if (ci->i_xattrs.version == 0 || 980 !((req_mask & CEPH_CAP_XATTR_SHARED) || 981 __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) { 982 spin_unlock(&ci->i_ceph_lock); 983 984 /* security module gets xattr while filling trace */ 985 if (current->journal_info) { 986 pr_warn_ratelimited("sync getxattr %p " 987 "during filling trace\n", inode); 988 return -EBUSY; 989 } 990 991 /* get xattrs from mds (if we don't already have them) */ 992 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); 993 if (err) 994 return err; 995 spin_lock(&ci->i_ceph_lock); 996 } 997 998 err = __build_xattrs(inode); 999 if (err < 0) 1000 goto out; 1001 1002 err = -ENODATA; /* == ENOATTR */ 1003 xattr = __get_xattr(ci, name); 1004 if (!xattr) 1005 goto out; 1006 1007 err = -ERANGE; 1008 if (size && size < xattr->val_len) 1009 goto out; 1010 1011 err = xattr->val_len; 1012 if (size == 0) 1013 goto out; 1014 1015 memcpy(value, xattr->val, xattr->val_len); 1016 1017 if (current->journal_info && 1018 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && 1019 security_ismaclabel(name + XATTR_SECURITY_PREFIX_LEN)) 1020 ci->i_ceph_flags |= CEPH_I_SEC_INITED; 1021 out: 1022 spin_unlock(&ci->i_ceph_lock); 1023 return err; 1024 } 1025 1026 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) 1027 { 1028 struct inode *inode = d_inode(dentry); 1029 struct ceph_inode_info *ci = ceph_inode(inode); 1030 bool len_only = (size == 0); 1031 u32 namelen; 1032 int err; 1033 1034 spin_lock(&ci->i_ceph_lock); 1035 dout("listxattr %p ver=%lld index_ver=%lld\n", inode, 1036 ci->i_xattrs.version, ci->i_xattrs.index_version); 1037 1038 if (ci->i_xattrs.version == 0 || 1039 !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) { 1040 spin_unlock(&ci->i_ceph_lock); 1041 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); 1042 if (err) 1043 return err; 1044 spin_lock(&ci->i_ceph_lock); 1045 } 1046 1047 err = __build_xattrs(inode); 1048 if (err < 0) 1049 goto out; 1050 1051 /* add 1 byte for each xattr due to the null termination */ 1052 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count; 1053 if (!len_only) { 1054 if (namelen > size) { 1055 err = -ERANGE; 1056 goto out; 1057 } 1058 names = __copy_xattr_names(ci, names); 1059 size -= namelen; 1060 } 1061 err = namelen; 1062 out: 1063 spin_unlock(&ci->i_ceph_lock); 1064 return err; 1065 } 1066 1067 static int ceph_sync_setxattr(struct inode *inode, const char *name, 1068 const char *value, size_t size, int flags) 1069 { 1070 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1071 struct ceph_inode_info *ci = ceph_inode(inode); 1072 struct ceph_mds_request *req; 1073 struct ceph_mds_client *mdsc = fsc->mdsc; 1074 struct ceph_osd_client *osdc = &fsc->client->osdc; 1075 struct ceph_pagelist *pagelist = NULL; 1076 int op = CEPH_MDS_OP_SETXATTR; 1077 int err; 1078 1079 if (size > 0) { 1080 /* copy value into pagelist */ 1081 pagelist = ceph_pagelist_alloc(GFP_NOFS); 1082 if (!pagelist) 1083 return -ENOMEM; 1084 1085 err = ceph_pagelist_append(pagelist, value, size); 1086 if (err) 1087 goto out; 1088 } else if (!value) { 1089 if (flags & CEPH_XATTR_REPLACE) 1090 op = CEPH_MDS_OP_RMXATTR; 1091 else 1092 flags |= CEPH_XATTR_REMOVE; 1093 } 1094 1095 dout("setxattr value size: %zu\n", size); 1096 1097 /* do request */ 1098 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 1099 if (IS_ERR(req)) { 1100 err = PTR_ERR(req); 1101 goto out; 1102 } 1103 1104 req->r_path2 = kstrdup(name, GFP_NOFS); 1105 if (!req->r_path2) { 1106 ceph_mdsc_put_request(req); 1107 err = -ENOMEM; 1108 goto out; 1109 } 1110 1111 if (op == CEPH_MDS_OP_SETXATTR) { 1112 req->r_args.setxattr.flags = cpu_to_le32(flags); 1113 req->r_args.setxattr.osdmap_epoch = 1114 cpu_to_le32(osdc->osdmap->epoch); 1115 req->r_pagelist = pagelist; 1116 pagelist = NULL; 1117 } 1118 1119 req->r_inode = inode; 1120 ihold(inode); 1121 req->r_num_caps = 1; 1122 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 1123 1124 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version); 1125 err = ceph_mdsc_do_request(mdsc, NULL, req); 1126 ceph_mdsc_put_request(req); 1127 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version); 1128 1129 out: 1130 if (pagelist) 1131 ceph_pagelist_release(pagelist); 1132 return err; 1133 } 1134 1135 int __ceph_setxattr(struct inode *inode, const char *name, 1136 const void *value, size_t size, int flags) 1137 { 1138 struct ceph_vxattr *vxattr; 1139 struct ceph_inode_info *ci = ceph_inode(inode); 1140 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1141 struct ceph_cap_flush *prealloc_cf = NULL; 1142 struct ceph_buffer *old_blob = NULL; 1143 int issued; 1144 int err; 1145 int dirty = 0; 1146 int name_len = strlen(name); 1147 int val_len = size; 1148 char *newname = NULL; 1149 char *newval = NULL; 1150 struct ceph_inode_xattr *xattr = NULL; 1151 int required_blob_size; 1152 bool check_realm = false; 1153 bool lock_snap_rwsem = false; 1154 1155 if (ceph_snap(inode) != CEPH_NOSNAP) 1156 return -EROFS; 1157 1158 vxattr = ceph_match_vxattr(inode, name); 1159 if (vxattr) { 1160 if (vxattr->flags & VXATTR_FLAG_READONLY) 1161 return -EOPNOTSUPP; 1162 if (value && !strncmp(vxattr->name, "ceph.quota", 10)) 1163 check_realm = true; 1164 } 1165 1166 /* pass any unhandled ceph.* xattrs through to the MDS */ 1167 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) 1168 goto do_sync_unlocked; 1169 1170 /* preallocate memory for xattr name, value, index node */ 1171 err = -ENOMEM; 1172 newname = kmemdup(name, name_len + 1, GFP_NOFS); 1173 if (!newname) 1174 goto out; 1175 1176 if (val_len) { 1177 newval = kmemdup(value, val_len, GFP_NOFS); 1178 if (!newval) 1179 goto out; 1180 } 1181 1182 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS); 1183 if (!xattr) 1184 goto out; 1185 1186 prealloc_cf = ceph_alloc_cap_flush(); 1187 if (!prealloc_cf) 1188 goto out; 1189 1190 spin_lock(&ci->i_ceph_lock); 1191 retry: 1192 issued = __ceph_caps_issued(ci, NULL); 1193 required_blob_size = __get_required_blob_size(ci, name_len, val_len); 1194 if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) || 1195 (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) { 1196 dout("%s do sync setxattr: version: %llu size: %d max: %llu\n", 1197 __func__, ci->i_xattrs.version, required_blob_size, 1198 mdsc->mdsmap->m_max_xattr_size); 1199 goto do_sync; 1200 } 1201 1202 if (!lock_snap_rwsem && !ci->i_head_snapc) { 1203 lock_snap_rwsem = true; 1204 if (!down_read_trylock(&mdsc->snap_rwsem)) { 1205 spin_unlock(&ci->i_ceph_lock); 1206 down_read(&mdsc->snap_rwsem); 1207 spin_lock(&ci->i_ceph_lock); 1208 goto retry; 1209 } 1210 } 1211 1212 dout("setxattr %p name '%s' issued %s\n", inode, name, 1213 ceph_cap_string(issued)); 1214 __build_xattrs(inode); 1215 1216 if (!ci->i_xattrs.prealloc_blob || 1217 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) { 1218 struct ceph_buffer *blob; 1219 1220 spin_unlock(&ci->i_ceph_lock); 1221 ceph_buffer_put(old_blob); /* Shouldn't be required */ 1222 dout(" pre-allocating new blob size=%d\n", required_blob_size); 1223 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1224 if (!blob) 1225 goto do_sync_unlocked; 1226 spin_lock(&ci->i_ceph_lock); 1227 /* prealloc_blob can't be released while holding i_ceph_lock */ 1228 if (ci->i_xattrs.prealloc_blob) 1229 old_blob = ci->i_xattrs.prealloc_blob; 1230 ci->i_xattrs.prealloc_blob = blob; 1231 goto retry; 1232 } 1233 1234 err = __set_xattr(ci, newname, name_len, newval, val_len, 1235 flags, value ? 1 : -1, &xattr); 1236 1237 if (!err) { 1238 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, 1239 &prealloc_cf); 1240 ci->i_xattrs.dirty = true; 1241 inode->i_ctime = current_time(inode); 1242 } 1243 1244 spin_unlock(&ci->i_ceph_lock); 1245 ceph_buffer_put(old_blob); 1246 if (lock_snap_rwsem) 1247 up_read(&mdsc->snap_rwsem); 1248 if (dirty) 1249 __mark_inode_dirty(inode, dirty); 1250 ceph_free_cap_flush(prealloc_cf); 1251 return err; 1252 1253 do_sync: 1254 spin_unlock(&ci->i_ceph_lock); 1255 do_sync_unlocked: 1256 if (lock_snap_rwsem) 1257 up_read(&mdsc->snap_rwsem); 1258 1259 /* security module set xattr while filling trace */ 1260 if (current->journal_info) { 1261 pr_warn_ratelimited("sync setxattr %p " 1262 "during filling trace\n", inode); 1263 err = -EBUSY; 1264 } else { 1265 err = ceph_sync_setxattr(inode, name, value, size, flags); 1266 if (err >= 0 && check_realm) { 1267 /* check if snaprealm was created for quota inode */ 1268 spin_lock(&ci->i_ceph_lock); 1269 if ((ci->i_max_files || ci->i_max_bytes) && 1270 !(ci->i_snap_realm && 1271 ci->i_snap_realm->ino == ci->i_vino.ino)) 1272 err = -EOPNOTSUPP; 1273 spin_unlock(&ci->i_ceph_lock); 1274 } 1275 } 1276 out: 1277 ceph_free_cap_flush(prealloc_cf); 1278 kfree(newname); 1279 kfree(newval); 1280 kfree(xattr); 1281 return err; 1282 } 1283 1284 static int ceph_get_xattr_handler(const struct xattr_handler *handler, 1285 struct dentry *dentry, struct inode *inode, 1286 const char *name, void *value, size_t size) 1287 { 1288 if (!ceph_is_valid_xattr(name)) 1289 return -EOPNOTSUPP; 1290 return __ceph_getxattr(inode, name, value, size); 1291 } 1292 1293 static int ceph_set_xattr_handler(const struct xattr_handler *handler, 1294 struct mnt_idmap *idmap, 1295 struct dentry *unused, struct inode *inode, 1296 const char *name, const void *value, 1297 size_t size, int flags) 1298 { 1299 if (!ceph_is_valid_xattr(name)) 1300 return -EOPNOTSUPP; 1301 return __ceph_setxattr(inode, name, value, size, flags); 1302 } 1303 1304 static const struct xattr_handler ceph_other_xattr_handler = { 1305 .prefix = "", /* match any name => handlers called with full name */ 1306 .get = ceph_get_xattr_handler, 1307 .set = ceph_set_xattr_handler, 1308 }; 1309 1310 #ifdef CONFIG_SECURITY 1311 bool ceph_security_xattr_wanted(struct inode *in) 1312 { 1313 return in->i_security != NULL; 1314 } 1315 1316 bool ceph_security_xattr_deadlock(struct inode *in) 1317 { 1318 struct ceph_inode_info *ci; 1319 bool ret; 1320 if (!in->i_security) 1321 return false; 1322 ci = ceph_inode(in); 1323 spin_lock(&ci->i_ceph_lock); 1324 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) && 1325 !(ci->i_xattrs.version > 0 && 1326 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)); 1327 spin_unlock(&ci->i_ceph_lock); 1328 return ret; 1329 } 1330 1331 #ifdef CONFIG_CEPH_FS_SECURITY_LABEL 1332 int ceph_security_init_secctx(struct dentry *dentry, umode_t mode, 1333 struct ceph_acl_sec_ctx *as_ctx) 1334 { 1335 struct ceph_pagelist *pagelist = as_ctx->pagelist; 1336 const char *name; 1337 size_t name_len; 1338 int err; 1339 1340 err = security_dentry_init_security(dentry, mode, &dentry->d_name, 1341 &name, &as_ctx->sec_ctx, 1342 &as_ctx->sec_ctxlen); 1343 if (err < 0) { 1344 WARN_ON_ONCE(err != -EOPNOTSUPP); 1345 err = 0; /* do nothing */ 1346 goto out; 1347 } 1348 1349 err = -ENOMEM; 1350 if (!pagelist) { 1351 pagelist = ceph_pagelist_alloc(GFP_KERNEL); 1352 if (!pagelist) 1353 goto out; 1354 err = ceph_pagelist_reserve(pagelist, PAGE_SIZE); 1355 if (err) 1356 goto out; 1357 ceph_pagelist_encode_32(pagelist, 1); 1358 } 1359 1360 /* 1361 * FIXME: Make security_dentry_init_security() generic. Currently 1362 * It only supports single security module and only selinux has 1363 * dentry_init_security hook. 1364 */ 1365 name_len = strlen(name); 1366 err = ceph_pagelist_reserve(pagelist, 1367 4 * 2 + name_len + as_ctx->sec_ctxlen); 1368 if (err) 1369 goto out; 1370 1371 if (as_ctx->pagelist) { 1372 /* update count of KV pairs */ 1373 BUG_ON(pagelist->length <= sizeof(__le32)); 1374 if (list_is_singular(&pagelist->head)) { 1375 le32_add_cpu((__le32*)pagelist->mapped_tail, 1); 1376 } else { 1377 struct page *page = list_first_entry(&pagelist->head, 1378 struct page, lru); 1379 void *addr = kmap_atomic(page); 1380 le32_add_cpu((__le32*)addr, 1); 1381 kunmap_atomic(addr); 1382 } 1383 } else { 1384 as_ctx->pagelist = pagelist; 1385 } 1386 1387 ceph_pagelist_encode_32(pagelist, name_len); 1388 ceph_pagelist_append(pagelist, name, name_len); 1389 1390 ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen); 1391 ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen); 1392 1393 err = 0; 1394 out: 1395 if (pagelist && !as_ctx->pagelist) 1396 ceph_pagelist_release(pagelist); 1397 return err; 1398 } 1399 #endif /* CONFIG_CEPH_FS_SECURITY_LABEL */ 1400 #endif /* CONFIG_SECURITY */ 1401 1402 void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx) 1403 { 1404 #ifdef CONFIG_CEPH_FS_POSIX_ACL 1405 posix_acl_release(as_ctx->acl); 1406 posix_acl_release(as_ctx->default_acl); 1407 #endif 1408 #ifdef CONFIG_CEPH_FS_SECURITY_LABEL 1409 security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen); 1410 #endif 1411 if (as_ctx->pagelist) 1412 ceph_pagelist_release(as_ctx->pagelist); 1413 } 1414 1415 /* 1416 * List of handlers for synthetic system.* attributes. Other 1417 * attributes are handled directly. 1418 */ 1419 const struct xattr_handler *ceph_xattr_handlers[] = { 1420 &ceph_other_xattr_handler, 1421 NULL, 1422 }; 1423