| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * Copyright (C) 2007 Oracle. All rights reserved. |
| */ |
| |
| #include <linux/blkdev.h> |
| #include <linux/module.h> |
| #include <linux/fs.h> |
| #include <linux/pagemap.h> |
| #include <linux/highmem.h> |
| #include <linux/time.h> |
| #include <linux/init.h> |
| #include <linux/seq_file.h> |
| #include <linux/string.h> |
| #include <linux/backing-dev.h> |
| #include <linux/mount.h> |
| #include <linux/writeback.h> |
| #include <linux/statfs.h> |
| #include <linux/compat.h> |
| #include <linux/fs_context.h> |
| #include <linux/fs_parser.h> |
| #include <linux/ctype.h> |
| #include <linux/namei.h> |
| #include <linux/miscdevice.h> |
| #include <linux/magic.h> |
| #include <linux/slab.h> |
| #include <linux/cleancache.h> |
| #include <linux/ratelimit.h> |
| #include <linux/crc32c.h> |
| #include <linux/btrfs.h> |
| #include "delayed-inode.h" |
| #include "ctree.h" |
| #include "disk-io.h" |
| #include "transaction.h" |
| #include "btrfs_inode.h" |
| #include "print-tree.h" |
| #include "props.h" |
| #include "xattr.h" |
| #include "volumes.h" |
| #include "export.h" |
| #include "compression.h" |
| #include "rcu-string.h" |
| #include "dev-replace.h" |
| #include "free-space-cache.h" |
| #include "backref.h" |
| #include "tests/btrfs-tests.h" |
| |
| #include "qgroup.h" |
| #define CREATE_TRACE_POINTS |
| #include <trace/events/btrfs.h> |
| |
| static const struct super_operations btrfs_super_ops; |
| |
| const char *btrfs_decode_error(int errno) |
| { |
| const char *errstr = "unknown"; |
| |
| switch (errno) { |
| case -EIO: |
| errstr = "IO failure"; |
| break; |
| case -ENOMEM: |
| errstr = "Out of memory"; |
| break; |
| case -EROFS: |
| errstr = "Readonly filesystem"; |
| break; |
| case -EEXIST: |
| errstr = "Object already exists"; |
| break; |
| case -ENOSPC: |
| errstr = "No space left"; |
| break; |
| case -ENOENT: |
| errstr = "No such entry"; |
| break; |
| } |
| |
| return errstr; |
| } |
| |
| /* |
| * __btrfs_handle_fs_error decodes expected errors from the caller and |
| * invokes the appropriate error response. |
| */ |
| __cold |
| void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, |
| unsigned int line, int errno, const char *fmt, ...) |
| { |
| struct super_block *sb = fs_info->sb; |
| #ifdef CONFIG_PRINTK |
| const char *errstr; |
| #endif |
| |
| /* |
| * Special case: if the error is EROFS, and we're already |
| * under SB_RDONLY, then it is safe here. |
| */ |
| if (errno == -EROFS && sb_rdonly(sb)) |
| return; |
| |
| #ifdef CONFIG_PRINTK |
| errstr = btrfs_decode_error(errno); |
| if (fmt) { |
| struct va_format vaf; |
| va_list args; |
| |
| va_start(args, fmt); |
| vaf.fmt = fmt; |
| vaf.va = &args; |
| |
| pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s (%pV)\n", |
| sb->s_id, function, line, errno, errstr, &vaf); |
| va_end(args); |
| } else { |
| pr_crit("BTRFS: error (device %s) in %s:%d: errno=%d %s\n", |
| sb->s_id, function, line, errno, errstr); |
| } |
| #endif |
| |
| /* |
| * Today we only save the error info to memory. Long term we'll |
| * also send it down to the disk |
| */ |
| set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); |
| |
| /* Don't go through full error handling during mount */ |
| if (!(sb->s_flags & SB_BORN)) |
| return; |
| |
| if (sb_rdonly(sb)) |
| return; |
| |
| /* btrfs handles errors by forcing the filesystem read-only */ |
| sb->s_flags |= SB_RDONLY; |
| btrfs_info(fs_info, "forced readonly"); |
| /* |
| * Note that a running device replace operation is not canceled here |
| * although there is no way to update the progress. It would add the |
| * risk of a deadlock, therefore the canceling is omitted. The only |
| * penalty is that some I/O remains active until the procedure |
| * completes. The next time the filesystem is mounted writable |
| * again, the device replace operation continues. |
| */ |
| } |
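| |
| /* |
| * Illustrative only: callers normally reach __btrfs_handle_fs_error() through |
| * the btrfs_handle_fs_error() wrapper macro from ctree.h, which supplies |
| * __func__ and __LINE__ automatically, e.g.: |
| * |
| * ret = btrfs_del_item(trans, root, path); |
| * if (ret) |
| * btrfs_handle_fs_error(fs_info, ret, "failed to delete item"); |
| */ |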
| |
| #ifdef CONFIG_PRINTK |
| static const char * const logtypes[] = { |
| "emergency", |
| "alert", |
| "critical", |
| "error", |
| "warning", |
| "notice", |
| "info", |
| "debug", |
| }; |
| |
| /* |
| * Use one ratelimit state per log level so that a flood of less important |
| * messages doesn't cause more important ones to be dropped. |
| */ |
| static struct ratelimit_state printk_limits[] = { |
| RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100), |
| RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100), |
| }; |
| |
| void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) |
| { |
| char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0"; |
| struct va_format vaf; |
| va_list args; |
| int kern_level; |
| const char *type = logtypes[4]; |
| struct ratelimit_state *ratelimit = &printk_limits[4]; |
| |
| va_start(args, fmt); |
| |
| while ((kern_level = printk_get_level(fmt)) != 0) { |
| size_t size = printk_skip_level(fmt) - fmt; |
| |
| if (kern_level >= '0' && kern_level <= '7') { |
| memcpy(lvl, fmt, size); |
| lvl[size] = '\0'; |
| type = logtypes[kern_level - '0']; |
| ratelimit = &printk_limits[kern_level - '0']; |
| } |
| fmt += size; |
| } |
| |
| vaf.fmt = fmt; |
| vaf.va = &args; |
| |
| if (__ratelimit(ratelimit)) |
| printk("%sBTRFS %s (device %s): %pV\n", lvl, type, |
| fs_info ? fs_info->sb->s_id : "<unknown>", &vaf); |
| |
| va_end(args); |
| } |
| #endif |
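| |
| /* |
| * The btrfs_<level>() helpers in ctree.h are thin wrappers that prepend a |
| * KERN_* prefix which btrfs_printk() parses off again to pick the matching |
| * ratelimit state, roughly (a sketch of the real macros): |
| * |
| * #define btrfs_info(fs_info, fmt, args...) \ |
| * btrfs_printk(fs_info, KERN_INFO fmt, ##args) |
| */ |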
| |
| /* |
| * We only mark the transaction aborted and then set the file system read-only. |
| * This will prevent new transactions from starting or trying to join this |
| * one. |
| * |
| * This means that error recovery at the call site is limited to freeing |
| * any local memory allocations and passing the error code up without |
| * further cleanup. The transaction should complete as it normally would |
| * in the call path but will return -EIO. |
| * |
| * We'll complete the cleanup in btrfs_end_transaction and |
| * btrfs_commit_transaction. |
| */ |
| __cold |
| void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, |
| const char *function, |
| unsigned int line, int errno) |
| { |
| struct btrfs_fs_info *fs_info = trans->fs_info; |
| |
| trans->aborted = errno; |
| /* |
| * Nothing was used. The other threads that have joined this |
| * transaction may be able to continue. |
| */ |
| if (!trans->dirty && list_empty(&trans->new_bgs)) { |
| const char *errstr; |
| |
| errstr = btrfs_decode_error(errno); |
| btrfs_warn(fs_info, |
| "%s:%d: Aborting unused transaction(%s).", |
| function, line, errstr); |
| return; |
| } |
| WRITE_ONCE(trans->transaction->aborted, errno); |
| /* Wake up anybody who may be waiting on this transaction */ |
| wake_up(&fs_info->transaction_wait); |
| wake_up(&fs_info->transaction_blocked_wait); |
| __btrfs_handle_fs_error(fs_info, function, line, errno, NULL); |
| } |
| |
| /* |
| * __btrfs_panic decodes unexpected, fatal errors from the caller, |
| * issues an alert, and either panics or BUGs, depending on mount options. |
| */ |
| __cold |
| void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, |
| unsigned int line, int errno, const char *fmt, ...) |
| { |
| char *s_id = "<unknown>"; |
| const char *errstr; |
| struct va_format vaf = { .fmt = fmt }; |
| va_list args; |
| |
| if (fs_info) |
| s_id = fs_info->sb->s_id; |
| |
| va_start(args, fmt); |
| vaf.va = &args; |
| |
| errstr = btrfs_decode_error(errno); |
| if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR))) |
| panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n", |
| s_id, function, line, &vaf, errno, errstr); |
| |
| btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)", |
| function, line, &vaf, errno, errstr); |
| va_end(args); |
| /* Caller calls BUG() */ |
| } |
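| |
| /* |
| * Illustrative only: callers do not invoke __btrfs_panic() directly but use |
| * the btrfs_panic() wrapper from ctree.h, which fills in __func__/__LINE__ |
| * and then issues the BUG() mentioned above, roughly: |
| * |
| * #define btrfs_panic(fs_info, errno, fmt, args...) \ |
| * do { \ |
| * __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ |
| * BUG(); \ |
| * } while (0) |
| */ |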
| |
| static void btrfs_put_super(struct super_block *sb) |
| { |
| close_ctree(btrfs_sb(sb)); |
| } |
| |
| static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, |
| u64 subvol_objectid) |
| { |
| struct btrfs_root *root = fs_info->tree_root; |
| struct btrfs_root *fs_root; |
| struct btrfs_root_ref *root_ref; |
| struct btrfs_inode_ref *inode_ref; |
| struct btrfs_key key; |
| struct btrfs_path *path = NULL; |
| char *name = NULL, *ptr; |
| u64 dirid; |
| int len; |
| int ret; |
| |
| path = btrfs_alloc_path(); |
| if (!path) { |
| ret = -ENOMEM; |
| goto err; |
| } |
| path->leave_spinning = 1; |
| |
| name = kmalloc(PATH_MAX, GFP_KERNEL); |
| if (!name) { |
| ret = -ENOMEM; |
| goto err; |
| } |
| ptr = name + PATH_MAX - 1; |
| ptr[0] = '\0'; |
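| |
| /* |
| * The path is assembled right to left: each iteration below prepends |
| * "/<name>". For a (hypothetical) subvolume at /snaps/daily the buffer |
| * fills up as: |
| * |
| * [...................\0] |
| * [............./daily\0] |
| * [......./snaps/daily\0] |
| */ |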
| |
| /* |
| * Walk up the subvolume trees in the tree of tree roots by root |
| * backrefs until we hit the top-level subvolume. |
| */ |
| while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { |
| key.objectid = subvol_objectid; |
| key.type = BTRFS_ROOT_BACKREF_KEY; |
| key.offset = (u64)-1; |
| |
| ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
| if (ret < 0) { |
| goto err; |
| } else if (ret > 0) { |
| ret = btrfs_previous_item(root, path, subvol_objectid, |
| BTRFS_ROOT_BACKREF_KEY); |
| if (ret < 0) { |
| goto err; |
| } else if (ret > 0) { |
| ret = -ENOENT; |
| goto err; |
| } |
| } |
| |
| btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); |
| subvol_objectid = key.offset; |
| |
| root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], |
| struct btrfs_root_ref); |
| len = btrfs_root_ref_name_len(path->nodes[0], root_ref); |
| ptr -= len + 1; |
| if (ptr < name) { |
| ret = -ENAMETOOLONG; |
| goto err; |
| } |
| read_extent_buffer(path->nodes[0], ptr + 1, |
| (unsigned long)(root_ref + 1), len); |
| ptr[0] = '/'; |
| dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); |
| btrfs_release_path(path); |
| |
| key.objectid = subvol_objectid; |
| key.type = BTRFS_ROOT_ITEM_KEY; |
| key.offset = (u64)-1; |
| fs_root = btrfs_read_fs_root_no_name(fs_info, &key); |
| if (IS_ERR(fs_root)) { |
| ret = PTR_ERR(fs_root); |
| goto err; |
| } |
| |
| /* |
| * Walk up the filesystem tree by inode refs until we hit the |
| * root directory. |
| */ |
| while (dirid != BTRFS_FIRST_FREE_OBJECTID) { |
| key.objectid = dirid; |
| key.type = BTRFS_INODE_REF_KEY; |
| key.offset = (u64)-1; |
| |
| ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); |
| if (ret < 0) { |
| goto err; |
| } else if (ret > 0) { |
| ret = btrfs_previous_item(fs_root, path, dirid, |
| BTRFS_INODE_REF_KEY); |
| if (ret < 0) { |
| goto err; |
| } else if (ret > 0) { |
| ret = -ENOENT; |
| goto err; |
| } |
| } |
| |
| btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); |
| dirid = key.offset; |
| |
| inode_ref = btrfs_item_ptr(path->nodes[0], |
| path->slots[0], |
| struct btrfs_inode_ref); |
| len = btrfs_inode_ref_name_len(path->nodes[0], |
| inode_ref); |
| ptr -= len + 1; |
| if (ptr < name) { |
| ret = -ENAMETOOLONG; |
| goto err; |
| } |
| read_extent_buffer(path->nodes[0], ptr + 1, |
| (unsigned long)(inode_ref + 1), len); |
| ptr[0] = '/'; |
| btrfs_release_path(path); |
| } |
| } |
| |
| btrfs_free_path(path); |
| if (ptr == name + PATH_MAX - 1) { |
| name[0] = '/'; |
| name[1] = '\0'; |
| } else { |
| memmove(name, ptr, name + PATH_MAX - ptr); |
| } |
| return name; |
| |
| err: |
| btrfs_free_path(path); |
| kfree(name); |
| return ERR_PTR(ret); |
| } |
| |
| static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, |
| u64 *objectid) |
| { |
| struct btrfs_root *root = fs_info->tree_root; |
| struct btrfs_dir_item *di; |
| struct btrfs_path *path; |
| struct btrfs_key location; |
| u64 dir_id; |
| |
| path = btrfs_alloc_path(); |
| if (!path) |
| return -ENOMEM; |
| path->leave_spinning = 1; |
| |
| /* |
| * Find the "default" dir item which points to the root item that we |
| * will mount by default if we haven't been given a specific subvolume |
| * to mount. |
| */ |
| dir_id = btrfs_super_root_dir(fs_info->super_copy); |
| di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); |
| if (IS_ERR(di)) { |
| btrfs_free_path(path); |
| return PTR_ERR(di); |
| } |
| if (!di) { |
| /* |
| * Ok the default dir item isn't there. This is weird since |
| * it's always been there, but don't freak out, just try and |
| * mount the top-level subvolume. |
| */ |
| btrfs_free_path(path); |
| *objectid = BTRFS_FS_TREE_OBJECTID; |
| return 0; |
| } |
| |
| btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); |
| btrfs_free_path(path); |
| *objectid = location.objectid; |
| return 0; |
| } |
| |
| static int btrfs_fill_super(struct fs_context *fc, |
| struct super_block *sb, |
| struct btrfs_fs_devices *fs_devices) |
| { |
| struct inode *inode; |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| struct btrfs_key key; |
| int err; |
| |
| sb->s_maxbytes = MAX_LFS_FILESIZE; |
| sb->s_magic = BTRFS_SUPER_MAGIC; |
| sb->s_op = &btrfs_super_ops; |
| sb->s_d_op = &btrfs_dentry_operations; |
| sb->s_export_op = &btrfs_export_ops; |
| sb->s_xattr = btrfs_xattr_handlers; |
| sb->s_time_gran = 1; |
| #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
| sb->s_flags |= SB_POSIXACL; |
| #endif |
| sb->s_flags |= SB_I_VERSION; |
| sb->s_iflags |= SB_I_CGROUPWB; |
| |
| err = super_setup_bdi(sb); |
| if (err) { |
| btrfs_err(fs_info, "super_setup_bdi failed"); |
| return err; |
| } |
| |
| err = open_ctree(fc, sb, fs_devices); |
| if (err) { |
| btrfs_err(fs_info, "open_ctree failed"); |
| return err; |
| } |
| |
| key.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| key.type = BTRFS_INODE_ITEM_KEY; |
| key.offset = 0; |
| inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL); |
| if (IS_ERR(inode)) { |
| err = PTR_ERR(inode); |
| goto fail_close; |
| } |
| |
| sb->s_root = d_make_root(inode); |
| if (!sb->s_root) { |
| err = -ENOMEM; |
| goto fail_close; |
| } |
| |
| cleancache_init_fs(sb); |
| sb->s_flags |= SB_ACTIVE; |
| return 0; |
| |
| fail_close: |
| close_ctree(fs_info); |
| return err; |
| } |
| |
| int btrfs_sync_fs(struct super_block *sb, int wait) |
| { |
| struct btrfs_trans_handle *trans; |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| struct btrfs_root *root = fs_info->tree_root; |
| |
| trace_btrfs_sync_fs(fs_info, wait); |
| |
| if (!wait) { |
| filemap_flush(fs_info->btree_inode->i_mapping); |
| return 0; |
| } |
| |
| btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); |
| |
| trans = btrfs_attach_transaction_barrier(root); |
| if (IS_ERR(trans)) { |
| /* no transaction, don't bother */ |
| if (PTR_ERR(trans) == -ENOENT) { |
| /* |
| * Exit unless we have some pending changes |
| * that need to go through commit |
| */ |
| if (fs_info->pending_changes == 0) |
| return 0; |
| /* |
| * A non-blocking test if the fs is frozen. We must not |
| * start a new transaction here otherwise a deadlock |
| * happens. The pending operations are delayed to the |
| * next commit after thawing. |
| */ |
| if (sb_start_write_trylock(sb)) |
| sb_end_write(sb); |
| else |
| return 0; |
| trans = btrfs_start_transaction(root, 0); |
| } |
| if (IS_ERR(trans)) |
| return PTR_ERR(trans); |
| } |
| return btrfs_commit_transaction(trans); |
| } |
| |
| static int btrfs_test_super(struct super_block *s, struct fs_context *fc) |
| { |
| struct btrfs_fs_info *p = fc->s_fs_info; |
| struct btrfs_fs_info *fs_info = btrfs_sb(s); |
| |
| return fs_info->fs_devices == p->fs_devices; |
| } |
| |
| /* |
| * Subvolumes are identified by ino 256 (BTRFS_FIRST_FREE_OBJECTID). |
| */ |
| static inline int is_subvolume_inode(struct inode *inode) |
| { |
| if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) |
| return 1; |
| return 0; |
| } |
| |
| static int mount_subvol(struct fs_context *fc) |
| { |
| struct btrfs_fs_context *ctx = fc->fs_private; |
| struct vfsmount *mnt = ctx->root_mnt; |
| struct dentry *root; |
| int ret; |
| |
| if (!ctx->subvol_name) { |
| char *subvol_name; |
| |
| if (!ctx->subvol_objectid) { |
| ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), |
| &ctx->subvol_objectid); |
| if (ret < 0) |
| return ret; |
| } |
| subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), |
| ctx->subvol_objectid); |
| if (IS_ERR(subvol_name)) |
| return PTR_ERR(subvol_name); |
| ctx->subvol_name = subvol_name; |
| } |
| |
| root = mount_subtree(mnt, ctx->subvol_name); |
| /* mount_subtree() dropped our reference on the vfsmount. */ |
| ctx->root_mnt = NULL; |
| |
| if (!IS_ERR(root)) { |
| struct super_block *s = root->d_sb; |
| struct btrfs_fs_info *fs_info = btrfs_sb(s); |
| struct inode *root_inode = d_inode(root); |
| u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid; |
| |
| ret = 0; |
| if (!is_subvolume_inode(root_inode)) { |
| btrfs_err(fs_info, "'%s' is not a valid subvolume", |
| ctx->subvol_name); |
| ret = -EINVAL; |
| } |
| if (ctx->subvol_objectid && |
| root_objectid != ctx->subvol_objectid) { |
| /* |
| * This will also catch a race condition where a |
| * subvolume which was passed by ID is renamed and |
| * another subvolume is renamed over the old location. |
| */ |
| btrfs_err(fs_info, |
| "subvol '%s' does not match subvolid %llu", |
| ctx->subvol_name, ctx->subvol_objectid); |
| ret = -EINVAL; |
| } |
| if (ret < 0) { |
| dput(root); |
| deactivate_locked_super(s); |
| goto out; |
| } |
| } |
| |
| fc->root = root; |
| ret = 0; |
| out: |
| return ret; |
| } |
| |
| /* |
| * Allocate an fs_info record and initialise the options to appropriate |
| * defaults. |
| */ |
| static struct btrfs_fs_info *btrfs_alloc_fs_info(void) |
| { |
| struct btrfs_fs_info *fs_info; |
| |
| fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); |
| if (fs_info) { |
| btrfs_set_opt(fs_info->mount_opt, DATASUM); |
| btrfs_set_opt(fs_info->mount_opt, DATACOW); |
| btrfs_set_opt(fs_info->mount_opt, BARRIER); |
| btrfs_set_opt(fs_info->mount_opt, TREELOG); |
| |
| fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; |
| fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; |
| fs_info->metadata_ratio = 0; |
| fs_info->thread_pool_size = min_t(unsigned long, |
| num_online_cpus() + 2, 8); |
| #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
| fs_info->check_integrity_print_mask = 0; |
| #endif |
| |
| /* |
| * In the long term, we'll store the compression type in the super |
| * block, and it'll be used for per-file compression control. |
| */ |
| fs_info->compress_type = BTRFS_COMPRESS_ZLIB; |
| } |
| |
| return fs_info; |
| } |
| |
| /* |
| * Find a superblock for the given device / mount point. |
| * |
| * Note: This is based on mount_bdev from fs/super.c with a few additions |
| * for multiple device setup. Make sure to keep it in sync. |
| */ |
| static int btrfs_root_get_tree(struct fs_context *fc) |
| { |
| struct btrfs_fs_context *ctx = fc->fs_private; |
| struct block_device *bdev = NULL; |
| struct super_block *s; |
| struct btrfs_device *device = NULL; |
| struct btrfs_fs_devices *fs_devices = NULL; |
| struct btrfs_fs_info *fs_info = NULL; |
| fmode_t mode = FMODE_READ; |
| int error = 0, i; |
| |
| if (!(fc->sb_flags & SB_RDONLY)) |
| mode |= FMODE_WRITE; |
| |
| /* |
| * Set up a dummy root and fs_info for test/set super. This is because |
| * we don't actually fill this stuff out until open_ctree, but we need |
| * it for searching for existing supers, so this lets us do that and |
| * then open_ctree will properly initialize everything later. |
| */ |
| fs_info = btrfs_alloc_fs_info(); |
| if (!fs_info) { |
| error = -ENOMEM; |
| goto error; |
| } |
| |
| fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); |
| fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); |
| if (!fs_info->super_copy || !fs_info->super_for_commit) { |
| error = -ENOMEM; |
| goto error_fs_info; |
| } |
| |
| mutex_lock(&uuid_mutex); |
| |
| if (ctx->devices) { |
| for (i = 0; i < ctx->nr_devices; i++) { |
| device = btrfs_scan_one_device(ctx->devices[i], mode); |
| if (IS_ERR(device)) { |
| mutex_unlock(&uuid_mutex); |
| error = PTR_ERR(device); |
| goto error_fs_info; |
| } |
| } |
| } |
| |
| device = btrfs_scan_one_device(fc->source, mode); |
| if (IS_ERR(device)) { |
| mutex_unlock(&uuid_mutex); |
| error = PTR_ERR(device); |
| goto error_fs_info; |
| } |
| |
| fs_devices = device->fs_devices; |
| fs_info->fs_devices = fs_devices; |
| |
| error = btrfs_open_devices(fs_devices, mode); |
| mutex_unlock(&uuid_mutex); |
| if (error) |
| goto error_fs_info; |
| |
| if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) { |
| error = -EACCES; |
| goto error_close_devices; |
| } |
| |
| if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && |
| !btrfs_test_opt(fs_info, FREE_SPACE_TREE) && |
| !btrfs_test_opt(fs_info, CLEAR_CACHE)) { |
| btrfs_err(fs_info, "cannot disable free space tree"); |
| error = -EINVAL; |
| goto error_close_devices; |
| } |
| if (btrfs_test_opt(fs_info, SPACE_CACHE)) |
| btrfs_info(fs_info, "disk space caching is enabled"); |
| if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) |
| btrfs_info(fs_info, "using free space tree"); |
| |
| bdev = fs_devices->latest_bdev; |
| fc->s_fs_info = fs_info; |
| s = sget_fc(fc, btrfs_test_super, set_anon_super_fc); |
| if (IS_ERR(s)) { |
| error = PTR_ERR(s); |
| goto error_close_devices; |
| } |
| |
| if (s->s_root) { |
| btrfs_close_devices(fs_info->fs_devices); |
| if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) |
| error = -EBUSY; |
| } else { |
| snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev); |
| error = btrfs_fill_super(fc, s, fs_devices); |
| } |
| if (error) |
| goto error_super; |
| |
| fc->root = dget(s->s_root); |
| return 0; |
| |
| error_super: |
| deactivate_locked_super(s); |
| error_close_devices: |
| btrfs_close_devices(fs_devices); |
| error_fs_info: |
| free_fs_info(fs_info); |
| error: |
| return error; |
| } |
| |
| static int btrfs_mount_root(struct fs_context *fc, unsigned int rdonly) |
| { |
| struct btrfs_fs_context *ctx, *root_ctx; |
| struct fs_context *root_fc; |
| struct vfsmount *root_mnt; |
| int ret; |
| |
| root_fc = vfs_dup_fs_context(fc, FS_CONTEXT_FOR_ROOT_MOUNT); |
| if (IS_ERR(root_fc)) |
| return PTR_ERR(root_fc); |
| |
| root_fc->sb_flags &= ~SB_RDONLY; |
| root_fc->sb_flags |= rdonly | SB_NOSEC; |
| root_ctx = root_fc->fs_private; |
| root_ctx->root_mnt = NULL; |
| root_ctx->root = true; |
| |
| ret = vfs_get_tree(root_fc); |
| if (ret < 0) |
| goto error_fc; |
| |
| root_mnt = vfs_create_mount(root_fc); |
| if (IS_ERR(root_mnt)) { |
| ret = PTR_ERR(root_mnt); |
| goto error_fc; |
| } |
| |
| ctx = fc->fs_private; |
| ctx->root_mnt = root_mnt; |
| ret = 0; |
| |
| error_fc: |
| put_fs_context(root_fc); |
| return ret; |
| } |
| |
| static int btrfs_reconfigure_root_to_rw(struct fs_context *fc, |
| struct super_block *sb) |
| { |
| int error; |
| struct fs_context root_fc = { |
| .purpose = FS_CONTEXT_FOR_RECONFIGURE, |
| .fs_type = sb->s_type, |
| .root = sb->s_root, |
| .log = fc->log, |
| .sb_flags = 0, |
| .sb_flags_mask = SB_RDONLY, |
| }; |
| |
| down_write(&sb->s_umount); |
| error = btrfs_reconfigure(&root_fc); |
| up_write(&sb->s_umount); |
| return error; |
| } |
| |
| /* |
| * Superblock creation function, called by the VFS layer. |
| * |
| * In order to allow mounting a subvolume directly, btrfs uses mount_subtree() |
| * which needs the vfsmount* of the device's root (/). This means the device's |
| * root has to be mounted internally in any case. |
| * |
| * Operation flow: |
| * 1. Parse subvolume id related options for later use in mount_subvol(). |
| * |
| * 2. Mount the device's root (/). |
| * |
| * 3. Call mount_subvol() to get the dentry of the subvolume. Since the |
| * default subvolume can be changed with "btrfs subvolume set-default", |
| * mount_subvol() is always called. |
| */ |
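| /* |
| * For example (an illustrative invocation), "mount -o subvol=snaps/daily |
| * /dev/sdb /mnt" first mounts the top-level tree internally and then grafts |
| * the subvolume dentry on top of it via mount_subtree(). |
| */ |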
| int btrfs_get_tree(struct fs_context *fc) |
| { |
| struct btrfs_fs_context *ctx = fc->fs_private; |
| fmode_t mode = FMODE_READ; |
| int error = 0; |
| |
| if (ctx->root) |
| return btrfs_root_get_tree(fc); |
| |
| if (!(fc->sb_flags & SB_RDONLY)) |
| mode |= FMODE_WRITE; |
| |
| /* mount device's root (/) */ |
| error = btrfs_mount_root(fc, fc->sb_flags & SB_RDONLY); |
| if (error < 0) { |
| if (error != -EBUSY) |
| return error; |
| |
| error = btrfs_mount_root( |
| fc, (fc->sb_flags & SB_RDONLY) ^ SB_RDONLY); |
| if (error < 0) |
| return error; |
| |
| if (!(fc->sb_flags & SB_RDONLY)) { |
| error = btrfs_reconfigure_root_to_rw(fc, ctx->root_mnt->mnt_sb); |
| if (error < 0) |
| return error; |
| } |
| } |
| |
| return mount_subvol(fc); |
| } |
| |
| static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, |
| u32 new_pool_size) |
| { |
| u32 old_pool_size = fs_info->thread_pool_size; |
| |
| if (new_pool_size == old_pool_size) |
| return; |
| |
| fs_info->thread_pool_size = new_pool_size; |
| |
| btrfs_info(fs_info, "resize thread pool %d -> %d", |
| old_pool_size, new_pool_size); |
| |
| btrfs_workqueue_set_max(fs_info->workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->endio_meta_write_workers, |
| new_pool_size); |
| btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->readahead_workers, new_pool_size); |
| btrfs_workqueue_set_max(fs_info->scrub_wr_completion_workers, |
| new_pool_size); |
| } |
| |
| static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info) |
| { |
| set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); |
| } |
| |
| static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info, |
| unsigned long old_opts, |
| struct fs_context *fc) |
| { |
| if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && |
| (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || |
| (fc->sb_flags & SB_RDONLY))) { |
| /* wait for any defraggers to finish */ |
| wait_event(fs_info->transaction_wait, |
| (atomic_read(&fs_info->defrag_running) == 0)); |
| if (fc->sb_flags & SB_RDONLY) |
| sync_filesystem(fs_info->sb); |
| } |
| } |
| |
| static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, |
| unsigned long old_opts) |
| { |
| /* |
| * We need to clean up all defraggable inodes if the autodefrag option |
| * has been disabled or the filesystem has become read-only. |
| */ |
| if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && |
| (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) { |
| btrfs_cleanup_defrag_inodes(fs_info); |
| } |
| |
| clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); |
| } |
| |
| /* |
| * Change the configuration of an active superblock according to the supplied |
| * parameters. Note that the parameter pointer (fc->fs_private) may be NULL in |
| * the case of umount detach, emergency remount R/O and get_tree remounting as |
| * R/W. |
| */ |
| int btrfs_reconfigure(struct fs_context *fc) |
| { |
| struct btrfs_fs_context *ctx = fc->fs_private; |
| struct super_block *sb = fc->root->d_sb; |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| struct btrfs_root *root = fs_info->tree_root; |
| unsigned old_flags = sb->s_flags; |
| unsigned long old_opts = fs_info->mount_opt; |
| unsigned long old_compress_type = fs_info->compress_type; |
| u64 old_max_inline = fs_info->max_inline; |
| u32 old_thread_pool_size = fs_info->thread_pool_size; |
| u32 old_metadata_ratio = fs_info->metadata_ratio; |
| int ret; |
| |
| sync_filesystem(sb); |
| btrfs_remount_prepare(fs_info); |
| if (ctx) |
| btrfs_apply_configuration(fc, sb); |
| btrfs_remount_begin(fs_info, old_opts, fc); |
| if (ctx) |
| btrfs_resize_thread_pool(fs_info, ctx->thread_pool_size); |
| |
| if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb)) |
| goto out; |
| |
| if (fc->sb_flags & SB_RDONLY) { |
| /* |
| * this also happens on 'umount -rf' or on shutdown, when |
| * the filesystem is busy. |
| */ |
| cancel_work_sync(&fs_info->async_reclaim_work); |
| |
| /* wait for the uuid_scan task to finish */ |
| down(&fs_info->uuid_tree_rescan_sem); |
| /* avoid complaints from lockdep et al. */ |
| up(&fs_info->uuid_tree_rescan_sem); |
| |
| sb->s_flags |= SB_RDONLY; |
| |
| /* |
| * Setting SB_RDONLY will put the cleaner thread to |
| * sleep at the next loop if it's already active. |
| * If it's already asleep, we'll leave unused block |
| * groups on disk until we're mounted read-write again |
| * unless we clean them up here. |
| */ |
| btrfs_delete_unused_bgs(fs_info); |
| |
| btrfs_dev_replace_suspend_for_unmount(fs_info); |
| btrfs_scrub_cancel(fs_info); |
| btrfs_pause_balance(fs_info); |
| |
| ret = btrfs_commit_super(fs_info); |
| if (ret) |
| goto restore; |
| } else { |
| if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
| btrfs_err(fs_info, |
| "Remounting read-write after error is not allowed"); |
| ret = -EINVAL; |
| goto restore; |
| } |
| if (fs_info->fs_devices->rw_devices == 0) { |
| ret = -EACCES; |
| goto restore; |
| } |
| |
| if (!btrfs_check_rw_degradable(fs_info, NULL)) { |
| btrfs_warn(fs_info, |
| "too many missing devices, writeable remount is not allowed"); |
| ret = -EACCES; |
| goto restore; |
| } |
| |
| if (btrfs_super_log_root(fs_info->super_copy) != 0) { |
| ret = -EINVAL; |
| goto restore; |
| } |
| |
| ret = btrfs_cleanup_fs_roots(fs_info); |
| if (ret) |
| goto restore; |
| |
| /* recover relocation */ |
| mutex_lock(&fs_info->cleaner_mutex); |
| ret = btrfs_recover_relocation(root); |
| mutex_unlock(&fs_info->cleaner_mutex); |
| if (ret) |
| goto restore; |
| |
| ret = btrfs_resume_balance_async(fs_info); |
| if (ret) |
| goto restore; |
| |
| ret = btrfs_resume_dev_replace_async(fs_info); |
| if (ret) { |
| btrfs_warn(fs_info, "failed to resume dev_replace"); |
| goto restore; |
| } |
| |
| btrfs_qgroup_rescan_resume(fs_info); |
| |
| if (!fs_info->uuid_root) { |
| btrfs_info(fs_info, "creating UUID tree"); |
| ret = btrfs_create_uuid_tree(fs_info); |
| if (ret) { |
| btrfs_warn(fs_info, |
| "failed to create the UUID tree %d", |
| ret); |
| goto restore; |
| } |
| } |
| sb->s_flags &= ~SB_RDONLY; |
| |
| set_bit(BTRFS_FS_OPEN, &fs_info->flags); |
| } |
| out: |
| wake_up_process(fs_info->transaction_kthread); |
| btrfs_remount_cleanup(fs_info, old_opts); |
| return 0; |
| |
| restore: |
| /* We've hit an error - don't reset SB_RDONLY */ |
| if (sb_rdonly(sb)) |
| old_flags |= SB_RDONLY; |
| sb->s_flags = old_flags; |
| WRITE_ONCE(fs_info->mount_opt, old_opts); |
| fs_info->compress_type = old_compress_type; |
| fs_info->max_inline = old_max_inline; |
| btrfs_resize_thread_pool(fs_info, old_thread_pool_size); |
| fs_info->metadata_ratio = old_metadata_ratio; |
| btrfs_remount_cleanup(fs_info, old_opts); |
| return ret; |
| } |
| |
| /* Used to sort the devices by max_avail (descending sort) */ |
| static int btrfs_cmp_device_free_bytes(const void *dev_info1, |
| const void *dev_info2) |
| { |
| if (((struct btrfs_device_info *)dev_info1)->max_avail > |
| ((struct btrfs_device_info *)dev_info2)->max_avail) |
| return -1; |
| else if (((struct btrfs_device_info *)dev_info1)->max_avail < |
| ((struct btrfs_device_info *)dev_info2)->max_avail) |
| return 1; |
| else |
| return 0; |
| } |
| |
| /* |
| * Sort the devices by max_avail, which stores the maximum free extent size |
| * of each device (descending sort). |
| */ |
| static inline void btrfs_descending_sort_devices( |
| struct btrfs_device_info *devices, |
| size_t nr_devices) |
| { |
| sort(devices, nr_devices, sizeof(struct btrfs_device_info), |
| btrfs_cmp_device_free_bytes, NULL); |
| } |
| |
| /* |
| * Helper to calculate the free space on the devices that can be used to |
| * store file data. |
| */ |
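| /* |
| * A worked example (hypothetical sizes) of the simulation below: with RAID1 |
| * (min_stripes = num_stripes = 2) and per-device free space of 10G, 6G and |
| * 4G (sorted descending), the loop starts at the smallest device: |
| * |
| * i = 2: avail += 4G * 2; subtract 4G from devices 1..2 -> [10G, 2G, 0] |
| * i = 1: avail += 2G * 2; subtract 2G from devices 0..1 -> [8G, 0, 0] |
| * i = 0: only one device left, fewer than min_stripes, the loop ends |
| * |
| * That is 12G of raw space; btrfs_statfs() divides by the raid factor (2 |
| * for RAID1), reporting 6G of usable data space. The 8G left on the |
| * largest device cannot host RAID1 chunks on its own. |
| */ |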
| static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, |
| u64 *free_bytes) |
| { |
| struct btrfs_device_info *devices_info; |
| struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; |
| struct btrfs_device *device; |
| u64 skip_space; |
| u64 type; |
| u64 avail_space; |
| u64 min_stripe_size; |
| int min_stripes = 1, num_stripes = 1; |
| int i = 0, nr_devices; |
| |
| /* |
| * We aren't under the device list lock, so this is racy-ish, but good |
| * enough for our purposes. |
| */ |
| nr_devices = fs_info->fs_devices->open_devices; |
| if (!nr_devices) { |
| smp_mb(); |
| nr_devices = fs_info->fs_devices->open_devices; |
| ASSERT(nr_devices); |
| if (!nr_devices) { |
| *free_bytes = 0; |
| return 0; |
| } |
| } |
| |
| devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), |
| GFP_KERNEL); |
| if (!devices_info) |
| return -ENOMEM; |
| |
| /* calc min stripe number for data space allocation */ |
| type = btrfs_data_alloc_profile(fs_info); |
| if (type & BTRFS_BLOCK_GROUP_RAID0) { |
| min_stripes = 2; |
| num_stripes = nr_devices; |
| } else if (type & BTRFS_BLOCK_GROUP_RAID1) { |
| min_stripes = 2; |
| num_stripes = 2; |
| } else if (type & BTRFS_BLOCK_GROUP_RAID10) { |
| min_stripes = 4; |
| num_stripes = 4; |
| } |
| |
| if (type & BTRFS_BLOCK_GROUP_DUP) |
| min_stripe_size = 2 * BTRFS_STRIPE_LEN; |
| else |
| min_stripe_size = BTRFS_STRIPE_LEN; |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { |
| if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, |
| &device->dev_state) || |
| !device->bdev || |
| test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) |
| continue; |
| |
| if (i >= nr_devices) |
| break; |
| |
| avail_space = device->total_bytes - device->bytes_used; |
| |
| /* align with stripe_len */ |
| avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN); |
| avail_space *= BTRFS_STRIPE_LEN; |
| |
| /* |
| * In order to avoid overwriting the superblock on the drive, |
| * btrfs starts at an offset of at least 1MB when doing chunk |
| * allocation. |
| */ |
| skip_space = SZ_1M; |
| |
| /* |
| * We cannot use the free space in [0, skip_space - 1], so subtract |
| * it from the total. |
| */ |
| if (avail_space && avail_space >= skip_space) |
| avail_space -= skip_space; |
| else |
| avail_space = 0; |
| |
| if (avail_space < min_stripe_size) |
| continue; |
| |
| devices_info[i].dev = device; |
| devices_info[i].max_avail = avail_space; |
| |
| i++; |
| } |
| rcu_read_unlock(); |
| |
| nr_devices = i; |
| |
| btrfs_descending_sort_devices(devices_info, nr_devices); |
| |
| i = nr_devices - 1; |
| avail_space = 0; |
| while (nr_devices >= min_stripes) { |
| if (num_stripes > nr_devices) |
| num_stripes = nr_devices; |
| |
| if (devices_info[i].max_avail >= min_stripe_size) { |
| int j; |
| u64 alloc_size; |
| |
| avail_space += devices_info[i].max_avail * num_stripes; |
| alloc_size = devices_info[i].max_avail; |
| for (j = i + 1 - num_stripes; j <= i; j++) |
| devices_info[j].max_avail -= alloc_size; |
| } |
| i--; |
| nr_devices--; |
| } |
| |
| kfree(devices_info); |
| *free_bytes = avail_space; |
| return 0; |
| } |
| |
| /* |
| * Calculate numbers for 'df', pessimistic in case of mixed raid profiles. |
| * |
| * If there's a redundant raid level at DATA block groups, use the respective |
| * multiplier to scale the sizes. |
| * |
| * Unused device space usage is based on simulating the chunk allocator |
| * algorithm that respects the device sizes and order of allocations. This is |
| * a close approximation of the actual use but there are other factors that may |
| * change the result (like a new metadata chunk). |
| * |
| * If metadata is exhausted, f_bavail will be 0. |
| */ |
| static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); |
| struct btrfs_super_block *disk_super = fs_info->super_copy; |
| struct list_head *head = &fs_info->space_info; |
| struct btrfs_space_info *found; |
| u64 total_used = 0; |
| u64 total_free_data = 0; |
| u64 total_free_meta = 0; |
| int bits = dentry->d_sb->s_blocksize_bits; |
| __be32 *fsid = (__be32 *)fs_info->fsid; |
| unsigned factor = 1; |
| struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; |
| int ret; |
| u64 thresh = 0; |
| int mixed = 0; |
| |
| rcu_read_lock(); |
| list_for_each_entry_rcu(found, head, list) { |
| if (found->flags & BTRFS_BLOCK_GROUP_DATA) { |
| int i; |
| |
| total_free_data += found->disk_total - found->disk_used; |
| total_free_data -= |
| btrfs_account_ro_block_groups_free_space(found); |
| |
| for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { |
| if (!list_empty(&found->block_groups[i])) |
| factor = btrfs_bg_type_to_factor( |
| btrfs_raid_array[i].bg_flag); |
| } |
| } |
| |
| /* |
| * Metadata in mixed block group profiles is accounted as data. |
| */ |
| if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) { |
| if (found->flags & BTRFS_BLOCK_GROUP_DATA) |
| mixed = 1; |
| else |
| total_free_meta += found->disk_total - |
| found->disk_used; |
| } |
| |
| total_used += found->disk_used; |
| } |
| |
| rcu_read_unlock(); |
| |
| buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor); |
| buf->f_blocks >>= bits; |
| buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits); |
| |
| /* Account global block reserve as used, it's in logical size already */ |
| spin_lock(&block_rsv->lock); |
| /* Mixed block groups accounting is not byte-accurate, avoid overflow */ |
| if (buf->f_bfree >= block_rsv->size >> bits) |
| buf->f_bfree -= block_rsv->size >> bits; |
| else |
| buf->f_bfree = 0; |
| spin_unlock(&block_rsv->lock); |
| |
| buf->f_bavail = div_u64(total_free_data, factor); |
| ret = btrfs_calc_avail_data_space(fs_info, &total_free_data); |
| if (ret) |
| return ret; |
| buf->f_bavail += div_u64(total_free_data, factor); |
| buf->f_bavail = buf->f_bavail >> bits; |
| |
| /* |
| * We calculate the remaining metadata space minus the global reserve. If |
| * this is (supposedly) smaller than zero, there's no space. But this does |
| * not hold in practice: the exhausted state happens while there's still |
| * some positive delta. So we apply some guesswork and compare the delta to |
| * a 4M threshold. (Practically observed delta was ~2M.) |
| * |
| * We probably cannot calculate the exact threshold value because this |
| * depends on the internal reservations requested by various operations, so |
| * some operations that consume only a little metadata will succeed even if |
| * the Avail is zero. But this is better than the other way around. |
| */ |
| thresh = SZ_4M; |
| |
| if (!mixed && total_free_meta - thresh < block_rsv->size) |
| buf->f_bavail = 0; |
| |
| buf->f_type = BTRFS_SUPER_MAGIC; |
| buf->f_bsize = dentry->d_sb->s_blocksize; |
| buf->f_namelen = BTRFS_NAME_LEN; |
| |
| /* |
| * We treat it as constant endianness (it doesn't matter _which_) because |
| * we want the fsid to come out the same whether mounted on a big-endian |
| * or little-endian host. |
| */ |
| buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); |
| buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); |
| /* Mask in the root object ID too, to disambiguate subvols */ |
| buf->f_fsid.val[0] ^= |
| BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32; |
| buf->f_fsid.val[1] ^= |
| BTRFS_I(d_inode(dentry))->root->root_key.objectid; |
| |
| return 0; |
| } |
| |
| static void btrfs_kill_super(struct super_block *sb) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| |
| kill_anon_super(sb); |
| free_fs_info(fs_info); |
| } |
| |
| struct file_system_type btrfs_fs_type = { |
| .owner = THIS_MODULE, |
| .name = "btrfs", |
| .init_fs_context = btrfs_init_fs_context, |
| .parameters = &btrfs_fs_parameters, |
| .kill_sb = btrfs_kill_super, |
| .fs_flags = FS_REQUIRES_DEV, |
| }; |
| |
| MODULE_ALIAS_FS("btrfs"); |
| |
| static int btrfs_control_open(struct inode *inode, struct file *file) |
| { |
| /* |
| * The control file's private_data is used to hold the |
| * transaction when it is started and is used to keep |
| * track of whether a transaction is already in progress. |
| */ |
| file->private_data = NULL; |
| return 0; |
| } |
| |
| /* |
| * used by btrfsctl to scan devices when no FS is mounted |
| */ |
| static long btrfs_control_ioctl(struct file *file, unsigned int cmd, |
| unsigned long arg) |
| { |
| struct btrfs_ioctl_vol_args *vol; |
| struct btrfs_device *device = NULL; |
| int ret = -ENOTTY; |
| |
| if (!capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| |
| vol = memdup_user((void __user *)arg, sizeof(*vol)); |
| if (IS_ERR(vol)) |
| return PTR_ERR(vol); |
| |
| switch (cmd) { |
| case BTRFS_IOC_SCAN_DEV: |
| mutex_lock(&uuid_mutex); |
| device = btrfs_scan_one_device(vol->name, FMODE_READ); |
| ret = PTR_ERR_OR_ZERO(device); |
| mutex_unlock(&uuid_mutex); |
| break; |
| case BTRFS_IOC_DEVICES_READY: |
| mutex_lock(&uuid_mutex); |
| device = btrfs_scan_one_device(vol->name, FMODE_READ); |
| if (IS_ERR(device)) { |
| mutex_unlock(&uuid_mutex); |
| ret = PTR_ERR(device); |
| break; |
| } |
| ret = !(device->fs_devices->num_devices == |
| device->fs_devices->total_devices); |
| mutex_unlock(&uuid_mutex); |
| break; |
| case BTRFS_IOC_GET_SUPPORTED_FEATURES: |
| ret = btrfs_ioctl_get_supported_features((void __user *)arg); |
| break; |
| } |
| |
| kfree(vol); |
| return ret; |
| } |
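| |
| /* |
| * A minimal userspace sketch (not part of the kernel build) of how the |
| * control device is driven, e.g. by btrfs-progs: |
| * |
| * struct btrfs_ioctl_vol_args vol = { 0 }; |
| * int fd, ret; |
| * |
| * strncpy(vol.name, "/dev/sdb", BTRFS_PATH_NAME_MAX); |
| * fd = open("/dev/btrfs-control", O_RDWR); |
| * ret = ioctl(fd, BTRFS_IOC_SCAN_DEV, &vol); |
| */ |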
| |
| static int btrfs_freeze(struct super_block *sb) |
| { |
| struct btrfs_trans_handle *trans; |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| struct btrfs_root *root = fs_info->tree_root; |
| |
| set_bit(BTRFS_FS_FROZEN, &fs_info->flags); |
| /* |
| * We don't need a barrier here, we'll wait for any transaction that |
| * could be in progress on other threads (and do delayed iputs that |
| * we want to avoid on a frozen filesystem), or do the commit |
| * ourselves. |
| */ |
| trans = btrfs_attach_transaction_barrier(root); |
| if (IS_ERR(trans)) { |
| /* no transaction, don't bother */ |
| if (PTR_ERR(trans) == -ENOENT) |
| return 0; |
| return PTR_ERR(trans); |
| } |
| return btrfs_commit_transaction(trans); |
| } |
| |
| static int btrfs_unfreeze(struct super_block *sb) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(sb); |
| |
| clear_bit(BTRFS_FS_FROZEN, &fs_info->flags); |
| return 0; |
| } |
| |
| static int btrfs_show_devname(struct seq_file *m, struct dentry *root) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); |
| struct btrfs_fs_devices *cur_devices; |
| struct btrfs_device *dev, *first_dev = NULL; |
| struct list_head *head; |
| |
| /* |
| * Lightweight locking of the devices. We should not need |
| * device_list_mutex here as we only read the device data and the list |
| * is protected by RCU. Even if a device is deleted during the list |
| * traversals, we'll get valid data, the freeing callback will wait at |
| * least until the rcu_read_unlock. |
| */ |
| rcu_read_lock(); |
| cur_devices = fs_info->fs_devices; |
| while (cur_devices) { |
| head = &cur_devices->devices; |
| list_for_each_entry_rcu(dev, head, dev_list) { |
| if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) |
| continue; |
| if (!dev->name) |
| continue; |
| if (!first_dev || dev->devid < first_dev->devid) |
| first_dev = dev; |
| } |
| cur_devices = cur_devices->seed; |
| } |
| |
| if (first_dev) |
| seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\"); |
| else |
| WARN_ON(1); |
| rcu_read_unlock(); |
| return 0; |
| } |
| |
| static const struct super_operations btrfs_super_ops = { |
| .drop_inode = btrfs_drop_inode, |
| .evict_inode = btrfs_evict_inode, |
| .put_super = btrfs_put_super, |
| .sync_fs = btrfs_sync_fs, |
| .show_options = btrfs_show_options, |
| .show_devname = btrfs_show_devname, |
| .alloc_inode = btrfs_alloc_inode, |
| .destroy_inode = btrfs_destroy_inode, |
| .statfs = btrfs_statfs, |
| .freeze_fs = btrfs_freeze, |
| .unfreeze_fs = btrfs_unfreeze, |
| }; |
| |
| static const struct file_operations btrfs_ctl_fops = { |
| .open = btrfs_control_open, |
| .unlocked_ioctl = btrfs_control_ioctl, |
| .compat_ioctl = btrfs_control_ioctl, |
| .owner = THIS_MODULE, |
| .llseek = noop_llseek, |
| }; |
| |
| static struct miscdevice btrfs_misc = { |
| .minor = BTRFS_MINOR, |
| .name = "btrfs-control", |
| .fops = &btrfs_ctl_fops |
| }; |
| |
| MODULE_ALIAS_MISCDEV(BTRFS_MINOR); |
| MODULE_ALIAS("devname:btrfs-control"); |
| |
| static int __init btrfs_interface_init(void) |
| { |
| return misc_register(&btrfs_misc); |
| } |
| |
| static __cold void btrfs_interface_exit(void) |
| { |
| misc_deregister(&btrfs_misc); |
| } |
| |
| static void __init btrfs_print_mod_info(void) |
| { |
| static const char options[] = "" |
| #ifdef CONFIG_BTRFS_DEBUG |
| ", debug=on" |
| #endif |
| #ifdef CONFIG_BTRFS_ASSERT |
| ", assert=on" |
| #endif |
| #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
| ", integrity-checker=on" |
| #endif |
| #ifdef CONFIG_BTRFS_FS_REF_VERIFY |
| ", ref-verify=on" |
| #endif |
| ; |
| pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options); |
| } |
| |
| static int __init init_btrfs_fs(void) |
| { |
| int err; |
| |
| btrfs_props_init(); |
| |
| err = btrfs_init_sysfs(); |
| if (err) |
| return err; |
| |
| btrfs_init_compress(); |
| |
| err = btrfs_init_cachep(); |
| if (err) |
| goto free_compress; |
| |
| err = extent_io_init(); |
| if (err) |
| goto free_cachep; |
| |
| err = extent_map_init(); |
| if (err) |
| goto free_extent_io; |
| |
| err = ordered_data_init(); |
| if (err) |
| goto free_extent_map; |
| |
| err = btrfs_delayed_inode_init(); |
| if (err) |
| goto free_ordered_data; |
| |
| err = btrfs_auto_defrag_init(); |
| if (err) |
| goto free_delayed_inode; |
| |
| err = btrfs_delayed_ref_init(); |
| if (err) |
| goto free_auto_defrag; |
| |
| err = btrfs_prelim_ref_init(); |
| if (err) |
| goto free_delayed_ref; |
| |
| err = btrfs_end_io_wq_init(); |
| if (err) |
| goto free_prelim_ref; |
| |
| err = btrfs_interface_init(); |
| if (err) |
| goto free_end_io_wq; |
| |
| btrfs_init_lockdep(); |
| |
| btrfs_print_mod_info(); |
| |
| err = btrfs_run_sanity_tests(); |
| if (err) |
| goto unregister_ioctl; |
| |
| err = register_filesystem(&btrfs_fs_type); |
| if (err) |
| goto unregister_ioctl; |
| |
| return 0; |
| |
| unregister_ioctl: |
| btrfs_interface_exit(); |
| free_end_io_wq: |
| btrfs_end_io_wq_exit(); |
| free_prelim_ref: |
| btrfs_prelim_ref_exit(); |
| free_delayed_ref: |
| btrfs_delayed_ref_exit(); |
| free_auto_defrag: |
| btrfs_auto_defrag_exit(); |
| free_delayed_inode: |
| btrfs_delayed_inode_exit(); |
| free_ordered_data: |
| ordered_data_exit(); |
| free_extent_map: |
| extent_map_exit(); |
| free_extent_io: |
| extent_io_exit(); |
| free_cachep: |
| btrfs_destroy_cachep(); |
| free_compress: |
| btrfs_exit_compress(); |
| btrfs_exit_sysfs(); |
| |
| return err; |
| } |
| |
| static void __exit exit_btrfs_fs(void) |
| { |
| btrfs_destroy_cachep(); |
| btrfs_delayed_ref_exit(); |
| btrfs_auto_defrag_exit(); |
| btrfs_delayed_inode_exit(); |
| btrfs_prelim_ref_exit(); |
| ordered_data_exit(); |
| extent_map_exit(); |
| extent_io_exit(); |
| btrfs_interface_exit(); |
| btrfs_end_io_wq_exit(); |
| unregister_filesystem(&btrfs_fs_type); |
| btrfs_exit_sysfs(); |
| btrfs_cleanup_fs_uuids(); |
| btrfs_exit_compress(); |
| } |
| |
| late_initcall(init_btrfs_fs); |
| module_exit(exit_btrfs_fs); |
| |
| MODULE_LICENSE("GPL"); |