summaryrefslogtreecommitdiff
path: root/fs
diff options
context:
space:
mode:
Diffstat (limited to 'fs')
-rw-r--r--fs/afs/addr_list.c8
-rw-r--r--fs/binfmt_elf.c3
-rw-r--r--fs/btrfs/block-group.c1
-rw-r--r--fs/btrfs/delayed-inode.c2
-rw-r--r--fs/btrfs/disk-io.c43
-rw-r--r--fs/btrfs/extent-tree.c8
-rw-r--r--fs/btrfs/extent_io.c1
-rw-r--r--fs/btrfs/inode.c38
-rw-r--r--fs/btrfs/ioctl.c39
-rw-r--r--fs/btrfs/messages.h3
-rw-r--r--fs/btrfs/print-tree.c10
-rw-r--r--fs/btrfs/qgroup.c2
-rw-r--r--fs/btrfs/relocation.c8
-rw-r--r--fs/btrfs/scrub.c2
-rw-r--r--fs/btrfs/space-info.c5
-rw-r--r--fs/btrfs/transaction.c16
-rw-r--r--fs/btrfs/tree-checker.c6
-rw-r--r--fs/btrfs/tree-log.c6
-rw-r--r--fs/btrfs/uuid-tree.c38
-rw-r--r--fs/btrfs/uuid-tree.h2
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/btrfs/zoned.c6
-rw-r--r--fs/ceph/addr.c1
-rw-r--r--fs/ceph/debugfs.c4
-rw-r--r--fs/ceph/dir.c17
-rw-r--r--fs/ceph/file.c4
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c3
-rw-r--r--fs/erofs/inode.c7
-rw-r--r--fs/erofs/internal.h16
-rw-r--r--fs/erofs/ishare.c14
-rw-r--r--fs/erofs/super.c85
-rw-r--r--fs/erofs/zmap.c9
-rw-r--r--fs/eventpoll.c5
-rw-r--r--fs/file_attr.c2
-rw-r--r--fs/fs-writeback.c9
-rw-r--r--fs/iomap/buffered-io.c16
-rw-r--r--fs/iomap/direct-io.c15
-rw-r--r--fs/iomap/ioend.c59
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/namespace.c133
-rw-r--r--fs/netfs/direct_write.c228
-rw-r--r--fs/netfs/internal.h4
-rw-r--r--fs/netfs/write_collect.c21
-rw-r--r--fs/netfs/write_issue.c41
-rw-r--r--fs/nfs/Kconfig3
-rw-r--r--fs/nfs/nfs3proc.c7
-rw-r--r--fs/nfsd/export.c63
-rw-r--r--fs/nfsd/export.h7
-rw-r--r--fs/nfsd/nfs4xdr.c9
-rw-r--r--fs/nfsd/nfsctl.c44
-rw-r--r--fs/nfsd/nfssvc.c7
-rw-r--r--fs/nfsd/state.h17
-rw-r--r--fs/nsfs.c15
-rw-r--r--fs/pidfs.c10
-rw-r--r--fs/proc/base.c3
-rw-r--r--fs/smb/client/Makefile2
-rw-r--r--fs/smb/client/cached_dir.c2
-rw-r--r--fs/smb/client/cifs_fs_sb.h2
-rw-r--r--fs/smb/client/cifs_ioctl.h8
-rw-r--r--fs/smb/client/cifs_unicode.c14
-rw-r--r--fs/smb/client/cifs_unicode.h14
-rw-r--r--fs/smb/client/cifsacl.c19
-rw-r--r--fs/smb/client/cifsfs.c93
-rw-r--r--fs/smb/client/cifsglob.h90
-rw-r--r--fs/smb/client/cifsproto.h27
-rw-r--r--fs/smb/client/connect.c84
-rw-r--r--fs/smb/client/dfs_cache.c2
-rw-r--r--fs/smb/client/dir.c53
-rw-r--r--fs/smb/client/file.c233
-rw-r--r--fs/smb/client/fs_context.c151
-rw-r--r--fs/smb/client/fs_context.h2
-rw-r--r--fs/smb/client/inode.c169
-rw-r--r--fs/smb/client/ioctl.c2
-rw-r--r--fs/smb/client/link.c14
-rw-r--r--fs/smb/client/misc.c58
-rw-r--r--fs/smb/client/readdir.c39
-rw-r--r--fs/smb/client/reparse.c27
-rw-r--r--fs/smb/client/reparse.h4
-rw-r--r--fs/smb/client/smb1encrypt.c3
-rw-r--r--fs/smb/client/smb1ops.c24
-rw-r--r--fs/smb/client/smb1transport.c2
-rw-r--r--fs/smb/client/smb2file.c2
-rw-r--r--fs/smb/client/smb2glob.h12
-rw-r--r--fs/smb/client/smb2inode.c30
-rw-r--r--fs/smb/client/smb2maperror.c31
-rw-r--r--fs/smb/client/smb2maperror_test.c12
-rw-r--r--fs/smb/client/smb2misc.c18
-rw-r--r--fs/smb/client/smb2ops.c46
-rw-r--r--fs/smb/client/smb2pdu.c58
-rw-r--r--fs/smb/client/smb2pdu.h7
-rw-r--r--fs/smb/client/smb2proto.h3
-rw-r--r--fs/smb/client/smb2transport.c4
-rw-r--r--fs/smb/client/trace.h2
-rw-r--r--fs/smb/client/transport.c21
-rw-r--r--fs/smb/client/xattr.c6
-rw-r--r--fs/smb/server/Kconfig1
-rw-r--r--fs/smb/server/auth.c26
-rw-r--r--fs/smb/server/mgmt/tree_connect.c9
-rw-r--r--fs/smb/server/oplock.c35
-rw-r--r--fs/smb/server/oplock.h5
-rw-r--r--fs/smb/server/smb2pdu.c30
-rw-r--r--fs/smb/server/smb2pdu.h5
-rw-r--r--fs/smb/server/transport_rdma.c4
-rw-r--r--fs/smb/server/vfs_cache.c10
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/verity/Kconfig3
-rw-r--r--fs/xfs/libxfs/xfs_ag.c28
-rw-r--r--fs/xfs/libxfs/xfs_ag.h3
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c8
-rw-r--r--fs/xfs/libxfs/xfs_defer.c2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c4
-rw-r--r--fs/xfs/libxfs/xfs_metafile.c5
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h52
-rw-r--r--fs/xfs/libxfs/xfs_sb.c3
-rw-r--r--fs/xfs/scrub/dir_repair.c2
-rw-r--r--fs/xfs/scrub/orphanage.c7
-rw-r--r--fs/xfs/xfs_bmap_item.c2
-rw-r--r--fs/xfs/xfs_dquot.c8
-rw-r--r--fs/xfs/xfs_fsops.c17
-rw-r--r--fs/xfs/xfs_health.c20
-rw-r--r--fs/xfs/xfs_healthmon.c28
-rw-r--r--fs/xfs/xfs_icache.c19
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_mount.h2
-rw-r--r--fs/xfs/xfs_notify_failure.c4
-rw-r--r--fs/xfs/xfs_platform.h9
-rw-r--r--fs/xfs/xfs_rtalloc.c44
-rw-r--r--fs/xfs/xfs_stats.c17
-rw-r--r--fs/xfs/xfs_stats.h19
-rw-r--r--fs/xfs/xfs_super.c4
-rw-r--r--fs/xfs/xfs_verify_media.c4
-rw-r--r--fs/xfs/xfs_zone_alloc.c6
-rw-r--r--fs/xfs/xfs_zone_gc.c12
134 files changed, 1888 insertions, 1103 deletions
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index a936f9ea5610..63bf096b721a 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -298,8 +298,8 @@ int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *alist,
srx.transport.sin.sin_addr.s_addr = xdr;
peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
- if (!peer)
- return -ENOMEM;
+ if (IS_ERR(peer))
+ return PTR_ERR(peer);
for (i = 0; i < alist->nr_ipv4; i++) {
if (peer == alist->addrs[i].peer) {
@@ -342,8 +342,8 @@ int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
memcpy(&srx.transport.sin6.sin6_addr, xdr, 16);
peer = rxrpc_kernel_lookup_peer(net->socket, &srx, GFP_KERNEL);
- if (!peer)
- return -ENOMEM;
+ if (IS_ERR(peer))
+ return PTR_ERR(peer);
for (i = alist->nr_ipv4; i < alist->nr_addrs; i++) {
if (peer == alist->addrs[i].peer) {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8e89cc5b2820..fb857faaf0d6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -47,6 +47,7 @@
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <uapi/linux/rseq.h>
+#include <linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>
@@ -286,7 +287,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
}
#ifdef CONFIG_RSEQ
NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
- NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
+ NEW_AUX_ENT(AT_RSEQ_ALIGN, rseq_alloc_align());
#endif
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index c284f48cfae4..2a886bece810 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -3340,7 +3340,6 @@ again:
btrfs_abort_transaction(trans, ret);
goto out_put;
}
- WARN_ON(ret);
/* We've already setup this transaction, go ahead and exit */
if (block_group->cache_generation == trans->transid &&
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index d97bbbd045e0..56ff8afe9a22 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1657,7 +1657,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"failed to add delayed dir index item, root: %llu, inode: %llu, index: %llu, error: %d",
- index, btrfs_root_id(node->root), node->inode_id, ret);
+ btrfs_root_id(node->root), node->inode_id, index, ret);
btrfs_delayed_item_release_metadata(dir->root, item);
btrfs_release_delayed_item(item);
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f6fa15a1193f..407830d86d0d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1994,7 +1994,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
int level = btrfs_super_log_root_level(disk_super);
if (unlikely(fs_devices->rw_devices == 0)) {
- btrfs_warn(fs_info, "log replay required on RO media");
+ btrfs_err(fs_info, "log replay required on RO media");
return -EIO;
}
@@ -2008,9 +2008,9 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
check.owner_root = BTRFS_TREE_LOG_OBJECTID;
log_tree_root->node = read_tree_block(fs_info, bytenr, &check);
if (IS_ERR(log_tree_root->node)) {
- btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
log_tree_root->node = NULL;
+ btrfs_err(fs_info, "failed to read log tree with error: %d", ret);
btrfs_put_root(log_tree_root);
return ret;
}
@@ -2023,9 +2023,9 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
/* returns with log_tree_root freed on success */
ret = btrfs_recover_log_trees(log_tree_root);
btrfs_put_root(log_tree_root);
- if (ret) {
- btrfs_handle_fs_error(fs_info, ret,
- "Failed to recover log tree");
+ if (unlikely(ret)) {
+ ASSERT(BTRFS_FS_ERROR(fs_info) != 0);
+ btrfs_err(fs_info, "failed to recover log trees with error: %d", ret);
return ret;
}
@@ -2972,7 +2972,6 @@ static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
if (IS_ERR(task)) {
/* fs_info->update_uuid_tree_gen remains 0 in all error case */
- btrfs_warn(fs_info, "failed to start uuid_rescan task");
up(&fs_info->uuid_tree_rescan_sem);
return PTR_ERR(task);
}
@@ -3188,7 +3187,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
btrfs_err(fs_info,
"cannot mount because of unknown incompat features (0x%llx)",
- incompat);
+ incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP);
return -EINVAL;
}
@@ -3220,7 +3219,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
if (compat_ro_unsupp && is_rw_mount) {
btrfs_err(fs_info,
"cannot mount read-write because of unknown compat_ro features (0x%llx)",
- compat_ro);
+ compat_ro_unsupp);
return -EINVAL;
}
@@ -3233,7 +3232,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
!btrfs_test_opt(fs_info, NOLOGREPLAY)) {
btrfs_err(fs_info,
"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
- compat_ro);
+ compat_ro_unsupp);
return -EINVAL;
}
@@ -3595,7 +3594,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
}
- btrfs_zoned_reserve_data_reloc_bg(fs_info);
btrfs_free_zone_cache(fs_info);
btrfs_check_active_zone_reservation(fs_info);
@@ -3623,6 +3621,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_cleaner;
}
+ /*
+ * Starts a transaction, must be called after the transaction kthread
+ * is initialized.
+ */
+ btrfs_zoned_reserve_data_reloc_bg(fs_info);
+
ret = btrfs_read_qgroup_config(fs_info);
if (ret)
goto fail_trans_kthread;
@@ -3642,7 +3646,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
ret = PTR_ERR(fs_info->fs_root);
- btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
+ btrfs_err(fs_info, "failed to read fs tree: %d", ret);
fs_info->fs_root = NULL;
goto fail_qgroup;
}
@@ -3663,8 +3667,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
btrfs_info(fs_info, "checking UUID tree");
ret = btrfs_check_uuid_tree(fs_info);
if (ret) {
- btrfs_warn(fs_info,
- "failed to check the UUID tree: %d", ret);
+ btrfs_err(fs_info, "failed to check the UUID tree: %d", ret);
close_ctree(fs_info);
return ret;
}
@@ -4399,9 +4402,17 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
*/
btrfs_flush_workqueue(fs_info->delayed_workers);
- ret = btrfs_commit_super(fs_info);
- if (ret)
- btrfs_err(fs_info, "commit super ret %d", ret);
+ /*
+ * If the filesystem is shutdown, then an attempt to commit the
+ * super block (or any write) will just fail. Since we freeze
+ * the filesystem before shutting it down, the filesystem is in
+ * a consistent state and we don't need to commit super blocks.
+ */
+ if (!btrfs_is_shutdown(fs_info)) {
+ ret = btrfs_commit_super(fs_info);
+ if (ret)
+ btrfs_err(fs_info, "commit super block returned %d", ret);
+ }
}
kthread_stop(fs_info->transaction_kthread);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 03cf9f242c70..b0d9baf5b412 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2933,9 +2933,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
while (!TRANS_ABORTED(trans) && cached_state) {
struct extent_state *next_state;
- if (btrfs_test_opt(fs_info, DISCARD_SYNC))
+ if (btrfs_test_opt(fs_info, DISCARD_SYNC)) {
ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL, true);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "discard failed for extent [%llu, %llu]: errno=%d %s",
+ start, end, ret, btrfs_decode_error(ret));
+ }
+ }
next_state = btrfs_next_extent_state(unpin, cached_state);
btrfs_clear_extent_dirty(unpin, start, end, &cached_state);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 744a1fff6eef..5f97a3d2a8d7 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4507,6 +4507,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
*/
if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
spin_unlock(&eb->refs_lock);
+ rcu_read_lock();
break;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6efb543f1c24..a6da98435ef7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1392,10 +1392,25 @@ static int cow_one_range(struct btrfs_inode *inode, struct folio *locked_folio,
return ret;
free_reserved:
+ /*
+ * If we have reserved an extent for the current range and failed to
+ * create the respective extent map or ordered extent, it means that
+ * when we reserved the extent we decremented the extent's size from
+ * the data space_info's bytes_may_use counter and
+ * incremented the space_info's bytes_reserved counter by the same
+ * amount.
+ *
+ * We must make sure extent_clear_unlock_delalloc() does not try
+ * to decrement again the data space_info's bytes_may_use counter, which
+ * will be handled by btrfs_free_reserved_extent().
+ *
+ * Therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV, but only
+ * EXTENT_CLEAR_META_RESV.
+ */
extent_clear_unlock_delalloc(inode, file_offset, cur_end, locked_folio, cached,
EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
- EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
+ EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
btrfs_qgroup_free_data(inode, NULL, file_offset, cur_len, NULL);
@@ -4764,7 +4779,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu with active swapfile",
- btrfs_root_id(root));
+ btrfs_root_id(dest));
ret = -EPERM;
goto out_up_write;
}
@@ -6597,6 +6612,25 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
int ret;
bool xa_reserved = false;
+ if (!args->orphan && !args->subvol) {
+ /*
+ * Before anything else, check if we can add the name to the
+ * parent directory. We want to avoid a dir item overflow in
+ * case we have an existing dir item due to existing name
+ * hash collisions. We do this check here before we call
+ * btrfs_add_link() down below so that we can avoid a
+ * transaction abort (which could be exploited by malicious
+ * users).
+ *
+ * For subvolumes we already do this in btrfs_mksubvol().
+ */
+ ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
+ btrfs_ino(BTRFS_I(dir)),
+ name);
+ if (ret < 0)
+ return ret;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ae2173235c4d..b805dd9227ef 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -672,6 +672,13 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
goto out;
}
+ /*
+ * Subvolumes have orphans cleaned on first dentry lookup. A new
+ * subvolume cannot have any orphans, so we should set the bit before we
+ * add the subvolume dentry to the dentry cache, so that it is in the
+ * same state as a subvolume after first lookup.
+ */
+ set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &new_root->state);
d_instantiate_new(dentry, new_inode_args.inode);
new_inode_args.inode = NULL;
@@ -3852,6 +3859,25 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
goto out;
}
+ received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
+ BTRFS_UUID_SIZE);
+
+ /*
+ * Before we attempt to add the new received uuid, check if we have room
+ * for it in case there's already an item. If the size of the existing
+ * item plus this root's ID (u64) exceeds the maximum item size, we can
+ * return here without the need to abort a transaction. If we don't do
+ * this check, the btrfs_uuid_tree_add() call below would fail with
+ * -EOVERFLOW and result in a transaction abort. Malicious users could
+ * exploit this to turn the fs into RO mode.
+ */
+ if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
+ ret = btrfs_uuid_tree_check_overflow(fs_info, sa->uuid,
+ BTRFS_UUID_KEY_RECEIVED_SUBVOL);
+ if (ret < 0)
+ goto out;
+ }
+
/*
* 1 - root item
* 2 - uuid items (received uuid + subvol uuid)
@@ -3867,15 +3893,12 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
sa->rtime.sec = ct.tv_sec;
sa->rtime.nsec = ct.tv_nsec;
- received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
- BTRFS_UUID_SIZE);
if (received_uuid_changed &&
!btrfs_is_empty_uuid(root_item->received_uuid)) {
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(root));
if (unlikely(ret && ret != -ENOENT)) {
- btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
}
@@ -3890,7 +3913,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
}
@@ -4581,7 +4605,7 @@ static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter,
{
struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
struct extent_io_tree *io_tree = &inode->io_tree;
- struct page **pages;
+ struct page **pages = NULL;
struct btrfs_uring_priv *priv = NULL;
unsigned long nr_pages;
int ret;
@@ -4639,6 +4663,11 @@ out_fail:
btrfs_unlock_extent(io_tree, start, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
kfree(priv);
+ for (int i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ __free_page(pages[i]);
+ }
+ kfree(pages);
return ret;
}
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
index 943e53980945..c8e92efce405 100644
--- a/fs/btrfs/messages.h
+++ b/fs/btrfs/messages.h
@@ -31,9 +31,6 @@ void _btrfs_printk(const struct btrfs_fs_info *fs_info, unsigned int level, cons
#define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \
btrfs_no_printk(fs_info, fmt, ##args)
-#define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \
- btrfs_no_printk(fs_info, fmt, ##args)
-
#define btrfs_printk_rl_in_rcu(fs_info, level, fmt, args...) \
btrfs_no_printk(fs_info, fmt, ##args)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f189bf09ce6a..b7dfe877cf8d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -38,6 +38,7 @@ static const struct root_name_map root_map[] = {
{ BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
{ BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
+ { BTRFS_REMAP_TREE_OBJECTID, "REMAP_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
@@ -415,6 +416,9 @@ static void key_type_string(const struct btrfs_key *key, char *buf, int buf_size
[BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL",
[BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL",
[BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE",
+ [BTRFS_IDENTITY_REMAP_KEY] = "IDENTITY_REMAP",
+ [BTRFS_REMAP_KEY] = "REMAP",
+ [BTRFS_REMAP_BACKREF_KEY] = "REMAP_BACKREF",
};
if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID)
@@ -435,6 +439,7 @@ void btrfs_print_leaf(const struct extent_buffer *l)
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
+ struct btrfs_remap_item *remap;
struct btrfs_key key;
if (!l)
@@ -569,6 +574,11 @@ void btrfs_print_leaf(const struct extent_buffer *l)
print_raid_stripe_key(l, btrfs_item_size(l, i),
btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
break;
+ case BTRFS_REMAP_KEY:
+ case BTRFS_REMAP_BACKREF_KEY:
+ remap = btrfs_item_ptr(l, i, struct btrfs_remap_item);
+ pr_info("\t\taddress %llu\n", btrfs_remap_address(l, remap));
+ break;
}
}
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 3cdd9755dc52..3b2a6517d0b5 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -370,7 +370,7 @@ static bool squota_check_parent_usage(struct btrfs_fs_info *fs_info, struct btrf
nr_members++;
}
mismatch = (parent->excl != excl_sum || parent->rfer != rfer_sum ||
- parent->excl_cmpr != excl_cmpr_sum || parent->rfer_cmpr != excl_cmpr_sum);
+ parent->excl_cmpr != excl_cmpr_sum || parent->rfer_cmpr != rfer_cmpr_sum);
WARN(mismatch,
"parent squota qgroup %hu/%llu has mismatched usage from its %d members. "
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 95db7c48fbad..b2343aed7a5d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4399,6 +4399,8 @@ static int move_existing_remaps(struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
}
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
}
remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
@@ -4723,6 +4725,7 @@ int btrfs_last_identity_remap_gone(struct btrfs_chunk_map *chunk_map,
ret = btrfs_remove_dev_extents(trans, chunk_map);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -4732,6 +4735,7 @@ int btrfs_last_identity_remap_gone(struct btrfs_chunk_map *chunk_map,
if (unlikely(ret)) {
mutex_unlock(&trans->fs_info->chunk_mutex);
btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
return ret;
}
}
@@ -4750,6 +4754,7 @@ int btrfs_last_identity_remap_gone(struct btrfs_chunk_map *chunk_map,
ret = remove_chunk_stripes(trans, chunk_map, path);
if (unlikely(ret)) {
btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
return ret;
}
@@ -5982,6 +5987,9 @@ static int remove_range_from_remap_tree(struct btrfs_trans_handle *trans,
struct btrfs_block_group *dest_bg;
dest_bg = btrfs_lookup_block_group(fs_info, new_addr);
+ if (unlikely(!dest_bg))
+ return -EUCLEAN;
+
adjust_block_group_remap_bytes(trans, dest_bg, -overlap_length);
btrfs_put_block_group(dest_bg);
ret = btrfs_add_to_free_space_tree(trans,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 81022d912abb..bc94bbc00772 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -743,7 +743,7 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
btrfs_warn_rl(fs_info,
"scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
logical, stripe->mirror_num,
- header->fsid, fs_info->fs_devices->fsid);
+ header->fsid, fs_info->fs_devices->metadata_uuid);
return;
}
if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 52a267a5dd80..87cbc051cb12 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -2194,8 +2194,11 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
if (!btrfs_should_periodic_reclaim(space_info))
continue;
for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
- if (do_reclaim_sweep(space_info, raid))
+ if (do_reclaim_sweep(space_info, raid)) {
+ spin_lock(&space_info->lock);
btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+ }
}
}
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 7ef8c9b7dfc1..8dd77c431974 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1905,6 +1905,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
+ /*
+ * We are creating a lot of snapshots of the same root that was
+ * received (has a received UUID) and reached a leaf's limit for
+ * an item. We can safely ignore this and avoid a transaction
+ * abort. A deletion of this snapshot will still work since we
+ * ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL key
+ * is missing (see btrfs_delete_subvolume()). Send/receive will
+ * work too since it peeks the first root id from the existing
+ * item (it could peek any), and in case it's missing it
+ * falls back to search by BTRFS_UUID_KEY_SUBVOL keys.
+ * Creation of a snapshot does not require CAP_SYS_ADMIN, so
+ * we don't want users triggering transaction aborts, either
+ * intentionally or not.
+ */
+ if (ret == -EOVERFLOW)
+ ret = 0;
if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
goto fail;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 452394b34d01..516ef62c8f43 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1284,7 +1284,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
}
if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) {
generic_err(leaf, slot,
- "invalid root level, have %u expect [0, %u]",
+ "invalid root drop_level, have %u expect [0, %u]",
btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
@@ -1740,7 +1740,7 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
objectid > BTRFS_LAST_FREE_OBJECTID)) {
extent_err(leaf, slot,
"invalid extent data backref objectid value %llu",
- root);
+ objectid);
return -EUCLEAN;
}
if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
@@ -1921,7 +1921,7 @@ static int check_dev_extent_item(const struct extent_buffer *leaf,
if (unlikely(prev_key->offset + prev_len > key->offset)) {
generic_err(leaf, slot,
"dev extent overlap, prev offset %llu len %llu current offset %llu",
- prev_key->objectid, prev_len, key->offset);
+ prev_key->offset, prev_len, key->offset);
return -EUCLEAN;
}
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 780a06d59240..552fef3c385a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6195,6 +6195,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
+ const bool orig_log_new_dentries = ctx->log_new_dentries;
int ret = 0;
/*
@@ -6256,7 +6257,11 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* dir index key range logged for the directory. So we
* must make sure the deletion is recorded.
*/
+ ctx->log_new_dentries = false;
ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+ if (!ret && ctx->log_new_dentries)
+ ret = log_new_dir_dentries(trans, inode, ctx);
+
btrfs_add_delayed_iput(inode);
if (ret)
break;
@@ -6291,6 +6296,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
break;
}
+ ctx->log_new_dentries = orig_log_new_dentries;
ctx->logging_conflict_inodes = false;
if (ret)
free_conflicting_inodes(ctx);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index f24c14b9bb2f..43c17a1d3451 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -199,6 +199,44 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
return 0;
}
+/*
+ * Check if we can add one root ID to a UUID key.
+ * If the key does not yet exist, we can; otherwise only if the extended item
+ * does not exceed the maximum item size permitted by the leaf size.
+ *
+ * Returns 0 on success, negative value on error.
+ */
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type)
+{
+ BTRFS_PATH_AUTO_FREE(path);
+ int ret;
+ u32 item_size;
+ struct btrfs_key key;
+
+ if (WARN_ON_ONCE(!fs_info->uuid_root))
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ btrfs_uuid_to_key(uuid, type, &key);
+ ret = btrfs_search_slot(NULL, fs_info->uuid_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+
+ item_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+
+ if (sizeof(struct btrfs_item) + item_size + sizeof(u64) >
+ BTRFS_LEAF_DATA_SIZE(fs_info))
+ return -EOVERFLOW;
+
+ return 0;
+}
+
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
u64 subid)
{
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
index c60ad20325cc..02b235a3653f 100644
--- a/fs/btrfs/uuid-tree.h
+++ b/fs/btrfs/uuid-tree.h
@@ -12,6 +12,8 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
u64 subid);
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6fb0c4cd50ff..be8975ef8b24 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3587,7 +3587,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset, bool v
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(fs_info);
- ret = btrfs_relocate_block_group(fs_info, chunk_offset, true);
+ ret = btrfs_relocate_block_group(fs_info, chunk_offset, verbose);
btrfs_scrub_continue(fs_info);
if (ret) {
/*
@@ -6907,7 +6907,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
ret = btrfs_translate_remap(fs_info, &new_logical, length);
if (ret)
- return ret;
+ goto out;
if (new_logical != logical) {
btrfs_free_chunk_map(map);
@@ -6921,8 +6921,10 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
}
num_copies = btrfs_chunk_map_num_copies(map);
- if (io_geom.mirror_num > num_copies)
- return -EINVAL;
+ if (io_geom.mirror_num > num_copies) {
+ ret = -EINVAL;
+ goto out;
+ }
map_offset = logical - map->start;
io_geom.raid56_full_stripe_start = (u64)-1;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 39930d99943c..817ca4fb9efa 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -337,7 +337,10 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (!btrfs_fs_incompat(fs_info, ZONED))
return 0;
- mutex_lock(&fs_devices->device_list_mutex);
+ /*
+ * No need to take the device_list mutex here, we're still in the mount
+ * path and devices cannot be added to or removed from the list yet.
+ */
list_for_each_entry(device, &fs_devices->devices, dev_list) {
/* We can skip reading of zone info for missing devices */
if (!device->bdev)
@@ -347,7 +350,6 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (ret)
break;
}
- mutex_unlock(&fs_devices->device_list_mutex);
return ret;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e87b3bb94ee8..2090fc78529c 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1326,7 +1326,6 @@ void ceph_process_folio_batch(struct address_space *mapping,
continue;
} else if (rc == -E2BIG) {
folio_unlock(folio);
- ceph_wbc->fbatch.folios[i] = NULL;
break;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index f3fe786b4143..7dc307790240 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -79,7 +79,7 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
@@ -98,7 +98,7 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 86d7aa594ea9..bac9cfb6b982 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1339,6 +1339,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
struct dentry *dn;
@@ -1363,7 +1364,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
@@ -1424,7 +1425,19 @@ retry:
* We have enough caps, so we assume that the unlink
* will succeed. Fix up the target inode and dcache.
*/
- drop_nlink(inode);
+
+ /*
+ * Protect the i_nlink update with i_ceph_lock
+ * to prevent racing against ceph_fill_inode()
+ * handling our completion on a worker thread
+ * and don't decrement if i_nlink has already
+ * been updated to zero by this completion.
+ */
+ spin_lock(&ci->i_ceph_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&ci->i_ceph_lock);
+
d_delete(dentry);
} else {
spin_lock(&fsc->async_unlink_conflict_lock);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 66bbf6d517a9..5e7c73a29aa3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -397,7 +397,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
@@ -807,7 +807,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d76f9a79dc0c..d99e12d1100b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2551,7 +2551,7 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 23b6d00643c9..b1746273f186 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2768,6 +2768,7 @@ retry:
if (ret < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(ret);
}
@@ -2777,6 +2778,7 @@ retry:
if (len < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(len);
}
}
@@ -2813,6 +2815,7 @@ retry:
* cannot ever succeed. Creating paths that long is
* possible with Ceph, but Linux cannot use them.
*/
+ __putname(path);
return ERR_PTR(-ENAMETOOLONG);
}
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 4f86169c23f1..4b3d21402e10 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -222,6 +222,7 @@ err_out:
static int erofs_fill_inode(struct inode *inode)
{
+ const struct address_space_operations *aops;
int err;
trace_erofs_fill_inode(inode);
@@ -254,7 +255,11 @@ static int erofs_fill_inode(struct inode *inode)
}
mapping_set_large_folios(inode->i_mapping);
- return erofs_inode_set_aops(inode, inode, false);
+ aops = erofs_get_aops(inode, false);
+ if (IS_ERR(aops))
+ return PTR_ERR(aops);
+ inode->i_mapping->a_ops = aops;
+ return 0;
}
/*
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index d1634455e389..a4f0a42cf8c3 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -471,26 +471,24 @@ static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
return NULL;
}
-static inline int erofs_inode_set_aops(struct inode *inode,
- struct inode *realinode, bool no_fscache)
+static inline const struct address_space_operations *
+erofs_get_aops(struct inode *realinode, bool no_fscache)
{
if (erofs_inode_is_data_compressed(EROFS_I(realinode)->datalayout)) {
if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
DO_ONCE_LITE_IF(realinode->i_blkbits != PAGE_SHIFT,
erofs_info, realinode->i_sb,
"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
- inode->i_mapping->a_ops = &z_erofs_aops;
- return 0;
+ return &z_erofs_aops;
}
- inode->i_mapping->a_ops = &erofs_aops;
if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !no_fscache &&
erofs_is_fscache_mode(realinode->i_sb))
- inode->i_mapping->a_ops = &erofs_fscache_access_aops;
+ return &erofs_fscache_access_aops;
if (IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) &&
erofs_is_fileio_mode(EROFS_SB(realinode->i_sb)))
- inode->i_mapping->a_ops = &erofs_fileio_aops;
- return 0;
+ return &erofs_fileio_aops;
+ return &erofs_aops;
}
int erofs_register_sysfs(struct super_block *sb);
diff --git a/fs/erofs/ishare.c b/fs/erofs/ishare.c
index ce980320a8b9..829d50d5c717 100644
--- a/fs/erofs/ishare.c
+++ b/fs/erofs/ishare.c
@@ -40,10 +40,14 @@ bool erofs_ishare_fill_inode(struct inode *inode)
{
struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
struct erofs_inode *vi = EROFS_I(inode);
+ const struct address_space_operations *aops;
struct erofs_inode_fingerprint fp;
struct inode *sharedinode;
unsigned long hash;
+ aops = erofs_get_aops(inode, true);
+ if (IS_ERR(aops))
+ return false;
if (erofs_xattr_fill_inode_fingerprint(&fp, inode, sbi->domain_id))
return false;
hash = xxh32(fp.opaque, fp.size, 0);
@@ -56,15 +60,15 @@ bool erofs_ishare_fill_inode(struct inode *inode)
}
if (inode_state_read_once(sharedinode) & I_NEW) {
- if (erofs_inode_set_aops(sharedinode, inode, true)) {
- iget_failed(sharedinode);
- kfree(fp.opaque);
- return false;
- }
+ sharedinode->i_mapping->a_ops = aops;
sharedinode->i_size = vi->vfs_inode.i_size;
unlock_new_inode(sharedinode);
} else {
kfree(fp.opaque);
+ if (aops != sharedinode->i_mapping->a_ops) {
+ iput(sharedinode);
+ return false;
+ }
if (sharedinode->i_size != vi->vfs_inode.i_size) {
_erofs_printk(inode->i_sb, KERN_WARNING
"size(%lld:%lld) not matches for the same fingerprint\n",
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index d4995686ac6c..972a0c82198d 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -424,26 +424,23 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
-#ifdef CONFIG_FS_DAX
- struct erofs_sb_info *sbi = fc->s_fs_info;
-
- switch (mode) {
- case EROFS_MOUNT_DAX_ALWAYS:
- set_opt(&sbi->opt, DAX_ALWAYS);
- clear_opt(&sbi->opt, DAX_NEVER);
- return true;
- case EROFS_MOUNT_DAX_NEVER:
- set_opt(&sbi->opt, DAX_NEVER);
- clear_opt(&sbi->opt, DAX_ALWAYS);
- return true;
- default:
+ if (IS_ENABLED(CONFIG_FS_DAX)) {
+ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+ if (mode == EROFS_MOUNT_DAX_ALWAYS) {
+ set_opt(&sbi->opt, DAX_ALWAYS);
+ clear_opt(&sbi->opt, DAX_NEVER);
+ return true;
+ } else if (mode == EROFS_MOUNT_DAX_NEVER) {
+ set_opt(&sbi->opt, DAX_NEVER);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ return true;
+ }
DBG_BUGON(1);
return false;
}
-#else
errorfc(fc, "dax options not supported");
return false;
-#endif
}
static int erofs_fc_parse_param(struct fs_context *fc,
@@ -460,31 +457,26 @@ static int erofs_fc_parse_param(struct fs_context *fc,
switch (opt) {
case Opt_user_xattr:
-#ifdef CONFIG_EROFS_FS_XATTR
- if (result.boolean)
+ if (!IS_ENABLED(CONFIG_EROFS_FS_XATTR))
+ errorfc(fc, "{,no}user_xattr options not supported");
+ else if (result.boolean)
set_opt(&sbi->opt, XATTR_USER);
else
clear_opt(&sbi->opt, XATTR_USER);
-#else
- errorfc(fc, "{,no}user_xattr options not supported");
-#endif
break;
case Opt_acl:
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
- if (result.boolean)
+ if (!IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
+ errorfc(fc, "{,no}acl options not supported");
+ else if (result.boolean)
set_opt(&sbi->opt, POSIX_ACL);
else
clear_opt(&sbi->opt, POSIX_ACL);
-#else
- errorfc(fc, "{,no}acl options not supported");
-#endif
break;
case Opt_cache_strategy:
-#ifdef CONFIG_EROFS_FS_ZIP
- sbi->opt.cache_strategy = result.uint_32;
-#else
- errorfc(fc, "compression not supported, cache_strategy ignored");
-#endif
+ if (!IS_ENABLED(CONFIG_EROFS_FS_ZIP))
+ errorfc(fc, "compression not supported, cache_strategy ignored");
+ else
+ sbi->opt.cache_strategy = result.uint_32;
break;
case Opt_dax:
if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
@@ -533,24 +525,21 @@ static int erofs_fc_parse_param(struct fs_context *fc,
break;
#endif
case Opt_directio:
-#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
- if (result.boolean)
+ if (!IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE))
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+ else if (result.boolean)
set_opt(&sbi->opt, DIRECT_IO);
else
clear_opt(&sbi->opt, DIRECT_IO);
-#else
- errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
-#endif
break;
case Opt_fsoffset:
sbi->dif0.fsoff = result.uint_64;
break;
case Opt_inode_share:
-#ifdef CONFIG_EROFS_FS_PAGE_CACHE_SHARE
- set_opt(&sbi->opt, INODE_SHARE);
-#else
- errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
-#endif
+ if (!IS_ENABLED(CONFIG_EROFS_FS_PAGE_CACHE_SHARE))
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+ else
+ set_opt(&sbi->opt, INODE_SHARE);
break;
}
return 0;
@@ -809,8 +798,7 @@ static int erofs_fc_get_tree(struct fs_context *fc)
ret = get_tree_bdev_flags(fc, erofs_fc_fill_super,
IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ?
GET_TREE_BDEV_QUIET_LOOKUP : 0);
-#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
- if (ret == -ENOTBLK) {
+ if (IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && ret == -ENOTBLK) {
struct file *file;
if (!fc->source)
@@ -824,7 +812,6 @@ static int erofs_fc_get_tree(struct fs_context *fc)
sbi->dif0.file->f_mapping->a_ops->read_folio)
return get_tree_nodev(fc, erofs_fc_fill_super);
}
-#endif
return ret;
}
@@ -1108,12 +1095,12 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",dax=never");
if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
seq_puts(seq, ",directio");
-#ifdef CONFIG_EROFS_FS_ONDEMAND
- if (sbi->fsid)
- seq_printf(seq, ",fsid=%s", sbi->fsid);
- if (sbi->domain_id)
- seq_printf(seq, ",domain_id=%s", sbi->domain_id);
-#endif
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND)) {
+ if (sbi->fsid)
+ seq_printf(seq, ",fsid=%s", sbi->fsid);
+ if (sbi->domain_id)
+ seq_printf(seq, ",domain_id=%s", sbi->domain_id);
+ }
if (sbi->dif0.fsoff)
seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff);
if (test_opt(opt, INODE_SHARE))
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index c8d8e129eb4b..30775502b56d 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -513,6 +513,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
unsigned int recsz = z_erofs_extent_recsize(vi->z_advise);
erofs_off_t pos = round_up(Z_EROFS_MAP_HEADER_END(erofs_iloc(inode) +
vi->inode_isize + vi->xattr_isize), recsz);
+ unsigned int bmask = sb->s_blocksize - 1;
bool in_mbox = erofs_inode_in_metabox(inode);
erofs_off_t lend = inode->i_size;
erofs_off_t l, r, mid, pa, la, lstart;
@@ -596,17 +597,17 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
map->m_flags |= EROFS_MAP_MAPPED |
EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
+ if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
+ map->m_flags |= EROFS_MAP_PARTIAL_REF;
+ map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
if (fmt)
map->m_algorithmformat = fmt - 1;
- else if (interlaced && !erofs_blkoff(sb, map->m_pa))
+ else if (interlaced && !((map->m_pa | map->m_plen) & bmask))
map->m_algorithmformat =
Z_EROFS_COMPRESSION_INTERLACED;
else
map->m_algorithmformat =
Z_EROFS_COMPRESSION_SHIFTED;
- if (map->m_plen & Z_EROFS_EXTENT_PLEN_PARTIAL)
- map->m_flags |= EROFS_MAP_PARTIAL_REF;
- map->m_plen &= Z_EROFS_EXTENT_PLEN_MASK;
}
}
map->m_llen = lend - map->m_la;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a8c278c50083..5714e900567c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -2061,7 +2061,8 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
* @ep: the &struct eventpoll to be currently checked.
* @depth: Current depth of the path being checked.
*
- * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
+ * Return: depth of the subtree, or a value bigger than EP_MAX_NESTS if we found
+ * a loop or went too deep.
*/
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
{
@@ -2080,7 +2081,7 @@ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
struct eventpoll *ep_tovisit;
ep_tovisit = epi->ffd.file->private_data;
if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
- result = INT_MAX;
+ result = EP_MAX_NESTS+1;
else
result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
if (result > EP_MAX_NESTS)
diff --git a/fs/file_attr.c b/fs/file_attr.c
index 6d2a298a786d..da983e105d70 100644
--- a/fs/file_attr.c
+++ b/fs/file_attr.c
@@ -378,7 +378,7 @@ SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename,
struct path filepath __free(path_put) = {};
unsigned int lookup_flags = 0;
struct file_attr fattr;
- struct file_kattr fa;
+ struct file_kattr fa = { .flags_valid = true }; /* hint only */
int error;
BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8f8069fb76ba..7c75ed7e8979 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -198,10 +198,11 @@ static void wb_queue_work(struct bdi_writeback *wb,
static bool wb_wait_for_completion_cb(struct wb_completion *done)
{
+ unsigned long timeout = sysctl_hung_task_timeout_secs;
unsigned long waited_secs = (jiffies - done->wait_start) / HZ;
done->progress_stamp = jiffies;
- if (waited_secs > sysctl_hung_task_timeout_secs)
+ if (timeout && (waited_secs > timeout))
pr_info("INFO: The task %s:%d has been waiting for writeback "
"completion for more than %lu seconds.",
current->comm, current->pid, waited_secs);
@@ -1954,6 +1955,7 @@ static long writeback_sb_inodes(struct super_block *sb,
.range_end = LLONG_MAX,
};
unsigned long start_time = jiffies;
+ unsigned long timeout = sysctl_hung_task_timeout_secs;
long write_chunk;
long total_wrote = 0; /* count both pages and inodes */
unsigned long dirtied_before = jiffies;
@@ -2040,9 +2042,8 @@ static long writeback_sb_inodes(struct super_block *sb,
__writeback_single_inode(inode, &wbc);
/* Report progress to inform the hung task detector of the progress. */
- if (work->done && work->done->progress_stamp &&
- (jiffies - work->done->progress_stamp) > HZ *
- sysctl_hung_task_timeout_secs / 2)
+ if (work->done && work->done->progress_stamp && timeout &&
+ (jiffies - work->done->progress_stamp) > HZ * timeout / 2)
wake_up_all(work->done->waitq);
wbc_detach_inode(&wbc);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index a0c46aadb97d..00f0efaf12b2 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -80,18 +80,27 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
{
struct iomap_folio_state *ifs = folio->private;
unsigned long flags;
- bool uptodate = true;
+ bool mark_uptodate = true;
if (folio_test_uptodate(folio))
return;
if (ifs) {
spin_lock_irqsave(&ifs->state_lock, flags);
- uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
+ /*
+ * If a read with bytes pending is in progress, we must not call
+ * folio_mark_uptodate(). The read completion path
+ * (iomap_read_end()) will call folio_end_read(), which uses XOR
+ * semantics to set the uptodate bit. If we set it here, the XOR
+ * in folio_end_read() will clear it, leaving the folio not
+ * uptodate.
+ */
+ mark_uptodate = ifs_set_range_uptodate(folio, ifs, off, len) &&
+ !ifs->read_bytes_pending;
spin_unlock_irqrestore(&ifs->state_lock, flags);
}
- if (uptodate)
+ if (mark_uptodate)
folio_mark_uptodate(folio);
}
@@ -624,6 +633,7 @@ static int iomap_readahead_iter(struct iomap_iter *iter,
* iomap_readahead - Attempt to read pages from a file.
* @ops: The operations vector for the filesystem.
* @ctx: The ctx used for issuing readahead.
+ * @private: The filesystem-specific information for issuing iomap_iter.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 95254aa1b654..e911daedff65 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -87,6 +87,19 @@ static inline enum fserror_type iomap_dio_err_type(const struct iomap_dio *dio)
return FSERR_DIRECTIO_READ;
}
+static inline bool should_report_dio_fserror(const struct iomap_dio *dio)
+{
+ switch (dio->error) {
+ case 0:
+ case -EAGAIN:
+ case -ENOTBLK:
+ /* don't send fsnotify for success or magic retry codes */
+ return false;
+ default:
+ return true;
+ }
+}
+
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
const struct iomap_dio_ops *dops = dio->dops;
@@ -96,7 +109,7 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
if (dops && dops->end_io)
ret = dops->end_io(iocb, dio->size, ret, dio->flags);
- if (dio->error)
+ if (should_report_dio_fserror(dio))
fserror_report_io(file_inode(iocb->ki_filp),
iomap_dio_err_type(dio), offset, dio->size,
dio->error, GFP_NOFS);
diff --git a/fs/iomap/ioend.c b/fs/iomap/ioend.c
index e4d57cb969f1..60546fa14dfe 100644
--- a/fs/iomap/ioend.c
+++ b/fs/iomap/ioend.c
@@ -69,11 +69,57 @@ static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
return folio_count;
}
+static DEFINE_SPINLOCK(failed_ioend_lock);
+static LIST_HEAD(failed_ioend_list);
+
+static void
+iomap_fail_ioends(
+ struct work_struct *work)
+{
+ struct iomap_ioend *ioend;
+ struct list_head tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&failed_ioend_lock, flags);
+ list_replace_init(&failed_ioend_list, &tmp);
+ spin_unlock_irqrestore(&failed_ioend_lock, flags);
+
+ while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
+ io_list))) {
+ list_del_init(&ioend->io_list);
+ iomap_finish_ioend_buffered(ioend);
+ cond_resched();
+ }
+}
+
+static DECLARE_WORK(failed_ioend_work, iomap_fail_ioends);
+
+static void iomap_fail_ioend_buffered(struct iomap_ioend *ioend)
+{
+ unsigned long flags;
+
+ /*
+ * Bounce I/O errors to a workqueue to avoid nested i_lock acquisitions
+ * in the fserror code. The caller no longer owns the ioend reference
+ * after the spinlock drops.
+ */
+ spin_lock_irqsave(&failed_ioend_lock, flags);
+ if (list_empty(&failed_ioend_list))
+ WARN_ON_ONCE(!schedule_work(&failed_ioend_work));
+ list_add_tail(&ioend->io_list, &failed_ioend_list);
+ spin_unlock_irqrestore(&failed_ioend_lock, flags);
+}
+
static void ioend_writeback_end_bio(struct bio *bio)
{
struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
ioend->io_error = blk_status_to_errno(bio->bi_status);
+ if (ioend->io_error) {
+ iomap_fail_ioend_buffered(ioend);
+ return;
+ }
+
iomap_finish_ioend_buffered(ioend);
}
@@ -169,17 +215,18 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
WARN_ON_ONCE(!folio->private && map_len < dirty_len);
switch (wpc->iomap.type) {
- case IOMAP_INLINE:
- WARN_ON_ONCE(1);
- return -EIO;
+ case IOMAP_UNWRITTEN:
+ ioend_flags |= IOMAP_IOEND_UNWRITTEN;
+ break;
+ case IOMAP_MAPPED:
+ break;
case IOMAP_HOLE:
return map_len;
default:
- break;
+ WARN_ON_ONCE(1);
+ return -EIO;
}
- if (wpc->iomap.type == IOMAP_UNWRITTEN)
- ioend_flags |= IOMAP_IOEND_UNWRITTEN;
if (wpc->iomap.flags & IOMAP_F_SHARED)
ioend_flags |= IOMAP_IOEND_SHARED;
if (folio_test_dropbehind(folio))
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 7da66ca184f4..abec438330a7 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -247,7 +247,7 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode)
j += i * bits_per_zone;
if (!j || j > sbi->s_ninodes) {
iput(inode);
- return ERR_PTR(-ENOSPC);
+ return ERR_PTR(-EFSCORRUPTED);
}
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = j;
diff --git a/fs/namespace.c b/fs/namespace.c
index ebe19ded293a..854f4fc66469 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1531,23 +1531,33 @@ static struct mount *mnt_find_id_at_reverse(struct mnt_namespace *ns, u64 mnt_id
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_mounts *p = m->private;
+ struct mount *mnt;
down_read(&namespace_sem);
- return mnt_find_id_at(p->ns, *pos);
+ mnt = mnt_find_id_at(p->ns, *pos);
+ if (mnt)
+ *pos = mnt->mnt_id_unique;
+ return mnt;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct mount *next = NULL, *mnt = v;
+ struct mount *mnt = v;
struct rb_node *node = rb_next(&mnt->mnt_node);
- ++*pos;
if (node) {
- next = node_to_mount(node);
+ struct mount *next = node_to_mount(node);
*pos = next->mnt_id_unique;
+ return next;
}
- return next;
+
+ /*
+ * No more mounts. Set pos past current mount's ID so that if
+ * iteration restarts, mnt_find_id_at() returns NULL.
+ */
+ *pos = mnt->mnt_id_unique + 1;
+ return NULL;
}
static void m_stop(struct seq_file *m, void *v)
@@ -2791,7 +2801,8 @@ static inline void unlock_mount(struct pinned_mountpoint *m)
}
static void lock_mount_exact(const struct path *path,
- struct pinned_mountpoint *mp);
+ struct pinned_mountpoint *mp, bool copy_mount,
+ unsigned int copy_flags);
#define LOCK_MOUNT_MAYBE_BENEATH(mp, path, beneath) \
struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
@@ -2799,7 +2810,10 @@ static void lock_mount_exact(const struct path *path,
#define LOCK_MOUNT(mp, path) LOCK_MOUNT_MAYBE_BENEATH(mp, (path), false)
#define LOCK_MOUNT_EXACT(mp, path) \
struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
- lock_mount_exact((path), &mp)
+ lock_mount_exact((path), &mp, false, 0)
+#define LOCK_MOUNT_EXACT_COPY(mp, path, copy_flags) \
+ struct pinned_mountpoint mp __cleanup(unlock_mount) = {}; \
+ lock_mount_exact((path), &mp, true, (copy_flags))
static int graft_tree(struct mount *mnt, const struct pinned_mountpoint *mp)
{
@@ -3073,16 +3087,13 @@ static struct file *open_detached_copy(struct path *path, unsigned int flags)
return file;
}
-DEFINE_FREE(put_empty_mnt_ns, struct mnt_namespace *,
- if (!IS_ERR_OR_NULL(_T)) free_mnt_ns(_T))
-
static struct mnt_namespace *create_new_namespace(struct path *path, unsigned int flags)
{
- struct mnt_namespace *new_ns __free(put_empty_mnt_ns) = NULL;
- struct path to_path __free(path_put) = {};
struct mnt_namespace *ns = current->nsproxy->mnt_ns;
struct user_namespace *user_ns = current_user_ns();
- struct mount *new_ns_root;
+ struct mnt_namespace *new_ns;
+ struct mount *new_ns_root, *old_ns_root;
+ struct path to_path;
struct mount *mnt;
unsigned int copy_flags = 0;
bool locked = false;
@@ -3094,71 +3105,63 @@ static struct mnt_namespace *create_new_namespace(struct path *path, unsigned in
if (IS_ERR(new_ns))
return ERR_CAST(new_ns);
- scoped_guard(namespace_excl) {
- new_ns_root = clone_mnt(ns->root, ns->root->mnt.mnt_root, copy_flags);
- if (IS_ERR(new_ns_root))
- return ERR_CAST(new_ns_root);
+ old_ns_root = ns->root;
+ to_path.mnt = &old_ns_root->mnt;
+ to_path.dentry = old_ns_root->mnt.mnt_root;
- /*
- * If the real rootfs had a locked mount on top of it somewhere
- * in the stack, lock the new mount tree as well so it can't be
- * exposed.
- */
- mnt = ns->root;
- while (mnt->overmount) {
- mnt = mnt->overmount;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
- locked = true;
- }
+ VFS_WARN_ON_ONCE(old_ns_root->mnt.mnt_sb->s_type != &nullfs_fs_type);
+
+ LOCK_MOUNT_EXACT_COPY(mp, &to_path, copy_flags);
+ if (IS_ERR(mp.parent)) {
+ free_mnt_ns(new_ns);
+ return ERR_CAST(mp.parent);
}
+ new_ns_root = mp.parent;
/*
- * We dropped the namespace semaphore so we can actually lock
- * the copy for mounting. The copied mount isn't attached to any
- * mount namespace and it is thus excluded from any propagation.
- * So realistically we're isolated and the mount can't be
- * overmounted.
+ * If the real rootfs had a locked mount on top of it somewhere
+ * in the stack, lock the new mount tree as well so it can't be
+ * exposed.
*/
-
- /* Borrow the reference from clone_mnt(). */
- to_path.mnt = &new_ns_root->mnt;
- to_path.dentry = dget(new_ns_root->mnt.mnt_root);
-
- /* Now lock for actual mounting. */
- LOCK_MOUNT_EXACT(mp, &to_path);
- if (unlikely(IS_ERR(mp.parent)))
- return ERR_CAST(mp.parent);
+ mnt = old_ns_root;
+ while (mnt->overmount) {
+ mnt = mnt->overmount;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ locked = true;
+ }
/*
- * We don't emulate unshare()ing a mount namespace. We stick to the
- * restrictions of creating detached bind-mounts. It has a lot
- * saner and simpler semantics.
+ * We don't emulate unshare()ing a mount namespace. We stick
+ * to the restrictions of creating detached bind-mounts. It
+ * has a lot saner and simpler semantics.
*/
mnt = __do_loopback(path, flags, copy_flags);
- if (IS_ERR(mnt))
- return ERR_CAST(mnt);
-
scoped_guard(mount_writer) {
+ if (IS_ERR(mnt)) {
+ emptied_ns = new_ns;
+ umount_tree(new_ns_root, 0);
+ return ERR_CAST(mnt);
+ }
+
if (locked)
mnt->mnt.mnt_flags |= MNT_LOCKED;
/*
- * Now mount the detached tree on top of the copy of the
- * real rootfs we created.
+ * now mount the detached tree on top of the copy
+ * of the real rootfs we created.
*/
attach_mnt(mnt, new_ns_root, mp.mp);
if (user_ns != ns->user_ns)
lock_mnt_tree(new_ns_root);
}
- /* Add all mounts to the new namespace. */
- for (struct mount *p = new_ns_root; p; p = next_mnt(p, new_ns_root)) {
- mnt_add_to_ns(new_ns, p);
+ for (mnt = new_ns_root; mnt; mnt = next_mnt(mnt, new_ns_root)) {
+ mnt_add_to_ns(new_ns, mnt);
new_ns->nr_mounts++;
}
- new_ns->root = real_mount(no_free_ptr(to_path.mnt));
+ new_ns->root = new_ns_root;
ns_tree_add_raw(new_ns);
- return no_free_ptr(new_ns);
+ return new_ns;
}
static struct file *open_new_namespace(struct path *path, unsigned int flags)
@@ -3840,16 +3843,20 @@ static int do_new_mount(const struct path *path, const char *fstype,
}
static void lock_mount_exact(const struct path *path,
- struct pinned_mountpoint *mp)
+ struct pinned_mountpoint *mp, bool copy_mount,
+ unsigned int copy_flags)
{
struct dentry *dentry = path->dentry;
int err;
+ /* Assert that inode_lock() locked the correct inode. */
+ VFS_WARN_ON_ONCE(copy_mount && !path_mounted(path));
+
inode_lock(dentry->d_inode);
namespace_lock();
if (unlikely(cant_mount(dentry)))
err = -ENOENT;
- else if (path_overmounted(path))
+ else if (!copy_mount && path_overmounted(path))
err = -EBUSY;
else
err = get_mountpoint(dentry, mp);
@@ -3857,9 +3864,15 @@ static void lock_mount_exact(const struct path *path,
namespace_unlock();
inode_unlock(dentry->d_inode);
mp->parent = ERR_PTR(err);
- } else {
- mp->parent = real_mount(path->mnt);
+ return;
}
+
+ if (copy_mount)
+ mp->parent = clone_mnt(real_mount(path->mnt), dentry, copy_flags);
+ else
+ mp->parent = real_mount(path->mnt);
+ if (unlikely(IS_ERR(mp->parent)))
+ __unlock_mount(mp);
}
int finish_automount(struct vfsmount *__m, const struct path *path)
@@ -5678,6 +5691,8 @@ static int do_statmount(struct kstatmount *s, u64 mnt_id, u64 mnt_ns_id,
s->mnt = mnt_file->f_path.mnt;
ns = real_mount(s->mnt)->mnt_ns;
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
if (!ns)
/*
* We can't set mount point and mnt_ns_id since we don't have a
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index a9d1c3b2c084..dd1451bf7543 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -10,6 +10,202 @@
#include "internal.h"
/*
+ * Perform the cleanup rituals after an unbuffered write is complete.
+ */
+static void netfs_unbuffered_write_done(struct netfs_io_request *wreq)
+{
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+
+ _enter("R=%x", wreq->debug_id);
+
+ /* Okay, declare that all I/O is complete. */
+ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+ if (!wreq->error)
+ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+
+ if (wreq->origin == NETFS_DIO_WRITE &&
+ wreq->mapping->nrpages) {
+ /* mmap may have got underfoot and we may now have folios
+ * locally covering the region we just wrote. Attempt to
+ * discard the folios, but leave in place any modified locally.
+ * ->write_iter() is prevented from interfering by the DIO
+ * counter.
+ */
+ pgoff_t first = wreq->start >> PAGE_SHIFT;
+ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+
+ invalidate_inode_pages2_range(wreq->mapping, first, last);
+ }
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_end(wreq->inode);
+
+ _debug("finished");
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+
+ if (wreq->iocb) {
+ size_t written = umin(wreq->transferred, wreq->len);
+
+ wreq->iocb->ki_pos += written;
+ if (wreq->iocb->ki_complete) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
+ wreq->iocb->ki_complete(wreq->iocb, wreq->error ?: written);
+ }
+ wreq->iocb = VFS_PTR_POISON;
+ }
+
+ netfs_clear_subrequests(wreq);
+}
+
+/*
+ * Collect the subrequest results of unbuffered write subrequests.
+ */
+static void netfs_unbuffered_write_collect(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq)
+{
+ trace_netfs_collect_sreq(wreq, subreq);
+
+ spin_lock(&wreq->lock);
+ list_del_init(&subreq->rreq_link);
+ spin_unlock(&wreq->lock);
+
+ wreq->transferred += subreq->transferred;
+ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+ stream->collected_to = subreq->start + subreq->transferred;
+ wreq->collected_to = stream->collected_to;
+ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
+
+ trace_netfs_collect_stream(wreq, stream);
+ trace_netfs_collect_state(wreq, wreq->collected_to, 0);
+}
+
+/*
+ * Write data to the server without going through the pagecache and without
+ * writing it to the local cache. We dispatch the subrequests serially and
+ * wait for each to complete before dispatching the next, lest we leave a gap
+ * in the data written due to a failure such as ENOSPC. We could, however
+ * attempt to do preparation such as content encryption for the next subreq
+ * whilst the current is in progress.
+ */
+static int netfs_unbuffered_write(struct netfs_io_request *wreq)
+{
+ struct netfs_io_subrequest *subreq = NULL;
+ struct netfs_io_stream *stream = &wreq->io_streams[0];
+ int ret;
+
+ _enter("%llx", wreq->len);
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_begin(wreq->inode);
+
+ stream->collected_to = wreq->start;
+
+ for (;;) {
+ bool retry = false;
+
+ if (!subreq) {
+ netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
+ subreq = stream->construct;
+ stream->construct = NULL;
+ stream->front = NULL;
+ }
+
+ /* Check if (re-)preparation failed. */
+ if (unlikely(test_bit(NETFS_SREQ_FAILED, &subreq->flags))) {
+ netfs_write_subrequest_terminated(subreq, subreq->error);
+ wreq->error = subreq->error;
+ break;
+ }
+
+ iov_iter_truncate(&subreq->io_iter, wreq->len - wreq->transferred);
+ if (!iov_iter_count(&subreq->io_iter))
+ break;
+
+ subreq->len = netfs_limit_iter(&subreq->io_iter, 0,
+ stream->sreq_max_len,
+ stream->sreq_max_segs);
+ iov_iter_truncate(&subreq->io_iter, subreq->len);
+ stream->submit_extendable_to = subreq->len;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ stream->issue_write(subreq);
+
+ /* Async, need to wait. */
+ netfs_wait_for_in_progress_stream(wreq, stream);
+
+ if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ retry = true;
+ } else if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
+ ret = subreq->error;
+ wreq->error = ret;
+ netfs_see_subrequest(subreq, netfs_sreq_trace_see_failed);
+ subreq = NULL;
+ break;
+ }
+ ret = 0;
+
+ if (!retry) {
+ netfs_unbuffered_write_collect(wreq, stream, subreq);
+ subreq = NULL;
+ if (wreq->transferred >= wreq->len)
+ break;
+ if (!wreq->iocb && signal_pending(current)) {
+ ret = wreq->transferred ? -EINTR : -ERESTARTSYS;
+ trace_netfs_rreq(wreq, netfs_rreq_trace_intr);
+ break;
+ }
+ continue;
+ }
+
+ /* We need to retry the last subrequest, so first reset the
+ * iterator, taking into account what, if anything, we managed
+ * to transfer.
+ */
+ subreq->error = -EAGAIN;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+ if (subreq->transferred > 0)
+ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
+
+ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ wreq->netfs_ops->retry_request)
+ wreq->netfs_ops->retry_request(wreq, stream);
+
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
+ __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ subreq->io_iter = wreq->buffer.iter;
+ subreq->start = wreq->start + wreq->transferred;
+ subreq->len = wreq->len - wreq->transferred;
+ subreq->transferred = 0;
+ subreq->retry_count += 1;
+ stream->sreq_max_len = UINT_MAX;
+ stream->sreq_max_segs = INT_MAX;
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ stream->prepare_write(subreq);
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_stat(&netfs_n_wh_retry_write_subreq);
+ }
+
+ netfs_unbuffered_write_done(wreq);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+static void netfs_unbuffered_write_async(struct work_struct *work)
+{
+ struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
+
+ netfs_unbuffered_write(wreq);
+ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
+}
+
+/*
* Perform an unbuffered write where we may have to do an RMW operation on an
* encrypted file. This can also be used for direct I/O writes.
*/
@@ -70,35 +266,35 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
*/
wreq->buffer.iter = *iter;
}
+
+ wreq->len = iov_iter_count(&wreq->buffer.iter);
}
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
- if (async)
- __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
/* Copy the data into the bounce buffer and encrypt it. */
// TODO
/* Dispatch the write. */
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- if (async)
- wreq->iocb = iocb;
- wreq->len = iov_iter_count(&wreq->buffer.iter);
- ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
- if (ret < 0) {
- _debug("begin = %zd", ret);
- goto out;
- }
- if (!async) {
- ret = netfs_wait_for_write(wreq);
- if (ret > 0)
- iocb->ki_pos += ret;
- } else {
+ if (async) {
+ INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
+ wreq->iocb = iocb;
+ queue_work(system_dfl_wq, &wreq->work);
ret = -EIOCBQUEUED;
+ } else {
+ ret = netfs_unbuffered_write(wreq);
+ if (ret < 0) {
+ _debug("begin = %zd", ret);
+ } else {
+ iocb->ki_pos += wreq->transferred;
+ ret = wreq->transferred ?: wreq->error;
+ }
+
+ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
}
-out:
netfs_put_request(wreq, netfs_rreq_trace_put_return);
return ret;
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 4319611f5354..d436e20d3418 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -198,6 +198,9 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
struct file *file,
loff_t start,
enum netfs_io_origin origin);
+void netfs_prepare_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start);
void netfs_reissue_write(struct netfs_io_stream *stream,
struct netfs_io_subrequest *subreq,
struct iov_iter *source);
@@ -212,7 +215,6 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
struct folio **writethrough_cache);
ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *writethrough_cache);
-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
/*
* write_retry.c
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 61eab34ea67e..83eb3dc1adf8 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -399,27 +399,6 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
ictx->ops->invalidate_cache(wreq);
}
- if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
- wreq->origin == NETFS_DIO_WRITE) &&
- !wreq->error)
- netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
-
- if (wreq->origin == NETFS_DIO_WRITE &&
- wreq->mapping->nrpages) {
- /* mmap may have got underfoot and we may now have folios
- * locally covering the region we just wrote. Attempt to
- * discard the folios, but leave in place any modified locally.
- * ->write_iter() is prevented from interfering by the DIO
- * counter.
- */
- pgoff_t first = wreq->start >> PAGE_SHIFT;
- pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
- invalidate_inode_pages2_range(wreq->mapping, first, last);
- }
-
- if (wreq->origin == NETFS_DIO_WRITE)
- inode_dio_end(wreq->inode);
-
_debug("finished");
netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
/* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 34894da5a23e..437268f65640 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -154,9 +154,9 @@ EXPORT_SYMBOL(netfs_prepare_write_failed);
* Prepare a write subrequest. We need to allocate a new subrequest
* if we don't have one.
*/
-static void netfs_prepare_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream,
- loff_t start)
+void netfs_prepare_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start)
{
struct netfs_io_subrequest *subreq;
struct iov_iter *wreq_iter = &wreq->buffer.iter;
@@ -699,41 +699,6 @@ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_c
}
/*
- * Write data to the server without going through the pagecache and without
- * writing it to the local cache.
- */
-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
-{
- struct netfs_io_stream *upload = &wreq->io_streams[0];
- ssize_t part;
- loff_t start = wreq->start;
- int error = 0;
-
- _enter("%zx", len);
-
- if (wreq->origin == NETFS_DIO_WRITE)
- inode_dio_begin(wreq->inode);
-
- while (len) {
- // TODO: Prepare content encryption
-
- _debug("unbuffered %zx", len);
- part = netfs_advance_write(wreq, upload, start, len, false);
- start += part;
- len -= part;
- rolling_buffer_advance(&wreq->buffer, part);
- if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
- netfs_wait_for_paused_write(wreq);
- if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
- break;
- }
-
- netfs_end_issue_write(wreq);
- _leave(" = %d", error);
- return error;
-}
-
-/*
* Write some of a pending folio data back to the server and/or the cache.
*/
static int netfs_write_folio_single(struct netfs_io_request *wreq,
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 12cb0ca738af..6bb30543eff0 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -87,7 +87,7 @@ config NFS_V4
space programs which can be found in the Linux nfs-utils package,
available from http://linux-nfs.org/.
- If unsure, say Y.
+ If unsure, say N.
config NFS_SWAP
bool "Provide swap over NFS support"
@@ -100,6 +100,7 @@ config NFS_SWAP
config NFS_V4_0
bool "NFS client support for NFSv4.0"
depends on NFS_V4
+ default y
help
This option enables support for minor version 0 of the NFSv4 protocol
(RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 3e2de45c95fe..be2aebf62056 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -392,8 +392,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
- if (d_alias)
+ if (d_alias) {
+ if (d_is_dir(d_alias)) {
+ status = -EISDIR;
+ goto out_dput;
+ }
dentry = d_alias;
+ }
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8fdbba7cad96..8e8a76a44ff0 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -36,19 +36,30 @@
* second map contains a reference to the entry in the first map.
*/
+static struct workqueue_struct *nfsd_export_wq;
+
#define EXPKEY_HASHBITS 8
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static void expkey_put(struct kref *ref)
+static void expkey_release(struct work_struct *work)
{
- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+ struct svc_expkey *key = container_of(to_rcu_work(work),
+ struct svc_expkey, ek_rwork);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree_rcu(key, ek_rcu);
+ kfree(key);
+}
+
+static void expkey_put(struct kref *ref)
+{
+ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+
+ INIT_RCU_WORK(&key->ek_rwork, expkey_release);
+ queue_rcu_work(nfsd_export_wq, &key->ek_rwork);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -353,11 +364,13 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_release(struct rcu_head *rcu_head)
+static void svc_export_release(struct work_struct *work)
{
- struct svc_export *exp = container_of(rcu_head, struct svc_export,
- ex_rcu);
+ struct svc_export *exp = container_of(to_rcu_work(work),
+ struct svc_export, ex_rwork);
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
@@ -369,9 +382,8 @@ static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
- path_put(&exp->ex_path);
- auth_domain_put(exp->ex_client);
- call_rcu(&exp->ex_rcu, svc_export_release);
+ INIT_RCU_WORK(&exp->ex_rwork, svc_export_release);
+ queue_rcu_work(nfsd_export_wq, &exp->ex_rwork);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -1479,6 +1491,36 @@ const struct seq_operations nfs_exports_op = {
.show = e_show,
};
+/**
+ * nfsd_export_wq_init - allocate the export release workqueue
+ *
+ * Called once at module load. The workqueue runs deferred svc_export and
+ * svc_expkey release work scheduled by queue_rcu_work() in the cache put
+ * callbacks.
+ *
+ * Return values:
+ * %0: workqueue allocated
+ * %-ENOMEM: allocation failed
+ */
+int nfsd_export_wq_init(void)
+{
+ nfsd_export_wq = alloc_workqueue("nfsd_export", WQ_UNBOUND, 0);
+ if (!nfsd_export_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * nfsd_export_wq_shutdown - drain and free the export release workqueue
+ *
+ * Called once at module unload. Per-namespace teardown in
+ * nfsd_export_shutdown() has already drained all deferred work.
+ */
+void nfsd_export_wq_shutdown(void)
+{
+ destroy_workqueue(nfsd_export_wq);
+}
+
/*
* Initialize the exports module.
*/
@@ -1540,6 +1582,9 @@ nfsd_export_shutdown(struct net *net)
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
+ /* Drain deferred export and expkey release work. */
+ rcu_barrier();
+ flush_workqueue(nfsd_export_wq);
cache_destroy_net(nn->svc_expkey_cache, net);
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index d2b09cd76145..b05399374574 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -7,6 +7,7 @@
#include <linux/sunrpc/cache.h>
#include <linux/percpu_counter.h>
+#include <linux/workqueue.h>
#include <uapi/linux/nfsd/export.h>
#include <linux/nfs4.h>
@@ -75,7 +76,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
- struct rcu_head ex_rcu;
+ struct rcu_work ex_rwork;
unsigned long ex_xprtsec_modes;
struct export_stats *ex_stats;
};
@@ -92,7 +93,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
- struct rcu_head ek_rcu;
+ struct rcu_work ek_rwork;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
@@ -110,6 +111,8 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
/*
* Function declarations
*/
+int nfsd_export_wq_init(void);
+void nfsd_export_wq_shutdown(void);
int nfsd_export_init(struct net *);
void nfsd_export_shutdown(struct net *);
void nfsd_export_flush(struct net *);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 41dfba5ab8b8..9d234913100b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -6281,9 +6281,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
- so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
+ if (len <= NFSD4_REPLAY_ISIZE) {
+ so->so_replay.rp_buflen = len;
+ read_bytes_from_xdr_buf(xdr->buf,
+ op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
+ } else {
+ so->so_replay.rp_buflen = 0;
+ }
}
status:
op->status = nfsd4_map_status(op->status,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index e9acd2cd602c..71aabdaa1d15 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -149,9 +149,19 @@ static int exports_net_open(struct net *net, struct file *file)
seq = file->private_data;
seq->private = nn->svc_export_cache;
+ get_net(net);
return 0;
}
+static int exports_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct cache_detail *cd = seq->private;
+
+ put_net(cd->net);
+ return seq_release(inode, file);
+}
+
static int exports_nfsd_open(struct inode *inode, struct file *file)
{
return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -161,7 +171,7 @@ static const struct file_operations exports_nfsd_operations = {
.open = exports_nfsd_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = exports_release,
};
static int export_features_show(struct seq_file *m, void *v)
@@ -377,15 +387,15 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
}
/*
- * write_threads - Start NFSD, or report the current number of running threads
+ * write_threads - Start NFSD, or report the configured number of threads
*
* Input:
* buf: ignored
* size: zero
* Output:
* On success: passed-in buffer filled with '\n'-terminated C
- * string numeric value representing the number of
- * running NFSD threads;
+ * string numeric value representing the configured
+ * number of NFSD threads;
* return code is the size in bytes of the string
* On error: return code is zero
*
@@ -399,8 +409,8 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
* Output:
* On success: NFS service is started;
* passed-in buffer filled with '\n'-terminated C
- * string numeric value representing the number of
- * running NFSD threads;
+ * string numeric value representing the configured
+ * number of NFSD threads;
* return code is the size in bytes of the string
* On error: return code is zero or a negative errno value
*/
@@ -430,7 +440,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
}
/*
- * write_pool_threads - Set or report the current number of threads per pool
+ * write_pool_threads - Set or report the configured number of threads per pool
*
* Input:
* buf: ignored
@@ -447,7 +457,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
* Output:
* On success: passed-in buffer filled with '\n'-terminated C
* string containing integer values representing the
- * number of NFSD threads in each pool;
+ * configured number of NFSD threads in each pool;
* return code is the size in bytes of the string
* On error: return code is zero or a negative errno value
*/
@@ -1376,7 +1386,7 @@ static const struct proc_ops exports_proc_ops = {
.proc_open = exports_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
- .proc_release = seq_release,
+ .proc_release = exports_release,
};
static int create_proc_exports_entry(void)
@@ -1647,7 +1657,7 @@ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
if (attr)
nn->min_threads = nla_get_u32(attr);
- ret = nfsd_svc(nrpools, nthreads, net, get_current_cred(), scope);
+ ret = nfsd_svc(nrpools, nthreads, net, current_cred(), scope);
if (ret > 0)
ret = 0;
out_unlock:
@@ -1657,7 +1667,7 @@ out_unlock:
}
/**
- * nfsd_nl_threads_get_doit - get the number of running threads
+ * nfsd_nl_threads_get_doit - get the maximum number of running threads
* @skb: reply buffer
* @info: netlink metadata and command arguments
*
@@ -1700,7 +1710,7 @@ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
- sp->sp_nrthreads);
+ sp->sp_nrthrmax);
if (err)
goto err_unlock;
}
@@ -2000,7 +2010,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
}
ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0,
- get_current_cred());
+ current_cred());
/* always save the latest error */
if (ret < 0)
err = ret;
@@ -2259,9 +2269,12 @@ static int __init init_nfsd(void)
if (retval)
goto out_free_pnfs;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
+ retval = nfsd_export_wq_init();
+ if (retval)
+ goto out_free_lockd;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
- goto out_free_lockd;
+ goto out_free_export_wq;
retval = register_cld_notifier();
if (retval)
goto out_free_subsys;
@@ -2290,6 +2303,8 @@ out_free_cld:
unregister_cld_notifier();
out_free_subsys:
unregister_pernet_subsys(&nfsd_net_ops);
+out_free_export_wq:
+ nfsd_export_wq_shutdown();
out_free_lockd:
nfsd_lockd_shutdown();
nfsd_drc_slab_free();
@@ -2310,6 +2325,7 @@ static void __exit exit_nfsd(void)
nfsd4_destroy_laundry_wq();
unregister_cld_notifier();
unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_export_wq_shutdown();
nfsd_drc_slab_free();
nfsd_lockd_shutdown();
nfsd4_free_slabs();
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 0887ee601d3c..4a04208393b8 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -239,12 +239,13 @@ static void nfsd_net_free(struct percpu_ref *ref)
int nfsd_nrthreads(struct net *net)
{
- int rv = 0;
+ int i, rv = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
mutex_lock(&nfsd_mutex);
if (nn->nfsd_serv)
- rv = nn->nfsd_serv->sv_nrthreads;
+ for (i = 0; i < nn->nfsd_serv->sv_nrpools; ++i)
+ rv += nn->nfsd_serv->sv_pools[i].sp_nrthrmax;
mutex_unlock(&nfsd_mutex);
return rv;
}
@@ -659,7 +660,7 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
if (serv)
for (i = 0; i < serv->sv_nrpools && i < n; i++)
- nthreads[i] = serv->sv_pools[i].sp_nrthreads;
+ nthreads[i] = serv->sv_pools[i].sp_nrthrmax;
return 0;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 6fcbf1e427d4..c0ca115c3b74 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -541,11 +541,18 @@ struct nfs4_client_reclaim {
struct xdr_netobj cr_princhash;
};
-/* A reasonable value for REPLAY_ISIZE was estimated as follows:
- * The OPEN response, typically the largest, requires
- * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
- * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
- * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
+/*
+ * REPLAY_ISIZE is sized for an OPEN response with delegation:
+ * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
+ * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
+ * 4(deleg. recall flag) + 20(deleg. space limit) +
+ * ~32(deleg. ace) = 112 bytes
+ *
+ * Some responses can exceed this. A LOCK denial includes the conflicting
+ * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
+ * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
+ * saved. Enlarging this constant increases the size of every
+ * nfs4_stateowner.
*/
#define NFSD4_REPLAY_ISIZE 112
diff --git a/fs/nsfs.c b/fs/nsfs.c
index db91de208645..c215878d55e8 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -199,6 +199,17 @@ static bool nsfs_ioctl_valid(unsigned int cmd)
return false;
}
+static bool may_use_nsfs_ioctl(unsigned int cmd)
+{
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(NS_MNT_GET_NEXT):
+ fallthrough;
+ case _IOC_NR(NS_MNT_GET_PREV):
+ return may_see_all_namespaces();
+ }
+ return true;
+}
+
static long ns_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -214,6 +225,8 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
if (!nsfs_ioctl_valid(ioctl))
return -ENOIOCTLCMD;
+ if (!may_use_nsfs_ioctl(ioctl))
+ return -EPERM;
ns = get_proc_ns(file_inode(filp));
switch (ioctl) {
@@ -614,7 +627,7 @@ static struct dentry *nsfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
return ERR_PTR(-EOPNOTSUPP);
}
- if (owning_ns && !ns_capable(owning_ns, CAP_SYS_ADMIN)) {
+ if (owning_ns && !may_see_all_namespaces()) {
ns->ops->put(ns);
return ERR_PTR(-EPERM);
}
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 318253344b5c..e3825ee246be 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -608,9 +608,8 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct user_namespace *user_ns;
user_ns = task_cred_xxx(task, user_ns);
- if (!ns_ref_get(user_ns))
- break;
- ns_common = to_ns_common(user_ns);
+ if (ns_ref_get(user_ns))
+ ns_common = to_ns_common(user_ns);
}
#endif
break;
@@ -620,9 +619,8 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct pid_namespace *pid_ns;
pid_ns = task_active_pid_ns(task);
- if (!ns_ref_get(pid_ns))
- break;
- ns_common = to_ns_common(pid_ns);
+ if (ns_ref_get(pid_ns))
+ ns_common = to_ns_common(pid_ns);
}
#endif
break;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4eec684baca9..4c863d17dfb4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2128,6 +2128,9 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
ino_t ino = 1;
child = try_lookup_noperm(&qname, dir);
+ if (IS_ERR(child))
+ goto end_instantiate;
+
if (!child) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
child = d_alloc_parallel(dir, &qname, &wq);
diff --git a/fs/smb/client/Makefile b/fs/smb/client/Makefile
index 3abd357d6df6..26b6105f04d1 100644
--- a/fs/smb/client/Makefile
+++ b/fs/smb/client/Makefile
@@ -56,4 +56,6 @@ $(obj)/smb2maperror.o: $(obj)/smb2_mapping_table.c
quiet_cmd_gen_smb2_mapping = GEN $@
cmd_gen_smb2_mapping = perl $(src)/gen_smb2_mapping $< $@
+obj-$(CONFIG_SMB_KUNIT_TESTS) += smb2maperror_test.o
+
clean-files += smb2_mapping_table.c
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index c327c246a9b4..04bb95091f49 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -118,7 +118,7 @@ static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
if (!*path)
return path;
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH) &&
cifs_sb->prepath) {
len = strlen(cifs_sb->prepath) + 1;
if (unlikely(len > strlen(path)))
diff --git a/fs/smb/client/cifs_fs_sb.h b/fs/smb/client/cifs_fs_sb.h
index 5e8d163cb5f8..84e7e366b0ff 100644
--- a/fs/smb/client/cifs_fs_sb.h
+++ b/fs/smb/client/cifs_fs_sb.h
@@ -55,7 +55,7 @@ struct cifs_sb_info {
struct nls_table *local_nls;
struct smb3_fs_context *ctx;
atomic_t active;
- unsigned int mnt_cifs_flags;
+ atomic_t mnt_cifs_flags;
struct delayed_work prune_tlinks;
struct rcu_head rcu;
diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
index b51ce64fcccf..147496ac9f9f 100644
--- a/fs/smb/client/cifs_ioctl.h
+++ b/fs/smb/client/cifs_ioctl.h
@@ -122,11 +122,3 @@ struct smb3_notify_info {
#define CIFS_GOING_FLAGS_DEFAULT 0x0 /* going down */
#define CIFS_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
#define CIFS_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
-
-static inline bool cifs_forced_shutdown(struct cifs_sb_info *sbi)
-{
- if (CIFS_MOUNT_SHUTDOWN & sbi->mnt_cifs_flags)
- return true;
- else
- return false;
-}
diff --git a/fs/smb/client/cifs_unicode.c b/fs/smb/client/cifs_unicode.c
index e7891b4406f2..e2edc207cef2 100644
--- a/fs/smb/client/cifs_unicode.c
+++ b/fs/smb/client/cifs_unicode.c
@@ -11,20 +11,6 @@
#include "cifsglob.h"
#include "cifs_debug.h"
-int cifs_remap(struct cifs_sb_info *cifs_sb)
-{
- int map_type;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
- map_type = SFM_MAP_UNI_RSVD;
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
- map_type = SFU_MAP_UNI_RSVD;
- else
- map_type = NO_MAP_UNI_RSVD;
-
- return map_type;
-}
-
/* Convert character using the SFU - "Services for Unix" remapping range */
static bool
convert_sfu_char(const __u16 src_char, char *target)
diff --git a/fs/smb/client/cifs_unicode.h b/fs/smb/client/cifs_unicode.h
index 9249db3b78c3..3e9cd9acf0a9 100644
--- a/fs/smb/client/cifs_unicode.h
+++ b/fs/smb/client/cifs_unicode.h
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/nls.h>
#include "../../nls/nls_ucs2_utils.h"
+#include "cifsglob.h"
/*
* Macs use an older "SFM" mapping of the symbols above. Fortunately it does
@@ -65,10 +66,21 @@ char *cifs_strndup_from_utf16(const char *src, const int maxlen,
const struct nls_table *codepage);
int cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
const struct nls_table *cp, int map_chars);
-int cifs_remap(struct cifs_sb_info *cifs_sb);
__le16 *cifs_strndup_to_utf16(const char *src, const int maxlen,
int *utf16_len, const struct nls_table *cp,
int remap);
wchar_t cifs_toupper(wchar_t in);
+static inline int cifs_remap(const struct cifs_sb_info *cifs_sb)
+{
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
+
+ if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
+ return SFM_MAP_UNI_RSVD;
+ if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ return SFU_MAP_UNI_RSVD;
+
+ return NO_MAP_UNI_RSVD;
+}
+
#endif /* _CIFS_UNICODE_H */
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index 6fa12c901c14..c920039d733c 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -356,7 +356,7 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
psid->num_subauth, SID_MAX_SUB_AUTHORITIES);
}
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) ||
+ if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_UID_FROM_ACL) ||
(cifs_sb_master_tcon(cifs_sb)->posix_extensions)) {
uint32_t unix_id;
bool is_group;
@@ -1489,7 +1489,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
struct cifsFileInfo *open_file = NULL;
if (inode)
- open_file = find_readable_file(CIFS_I(inode), true);
+ open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY);
if (!open_file)
return get_cifs_acl_by_path(cifs_sb, path, pacllen, info);
@@ -1612,7 +1612,8 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
struct smb_acl *dacl_ptr = NULL;
struct smb_ntsd *pntsd = NULL; /* acl obtained from server */
struct smb_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ unsigned int sbflags;
struct tcon_link *tlink;
struct smb_version_operations *ops;
bool mode_from_sid, id_from_sid;
@@ -1643,15 +1644,9 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
return rc;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
- mode_from_sid = true;
- else
- mode_from_sid = false;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
- id_from_sid = true;
- else
- id_from_sid = false;
+ sbflags = cifs_sb_flags(cifs_sb);
+ mode_from_sid = sbflags & CIFS_MOUNT_MODE_FROM_SID;
+ id_from_sid = sbflags & CIFS_MOUNT_UID_FROM_ACL;
/* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 99b04234a08e..32d0305a1239 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -226,16 +226,18 @@ cifs_sb_deactive(struct super_block *sb)
static int
cifs_read_super(struct super_block *sb)
{
- struct inode *inode;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
+ unsigned int sbflags;
struct timespec64 ts;
+ struct inode *inode;
int rc = 0;
cifs_sb = CIFS_SB(sb);
tcon = cifs_sb_master_tcon(cifs_sb);
+ sbflags = cifs_sb_flags(cifs_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
+ if (sbflags & CIFS_MOUNT_POSIXACL)
sb->s_flags |= SB_POSIXACL;
if (tcon->snapshot_time)
@@ -311,7 +313,7 @@ cifs_read_super(struct super_block *sb)
}
#ifdef CONFIG_CIFS_NFSD_EXPORT
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ if (sbflags & CIFS_MOUNT_SERVER_INUM) {
cifs_dbg(FYI, "export ops supported\n");
sb->s_export_op = &cifs_export_ops;
}
@@ -330,10 +332,14 @@ static void cifs_kill_sb(struct super_block *sb)
/*
* We need to release all dentries for the cached directories
- * before we kill the sb.
+ * and close all deferred file handles before we kill the sb.
*/
if (cifs_sb->root) {
close_all_cached_dirs(cifs_sb);
+ cifs_close_all_deferred_files_sb(cifs_sb);
+
+ /* Wait for all pending oplock breaks to complete */
+ flush_workqueue(cifsoplockd_wq);
/* finally release root dentry */
dput(cifs_sb->root);
@@ -389,8 +395,7 @@ statfs_out:
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file));
struct TCP_Server_Info *server = tcon->ses->server;
struct inode *inode = file_inode(file);
int rc;
@@ -418,11 +423,9 @@ out_unlock:
static int cifs_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask)
{
- struct cifs_sb_info *cifs_sb;
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
- cifs_sb = CIFS_SB(inode->i_sb);
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
+ if (sbflags & CIFS_MOUNT_NO_PERM) {
if ((mask & MAY_EXEC) && !execute_ok(inode))
return -EACCES;
else
@@ -568,15 +571,17 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
+
seq_puts(s, ",cache=");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ if (sbflags & CIFS_MOUNT_STRICT_IO)
seq_puts(s, "strict");
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ else if (sbflags & CIFS_MOUNT_DIRECT_IO)
seq_puts(s, "none");
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+ else if (sbflags & CIFS_MOUNT_RW_CACHE)
seq_puts(s, "singleclient"); /* assume only one client access */
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
+ else if (sbflags & CIFS_MOUNT_RO_CACHE)
seq_puts(s, "ro"); /* read only caching assumed */
else
seq_puts(s, "loose");
@@ -637,6 +642,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct sockaddr *srcaddr;
+ unsigned int sbflags;
+
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
@@ -670,16 +677,17 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
(int)(srcaddr->sa_family));
}
+ sbflags = cifs_sb_flags(cifs_sb);
seq_printf(s, ",uid=%u",
from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ if (sbflags & CIFS_MOUNT_OVERR_UID)
seq_puts(s, ",forceuid");
else
seq_puts(s, ",noforceuid");
seq_printf(s, ",gid=%u",
from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ if (sbflags & CIFS_MOUNT_OVERR_GID)
seq_puts(s, ",forcegid");
else
seq_puts(s, ",noforcegid");
@@ -722,53 +730,53 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",unix");
else
seq_puts(s, ",nounix");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ if (sbflags & CIFS_MOUNT_NO_DFS)
seq_puts(s, ",nodfs");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ if (sbflags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+ if (sbflags & CIFS_MOUNT_SET_UID)
seq_puts(s, ",setuids");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+ if (sbflags & CIFS_MOUNT_UID_FROM_ACL)
seq_puts(s, ",idsfromsid");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ if (sbflags & CIFS_MOUNT_SERVER_INUM)
seq_puts(s, ",serverino");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ if (sbflags & CIFS_MOUNT_RWPIDFORWARD)
seq_puts(s, ",rwpidforward");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
+ if (sbflags & CIFS_MOUNT_NOPOSIXBRL)
seq_puts(s, ",forcemand");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (sbflags & CIFS_MOUNT_NO_XATTR)
seq_puts(s, ",nouser_xattr");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
seq_puts(s, ",mapchars");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+ if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
seq_puts(s, ",mapposix");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ if (sbflags & CIFS_MOUNT_UNX_EMUL)
seq_puts(s, ",sfu");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ if (sbflags & CIFS_MOUNT_NO_BRL)
seq_puts(s, ",nobrl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
+ if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE)
seq_puts(s, ",nohandlecache");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
+ if (sbflags & CIFS_MOUNT_MODE_FROM_SID)
seq_puts(s, ",modefromsid");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ if (sbflags & CIFS_MOUNT_CIFS_ACL)
seq_puts(s, ",cifsacl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ if (sbflags & CIFS_MOUNT_DYNPERM)
seq_puts(s, ",dynperm");
if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(s, ",acl");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+ if (sbflags & CIFS_MOUNT_MF_SYMLINKS)
seq_puts(s, ",mfsymlinks");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+ if (sbflags & CIFS_MOUNT_FSCACHE)
seq_puts(s, ",fsc");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
+ if (sbflags & CIFS_MOUNT_NOSSYNC)
seq_puts(s, ",nostrictsync");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ if (sbflags & CIFS_MOUNT_NO_PERM)
seq_puts(s, ",noperm");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
+ if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID)
seq_printf(s, ",backupuid=%u",
from_kuid_munged(&init_user_ns,
cifs_sb->ctx->backupuid));
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
+ if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID)
seq_printf(s, ",backupgid=%u",
from_kgid_munged(&init_user_ns,
cifs_sb->ctx->backupgid));
@@ -864,7 +872,6 @@ static void cifs_umount_begin(struct super_block *sb)
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_close_all_deferred_files(tcon);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
if (tcon->ses && tcon->ses->server) {
@@ -909,10 +916,10 @@ static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
static int cifs_drop_inode(struct inode *inode)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
/* no serverino => unconditional eviction */
- return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
+ return !(sbflags & CIFS_MOUNT_SERVER_INUM) ||
inode_generic_drop(inode);
}
@@ -950,7 +957,7 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
char *s, *p;
char sep;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
return dget(sb->s_root);
full_path = cifs_build_path_to_root(ctx, cifs_sb,
@@ -1262,7 +1269,7 @@ static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *s
struct cifsFileInfo *writeable_srcfile;
int rc = -EINVAL;
- writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
+ writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY);
if (writeable_srcfile) {
if (src_tcon->ses->server->ops->set_file_size)
rc = src_tcon->ses->server->ops->set_file_size(
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 080ea601c209..709e96e07791 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -20,6 +20,7 @@
#include <linux/utsname.h>
#include <linux/sched/mm.h>
#include <linux/netfs.h>
+#include <linux/fcntl.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
@@ -1580,24 +1581,59 @@ CIFS_I(struct inode *inode)
return container_of(inode, struct cifsInodeInfo, netfs.inode);
}
-static inline struct cifs_sb_info *
-CIFS_SB(struct super_block *sb)
+static inline void *cinode_to_fsinfo(struct cifsInodeInfo *cinode)
+{
+ return cinode->netfs.inode.i_sb->s_fs_info;
+}
+
+static inline void *super_to_fsinfo(struct super_block *sb)
{
return sb->s_fs_info;
}
-static inline struct cifs_sb_info *
-CIFS_FILE_SB(struct file *file)
+static inline void *inode_to_fsinfo(struct inode *inode)
+{
+ return inode->i_sb->s_fs_info;
+}
+
+static inline void *file_to_fsinfo(struct file *file)
+{
+ return file_inode(file)->i_sb->s_fs_info;
+}
+
+static inline void *dentry_to_fsinfo(struct dentry *dentry)
+{
+ return dentry->d_sb->s_fs_info;
+}
+
+static inline void *const_dentry_to_fsinfo(const struct dentry *dentry)
+{
+ return dentry->d_sb->s_fs_info;
+}
+
+#define CIFS_SB(_ptr) \
+ ((struct cifs_sb_info *) \
+ _Generic((_ptr), \
+ struct cifsInodeInfo * : cinode_to_fsinfo, \
+ const struct dentry * : const_dentry_to_fsinfo, \
+ struct super_block * : super_to_fsinfo, \
+ struct dentry * : dentry_to_fsinfo, \
+ struct inode * : inode_to_fsinfo, \
+ struct file * : file_to_fsinfo)(_ptr))
+
+/*
+ * Use atomic_t for @cifs_sb->mnt_cifs_flags as it is currently accessed
+ * locklessly and may be changed concurrently by mount/remount and reconnect
+ * paths.
+ */
+static inline unsigned int cifs_sb_flags(const struct cifs_sb_info *cifs_sb)
{
- return CIFS_SB(file_inode(file)->i_sb);
+ return atomic_read(&cifs_sb->mnt_cifs_flags);
}
static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
{
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- return '/';
- else
- return '\\';
+ return (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS) ? '/' : '\\';
}
static inline void
@@ -1849,12 +1885,12 @@ static inline bool is_replayable_error(int error)
}
-/* cifs_get_writable_file() flags */
-enum cifs_writable_file_flags {
- FIND_WR_ANY = 0U,
- FIND_WR_FSUID_ONLY = (1U << 0),
- FIND_WR_WITH_DELETE = (1U << 1),
- FIND_WR_NO_PENDING_DELETE = (1U << 2),
+enum cifs_find_flags {
+ FIND_ANY = 0U,
+ FIND_FSUID_ONLY = (1U << 0),
+ FIND_WITH_DELETE = (1U << 1),
+ FIND_NO_PENDING_DELETE = (1U << 2),
+ FIND_OPEN_FLAGS = (1U << 3),
};
#define MID_FREE 0
@@ -2314,9 +2350,8 @@ static inline bool __cifs_cache_state_check(struct cifsInodeInfo *cinode,
unsigned int oplock_flags,
unsigned int sb_flags)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(cinode->netfs.inode.i_sb);
+ unsigned int sflags = cifs_sb_flags(CIFS_SB(cinode));
unsigned int oplock = READ_ONCE(cinode->oplock);
- unsigned int sflags = cifs_sb->mnt_cifs_flags;
return (oplock & oplock_flags) || (sflags & sb_flags);
}
@@ -2336,4 +2371,25 @@ static inline void cifs_reset_oplock(struct cifsInodeInfo *cinode)
WRITE_ONCE(cinode->oplock, 0);
}
+static inline bool cifs_forced_shutdown(const struct cifs_sb_info *sbi)
+{
+ return cifs_sb_flags(sbi) & CIFS_MOUNT_SHUTDOWN;
+}
+
+static inline int cifs_open_create_options(unsigned int oflags, int opts)
+{
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (oflags & O_SYNC)
+ opts |= CREATE_WRITE_THROUGH;
+ if (oflags & O_DIRECT)
+ opts |= CREATE_NO_BUFFER;
+ return opts;
+}
+
+/*
+ * The number of blocks is not related to (i_size / i_blksize), but instead
+ * 512 byte (2**9) size is required for calculating num blocks.
+ */
+#define CIFS_INO_BLOCKS(size) DIV_ROUND_UP_ULL((u64)(size), 512)
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 96d6b5325aa3..884bfa1cf0b4 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -138,12 +138,14 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata,
ssize_t result);
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
int flags);
-int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
- struct cifsFileInfo **ret_file);
+int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags, unsigned int open_flags,
+ struct cifsFileInfo **ret_file);
int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, int flags,
struct cifsFileInfo **ret_file);
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only);
+struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ unsigned int open_flags);
int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file);
int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
@@ -261,6 +263,7 @@ void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
void cifs_close_all_deferred_files(struct cifs_tcon *tcon);
+void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb);
void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
struct dentry *dentry);
@@ -595,4 +598,20 @@ static inline void cifs_sg_set_buf(struct sg_table *sgtable,
}
}
+static inline int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ struct cifsFileInfo **ret_file)
+{
+ find_flags &= ~FIND_OPEN_FLAGS;
+ return __cifs_get_writable_file(cifs_inode, find_flags, 0, ret_file);
+}
+
+static inline struct cifsFileInfo *
+find_readable_file(struct cifsInodeInfo *cinode, unsigned int find_flags)
+{
+ find_flags &= ~FIND_OPEN_FLAGS;
+ find_flags |= FIND_NO_PENDING_DELETE;
+ return __find_readable_file(cinode, find_flags, 0);
+}
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 33dfe116ca52..69b38f0ccf2b 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1955,6 +1955,10 @@ static int match_session(struct cifs_ses *ses,
case Kerberos:
if (!uid_eq(ctx->cred_uid, ses->cred_uid))
return 0;
+ if (strncmp(ses->user_name ?: "",
+ ctx->username ?: "",
+ CIFS_MAX_USERNAME_LEN))
+ return 0;
break;
case NTLMv2:
case RawNTLMSSP:
@@ -2167,9 +2171,6 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
#ifdef CONFIG_KEYS
-/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
-
/* Populate username and pw fields from keyring if possible */
static int
cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
@@ -2177,6 +2178,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
int rc = 0;
int is_domain = 0;
const char *delim, *payload;
+ size_t desc_sz;
char *desc;
ssize_t len;
struct key *key;
@@ -2185,7 +2187,9 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
struct sockaddr_in6 *sa6;
const struct user_key_payload *upayload;
- desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
+ /* "cifs:a:" and "cifs:d:" are the same length; +1 for NUL terminator */
+ desc_sz = strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1;
+ desc = kmalloc(desc_sz, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -2193,11 +2197,11 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
switch (server->dstaddr.ss_family) {
case AF_INET:
sa = (struct sockaddr_in *)&server->dstaddr;
- sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
+ snprintf(desc, desc_sz, "cifs:a:%pI4", &sa->sin_addr.s_addr);
break;
case AF_INET6:
sa6 = (struct sockaddr_in6 *)&server->dstaddr;
- sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
+ snprintf(desc, desc_sz, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
break;
default:
cifs_dbg(FYI, "Bad ss_family (%hu)\n",
@@ -2216,7 +2220,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
/* didn't work, try to find a domain key */
- sprintf(desc, "cifs:d:%s", ses->domainName);
+ snprintf(desc, desc_sz, "cifs:d:%s", ses->domainName);
cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
key = request_key(&key_type_logon, desc, "");
if (IS_ERR(key)) {
@@ -2236,7 +2240,6 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
/* find first : in payload */
payload = upayload->data;
delim = strnchr(payload, upayload->datalen, ':');
- cifs_dbg(FYI, "payload=%s\n", payload);
if (!delim) {
cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
upayload->datalen);
@@ -2915,8 +2918,8 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
{
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
- unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
- unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
+ unsigned int oldflags = cifs_sb_flags(old) & CIFS_MOUNT_MASK;
+ unsigned int newflags = cifs_sb_flags(new) & CIFS_MOUNT_MASK;
if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
return 0;
@@ -2971,9 +2974,9 @@ static int match_prepath(struct super_block *sb,
struct smb3_fs_context *ctx = mnt_data->ctx;
struct cifs_sb_info *old = CIFS_SB(sb);
struct cifs_sb_info *new = mnt_data->cifs_sb;
- bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ bool old_set = (cifs_sb_flags(old) & CIFS_MOUNT_USE_PREFIX_PATH) &&
old->prepath;
- bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ bool new_set = (cifs_sb_flags(new) & CIFS_MOUNT_USE_PREFIX_PATH) &&
new->prepath;
if (tcon->origin_fullpath &&
@@ -3004,7 +3007,7 @@ cifs_match_super(struct super_block *sb, void *data)
cifs_sb = CIFS_SB(sb);
/* We do not want to use a superblock that has been shutdown */
- if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
+ if (cifs_forced_shutdown(cifs_sb)) {
spin_unlock(&cifs_tcp_ses_lock);
return 0;
}
@@ -3469,6 +3472,8 @@ ip_connect(struct TCP_Server_Info *server)
int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
{
struct smb3_fs_context *ctx = cifs_sb->ctx;
+ unsigned int sbflags;
+ int rc = 0;
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
INIT_LIST_HEAD(&cifs_sb->tcon_sb_link);
@@ -3493,17 +3498,16 @@ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
}
ctx->local_nls = cifs_sb->local_nls;
- smb3_update_mnt_flags(cifs_sb);
+ sbflags = smb3_update_mnt_flags(cifs_sb);
if (ctx->direct_io)
cifs_dbg(FYI, "mounting share using direct i/o\n");
if (ctx->cache_ro) {
cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
+ sbflags |= CIFS_MOUNT_RO_CACHE;
} else if (ctx->cache_rw) {
cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
- cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
- CIFS_MOUNT_RW_CACHE);
+ sbflags |= CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE;
}
if ((ctx->cifs_acl) && (ctx->dynperm))
@@ -3512,16 +3516,19 @@ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
if (ctx->prepath) {
cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
if (cifs_sb->prepath == NULL)
- return -ENOMEM;
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = -ENOMEM;
+ else
+ sbflags |= CIFS_MOUNT_USE_PREFIX_PATH;
}
- return 0;
+ atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
+ return rc;
}
/* Release all succeed connections */
void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
{
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
int rc = 0;
if (mnt_ctx->tcon)
@@ -3533,7 +3540,7 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
mnt_ctx->ses = NULL;
mnt_ctx->tcon = NULL;
mnt_ctx->server = NULL;
- mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ atomic_andnot(CIFS_MOUNT_POSIX_PATHS, &cifs_sb->mnt_cifs_flags);
free_xid(mnt_ctx->xid);
}
@@ -3587,19 +3594,23 @@ out:
int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
{
struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon = NULL;
struct cifs_sb_info *cifs_sb;
struct smb3_fs_context *ctx;
- struct cifs_tcon *tcon = NULL;
+ unsigned int sbflags;
int rc = 0;
- if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->server || !mnt_ctx->ses || !mnt_ctx->fs_ctx ||
- !mnt_ctx->cifs_sb)) {
- rc = -EINVAL;
- goto out;
+ if (WARN_ON_ONCE(!mnt_ctx))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!mnt_ctx->server || !mnt_ctx->ses ||
+ !mnt_ctx->fs_ctx || !mnt_ctx->cifs_sb)) {
+ mnt_ctx->tcon = NULL;
+ return -EINVAL;
}
server = mnt_ctx->server;
ctx = mnt_ctx->fs_ctx;
cifs_sb = mnt_ctx->cifs_sb;
+ sbflags = cifs_sb_flags(cifs_sb);
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(mnt_ctx->ses, ctx);
@@ -3614,9 +3625,9 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
* path (i.e., do not remap / and \ and do not map any special characters)
*/
if (tcon->posix_extensions) {
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
- cifs_sb->mnt_cifs_flags &= ~(CIFS_MOUNT_MAP_SFM_CHR |
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ sbflags |= CIFS_MOUNT_POSIX_PATHS;
+ sbflags &= ~(CIFS_MOUNT_MAP_SFM_CHR |
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
@@ -3643,12 +3654,11 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
/* do not care if a following call succeed - informational */
if (!tcon->pipe && server->ops->qfs_tcon) {
server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
+ if (sbflags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
cpu_to_le32(FILE_READ_ONLY_DEVICE))
cifs_dbg(VFS, "mounted to read only share\n");
- else if ((cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_RW_CACHE) == 0)
+ else if (!(sbflags & CIFS_MOUNT_RW_CACHE))
cifs_dbg(VFS, "read only mount of RW share\n");
/* no need to log a RW mount of a typical RW share */
}
@@ -3660,11 +3670,12 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
* Inside cifs_fscache_get_super_cookie it checks
* that we do not get super cookie twice.
*/
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+ if (sbflags & CIFS_MOUNT_FSCACHE)
cifs_fscache_get_super_cookie(tcon);
out:
mnt_ctx->tcon = tcon;
+ atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
return rc;
}
@@ -3783,7 +3794,8 @@ int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx)
cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
if (rc != 0) {
cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ atomic_or(CIFS_MOUNT_USE_PREFIX_PATH,
+ &cifs_sb->mnt_cifs_flags);
rc = 0;
}
}
@@ -3863,7 +3875,7 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
* Force the use of prefix path to support failover on DFS paths that resolve to targets
* that have different prefix paths.
*/
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags);
kfree(cifs_sb->prepath);
cifs_sb->prepath = ctx->prepath;
ctx->prepath = NULL;
@@ -4357,7 +4369,7 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
kuid_t fsuid = current_fsuid();
int err;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
spin_lock(&cifs_sb->tlink_tree_lock);
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 983132735d72..83f8cf2f8d2b 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1333,7 +1333,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
* Force the use of prefix path to support failover on DFS paths that resolve to targets
* that have different prefix paths.
*/
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags);
refresh_tcon_referral(tcon, true);
return 0;
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index cb10088197d2..6d2378eeb7f6 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -82,10 +82,11 @@ char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *pa
const char *tree, int tree_len,
bool prefix)
{
- int dfsplen;
- int pplen = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry);
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
char dirsep = CIFS_DIR_SEP(cifs_sb);
+ int pplen = 0;
+ int dfsplen;
char *s;
if (unlikely(!page))
@@ -96,7 +97,7 @@ char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *pa
else
dfsplen = 0;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ if (sbflags & CIFS_MOUNT_USE_PREFIX_PATH)
pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
s = dentry_path_raw(direntry, page, PATH_MAX);
@@ -123,7 +124,7 @@ char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *pa
if (dfsplen) {
s -= dfsplen;
memcpy(s, tree, dfsplen);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+ if (sbflags & CIFS_MOUNT_POSIX_PATHS) {
int i;
for (i = 0; i < dfsplen; i++) {
if (s[i] == '\\')
@@ -152,7 +153,7 @@ char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page
static int
check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry);
int i;
if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
@@ -160,7 +161,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') {
cifs_dbg(FYI, "Invalid file name\n");
@@ -181,11 +182,12 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
int desired_access;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct cifs_tcon *tcon = tlink_tcon(tlink);
const char *full_path;
void *page = alloc_dentry_path();
struct inode *newinode = NULL;
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
@@ -306,6 +308,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
goto out;
}
+ create_options |= cifs_open_create_options(oflags, create_options);
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
@@ -374,7 +377,7 @@ retry_open:
.device = 0,
};
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ if (sbflags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
if (inode->i_mode & S_ISGID)
args.gid = inode->i_gid;
@@ -411,9 +414,9 @@ cifs_create_get_file_info:
if (server->ops->set_lease_key)
server->ops->set_lease_key(newinode, fid);
if ((*oplock & CIFS_CREATE_ACTION) && S_ISREG(newinode->i_mode)) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ if (sbflags & CIFS_MOUNT_DYNPERM)
newinode->i_mode = mode;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ if (sbflags & CIFS_MOUNT_SET_UID) {
newinode->i_uid = current_fsuid();
if (inode->i_mode & S_ISGID)
newinode->i_gid = inode->i_gid;
@@ -458,18 +461,20 @@ int
cifs_atomic_open(struct inode *inode, struct dentry *direntry,
struct file *file, unsigned int oflags, umode_t mode)
{
- int rc;
- unsigned int xid;
- struct tcon_link *tlink;
- struct cifs_tcon *tcon;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ struct cifs_open_info_data buf = {};
struct TCP_Server_Info *server;
- struct cifs_fid fid = {};
+ struct cifsFileInfo *file_info;
struct cifs_pending_open open;
+ struct cifs_fid fid = {};
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ unsigned int sbflags;
+ unsigned int xid;
__u32 oplock;
- struct cifsFileInfo *file_info;
- struct cifs_open_info_data buf = {};
+ int rc;
- if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
+ if (unlikely(cifs_forced_shutdown(cifs_sb)))
return smb_EIO(smb_eio_trace_forced_shutdown);
/*
@@ -499,7 +504,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
inode, direntry, direntry);
- tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+ tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
goto out_free_xid;
@@ -536,13 +541,13 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
goto out;
}
- if (file->f_flags & O_DIRECT &&
- CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
- if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ sbflags = cifs_sb_flags(cifs_sb);
+ if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
+ if (sbflags & CIFS_MOUNT_NO_BRL)
file->f_op = &cifs_file_direct_nobrl_ops;
else
file->f_op = &cifs_file_direct_ops;
- }
+ }
file_info = cifs_new_fileinfo(&fid, file, tlink, oplock, buf.symlink_target);
if (file_info == NULL) {
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 18f31d4eb98d..a69e05f86d7e 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -255,7 +255,7 @@ static void cifs_begin_writeback(struct netfs_io_request *wreq)
struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
int ret;
- ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
+ ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
if (ret) {
cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
return;
@@ -270,7 +270,7 @@ static void cifs_begin_writeback(struct netfs_io_request *wreq)
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
- struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode);
struct cifsFileInfo *open_file = NULL;
rreq->rsize = cifs_sb->ctx->rsize;
@@ -281,7 +281,7 @@ static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
open_file = file->private_data;
rreq->netfs_priv = file->private_data;
req->cfile = cifsFileInfo_get(open_file);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RWPIDFORWARD)
req->pid = req->cfile->pid;
} else if (rreq->origin != NETFS_WRITEBACK) {
WARN_ON_ONCE(1);
@@ -584,15 +584,8 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
*********************************************************************/
disposition = cifs_get_disposition(f_flags);
-
/* BB pass O_SYNC flag through on file attributes .. BB */
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(f_flags, create_options);
retry_open:
oparms = (struct cifs_open_parms) {
@@ -711,8 +704,6 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
- cifs_sb_active(inode->i_sb);
-
/*
* If the server returned a read oplock and we have mandatory brlocks,
* set oplock level to None.
@@ -767,7 +758,6 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
struct inode *inode = d_inode(cifs_file->dentry);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsLockInfo *li, *tmp;
- struct super_block *sb = inode->i_sb;
/*
* Delete any outstanding lock records. We'll lose them when the file
@@ -785,7 +775,6 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
- cifs_sb_deactive(sb);
kfree(cifs_file->symlink_target);
kfree(cifs_file);
}
@@ -906,7 +895,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
* close because it may cause a error when we open this file
* again and get at least level II oplock.
*/
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_STRICT_IO)
set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
cifs_set_oplock_level(cifsi, 0);
}
@@ -955,11 +944,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
int cifs_file_flush(const unsigned int xid, struct inode *inode,
struct cifsFileInfo *cfile)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct cifs_tcon *tcon;
int rc;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)
return 0;
if (cfile && (OPEN_FMODE(cfile->f_flags) & FMODE_WRITE)) {
@@ -967,7 +956,7 @@ int cifs_file_flush(const unsigned int xid, struct inode *inode,
return tcon->ses->server->ops->flush(xid, tcon,
&cfile->fid);
}
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
if (!rc) {
tcon = tlink_tcon(cfile->tlink);
rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
@@ -992,7 +981,7 @@ static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
return -ERESTARTSYS;
mapping_set_error(inode->i_mapping, rc);
- cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
rc = cifs_file_flush(xid, inode, cfile);
if (!rc) {
if (cfile) {
@@ -1004,7 +993,6 @@ static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
if (!rc) {
netfs_resize_file(&cinode->netfs, 0, true);
cifs_setsize(inode, 0);
- inode->i_blocks = 0;
}
}
if (cfile)
@@ -1015,24 +1003,24 @@ static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
int cifs_open(struct inode *inode, struct file *file)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ struct cifs_open_info_data data = {};
+ struct cifsFileInfo *cfile = NULL;
+ struct TCP_Server_Info *server;
+ struct cifs_pending_open open;
+ bool posix_open_ok = false;
+ struct cifs_fid fid = {};
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ const char *full_path;
+ unsigned int sbflags;
int rc = -EACCES;
unsigned int xid;
__u32 oplock;
- struct cifs_sb_info *cifs_sb;
- struct TCP_Server_Info *server;
- struct cifs_tcon *tcon;
- struct tcon_link *tlink;
- struct cifsFileInfo *cfile = NULL;
void *page;
- const char *full_path;
- bool posix_open_ok = false;
- struct cifs_fid fid = {};
- struct cifs_pending_open open;
- struct cifs_open_info_data data = {};
xid = get_xid();
- cifs_sb = CIFS_SB(inode->i_sb);
if (unlikely(cifs_forced_shutdown(cifs_sb))) {
free_xid(xid);
return smb_EIO(smb_eio_trace_forced_shutdown);
@@ -1056,9 +1044,9 @@ int cifs_open(struct inode *inode, struct file *file)
cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
inode, file->f_flags, full_path);
- if (file->f_flags & O_DIRECT &&
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ sbflags = cifs_sb_flags(cifs_sb);
+ if ((file->f_flags & O_DIRECT) && (sbflags & CIFS_MOUNT_STRICT_IO)) {
+ if (sbflags & CIFS_MOUNT_NO_BRL)
file->f_op = &cifs_file_direct_nobrl_ops;
else
file->f_op = &cifs_file_direct_ops;
@@ -1072,32 +1060,29 @@ int cifs_open(struct inode *inode, struct file *file)
/* Get the cached handle as SMB2 close is deferred */
if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
- rc = cifs_get_writable_path(tcon, full_path,
- FIND_WR_FSUID_ONLY |
- FIND_WR_NO_PENDING_DELETE,
- &cfile);
+ rc = __cifs_get_writable_file(CIFS_I(inode),
+ FIND_FSUID_ONLY |
+ FIND_NO_PENDING_DELETE |
+ FIND_OPEN_FLAGS,
+ file->f_flags, &cfile);
} else {
- rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ cfile = __find_readable_file(CIFS_I(inode),
+ FIND_NO_PENDING_DELETE |
+ FIND_OPEN_FLAGS,
+ file->f_flags);
+ rc = cfile ? 0 : -ENOENT;
}
if (rc == 0) {
- unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
- unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
-
- if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
- (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
- file->private_data = cfile;
- spin_lock(&CIFS_I(inode)->deferred_lock);
- cifs_del_deferred_close(cfile);
- spin_unlock(&CIFS_I(inode)->deferred_lock);
- goto use_cache;
- }
- _cifsFileInfo_put(cfile, true, false);
- } else {
- /* hard link on the defeered close file */
- rc = cifs_get_hardlink_path(tcon, inode, file);
- if (rc)
- cifs_close_deferred_file(CIFS_I(inode));
- }
+ file->private_data = cfile;
+ spin_lock(&CIFS_I(inode)->deferred_lock);
+ cifs_del_deferred_close(cfile);
+ spin_unlock(&CIFS_I(inode)->deferred_lock);
+ goto use_cache;
+ }
+ /* hard link on the deferred close file */
+ rc = cifs_get_hardlink_path(tcon, inode, file);
+ if (rc)
+ cifs_close_deferred_file(CIFS_I(inode));
if (server->oplocks)
oplock = REQ_OPLOCK;
@@ -1209,7 +1194,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
@@ -1222,7 +1207,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
@@ -1318,13 +1303,8 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
rdwr_for_fscache = 1;
desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (cfile->f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (cfile->f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(cfile->f_flags,
+ create_options);
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -2011,7 +1991,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cinode);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* we are going to update can_cache_brlcks here - need a write access */
@@ -2024,7 +2004,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
rc = cifs_push_posix_locks(cfile);
else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
@@ -2428,11 +2408,11 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
- cifs_sb = CIFS_FILE_SB(file);
+ cifs_sb = CIFS_SB(file);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
if (!lock && !unlock) {
@@ -2455,14 +2435,14 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
- int rc, xid;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file);
+ struct cifsFileInfo *cfile;
int lock = 0, unlock = 0;
bool wait_flag = false;
bool posix_lck = false;
- struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
- struct cifsFileInfo *cfile;
__u32 type;
+ int rc, xid;
rc = -EACCES;
xid = get_xid();
@@ -2477,12 +2457,11 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
- cifs_sb = CIFS_FILE_SB(file);
set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
+ ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
/*
* BB add code here to normalize offset and length to account for
@@ -2529,14 +2508,37 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t
netfs_write_subrequest_terminated(&wdata->subreq, result);
}
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only)
+static bool open_flags_match(struct cifsInodeInfo *cinode,
+ unsigned int oflags, unsigned int cflags)
+{
+ struct inode *inode = &cinode->netfs.inode;
+ int crw = 0, orw = 0;
+
+ oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
+ cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
+
+ if (cifs_fscache_enabled(inode)) {
+ if (OPEN_FMODE(cflags) & FMODE_WRITE)
+ crw = 1;
+ if (OPEN_FMODE(oflags) & FMODE_WRITE)
+ orw = 1;
+ }
+ if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
+ return false;
+
+ return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
+}
+
+struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ unsigned int open_flags)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
+ bool fsuid_only = find_flags & FIND_FSUID_ONLY;
struct cifsFileInfo *open_file = NULL;
- struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
/* only filter by fsuid on multiuser mounts */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
spin_lock(&cifs_inode->open_file_lock);
@@ -2546,6 +2548,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
+ if ((find_flags & FIND_NO_PENDING_DELETE) &&
+ open_file->status_file_deleted)
+ continue;
+ if ((find_flags & FIND_OPEN_FLAGS) &&
+ !open_flags_match(cifs_inode, open_flags,
+ open_file->f_flags))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
if ((!open_file->invalidHandle)) {
/* found a good file */
@@ -2564,17 +2573,17 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
}
/* Return -EBADF if no handle is found and general rc otherwise */
-int
-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
- struct cifsFileInfo **ret_file)
+int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags, unsigned int open_flags,
+ struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
- bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
- bool with_delete = flags & FIND_WR_WITH_DELETE;
+ bool fsuid_only = find_flags & FIND_FSUID_ONLY;
+ bool with_delete = find_flags & FIND_WITH_DELETE;
*ret_file = NULL;
/*
@@ -2589,10 +2598,10 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
return rc;
}
- cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
+ cifs_sb = CIFS_SB(cifs_inode);
/* only filter by fsuid on multiuser mounts */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
spin_lock(&cifs_inode->open_file_lock);
@@ -2608,9 +2617,13 @@ refind_writable:
continue;
if (with_delete && !(open_file->fid.access & DELETE))
continue;
- if ((flags & FIND_WR_NO_PENDING_DELETE) &&
+ if ((find_flags & FIND_NO_PENDING_DELETE) &&
open_file->status_file_deleted)
continue;
+ if ((find_flags & FIND_OPEN_FLAGS) &&
+ !open_flags_match(cifs_inode, open_flags,
+ open_file->f_flags))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
@@ -2727,17 +2740,7 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
- *ret_file = find_readable_file(cinode, 0);
- if (*ret_file) {
- spin_lock(&cinode->open_file_lock);
- if ((*ret_file)->status_file_deleted) {
- spin_unlock(&cinode->open_file_lock);
- cifsFileInfo_put(*ret_file);
- *ret_file = NULL;
- } else {
- spin_unlock(&cinode->open_file_lock);
- }
- }
+ *ret_file = find_readable_file(cinode, FIND_ANY);
return *ret_file ? 0 : -ENOENT;
}
@@ -2787,7 +2790,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file_inode(file);
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file);
rc = file_write_and_wait_range(file, start, end);
if (rc) {
@@ -2801,7 +2804,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
file, datasync);
tcon = tlink_tcon(smbfile->tlink);
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
if (server->ops->flush == NULL) {
rc = -ENOSYS;
@@ -2809,7 +2812,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
- smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
if (smbfile) {
rc = server->ops->flush(xid, tcon, &smbfile->fid);
cifsFileInfo_put(smbfile);
@@ -2853,7 +2856,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
ssize_t rc;
rc = netfs_start_io_write(inode);
@@ -2870,7 +2873,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
if (rc <= 0)
goto out;
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
+ if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) &&
(cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, 0,
NULL, CIFS_WRITE_OP))) {
@@ -2893,7 +2896,7 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
iocb->ki_filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -2906,7 +2909,7 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
if (CIFS_CACHE_WRITE(cinode)) {
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+ ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
written = netfs_file_write_iter(iocb, from);
goto out;
}
@@ -2994,7 +2997,7 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct cifsFileInfo *cfile = (struct cifsFileInfo *)
iocb->ki_filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
@@ -3011,7 +3014,7 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
if (!CIFS_CACHE_READ(cinode))
return netfs_unbuffered_read_iter(iocb, to);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
+ if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NOPOSIXBRL) == 0) {
if (iocb->ki_flags & IOCB_DIRECT)
return netfs_unbuffered_read_iter(iocb, to);
return netfs_buffered_read_iter(iocb, to);
@@ -3130,10 +3133,9 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
if (is_inode_writable(cifsInode) ||
((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
/* This inode is open for write at least once */
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifsInode);
- cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DIRECT_IO) {
/* since no page cache to corrupt on directio
we can change size safely */
return true;
@@ -3165,12 +3167,6 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
- /*
- * Hold a reference to the superblock to prevent it and its inodes from
- * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
- * may release the last reference to the sb and trigger inode eviction.
- */
- cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -3181,7 +3177,7 @@ void cifs_oplock_break(struct work_struct *work)
server = tcon->ses->server;
scoped_guard(spinlock, &cinode->open_file_lock) {
- unsigned int sbflags = cifs_sb->mnt_cifs_flags;
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
cfile->oplock_epoch, &purge_cache);
@@ -3255,7 +3251,6 @@ oplock_break_ack:
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
- cifs_sb_deactive(sb);
}
static int cifs_swap_activate(struct swap_info_struct *sis,
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 09fe749e97ee..a4a7c7eee038 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1997,7 +1997,7 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->backupuid_specified = false; /* no backup intent for a user */
ctx->backupgid_specified = false; /* no backup intent for a group */
- ctx->retrans = 1;
+ ctx->retrans = 0;
ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT;
ctx->nonativesocket = 0;
@@ -2062,161 +2062,160 @@ smb3_cleanup_fs_context(struct smb3_fs_context *ctx)
kfree(ctx);
}
-void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb)
+unsigned int smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb)
{
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
struct smb3_fs_context *ctx = cifs_sb->ctx;
if (ctx->nodfs)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS;
+ sbflags |= CIFS_MOUNT_NO_DFS;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS;
+ sbflags &= ~CIFS_MOUNT_NO_DFS;
if (ctx->noperm)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
+ sbflags |= CIFS_MOUNT_NO_PERM;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM;
+ sbflags &= ~CIFS_MOUNT_NO_PERM;
if (ctx->setuids)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+ sbflags |= CIFS_MOUNT_SET_UID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID;
+ sbflags &= ~CIFS_MOUNT_SET_UID;
if (ctx->setuidfromacl)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
+ sbflags |= CIFS_MOUNT_UID_FROM_ACL;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL;
+ sbflags &= ~CIFS_MOUNT_UID_FROM_ACL;
if (ctx->server_ino)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
+ sbflags |= CIFS_MOUNT_SERVER_INUM;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+ sbflags &= ~CIFS_MOUNT_SERVER_INUM;
if (ctx->remap)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR;
+ sbflags |= CIFS_MOUNT_MAP_SFM_CHR;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR;
+ sbflags &= ~CIFS_MOUNT_MAP_SFM_CHR;
if (ctx->sfu_remap)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
+ sbflags |= CIFS_MOUNT_MAP_SPECIAL_CHR;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR;
+ sbflags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR;
if (ctx->no_xattr)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR;
+ sbflags |= CIFS_MOUNT_NO_XATTR;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR;
+ sbflags &= ~CIFS_MOUNT_NO_XATTR;
if (ctx->sfu_emul)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL;
+ sbflags |= CIFS_MOUNT_UNX_EMUL;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL;
+ sbflags &= ~CIFS_MOUNT_UNX_EMUL;
if (ctx->nobrl)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL;
+ sbflags |= CIFS_MOUNT_NO_BRL;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL;
+ sbflags &= ~CIFS_MOUNT_NO_BRL;
if (ctx->nohandlecache)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE;
+ sbflags |= CIFS_MOUNT_NO_HANDLE_CACHE;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE;
+ sbflags &= ~CIFS_MOUNT_NO_HANDLE_CACHE;
if (ctx->nostrictsync)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
+ sbflags |= CIFS_MOUNT_NOSSYNC;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC;
+ sbflags &= ~CIFS_MOUNT_NOSSYNC;
if (ctx->mand_lock)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
+ sbflags |= CIFS_MOUNT_NOPOSIXBRL;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL;
+ sbflags &= ~CIFS_MOUNT_NOPOSIXBRL;
if (ctx->rwpidforward)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
+ sbflags |= CIFS_MOUNT_RWPIDFORWARD;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD;
+ sbflags &= ~CIFS_MOUNT_RWPIDFORWARD;
if (ctx->mode_ace)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID;
+ sbflags |= CIFS_MOUNT_MODE_FROM_SID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID;
+ sbflags &= ~CIFS_MOUNT_MODE_FROM_SID;
if (ctx->cifs_acl)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
+ sbflags |= CIFS_MOUNT_CIFS_ACL;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL;
+ sbflags &= ~CIFS_MOUNT_CIFS_ACL;
if (ctx->backupuid_specified)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
+ sbflags |= CIFS_MOUNT_CIFS_BACKUPUID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID;
+ sbflags &= ~CIFS_MOUNT_CIFS_BACKUPUID;
if (ctx->backupgid_specified)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
+ sbflags |= CIFS_MOUNT_CIFS_BACKUPGID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID;
+ sbflags &= ~CIFS_MOUNT_CIFS_BACKUPGID;
if (ctx->override_uid)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
+ sbflags |= CIFS_MOUNT_OVERR_UID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID;
+ sbflags &= ~CIFS_MOUNT_OVERR_UID;
if (ctx->override_gid)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
+ sbflags |= CIFS_MOUNT_OVERR_GID;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID;
+ sbflags &= ~CIFS_MOUNT_OVERR_GID;
if (ctx->dynperm)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
+ sbflags |= CIFS_MOUNT_DYNPERM;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM;
+ sbflags &= ~CIFS_MOUNT_DYNPERM;
if (ctx->fsc)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
+ sbflags |= CIFS_MOUNT_FSCACHE;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE;
+ sbflags &= ~CIFS_MOUNT_FSCACHE;
if (ctx->multiuser)
- cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
- CIFS_MOUNT_NO_PERM);
+ sbflags |= CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER;
+ sbflags &= ~CIFS_MOUNT_MULTIUSER;
if (ctx->strict_io)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
+ sbflags |= CIFS_MOUNT_STRICT_IO;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO;
+ sbflags &= ~CIFS_MOUNT_STRICT_IO;
if (ctx->direct_io)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
+ sbflags |= CIFS_MOUNT_DIRECT_IO;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO;
+ sbflags &= ~CIFS_MOUNT_DIRECT_IO;
if (ctx->mfsymlinks)
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
+ sbflags |= CIFS_MOUNT_MF_SYMLINKS;
else
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS;
- if (ctx->mfsymlinks) {
- if (ctx->sfu_emul) {
- /*
- * Our SFU ("Services for Unix") emulation allows now
- * creating new and reading existing SFU symlinks.
- * Older Linux kernel versions were not able to neither
- * read existing nor create new SFU symlinks. But
- * creating and reading SFU style mknod and FIFOs was
- * supported for long time. When "mfsymlinks" and
- * "sfu" are both enabled at the same time, it allows
- * reading both types of symlinks, but will only create
- * them with mfsymlinks format. This allows better
- * Apple compatibility, compatibility with older Linux
- * kernel clients (probably better for Samba too)
- * while still recognizing old Windows style symlinks.
- */
- cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
- }
- }
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN;
+ sbflags &= ~CIFS_MOUNT_MF_SYMLINKS;
- return;
+ if (ctx->mfsymlinks && ctx->sfu_emul) {
+ /*
+ * Our SFU ("Services for Unix") emulation allows now
+ * creating new and reading existing SFU symlinks.
+ * Older Linux kernel versions were not able to neither
+ * read existing nor create new SFU symlinks. But
+ * creating and reading SFU style mknod and FIFOs was
+ * supported for long time. When "mfsymlinks" and
+ * "sfu" are both enabled at the same time, it allows
+ * reading both types of symlinks, but will only create
+ * them with mfsymlinks format. This allows better
+ * Apple compatibility, compatibility with older Linux
+ * kernel clients (probably better for Samba too)
+ * while still recognizing old Windows style symlinks.
+ */
+ cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n");
+ }
+ sbflags &= ~CIFS_MOUNT_SHUTDOWN;
+ atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
+ return sbflags;
}
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index 49b2a6f09ca2..0b64fcb5d302 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -374,7 +374,7 @@ int smb3_fs_context_dup(struct smb3_fs_context *new_ctx,
struct smb3_fs_context *ctx);
int smb3_sync_session_ctx_passwords(struct cifs_sb_info *cifs_sb,
struct cifs_ses *ses);
-void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+unsigned int smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
/*
* max deferred close timeout (jiffies) - 2^30
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index d4d3cfeb6c90..888f9e35f14b 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -40,32 +40,33 @@ static void cifs_set_netfs_context(struct inode *inode)
static void cifs_set_ops(struct inode *inode)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct netfs_inode *ictx = netfs_inode(inode);
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &cifs_file_inode_ops;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+ if (sbflags & CIFS_MOUNT_DIRECT_IO) {
set_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ if (sbflags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_direct_nobrl_ops;
else
inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ } else if (sbflags & CIFS_MOUNT_STRICT_IO) {
+ if (sbflags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_strict_nobrl_ops;
else
inode->i_fop = &cifs_file_strict_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ } else if (sbflags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_nobrl_ops;
else { /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
}
/* check if server can support readahead */
- if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
- PAGE_SIZE + MAX_CIFS_HDR_SIZE)
+ if (tcon->ses->server->max_read < PAGE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
inode->i_data.a_ops = &cifs_addr_ops;
@@ -194,8 +195,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
inode->i_gid = fattr->cf_gid;
/* if dynperm is set, don't clobber existing mode */
- if (inode_state_read(inode) & I_NEW ||
- !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
+ if ((inode_state_read(inode) & I_NEW) ||
+ !(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_DYNPERM))
inode->i_mode = fattr->cf_mode;
cifs_i->cifsAttrs = fattr->cf_cifsattrs;
@@ -218,13 +219,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
*/
if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) {
i_size_write(inode, fattr->cf_eof);
-
- /*
- * i_blocks is not related to (i_size / i_blksize),
- * but instead 512 byte (2**9) size is required for
- * calculating num blocks.
- */
- inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
+ inode->i_blocks = CIFS_INO_BLOCKS(fattr->cf_bytes);
}
if (S_ISLNK(fattr->cf_mode) && fattr->cf_symlink_target) {
@@ -248,10 +243,8 @@ cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
- return;
-
- fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM))
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
}
/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
@@ -259,6 +252,8 @@ void
cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
struct cifs_sb_info *cifs_sb)
{
+ unsigned int sbflags;
+
memset(fattr, 0, sizeof(*fattr));
fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
@@ -317,8 +312,9 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
break;
}
+ sbflags = cifs_sb_flags(cifs_sb);
fattr->cf_uid = cifs_sb->ctx->linux_uid;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)) {
+ if (!(sbflags & CIFS_MOUNT_OVERR_UID)) {
u64 id = le64_to_cpu(info->Uid);
if (id < ((uid_t)-1)) {
kuid_t uid = make_kuid(&init_user_ns, id);
@@ -328,7 +324,7 @@ cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
}
fattr->cf_gid = cifs_sb->ctx->linux_gid;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)) {
+ if (!(sbflags & CIFS_MOUNT_OVERR_GID)) {
u64 id = le64_to_cpu(info->Gid);
if (id < ((gid_t)-1)) {
kgid_t gid = make_kgid(&init_user_ns, id);
@@ -382,7 +378,7 @@ static int update_inode_info(struct super_block *sb,
*
* If file type or uniqueid is different, return error.
*/
- if (unlikely((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
+ if (unlikely((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM) &&
CIFS_I(*inode)->uniqueid != fattr->cf_uniqueid)) {
CIFS_I(*inode)->time = 0; /* force reval */
return -ESTALE;
@@ -468,7 +464,7 @@ static int cifs_get_unix_fattr(const unsigned char *full_path,
cifs_fill_uniqueid(sb, fattr);
/* check for Minshall+French symlinks */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MF_SYMLINKS) {
tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path);
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
@@ -1081,7 +1077,7 @@ cifs_backup_query_path_info(int xid,
else if ((tcon->ses->capabilities &
tcon->ses->server->vals->cap_nt_find) == 0)
info.info_level = SMB_FIND_FILE_INFO_STANDARD;
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ else if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM)
info.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
else /* no srvino useful for fallback to some netapp */
info.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
@@ -1109,7 +1105,7 @@ static void cifs_set_fattr_ino(int xid, struct cifs_tcon *tcon, struct super_blo
struct TCP_Server_Info *server = tcon->ses->server;
int rc;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM)) {
if (*inode)
fattr->cf_uniqueid = CIFS_I(*inode)->uniqueid;
else
@@ -1263,14 +1259,15 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
struct inode **inode,
const char *full_path)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_open_info_data tmp_data = {};
- struct cifs_tcon *tcon;
+ void *smb1_backup_rsp_buf = NULL;
struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon;
struct tcon_link *tlink;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- void *smb1_backup_rsp_buf = NULL;
- int rc = 0;
+ unsigned int sbflags;
int tmprc = 0;
+ int rc = 0;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -1370,16 +1367,17 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
handle_mnt_opt:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+ sbflags = cifs_sb_flags(cifs_sb);
/* query for SFU type info if supported and needed */
if ((fattr->cf_cifsattrs & ATTR_SYSTEM) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
+ (sbflags & CIFS_MOUNT_UNX_EMUL)) {
tmprc = cifs_sfu_type(fattr, full_path, cifs_sb, xid);
if (tmprc)
cifs_dbg(FYI, "cifs_sfu_type failed: %d\n", tmprc);
}
/* fill in 0777 bits from ACL */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
+ if (sbflags & CIFS_MOUNT_MODE_FROM_SID) {
rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode,
true, full_path, fid);
if (rc == -EREMOTE)
@@ -1389,7 +1387,7 @@ handle_mnt_opt:
__func__, rc);
goto out;
}
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ } else if (sbflags & CIFS_MOUNT_CIFS_ACL) {
rc = cifs_acl_to_fattr(cifs_sb, fattr, *inode,
false, full_path, fid);
if (rc == -EREMOTE)
@@ -1399,7 +1397,7 @@ handle_mnt_opt:
__func__, rc);
goto out;
}
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ } else if (sbflags & CIFS_MOUNT_UNX_EMUL)
/* fill in remaining high mode bits e.g. SUID, VTX */
cifs_sfu_mode(fattr, full_path, cifs_sb, xid);
else if (!(tcon->posix_extensions))
@@ -1409,7 +1407,7 @@ handle_mnt_opt:
/* check for Minshall+French symlinks */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ if (sbflags & CIFS_MOUNT_MF_SYMLINKS) {
tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path);
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
@@ -1509,7 +1507,7 @@ static int smb311_posix_get_fattr(struct cifs_open_info_data *data,
* 3. Tweak fattr based on mount options
*/
/* check for Minshall+French symlinks */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MF_SYMLINKS) {
tmprc = check_mf_symlink(xid, tcon, cifs_sb, fattr, full_path);
cifs_dbg(FYI, "check_mf_symlink: %d\n", tmprc);
}
@@ -1660,7 +1658,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
int len;
int rc;
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ if ((cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
&& cifs_sb->prepath) {
len = strlen(cifs_sb->prepath);
path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
@@ -2098,8 +2096,9 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
const char *full_path, struct cifs_sb_info *cifs_sb,
struct cifs_tcon *tcon, const unsigned int xid)
{
- int rc = 0;
struct inode *inode = NULL;
+ unsigned int sbflags;
+ int rc = 0;
if (tcon->posix_extensions) {
rc = smb311_posix_get_inode_info(&inode, full_path,
@@ -2139,6 +2138,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
if (parent->i_mode & S_ISGID)
mode |= S_ISGID;
+ sbflags = cifs_sb_flags(cifs_sb);
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext) {
struct cifs_unix_set_info_args args = {
@@ -2148,7 +2148,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
.mtime = NO_CHANGE_64,
.device = 0,
};
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ if (sbflags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
if (parent->i_mode & S_ISGID)
args.gid = parent->i_gid;
@@ -2166,14 +2166,14 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode,
{
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
struct TCP_Server_Info *server = tcon->ses->server;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
+ if (!(sbflags & CIFS_MOUNT_CIFS_ACL) &&
(mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo)
server->ops->mkdir_setinfo(inode, full_path, cifs_sb,
tcon, xid);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ if (sbflags & CIFS_MOUNT_DYNPERM)
inode->i_mode = (mode | S_IFDIR);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ if (sbflags & CIFS_MOUNT_SET_UID) {
inode->i_uid = current_fsuid();
if (inode->i_mode & S_ISGID)
inode->i_gid = parent->i_gid;
@@ -2686,7 +2686,7 @@ cifs_dentry_needs_reval(struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct cached_fid *cfid = NULL;
@@ -2727,7 +2727,7 @@ cifs_dentry_needs_reval(struct dentry *dentry)
}
/* hardlinked files w/ noserverino get "special" treatment */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM) &&
S_ISREG(inode->i_mode) && inode->i_nlink != 1)
return true;
@@ -2752,10 +2752,10 @@ cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
int
cifs_revalidate_mapping(struct inode *inode)
{
- int rc;
struct cifsInodeInfo *cifs_inode = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
unsigned long *flags = &cifs_inode->flags;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ int rc;
/* swapfiles are not supposed to be shared */
if (IS_SWAPFILE(inode))
@@ -2768,7 +2768,7 @@ cifs_revalidate_mapping(struct inode *inode)
if (test_and_clear_bit(CIFS_INO_INVALID_MAPPING, flags)) {
/* for cache=singleclient, do not invalidate */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_RW_CACHE)
goto skip_invalidate;
cifs_inode->netfs.zero_point = cifs_inode->netfs.remote_i_size;
@@ -2892,10 +2892,11 @@ int cifs_revalidate_dentry(struct dentry *dentry)
int cifs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
- struct dentry *dentry = path->dentry;
- struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(path->dentry);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct dentry *dentry = path->dentry;
struct inode *inode = d_inode(dentry);
+ unsigned int sbflags;
int rc;
if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
@@ -2952,12 +2953,13 @@ int cifs_getattr(struct mnt_idmap *idmap, const struct path *path,
* enabled, and the admin hasn't overridden them, set the ownership
* to the fsuid/fsgid of the current process.
*/
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
- !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
+ sbflags = cifs_sb_flags(cifs_sb);
+ if ((sbflags & CIFS_MOUNT_MULTIUSER) &&
+ !(sbflags & CIFS_MOUNT_CIFS_ACL) &&
!tcon->unix_ext) {
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
+ if (!(sbflags & CIFS_MOUNT_OVERR_UID))
stat->uid = current_fsuid();
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
+ if (!(sbflags & CIFS_MOUNT_OVERR_GID))
stat->gid = current_fsgid();
}
return 0;
@@ -2989,7 +2991,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
}
}
- cfile = find_readable_file(cifs_i, false);
+ cfile = find_readable_file(cifs_i, FIND_ANY);
if (cfile == NULL)
return -EINVAL;
@@ -3007,6 +3009,11 @@ void cifs_setsize(struct inode *inode, loff_t offset)
{
spin_lock(&inode->i_lock);
i_size_write(inode, offset);
+ /*
+ * Until we can query the server for actual allocation size,
+	 * this is the best estimate we have for blocks allocated for a file.
+ */
+ inode->i_blocks = CIFS_INO_BLOCKS(offset);
spin_unlock(&inode->i_lock);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
truncate_pagecache(inode, offset);
@@ -3042,7 +3049,7 @@ int cifs_file_set_size(const unsigned int xid, struct dentry *dentry,
size, false);
cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc);
} else {
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY);
if (open_file) {
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
@@ -3079,14 +3086,6 @@ set_size_out:
if (rc == 0) {
netfs_resize_file(&cifsInode->netfs, size, true);
cifs_setsize(inode, size);
- /*
- * i_blocks is not related to (i_size / i_blksize), but instead
- * 512 byte (2**9) size is required for calculating num blocks.
- * Until we can query the server for actual allocation size,
- * this is best estimate we have for blocks allocated for a file
- * Number of blocks must be rounded up so size 1 is not 0 blocks
- */
- inode->i_blocks = (512 - 1 + size) >> 9;
}
return rc;
@@ -3102,7 +3101,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
void *page = alloc_dentry_path();
struct inode *inode = d_inode(direntry);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifs_unix_set_info_args *args = NULL;
@@ -3113,7 +3112,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
xid = get_xid();
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs);
@@ -3211,7 +3210,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
open_file->fid.netfid,
open_file->pid);
} else {
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY);
if (open_file) {
pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
@@ -3266,26 +3265,26 @@ out:
static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{
- unsigned int xid;
- kuid_t uid = INVALID_UID;
- kgid_t gid = INVALID_GID;
struct inode *inode = d_inode(direntry);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
struct cifsFileInfo *cfile = NULL;
- const char *full_path;
void *page = alloc_dentry_path();
- int rc = -EACCES;
- __u32 dosattr = 0;
__u64 mode = NO_CHANGE_64;
- bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+ kuid_t uid = INVALID_UID;
+ kgid_t gid = INVALID_GID;
+ const char *full_path;
+ __u32 dosattr = 0;
+ int rc = -EACCES;
+ unsigned int xid;
xid = get_xid();
cifs_dbg(FYI, "setattr on file %pd attrs->ia_valid 0x%x\n",
direntry, attrs->ia_valid);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ if (sbflags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
rc = setattr_prepare(&nop_mnt_idmap, direntry, attrs);
@@ -3346,8 +3345,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_GID)
gid = attrs->ia_gid;
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
+ if (sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID)) {
if (uid_valid(uid) || gid_valid(gid)) {
mode = NO_CHANGE_64;
rc = id_mode_to_cifs_acl(inode, full_path, &mode,
@@ -3358,9 +3356,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
goto cifs_setattr_exit;
}
}
- } else
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
+ } else if (!(sbflags & CIFS_MOUNT_SET_UID)) {
attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
+ }
/* skip mode change if it's just for clearing setuid/setgid */
if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
@@ -3369,9 +3367,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
if (attrs->ia_valid & ATTR_MODE) {
mode = attrs->ia_mode;
rc = 0;
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) ||
- posix) {
+ if ((sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID)) ||
+ cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
rc = id_mode_to_cifs_acl(inode, full_path, &mode,
INVALID_UID, INVALID_GID);
if (rc) {
@@ -3393,7 +3390,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
dosattr = cifsInode->cifsAttrs | ATTR_READONLY;
/* fix up mode if we're not using dynperm */
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
+ if ((sbflags & CIFS_MOUNT_DYNPERM) == 0)
attrs->ia_mode = inode->i_mode & ~S_IWUGO;
} else if ((mode & S_IWUGO) &&
(cifsInode->cifsAttrs & ATTR_READONLY)) {
@@ -3404,7 +3401,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
dosattr |= ATTR_NORMAL;
/* reset local inode permissions to normal */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+ if (!(sbflags & CIFS_MOUNT_DYNPERM)) {
attrs->ia_mode &= ~(S_IALLUGO);
if (S_ISDIR(inode->i_mode))
attrs->ia_mode |=
@@ -3413,7 +3410,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_mode |=
cifs_sb->ctx->file_mode;
}
- } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+ } else if (!(sbflags & CIFS_MOUNT_DYNPERM)) {
/* ignore mode change - ATTR_READONLY hasn't changed */
attrs->ia_valid &= ~ATTR_MODE;
}
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 8dc2651e237f..9afab3237e54 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -216,7 +216,7 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
*/
case CIFS_GOING_FLAGS_LOGFLUSH:
case CIFS_GOING_FLAGS_NOLOGFLUSH:
- sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
+ atomic_or(CIFS_MOUNT_SHUTDOWN, &sbi->mnt_cifs_flags);
goto shutdown_good;
default:
rc = -EINVAL;
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index a2f7bfa8ad1e..434e8fe74080 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -544,14 +544,15 @@ int
cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
struct dentry *direntry, const char *symname)
{
- int rc = -EOPNOTSUPP;
- unsigned int xid;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ struct inode *newinode = NULL;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
const char *full_path;
+ int rc = -EOPNOTSUPP;
+ unsigned int sbflags;
+ unsigned int xid;
void *page;
- struct inode *newinode = NULL;
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return smb_EIO(smb_eio_trace_forced_shutdown);
@@ -580,6 +581,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
cifs_dbg(FYI, "symname is %s\n", symname);
/* BB what if DFS and this volume is on different share? BB */
+ sbflags = cifs_sb_flags(cifs_sb);
rc = -EOPNOTSUPP;
switch (cifs_symlink_type(cifs_sb)) {
case CIFS_SYMLINK_TYPE_UNIX:
@@ -594,14 +596,14 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
break;
case CIFS_SYMLINK_TYPE_MFSYMLINKS:
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ if (sbflags & CIFS_MOUNT_MF_SYMLINKS) {
rc = create_mf_symlink(xid, pTcon, cifs_sb,
full_path, symname);
}
break;
case CIFS_SYMLINK_TYPE_SFU:
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ if (sbflags & CIFS_MOUNT_UNX_EMUL) {
rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon,
full_path, S_IFLNK,
0, symname);
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 22cde46309fe..2aff1cab6c31 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -28,6 +28,11 @@
#include "fs_context.h"
#include "cached_dir.h"
+struct tcon_list {
+ struct list_head entry;
+ struct cifs_tcon *tcon;
+};
+
/* The xid serves as a useful identifier for each incoming vfs request,
in a similar way to the mid which is useful to track each sent smb,
and CurrentXid can also provide a running counter (although it
@@ -275,13 +280,15 @@ dump_smb(void *buf, int smb_buf_length)
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
+
+ if (sbflags & CIFS_MOUNT_SERVER_INUM) {
struct cifs_tcon *tcon = NULL;
if (cifs_sb->master_tlink)
tcon = cifs_sb_master_tcon(cifs_sb);
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
+ atomic_andnot(CIFS_MOUNT_SERVER_INUM, &cifs_sb->mnt_cifs_flags);
cifs_sb->mnt_cifs_serverino_autodisabled = true;
cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
tcon ? tcon->tree_name : "new server");
@@ -382,11 +389,13 @@ void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
+
+ if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID) {
if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
return true;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
+ if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID) {
if (in_group_p(cifs_sb->ctx->backupgid))
return true;
}
@@ -550,6 +559,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
}
}
+void cifs_close_all_deferred_files_sb(struct cifs_sb_info *cifs_sb)
+{
+ struct rb_root *root = &cifs_sb->tlink_tree;
+ struct rb_node *node;
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink;
+ struct tcon_list *tmp_list, *q;
+ LIST_HEAD(tcon_head);
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+ if (IS_ERR(tcon))
+ continue;
+ tmp_list = kmalloc_obj(struct tcon_list, GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->tcon = tcon;
+ /* Take a reference on tcon to prevent it from being freed */
+ spin_lock(&tcon->tc_lock);
+ ++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_close_defer_files);
+ spin_unlock(&tcon->tc_lock);
+ list_add_tail(&tmp_list->entry, &tcon_head);
+ }
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ list_for_each_entry_safe(tmp_list, q, &tcon_head, entry) {
+ cifs_close_all_deferred_files(tmp_list->tcon);
+ list_del(&tmp_list->entry);
+ cifs_put_tcon(tmp_list->tcon, netfs_trace_tcon_ref_put_close_defer_files);
+ kfree(tmp_list);
+ }
+}
+
void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
struct dentry *dentry)
{
@@ -955,7 +1001,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
}
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags);
return 0;
}
@@ -984,7 +1030,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
* look up or tcon is not DFS.
*/
if (strlen(full_path) < 2 || !cifs_sb ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+ (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_DFS) ||
!is_tcon_dfs(tcon))
return 0;
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 8615a8747b7f..be22bbc4a65a 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -121,7 +121,7 @@ retry:
* want to clobber the existing one with the one that
* the readdir code created.
*/
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_SERVER_INUM))
fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
/*
@@ -177,6 +177,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
struct cifs_open_info_data data = {
.reparse = { .tag = fattr->cf_cifstag, },
};
+ unsigned int sbflags;
fattr->cf_uid = cifs_sb->ctx->linux_uid;
fattr->cf_gid = cifs_sb->ctx->linux_gid;
@@ -215,12 +216,12 @@ out_reparse:
* may look wrong since the inodes may not have timed out by the time
* "ls" does a stat() call on them.
*/
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID))
+ sbflags = cifs_sb_flags(cifs_sb);
+ if (sbflags & (CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_MODE_FROM_SID))
fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL &&
- fattr->cf_cifsattrs & ATTR_SYSTEM) {
+ if ((sbflags & CIFS_MOUNT_UNX_EMUL) &&
+ (fattr->cf_cifsattrs & ATTR_SYSTEM)) {
if (fattr->cf_eof == 0) {
fattr->cf_mode &= ~S_IFMT;
fattr->cf_mode |= S_IFIFO;
@@ -345,13 +346,14 @@ static int
_initiate_cifs_search(const unsigned int xid, struct file *file,
const char *full_path)
{
- __u16 search_flags;
- int rc = 0;
- struct cifsFileInfo *cifsFile;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file);
struct tcon_link *tlink = NULL;
- struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
+ struct cifsFileInfo *cifsFile;
+ struct cifs_tcon *tcon;
+ unsigned int sbflags;
+ __u16 search_flags;
+ int rc = 0;
if (file->private_data == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
@@ -385,6 +387,7 @@ _initiate_cifs_search(const unsigned int xid, struct file *file,
cifs_dbg(FYI, "Full path: %s start at: %lld\n", full_path, file->f_pos);
ffirst_retry:
+ sbflags = cifs_sb_flags(cifs_sb);
/* test for Unix extensions */
/* but now check for them on the share/mount not on the SMB session */
/* if (cap_unix(tcon->ses) { */
@@ -395,7 +398,7 @@ ffirst_retry:
else if ((tcon->ses->capabilities &
tcon->ses->server->vals->cap_nt_find) == 0) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ } else if (sbflags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
} else /* not srvinos - BB fixme add check for backlevel? */ {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
@@ -411,8 +414,7 @@ ffirst_retry:
if (rc == 0) {
cifsFile->invalidHandle = false;
- } else if ((rc == -EOPNOTSUPP) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ } else if (rc == -EOPNOTSUPP && (sbflags & CIFS_MOUNT_SERVER_INUM)) {
cifs_autodisable_serverino(cifs_sb);
goto ffirst_retry;
}
@@ -690,7 +692,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
loff_t first_entry_in_buffer;
loff_t index_to_find = pos;
struct cifsFileInfo *cfile = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file);
struct TCP_Server_Info *server = tcon->ses->server;
/* check if index in the buffer */
@@ -955,6 +957,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_dirent de = { NULL, };
struct cifs_fattr fattr;
+ unsigned int sbflags;
struct qstr name;
int rc = 0;
@@ -1019,15 +1022,15 @@ static int cifs_filldir(char *find_entry, struct file *file,
break;
}
- if (de.ino && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ sbflags = cifs_sb_flags(cifs_sb);
+ if (de.ino && (sbflags & CIFS_MOUNT_SERVER_INUM)) {
fattr.cf_uniqueid = de.ino;
} else {
fattr.cf_uniqueid = iunique(sb, ROOT_I);
cifs_autodisable_serverino(cifs_sb);
}
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
- couldbe_mf_symlink(&fattr))
+ if ((sbflags & CIFS_MOUNT_MF_SYMLINKS) && couldbe_mf_symlink(&fattr))
/*
* trying to get the type and mode can be slow,
* so just call those regular files for now, and mark
@@ -1058,7 +1061,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
const char *full_path;
void *page = alloc_dentry_path();
struct cached_fid *cfid = NULL;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file);
xid = get_xid();
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index ce9b923498b5..cd1e1eaee67a 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -55,17 +55,18 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
const char *full_path, const char *symname)
{
struct reparse_symlink_data_buffer *buf = NULL;
- struct cifs_open_info_data data = {};
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
const char *symroot = cifs_sb->ctx->symlinkroot;
+ struct cifs_open_info_data data = {};
+ char sep = CIFS_DIR_SEP(cifs_sb);
+ char *symlink_target = NULL;
+ u16 len, plen, poff, slen;
+ unsigned int sbflags;
+ __le16 *path = NULL;
struct inode *new;
+ char *sym = NULL;
struct kvec iov;
- __le16 *path = NULL;
bool directory;
- char *symlink_target = NULL;
- char *sym = NULL;
- char sep = CIFS_DIR_SEP(cifs_sb);
- u16 len, plen, poff, slen;
int rc = 0;
if (strlen(symname) > REPARSE_SYM_PATH_MAX)
@@ -83,8 +84,8 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
.symlink_target = symlink_target,
};
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) &&
- symroot && symname[0] == '/') {
+ sbflags = cifs_sb_flags(cifs_sb);
+ if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symroot && symname[0] == '/') {
/*
* This is a request to create an absolute symlink on the server
* which does not support POSIX paths, and expects symlink in
@@ -164,7 +165,7 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
* mask these characters in NT object prefix by '_' and then change
* them back.
*/
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/')
+ if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/')
sym[0] = sym[1] = sym[2] = sym[5] = '_';
path = cifs_convert_path_to_utf16(sym, cifs_sb);
@@ -173,7 +174,7 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
goto out;
}
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
sym[0] = '\\';
sym[1] = sym[2] = '?';
sym[5] = ':';
@@ -197,7 +198,7 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
slen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX);
poff = 0;
plen = slen;
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ if (!(sbflags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
/*
* For absolute NT symlinks skip leading "\\??\\" in PrintName as
* PrintName is user visible location in DOS/Win32 format (not in NT format).
@@ -824,7 +825,7 @@ int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
goto out;
}
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) &&
+ if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS) &&
symroot && !relative) {
/*
* This is an absolute symlink from the server which does not
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 570b0d25aeba..0164dc47bdfd 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -33,7 +33,7 @@ static inline kuid_t wsl_make_kuid(struct cifs_sb_info *cifs_sb,
{
u32 uid = le32_to_cpu(*(__le32 *)ptr);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_OVERR_UID)
return cifs_sb->ctx->linux_uid;
return make_kuid(current_user_ns(), uid);
}
@@ -43,7 +43,7 @@ static inline kgid_t wsl_make_kgid(struct cifs_sb_info *cifs_sb,
{
u32 gid = le32_to_cpu(*(__le32 *)ptr);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_OVERR_GID)
return cifs_sb->ctx->linux_gid;
return make_kgid(current_user_ns(), gid);
}
diff --git a/fs/smb/client/smb1encrypt.c b/fs/smb/client/smb1encrypt.c
index 0dbbce2431ff..bf10fdeeedca 100644
--- a/fs/smb/client/smb1encrypt.c
+++ b/fs/smb/client/smb1encrypt.c
@@ -11,6 +11,7 @@
#include <linux/fips.h>
#include <crypto/md5.h>
+#include <crypto/utils.h>
#include "cifsproto.h"
#include "smb1proto.h"
#include "cifs_debug.h"
@@ -131,7 +132,7 @@ int cifs_verify_signature(struct smb_rqst *rqst,
/* cifs_dump_mem("what we think it should be: ",
what_we_think_sig_should_be, 16); */
- if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
+ if (crypto_memneq(server_response_sig, what_we_think_sig_should_be, 8))
return -EACCES;
else
return 0;
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index aed49aaef8c4..9694117050a6 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -49,6 +49,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
__u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ unsigned int sbflags;
cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
/*
@@ -75,14 +76,16 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
cifs_dbg(VFS, "per-share encryption not supported yet\n");
+ if (cifs_sb)
+ sbflags = cifs_sb_flags(cifs_sb);
+
cap &= CIFS_UNIX_CAP_MASK;
if (ctx && ctx->no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
cifs_dbg(FYI, "negotiated posix acl support\n");
if (cifs_sb)
- cifs_sb->mnt_cifs_flags |=
- CIFS_MOUNT_POSIXACL;
+ sbflags |= CIFS_MOUNT_POSIXACL;
}
if (ctx && ctx->posix_paths == 0)
@@ -90,10 +93,12 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
cifs_dbg(FYI, "negotiate posix pathnames\n");
if (cifs_sb)
- cifs_sb->mnt_cifs_flags |=
- CIFS_MOUNT_POSIX_PATHS;
+ sbflags |= CIFS_MOUNT_POSIX_PATHS;
}
+ if (cifs_sb)
+ atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
+
cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP)
@@ -955,7 +960,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_tcon *tcon;
/* if the file is already open for write, just use that fileid */
- open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cinode, FIND_FSUID_ONLY);
if (open_file) {
fid.netfid = open_file->fid.netfid;
@@ -1147,7 +1152,7 @@ static int cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
__u64 volatile_fid, __u16 net_fid,
struct cifsInodeInfo *cinode, unsigned int oplock)
{
- unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags;
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode));
__u8 op;
op = !!((oplock & CIFS_CACHE_READ_FLG) || (sbflags & CIFS_MOUNT_RO_CACHE));
@@ -1282,7 +1287,8 @@ cifs_make_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode);
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
struct inode *newinode = NULL;
int rc;
@@ -1298,7 +1304,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
.mtime = NO_CHANGE_64,
.device = dev,
};
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ if (sbflags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
args.gid = current_fsgid();
} else {
@@ -1317,7 +1323,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
if (rc == 0)
d_instantiate(dentry, newinode);
return rc;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ } else if (sbflags & CIFS_MOUNT_UNX_EMUL) {
/*
* Check if mounted with mount parm 'sfu' mount parm.
* SFU emulation should work with all servers
diff --git a/fs/smb/client/smb1transport.c b/fs/smb/client/smb1transport.c
index 38d6d5538b96..53abb29fe71b 100644
--- a/fs/smb/client/smb1transport.c
+++ b/fs/smb/client/smb1transport.c
@@ -460,7 +460,7 @@ check_smb_hdr(struct smb_hdr *smb)
return 0;
/*
- * Windows NT server returns error resposne (e.g. STATUS_DELETE_PENDING
+ * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING
* or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
* for some TRANS2 requests without the RESPONSE flag set in header.
*/
diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
index 1ab41de2b634..ed651c946251 100644
--- a/fs/smb/client/smb2file.c
+++ b/fs/smb/client/smb2file.c
@@ -72,7 +72,7 @@ int smb2_fix_symlink_target_type(char **target, bool directory, struct cifs_sb_i
* POSIX server does not distinguish between symlinks to file and
* symlink directory. So nothing is needed to fix on the client side.
*/
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_POSIX_PATHS)
return 0;
if (!*target)
diff --git a/fs/smb/client/smb2glob.h b/fs/smb/client/smb2glob.h
index e56e4d402f13..19da74b1edab 100644
--- a/fs/smb/client/smb2glob.h
+++ b/fs/smb/client/smb2glob.h
@@ -46,4 +46,16 @@ enum smb2_compound_ops {
#define END_OF_CHAIN 4
#define RELATED_REQUEST 8
+/*
+ *****************************************************************
+ * Struct definitions go here
+ *****************************************************************
+ */
+
+struct status_to_posix_error {
+ __u32 smb2_status;
+ int posix_error;
+ char *status_string;
+};
+
#endif /* _SMB2_GLOB_H */
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 195a38fd61e8..364bdcff9c9d 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -325,7 +325,7 @@ replay_again:
cfile->fid.volatile_fid,
SMB_FIND_FILE_POSIX_INFO,
SMB2_O_INFO_FILE, 0,
- sizeof(struct smb311_posix_qinfo *) +
+ sizeof(struct smb311_posix_qinfo) +
(PATH_MAX * 2) +
(sizeof(struct smb_sid) * 2), 0, NULL);
} else {
@@ -335,7 +335,7 @@ replay_again:
COMPOUND_FID,
SMB_FIND_FILE_POSIX_INFO,
SMB2_O_INFO_FILE, 0,
- sizeof(struct smb311_posix_qinfo *) +
+ sizeof(struct smb311_posix_qinfo) +
(PATH_MAX * 2) +
(sizeof(struct smb_sid) * 2), 0, NULL);
}
@@ -1156,7 +1156,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, name, FIND_ANY, &cfile);
oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES,
FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE);
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
@@ -1216,6 +1216,7 @@ again:
memset(resp_buftype, 0, sizeof(resp_buftype));
memset(rsp_iov, 0, sizeof(rsp_iov));
+ memset(open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = ARRAY_SIZE(open_iov);
@@ -1240,14 +1241,15 @@ again:
creq = rqst[0].rq_iov[0].iov_base;
creq->ShareAccess = FILE_SHARE_DELETE_LE;
+ memset(&close_iov, 0, sizeof(close_iov));
rqst[1].rq_iov = &close_iov;
rqst[1].rq_nvec = 1;
rc = SMB2_close_init(tcon, server, &rqst[1],
COMPOUND_FID, COMPOUND_FID, false);
- smb2_set_related(&rqst[1]);
if (rc)
goto err_free;
+ smb2_set_related(&rqst[1]);
if (retries) {
/* Back-off before retry */
@@ -1334,14 +1336,13 @@ int smb2_rename_path(const unsigned int xid,
__u32 co = file_create_options(source_dentry);
drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
- cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile);
int rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
- cifs_get_writable_path(tcon, from_name,
- FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile);
rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, NULL);
}
@@ -1375,7 +1376,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
in_iov.iov_base = &eof;
in_iov.iov_len = sizeof(eof);
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_DATA,
FILE_OPEN, 0, ACL_NO_MODE);
@@ -1385,7 +1386,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
@@ -1415,7 +1416,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
(buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
if (buf->Attributes == 0)
goto out; /* would be a no op, no sense sending this */
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
}
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
@@ -1474,7 +1475,7 @@ struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
if (tcon->posix_extensions) {
cmds[1] = SMB2_OP_POSIX_QUERY_INFO;
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
if (!rc) {
@@ -1483,7 +1484,7 @@ struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
}
} else {
cmds[1] = SMB2_OP_QUERY_INFO;
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
if (!rc) {
@@ -1634,13 +1635,12 @@ int smb2_rename_pending_delete(const char *full_path,
iov[1].iov_base = utf16_path;
iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path);
- cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
cmds, num_cmds, cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease\n");
- cifs_get_writable_path(tcon, full_path,
- FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
cmds, num_cmds, cfile, NULL, NULL, NULL);
}
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index cd036365201f..2b8782c4f684 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -8,7 +8,6 @@
*
*/
#include <linux/errno.h>
-#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
@@ -16,12 +15,6 @@
#include "../common/smb2status.h"
#include "trace.h"
-struct status_to_posix_error {
- __u32 smb2_status;
- int posix_error;
- char *status_string;
-};
-
static const struct status_to_posix_error smb2_error_map_table[] = {
/*
* Automatically generated by the `gen_smb2_mapping` script,
@@ -115,10 +108,22 @@ int __init smb2_init_maperror(void)
return 0;
}
-#define SMB_CLIENT_KUNIT_AVAILABLE \
- ((IS_MODULE(CONFIG_CIFS) && IS_ENABLED(CONFIG_KUNIT)) || \
- (IS_BUILTIN(CONFIG_CIFS) && IS_BUILTIN(CONFIG_KUNIT)))
+#if IS_ENABLED(CONFIG_SMB_KUNIT_TESTS)
+#define EXPORT_SYMBOL_FOR_SMB_TEST(sym) \
+ EXPORT_SYMBOL_FOR_MODULES(sym, "smb2maperror_test")
+
+/* Forward declaration to silence the missing-prototype build warning. */
+const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status);
+
+const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status)
+{
+ return smb2_get_err_map(smb2_status);
+}
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_get_err_map_test);
+
+const struct status_to_posix_error *smb2_error_map_table_test = smb2_error_map_table;
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_table_test);
-#if SMB_CLIENT_KUNIT_AVAILABLE && IS_ENABLED(CONFIG_SMB_KUNIT_TESTS)
-#include "smb2maperror_test.c"
-#endif /* CONFIG_SMB_KUNIT_TESTS */
+unsigned int smb2_error_map_num = ARRAY_SIZE(smb2_error_map_table);
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_num);
+#endif
diff --git a/fs/smb/client/smb2maperror_test.c b/fs/smb/client/smb2maperror_test.c
index 38ea6b846a99..8c47dea7a2c1 100644
--- a/fs/smb/client/smb2maperror_test.c
+++ b/fs/smb/client/smb2maperror_test.c
@@ -9,13 +9,18 @@
*/
#include <kunit/test.h>
+#include "smb2glob.h"
+
+const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status);
+extern const struct status_to_posix_error *smb2_error_map_table_test;
+extern unsigned int smb2_error_map_num;
static void
test_cmp_map(struct kunit *test, const struct status_to_posix_error *expect)
{
const struct status_to_posix_error *result;
- result = smb2_get_err_map(expect->smb2_status);
+ result = smb2_get_err_map_test(expect->smb2_status);
KUNIT_EXPECT_PTR_NE(test, NULL, result);
KUNIT_EXPECT_EQ(test, expect->smb2_status, result->smb2_status);
KUNIT_EXPECT_EQ(test, expect->posix_error, result->posix_error);
@@ -26,8 +31,8 @@ static void maperror_test_check_search(struct kunit *test)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(smb2_error_map_table); i++)
- test_cmp_map(test, &smb2_error_map_table[i]);
+ for (i = 0; i < smb2_error_map_num; i++)
+ test_cmp_map(test, &smb2_error_map_table_test[i]);
}
static struct kunit_case maperror_test_cases[] = {
@@ -43,3 +48,4 @@ static struct kunit_suite maperror_suite = {
kunit_test_suite(maperror_suite);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KUnit tests of SMB2 maperror");
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index e19674d9b92b..973fce3c959c 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -455,17 +455,8 @@ calc_size_exit:
__le16 *
cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
{
- int len;
const char *start_of_path;
- __le16 *to;
- int map_type;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
- map_type = SFM_MAP_UNI_RSVD;
- else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
- map_type = SFU_MAP_UNI_RSVD;
- else
- map_type = NO_MAP_UNI_RSVD;
+ int len;
/* Windows doesn't allow paths beginning with \ */
if (from[0] == '\\')
@@ -479,14 +470,13 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
} else
start_of_path = from;
- to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
- cifs_sb->local_nls, map_type);
- return to;
+ return cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
+ cifs_sb->local_nls, cifs_remap(cifs_sb));
}
__le32 smb2_get_lease_state(struct cifsInodeInfo *cinode, unsigned int oplock)
{
- unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags;
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode));
__le32 lease = 0;
if ((oplock & CIFS_CACHE_WRITE_FLG) || (sbflags & CIFS_MOUNT_RW_CACHE))
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index fea9a35caa57..509fcea28a42 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -628,6 +628,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
struct smb_sockaddr_in6 *p6;
struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
struct cifs_server_iface tmp_iface;
+ __be16 port;
ssize_t bytes_left;
size_t next = 0;
int nb_iface = 0;
@@ -662,6 +663,15 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
goto out;
}
+ spin_lock(&ses->server->srv_lock);
+ if (ses->server->dstaddr.ss_family == AF_INET)
+ port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port;
+ else if (ses->server->dstaddr.ss_family == AF_INET6)
+ port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port;
+ else
+ port = cpu_to_be16(CIFS_PORT);
+ spin_unlock(&ses->server->srv_lock);
+
while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
/* default to 1Gbps when link speed is unset */
@@ -682,7 +692,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
- addr4->sin_port = cpu_to_be16(CIFS_PORT);
+ addr4->sin_port = port;
cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
&addr4->sin_addr);
@@ -696,7 +706,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
addr6->sin6_flowinfo = 0;
addr6->sin6_scope_id = 0;
- addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+ addr6->sin6_port = port;
cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
&addr6->sin6_addr);
@@ -986,7 +996,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
rc = -EREMOTE;
}
if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+ (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_DFS))
rc = -EOPNOTSUPP;
goto out;
}
@@ -1487,6 +1497,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
{
struct smb2_file_network_open_info file_inf;
struct inode *inode;
+ u64 asize;
int rc;
rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
@@ -1510,14 +1521,9 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
inode_set_atime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.LastAccessTime));
- /*
- * i_blocks is not related to (i_size / i_blksize),
- * but instead 512 byte (2**9) size is required for
- * calculating num blocks.
- */
- if (le64_to_cpu(file_inf.AllocationSize) > 4096)
- inode->i_blocks =
- (512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
+ asize = le64_to_cpu(file_inf.AllocationSize);
+ if (asize > 4096)
+ inode->i_blocks = CIFS_INO_BLOCKS(asize);
/* End of file and Attributes should not have to be updated on close */
spin_unlock(&inode->i_lock);
@@ -2194,14 +2200,6 @@ smb2_duplicate_extents(const unsigned int xid,
rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
if (rc)
goto duplicate_extents_out;
-
- /*
- * Although also could set plausible allocation size (i_blocks)
- * here in addition to setting the file size, in reflink
- * it is likely that the target file is sparse. Its allocation
- * size will be queried on next revalidate, but it is important
- * to make sure that file's cached size is updated immediately
- */
netfs_resize_file(netfs_inode(inode), dest_off + len, true);
cifs_setsize(inode, dest_off + len);
}
@@ -2691,7 +2689,7 @@ static int smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
__u64 volatile_fid, __u16 net_fid,
struct cifsInodeInfo *cinode, unsigned int oplock)
{
- unsigned int sbflags = CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags;
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(cinode));
__u8 op;
if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
@@ -3352,7 +3350,7 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
struct cifsFileInfo *open_file = NULL;
if (inode && !(info & SACL_SECINFO))
- open_file = find_readable_file(CIFS_I(inode), true);
+ open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY);
if (!open_file || (info & SACL_SECINFO))
return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
@@ -3898,7 +3896,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
* some servers (Windows2016) will not reflect recent writes in
* QUERY_ALLOCATED_RANGES until SMB2_flush is called.
*/
- wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
+ wrcfile = find_writable_file(cifsi, FIND_ANY);
if (wrcfile) {
filemap_write_and_wait(inode->i_mapping);
smb2_flush_file(xid, tcon, &wrcfile->fid);
@@ -5332,7 +5330,7 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev)
{
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
int rc = -EOPNOTSUPP;
/*
@@ -5341,7 +5339,7 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
* supports block and char device, socket & fifo,
* and was used by default in earlier versions of Windows
*/
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ if (sbflags & CIFS_MOUNT_UNX_EMUL) {
rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
full_path, mode, dev);
} else if (CIFS_REPARSE_SUPPORT(tcon)) {
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index ef655acf673d..5188218c25be 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -1714,19 +1714,17 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock);
- /* keep session key if binding */
- if (!is_binding) {
- kfree_sensitive(ses->auth_key.response);
- ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
- GFP_KERNEL);
- if (!ses->auth_key.response) {
- cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
- msg->sesskey_len);
- rc = -ENOMEM;
- goto out_put_spnego_key;
- }
- ses->auth_key.len = msg->sesskey_len;
+ kfree_sensitive(ses->auth_key.response);
+ ses->auth_key.response = kmemdup(msg->data,
+ msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS, "%s: can't allocate (%u bytes) memory\n",
+ __func__, msg->sesskey_len);
+ rc = -ENOMEM;
+ goto out_put_spnego_key;
}
+ ses->auth_key.len = msg->sesskey_len;
sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
sess_data->iov[1].iov_len = msg->secblob_len;
@@ -3182,22 +3180,19 @@ SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
}
if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
+ unsigned int sbflags = cifs_sb_flags(oparms->cifs_sb);
bool set_mode;
bool set_owner;
- if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
- (oparms->mode != ACL_NO_MODE))
+ if ((sbflags & CIFS_MOUNT_MODE_FROM_SID) &&
+ oparms->mode != ACL_NO_MODE) {
set_mode = true;
- else {
+ } else {
set_mode = false;
oparms->mode = ACL_NO_MODE;
}
- if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
- set_owner = true;
- else
- set_owner = false;
-
+ set_owner = sbflags & CIFS_MOUNT_UID_FROM_ACL;
if (set_owner | set_mode) {
cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
@@ -3994,24 +3989,6 @@ int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
NULL);
}
-#if 0
-/* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
-int
-SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- struct smb311_posix_qinfo *data, u32 *plen)
-{
- size_t output_len = sizeof(struct smb311_posix_qinfo *) +
- (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
- *plen = 0;
-
- return query_info(xid, tcon, persistent_fid, volatile_fid,
- SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
- output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
- /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
-}
-#endif
-
int
SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
@@ -5330,7 +5307,10 @@ replay_again:
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_vec + 1;
+ /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */
+ rqst.rq_nvec = 1;
+ iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec,
+ io_parms->length);
if (retries) {
/* Back-off before retry */
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 78bb99f29d38..30d70097fe2f 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -224,7 +224,7 @@ struct smb2_file_reparse_point_info {
__le32 Tag;
} __packed;
-/* See MS-FSCC 2.4.21 */
+/* See MS-FSCC 2.4.26 */
struct smb2_file_id_information {
__le64 VolumeSerialNumber;
__u64 PersistentFileId; /* opaque endianness */
@@ -251,7 +251,10 @@ struct smb2_file_id_extd_directory_info {
extern char smb2_padding[7];
-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+/*
+ * See POSIX-SMB2 2.2.14.2.16
+ * Link: https://gitlab.com/samba-team/smb3-posix-spec/-/blob/master/smb3_posix_extensions.md
+ */
struct create_posix_rsp {
u32 nlink;
u32 reparse_tag;
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 881e42cf66ce..230bb1e9f4e1 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -167,9 +167,6 @@ int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
struct cifs_tcon *tcon, struct TCP_Server_Info *server,
u64 persistent_fid, u64 volatile_fid);
void SMB2_flush_free(struct smb_rqst *rqst);
-int SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- struct smb311_posix_qinfo *data, u32 *plen);
int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct smb2_file_all_info *data);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 8b9000a83181..81be2b226e26 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <crypto/aead.h>
#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
@@ -617,7 +618,8 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (rc)
return rc;
- if (memcmp(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(server_response_sig, shdr->Signature,
+ SMB2_SIGNATURE_SIZE)) {
cifs_dbg(VFS, "sign fail cmd 0x%x message id 0x%llx\n",
shdr->Command, shdr->MessageId);
return -EACCES;
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 9228f95cae2b..acfbb63086ea 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -176,6 +176,7 @@
EM(netfs_trace_tcon_ref_get_cached_laundromat, "GET Ch-Lau") \
EM(netfs_trace_tcon_ref_get_cached_lease_break, "GET Ch-Lea") \
EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_close_defer_files, "GET Cl-Def") \
EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
@@ -187,6 +188,7 @@
EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
+ EM(netfs_trace_tcon_ref_put_close_defer_files, "PUT Cl-Def") \
EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
EM(netfs_trace_tcon_ref_put_dfs_refer, "PUT DfsRfr") \
EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 75697f6d2566..05f8099047e1 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -807,16 +807,21 @@ cifs_cancelled_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/*
- * Return a channel (master if none) of @ses that can be used to send
- * regular requests.
+ * cifs_pick_channel - pick an eligible channel for network operations
*
- * If we are currently binding a new channel (negprot/sess.setup),
- * return the new incomplete channel.
+ * @ses: session reference
+ *
+ * Select an eligible channel (not terminating and not marked as needing
+ * reconnect), preferring the least loaded one. If no eligible channel is
+ * found, fall back to the primary channel (index 0).
+ *
+ * Return: TCP_Server_Info pointer for the chosen channel, or NULL if @ses is
+ * NULL.
*/
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
uint index = 0;
- unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
+ unsigned int min_in_flight = UINT_MAX;
struct TCP_Server_Info *server = NULL;
int i, start, cur;
@@ -846,14 +851,8 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
min_in_flight = server->in_flight;
index = cur;
}
- if (server->in_flight > max_in_flight)
- max_in_flight = server->in_flight;
}
- /* if all channels are equally loaded, fall back to round-robin */
- if (min_in_flight == max_in_flight)
- index = (uint)start % ses->chan_count;
-
server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index e1a7d9a10a53..23227f2f9428 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -149,7 +149,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
break;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR)
goto out;
if (pTcon->ses->server->ops->set_EA) {
@@ -309,7 +309,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
break;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR)
goto out;
if (pTcon->ses->server->ops->query_all_EAs)
@@ -398,7 +398,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
if (unlikely(cifs_forced_shutdown(cifs_sb)))
return smb_EIO(smb_eio_trace_forced_shutdown);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_NO_XATTR)
return -EOPNOTSUPP;
tlink = cifs_sb_tlink(cifs_sb);
diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
index 2775162c535c..12594879cb64 100644
--- a/fs/smb/server/Kconfig
+++ b/fs/smb/server/Kconfig
@@ -13,6 +13,7 @@ config SMB_SERVER
select CRYPTO_LIB_MD5
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
+ select CRYPTO_LIB_UTILS
select CRYPTO_CMAC
select CRYPTO_AEAD2
select CRYPTO_CCM
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index 580c4d303dc3..af5f40304331 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -15,6 +15,7 @@
#include <crypto/aead.h>
#include <crypto/md5.h>
#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
@@ -165,7 +166,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE,
sess->sess_key);
- if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+ if (crypto_memneq(ntlmv2->ntlmv2_hash, ntlmv2_rsp,
+ CIFS_HMAC_MD5_HASH_SIZE))
return -EINVAL;
return 0;
}
@@ -587,12 +589,8 @@ static int generate_smb3signingkey(struct ksmbd_session *sess,
if (!(conn->dialect >= SMB30_PROT_ID && signing->binding))
memcpy(chann->smb3signingkey, key, SMB3_SIGN_KEY_SIZE);
- ksmbd_debug(AUTH, "dumping generated AES signing keys\n");
+ ksmbd_debug(AUTH, "generated SMB3 signing key\n");
ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
- ksmbd_debug(AUTH, "Session Key %*ph\n",
- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
- ksmbd_debug(AUTH, "Signing Key %*ph\n",
- SMB3_SIGN_KEY_SIZE, key);
return 0;
}
@@ -650,23 +648,9 @@ static void generate_smb3encryptionkey(struct ksmbd_conn *conn,
ptwin->decryption.context,
sess->smb3decryptionkey, SMB3_ENC_DEC_KEY_SIZE);
- ksmbd_debug(AUTH, "dumping generated AES encryption keys\n");
+ ksmbd_debug(AUTH, "generated SMB3 encryption/decryption keys\n");
ksmbd_debug(AUTH, "Cipher type %d\n", conn->cipher_type);
ksmbd_debug(AUTH, "Session Id %llu\n", sess->id);
- ksmbd_debug(AUTH, "Session Key %*ph\n",
- SMB2_NTLMV2_SESSKEY_SIZE, sess->sess_key);
- if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
- conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM) {
- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3encryptionkey);
- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
- SMB3_GCM256_CRYPTKEY_SIZE, sess->smb3decryptionkey);
- } else {
- ksmbd_debug(AUTH, "ServerIn Key %*ph\n",
- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3encryptionkey);
- ksmbd_debug(AUTH, "ServerOut Key %*ph\n",
- SMB3_GCM128_CRYPTKEY_SIZE, sess->smb3decryptionkey);
- }
}
void ksmbd_gen_smb30_encryptionkey(struct ksmbd_conn *conn,
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index a72d7e42a6c2..58e5b8592da4 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -102,8 +102,10 @@ out_error:
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
{
- if (atomic_dec_and_test(&tcon->refcount))
+ if (atomic_dec_and_test(&tcon->refcount)) {
+ ksmbd_share_config_put(tcon->share_conf);
kfree(tcon);
+ }
}
static int __ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
@@ -113,10 +115,11 @@ static int __ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
ksmbd_release_tree_conn_id(sess, tree_conn->id);
- ksmbd_share_config_put(tree_conn->share_conf);
ksmbd_counter_dec(KSMBD_COUNTER_TREE_CONNS);
- if (atomic_dec_and_test(&tree_conn->refcount))
+ if (atomic_dec_and_test(&tree_conn->refcount)) {
+ ksmbd_share_config_put(tree_conn->share_conf);
kfree(tree_conn);
+ }
return ret;
}
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 09d9878db9cb..393a4ae47cc1 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -120,7 +120,7 @@ static void free_lease(struct oplock_info *opinfo)
kfree(lease);
}
-static void free_opinfo(struct oplock_info *opinfo)
+static void __free_opinfo(struct oplock_info *opinfo)
{
if (opinfo->is_lease)
free_lease(opinfo);
@@ -129,6 +129,18 @@ static void free_opinfo(struct oplock_info *opinfo)
kfree(opinfo);
}
+static void free_opinfo_rcu(struct rcu_head *rcu)
+{
+ struct oplock_info *opinfo = container_of(rcu, struct oplock_info, rcu);
+
+ __free_opinfo(opinfo);
+}
+
+static void free_opinfo(struct oplock_info *opinfo)
+{
+ call_rcu(&opinfo->rcu, free_opinfo_rcu);
+}
+
struct oplock_info *opinfo_get(struct ksmbd_file *fp)
{
struct oplock_info *opinfo;
@@ -176,9 +188,9 @@ void opinfo_put(struct oplock_info *opinfo)
free_opinfo(opinfo);
}
-static void opinfo_add(struct oplock_info *opinfo)
+static void opinfo_add(struct oplock_info *opinfo, struct ksmbd_file *fp)
{
- struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
+ struct ksmbd_inode *ci = fp->f_ci;
down_write(&ci->m_lock);
list_add(&opinfo->op_entry, &ci->m_op_list);
@@ -1123,10 +1135,12 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
rcu_read_lock();
opinfo = rcu_dereference(fp->f_opinfo);
- rcu_read_unlock();
- if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2)
+ if (!opinfo || !opinfo->is_lease || opinfo->o_lease->version != 2) {
+ rcu_read_unlock();
return;
+ }
+ rcu_read_unlock();
p_ci = ksmbd_inode_lookup_lock(fp->filp->f_path.dentry->d_parent);
if (!p_ci)
@@ -1277,20 +1291,21 @@ set_lev:
set_oplock_level(opinfo, req_op_level, lctx);
out:
- rcu_assign_pointer(fp->f_opinfo, opinfo);
- opinfo->o_fp = fp;
-
opinfo_count_inc(fp);
- opinfo_add(opinfo);
+ opinfo_add(opinfo, fp);
+
if (opinfo->is_lease) {
err = add_lease_global_list(opinfo);
if (err)
goto err_out;
}
+ rcu_assign_pointer(fp->f_opinfo, opinfo);
+ opinfo->o_fp = fp;
+
return 0;
err_out:
- free_opinfo(opinfo);
+ __free_opinfo(opinfo);
return err;
}
diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h
index 9a56eaadd0dd..921e3199e4df 100644
--- a/fs/smb/server/oplock.h
+++ b/fs/smb/server/oplock.h
@@ -69,8 +69,9 @@ struct oplock_info {
struct lease *o_lease;
struct list_head op_entry;
struct list_head lease_entry;
- wait_queue_head_t oplock_q; /* Other server threads */
- wait_queue_head_t oplock_brk; /* oplock breaking wait */
+ wait_queue_head_t oplock_q; /* Other server threads */
+ wait_queue_head_t oplock_brk; /* oplock breaking wait */
+ struct rcu_head rcu;
};
struct lease_break_info {
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 95901a78951c..9c44e71e3c3b 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
+#include <crypto/utils.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/syscalls.h>
@@ -125,6 +126,8 @@ int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
pr_err("The first operation in the compound does not have tcon\n");
return -EINVAL;
}
+ if (work->tcon->t_state != TREE_CONNECTED)
+ return -ENOENT;
if (tree_id != UINT_MAX && work->tcon->id != tree_id) {
pr_err("tree id(%u) is different with id(%u) in first operation\n",
tree_id, work->tcon->id);
@@ -1947,6 +1950,7 @@ out_err:
}
}
smb2_set_err_rsp(work);
+ conn->binding = false;
} else {
unsigned int iov_len;
@@ -2827,7 +2831,11 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
- dh_info->fp->conn = conn;
+ if (dh_info->fp->conn) {
+ ksmbd_put_durable_fd(dh_info->fp);
+ err = -EBADF;
+ goto out;
+ }
dh_info->reconnected = true;
goto out;
}
@@ -3011,13 +3019,14 @@ int smb2_open(struct ksmbd_work *work)
goto err_out2;
}
+ fp = dh_info.fp;
+
if (ksmbd_override_fsids(work)) {
rc = -ENOMEM;
ksmbd_put_durable_fd(dh_info.fp);
goto err_out2;
}
- fp = dh_info.fp;
file_info = FILE_OPENED;
rc = ksmbd_vfs_getattr(&fp->filp->f_path, &stat);
@@ -3615,10 +3624,8 @@ int smb2_open(struct ksmbd_work *work)
reconnected_fp:
rsp->StructureSize = cpu_to_le16(89);
- rcu_read_lock();
- opinfo = rcu_dereference(fp->f_opinfo);
+ opinfo = opinfo_get(fp);
rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
- rcu_read_unlock();
rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(file_info);
rsp->CreationTime = cpu_to_le64(fp->create_time);
@@ -3659,6 +3666,7 @@ reconnected_fp:
next_ptr = &lease_ccontext->Next;
next_off = conn->vals->create_lease_size;
}
+ opinfo_put(opinfo);
if (maximal_access_ctxt) {
struct create_context *mxac_ccontext;
@@ -5451,7 +5459,6 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct smb2_query_info_req *req,
struct smb2_query_info_rsp *rsp)
{
- struct ksmbd_session *sess = work->sess;
struct ksmbd_conn *conn = work->conn;
struct ksmbd_share_config *share = work->tcon->share_conf;
int fsinfoclass = 0;
@@ -5588,10 +5595,11 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info = (struct object_id_info *)(rsp->Buffer);
- if (!user_guest(sess->user))
- memcpy(info->objid, user_passkey(sess->user), 16);
+ if (path.mnt->mnt_sb->s_uuid_len == 16)
+ memcpy(info->objid, path.mnt->mnt_sb->s_uuid.b,
+ path.mnt->mnt_sb->s_uuid_len);
else
- memset(info->objid, 0, 16);
+ memcpy(info->objid, &stfs.f_fsid, sizeof(stfs.f_fsid));
info->extended_info.magic = cpu_to_le32(EXTENDED_INFO_MAGIC);
info->extended_info.version = cpu_to_le32(1);
@@ -8880,7 +8888,7 @@ int smb2_check_sign_req(struct ksmbd_work *work)
ksmbd_sign_smb2_pdu(work->conn, work->sess->sess_key, iov, 1,
signature);
- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
pr_err("bad smb2 signature\n");
return 0;
}
@@ -8968,7 +8976,7 @@ int smb3_check_sign_req(struct ksmbd_work *work)
if (ksmbd_sign_smb3_pdu(conn, signing_key, iov, 1, signature))
return 0;
- if (memcmp(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
+ if (crypto_memneq(signature, signature_req, SMB2_SIGNATURE_SIZE)) {
pr_err("bad smb2 signature\n");
return 0;
}
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 257c6d26df26..8b6eafb70dca 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -83,7 +83,10 @@ struct create_durable_rsp {
} Data;
} __packed;
-/* equivalent of the contents of SMB3.1.1 POSIX open context response */
+/*
+ * See POSIX-SMB2 2.2.14.2.16
+ * Link: https://gitlab.com/samba-team/smb3-posix-spec/-/blob/master/smb3_posix_extensions.md
+ */
struct create_posix_rsp {
struct create_context_hdr ccontext;
__u8 Name[16];
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 7c53b78b818e..188572491d53 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -2540,9 +2540,9 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
goto put;
req = (struct smbdirect_negotiate_req *)recvmsg->packet;
- sp->max_recv_size = min_t(int, sp->max_recv_size,
+ sp->max_recv_size = min_t(u32, sp->max_recv_size,
le32_to_cpu(req->preferred_send_size));
- sp->max_send_size = min_t(int, sp->max_send_size,
+ sp->max_send_size = min_t(u32, sp->max_send_size,
le32_to_cpu(req->max_receive_size));
sp->max_fragmented_send_size =
le32_to_cpu(req->max_fragmented_size);
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index ff4ea412d900..168f2dd7e200 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -87,11 +87,7 @@ static int proc_show_files(struct seq_file *m, void *v)
rcu_read_lock();
opinfo = rcu_dereference(fp->f_opinfo);
- rcu_read_unlock();
-
- if (!opinfo) {
- seq_printf(m, " %-15s", " ");
- } else {
+ if (opinfo) {
const struct ksmbd_const_name *const_names;
int count;
unsigned int level;
@@ -105,8 +101,12 @@ static int proc_show_files(struct seq_file *m, void *v)
count = ARRAY_SIZE(ksmbd_oplock_const_names);
level = opinfo->level;
}
+ rcu_read_unlock();
ksmbd_proc_show_const_name(m, " %-15s",
const_names, count, level);
+ } else {
+ rcu_read_unlock();
+ seq_printf(m, " %-15s", " ");
}
seq_printf(m, " %#010x %#010x %s\n",
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 8e958db5f786..67abd4dff222 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -344,6 +344,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
if (unlikely(length < 0))
return -EIO;
+ if (unlikely(*offset < 0 || *offset >= SQUASHFS_METADATA_SIZE))
+ return -EIO;
+
while (length) {
entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
if (entry->error) {
diff --git a/fs/verity/Kconfig b/fs/verity/Kconfig
index 76d1c5971b82..b20882963ffb 100644
--- a/fs/verity/Kconfig
+++ b/fs/verity/Kconfig
@@ -2,6 +2,9 @@
config FS_VERITY
bool "FS Verity (read-only file-based authenticity protection)"
+ # Filesystems cache the Merkle tree at a 64K aligned offset in the
+ # pagecache. That approach assumes the page size is at most 64K.
+ depends on PAGE_SHIFT <= 16
select CRYPTO_HASH_INFO
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 9c6765cc2d44..bd8fbb40b49e 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -872,6 +872,34 @@ resv_err:
return err2;
}
+void
+xfs_growfs_compute_deltas(
+ struct xfs_mount *mp,
+ xfs_rfsblock_t nb,
+ int64_t *deltap,
+ xfs_agnumber_t *nagcountp)
+{
+ xfs_rfsblock_t nb_div, nb_mod;
+ int64_t delta;
+ xfs_agnumber_t nagcount;
+
+ nb_div = nb;
+ nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
+ if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
+ nb_div++;
+ else if (nb_mod)
+ nb = nb_div * mp->m_sb.sb_agblocks;
+
+ if (nb_div > XFS_MAX_AGNUMBER + 1) {
+ nb_div = XFS_MAX_AGNUMBER + 1;
+ nb = nb_div * mp->m_sb.sb_agblocks;
+ }
+ nagcount = nb_div;
+ delta = nb - mp->m_sb.sb_dblocks;
+ *deltap = delta;
+ *nagcountp = nagcount;
+}
+
/*
* Extent the AG indicated by the @id by the length passed in
*/
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 1f24cfa27321..3cd4790768ff 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -331,6 +331,9 @@ struct aghdr_init_data {
int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
int xfs_ag_shrink_space(struct xfs_perag *pag, struct xfs_trans **tpp,
xfs_extlen_t delta);
+void
+xfs_growfs_compute_deltas(struct xfs_mount *mp, xfs_rfsblock_t nb,
+ int64_t *deltap, xfs_agnumber_t *nagcountp);
int xfs_ag_extend_space(struct xfs_perag *pag, struct xfs_trans *tp,
xfs_extlen_t len);
int xfs_ag_get_geometry(struct xfs_perag *pag, struct xfs_ag_geometry *ageo);
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 766631f0562e..09d4c17b3e7b 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2716,12 +2716,8 @@ xfs_dabuf_map(
* larger one that needs to be free by the caller.
*/
if (nirecs > 1) {
- map = kzalloc(nirecs * sizeof(struct xfs_buf_map),
- GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
- if (!map) {
- error = -ENOMEM;
- goto out_free_irecs;
- }
+ map = kcalloc(nirecs, sizeof(struct xfs_buf_map),
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
*mapp = map;
}
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 472c261163ed..c6909716b041 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -809,7 +809,7 @@ xfs_defer_can_append(
/* Paused items cannot absorb more work */
if (dfp->dfp_flags & XFS_DEFER_PAUSED)
- return NULL;
+ return false;
/* Already full? */
if (ops->max_items && dfp->dfp_count >= ops->max_items)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index a017016e9075..3794e5412eba 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -268,6 +268,10 @@ xfs_inode_from_disk(
}
if (xfs_is_reflink_inode(ip))
xfs_ifork_init_cow(ip);
+ if (xfs_is_metadir_inode(ip)) {
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_active);
+ XFS_STATS_INC(ip->i_mount, xs_inodes_meta);
+ }
return 0;
out_destroy_data_fork:
diff --git a/fs/xfs/libxfs/xfs_metafile.c b/fs/xfs/libxfs/xfs_metafile.c
index cf239f862212..71f004e9dc64 100644
--- a/fs/xfs/libxfs/xfs_metafile.c
+++ b/fs/xfs/libxfs/xfs_metafile.c
@@ -61,6 +61,9 @@ xfs_metafile_set_iflag(
ip->i_diflags2 |= XFS_DIFLAG2_METADATA;
ip->i_metatype = metafile_type;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_active);
+ XFS_STATS_INC(ip->i_mount, xs_inodes_meta);
}
/* Clear the metadata directory inode flag. */
@@ -74,6 +77,8 @@ xfs_metafile_clear_iflag(
ip->i_diflags2 &= ~XFS_DIFLAG2_METADATA;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ XFS_STATS_INC(ip->i_mount, xs_inodes_active);
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_meta);
}
/*
diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h
index 2e9715cc1641..23cde1248f01 100644
--- a/fs/xfs/libxfs/xfs_ondisk.h
+++ b/fs/xfs/libxfs/xfs_ondisk.h
@@ -73,7 +73,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_free_hdr, 64);
XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_leaf, 64);
XFS_CHECK_STRUCT_SIZE(struct xfs_dir3_leaf_hdr, 64);
- XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_entry, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_entry, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_hdr, 32);
XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_map, 4);
XFS_CHECK_STRUCT_SIZE(struct xfs_attr_leaf_name_local, 4);
@@ -116,7 +116,7 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_da_intnode, 16);
XFS_CHECK_STRUCT_SIZE(struct xfs_da_node_entry, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_da_node_hdr, 16);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4);
XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_hdr, 16);
XFS_CHECK_OFFSET(struct xfs_dir2_data_unused, freetag, 0);
XFS_CHECK_OFFSET(struct xfs_dir2_data_unused, length, 2);
@@ -136,16 +136,7 @@ xfs_check_ondisk_structs(void)
/* ondisk dir/attr structures from xfs/122 */
XFS_CHECK_STRUCT_SIZE(struct xfs_attr_sf_entry, 3);
XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_free, 4);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_hdr, 16);
XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_data_unused, 6);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_free, 16);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_free_hdr, 16);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf, 16);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_entry, 8);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_hdr, 16);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_leaf_tail, 4);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_sf_entry, 3);
- XFS_CHECK_STRUCT_SIZE(struct xfs_dir2_sf_hdr, 10);
/* log structures */
XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88);
@@ -217,11 +208,6 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_OFFSET(struct xfs_dir3_free, hdr.hdr.magic, 0);
XFS_CHECK_OFFSET(struct xfs_attr3_leafblock, hdr.info.hdr, 0);
- XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat, 192);
- XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers, 24);
- XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat_req, 64);
- XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers_req, 64);
-
/*
* Make sure the incore inode timestamp range corresponds to hand
* converted values based on the ondisk format specification.
@@ -301,6 +287,40 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_SB_OFFSET(sb_pad, 281);
XFS_CHECK_SB_OFFSET(sb_rtstart, 288);
XFS_CHECK_SB_OFFSET(sb_rtreserved, 296);
+
+ /*
+ * ioctl UABI
+ *
+ * Due to different padding/alignment requirements across
+ * different architectures, some structures are omitted from
+ * the size checks. In addition, structures with architecture
+ * dependent size fields are also omitted (e.g. __kernel_long_t).
+ */
+ XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat, 192);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers, 24);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_bulkstat_req, 64);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_inumbers_req, 64);
+ XFS_CHECK_STRUCT_SIZE(struct dioattr, 12);
+ XFS_CHECK_STRUCT_SIZE(struct getbmap, 32);
+ XFS_CHECK_STRUCT_SIZE(struct getbmapx, 48);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist_cursor, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_attrlist_ent, 4);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_ag_geometry, 128);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rtgroup_geometry, 128);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_error_injection, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_geom, 256);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_geom_v4, 112);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_counts, 32);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fsop_resblks, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_growfs_log, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_bulk_ireq, 64);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fs_eofblocks, 128);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_fsid, 8);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_metadata, 64);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_vec, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_scrub_vec_head, 40);
}
#endif /* __XFS_ONDISK_H */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 38d16fe1f6d8..47322adb7690 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -1347,6 +1347,9 @@ xfs_log_sb(
* feature was introduced. This counter can go negative due to the way
* we handle nearly-lockless reservations, so we must use the _positive
* variant here to avoid writing out nonsense frextents.
+ *
+ * RT groups are only supported on v5 file systems, which always
+ * have lazy SB counters.
*/
if (xfs_has_rtgroups(mp) && !xfs_has_zoned(mp)) {
mp->m_sb.sb_frextents =
diff --git a/fs/xfs/scrub/dir_repair.c b/fs/xfs/scrub/dir_repair.c
index 9dc55c918c78..23b80c54aa60 100644
--- a/fs/xfs/scrub/dir_repair.c
+++ b/fs/xfs/scrub/dir_repair.c
@@ -177,7 +177,7 @@ xrep_dir_teardown(
rd->dir_names = NULL;
if (rd->dir_entries)
xfarray_destroy(rd->dir_entries);
- rd->dir_names = NULL;
+ rd->dir_entries = NULL;
}
/* Set up for a directory repair. */
diff --git a/fs/xfs/scrub/orphanage.c b/fs/xfs/scrub/orphanage.c
index 52a108f6d5f4..33c6db6b4498 100644
--- a/fs/xfs/scrub/orphanage.c
+++ b/fs/xfs/scrub/orphanage.c
@@ -442,6 +442,11 @@ xrep_adoption_check_dcache(
return 0;
d_child = try_lookup_noperm(&qname, d_orphanage);
+ if (IS_ERR(d_child)) {
+ dput(d_orphanage);
+ return PTR_ERR(d_child);
+ }
+
if (d_child) {
trace_xrep_adoption_check_child(sc->mp, d_child);
@@ -479,7 +484,7 @@ xrep_adoption_zap_dcache(
return;
d_child = try_lookup_noperm(&qname, d_orphanage);
- while (d_child != NULL) {
+ while (!IS_ERR_OR_NULL(d_child)) {
trace_xrep_adoption_invalidate_child(sc->mp, d_child);
ASSERT(d_is_negative(d_child));
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index e8775f254c89..b237a25d6045 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -245,7 +245,7 @@ xfs_bmap_update_diff_items(
struct xfs_bmap_intent *ba = bi_entry(a);
struct xfs_bmap_intent *bb = bi_entry(b);
- return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
+ return cmp_int(ba->bi_owner->i_ino, bb->bi_owner->i_ino);
}
/* Log bmap updates in the intent item. */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 2b208e2c5264..69e9bc588c8b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1439,9 +1439,15 @@ xfs_qm_dqflush(
return 0;
out_abort:
+ /*
+ * Shut down the log before removing the dquot item from the AIL.
+ * Otherwise, the log tail may advance past this item's LSN while
+ * log writes are still in progress, making these unflushed changes
+ * unrecoverable on the next mount.
+ */
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
xfs_trans_ail_delete(lip, 0);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_dqfunlock(dqp);
return error;
}
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 17255c41786b..8d64d904d73c 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -95,18 +95,17 @@ xfs_growfs_data_private(
struct xfs_growfs_data *in) /* growfs data input struct */
{
xfs_agnumber_t oagcount = mp->m_sb.sb_agcount;
+ xfs_rfsblock_t nb = in->newblocks;
struct xfs_buf *bp;
int error;
xfs_agnumber_t nagcount;
xfs_agnumber_t nagimax = 0;
- xfs_rfsblock_t nb, nb_div, nb_mod;
int64_t delta;
bool lastag_extended = false;
struct xfs_trans *tp;
struct aghdr_init_data id = {};
struct xfs_perag *last_pag;
- nb = in->newblocks;
error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
if (error)
return error;
@@ -125,20 +124,8 @@ xfs_growfs_data_private(
mp->m_sb.sb_rextsize);
if (error)
return error;
+ xfs_growfs_compute_deltas(mp, nb, &delta, &nagcount);
- nb_div = nb;
- nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
- if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
- nb_div++;
- else if (nb_mod)
- nb = nb_div * mp->m_sb.sb_agblocks;
-
- if (nb_div > XFS_MAX_AGNUMBER + 1) {
- nb_div = XFS_MAX_AGNUMBER + 1;
- nb = nb_div * mp->m_sb.sb_agblocks;
- }
- nagcount = nb_div;
- delta = nb - mp->m_sb.sb_dblocks;
/*
* Reject filesystems with a single AG because they are not
* supported, and reject a shrink operation that would cause a
diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c
index 169123772cb3..239b843e83d4 100644
--- a/fs/xfs/xfs_health.c
+++ b/fs/xfs/xfs_health.c
@@ -314,6 +314,22 @@ xfs_rgno_mark_sick(
xfs_rtgroup_put(rtg);
}
+static inline void xfs_inode_report_fserror(struct xfs_inode *ip)
+{
+ /*
+ * Do not report inodes being constructed or freed, or metadata inodes,
+ * to fsnotify.
+ */
+ if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIM) ||
+ xfs_is_internal_inode(ip)) {
+ fserror_report_metadata(ip->i_mount->m_super, -EFSCORRUPTED,
+ GFP_NOFS);
+ return;
+ }
+
+ fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS);
+}
+
/* Mark the unhealthy parts of an inode. */
void
xfs_inode_mark_sick(
@@ -339,7 +355,7 @@ xfs_inode_mark_sick(
inode_state_clear(VFS_I(ip), I_DONTCACHE);
spin_unlock(&VFS_I(ip)->i_lock);
- fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS);
+ xfs_inode_report_fserror(ip);
if (mask)
xfs_healthmon_report_inode(ip, XFS_HEALTHMON_SICK, old_mask,
mask);
@@ -371,7 +387,7 @@ xfs_inode_mark_corrupt(
inode_state_clear(VFS_I(ip), I_DONTCACHE);
spin_unlock(&VFS_I(ip)->i_lock);
- fserror_report_file_metadata(VFS_I(ip), -EFSCORRUPTED, GFP_NOFS);
+ xfs_inode_report_fserror(ip);
if (mask)
xfs_healthmon_report_inode(ip, XFS_HEALTHMON_CORRUPT, old_mask,
mask);
diff --git a/fs/xfs/xfs_healthmon.c b/fs/xfs/xfs_healthmon.c
index e37c18cec372..26c325d34bd1 100644
--- a/fs/xfs/xfs_healthmon.c
+++ b/fs/xfs/xfs_healthmon.c
@@ -69,7 +69,7 @@ xfs_healthmon_get(
struct xfs_healthmon *hm;
rcu_read_lock();
- hm = mp->m_healthmon;
+ hm = rcu_dereference(mp->m_healthmon);
if (hm && !refcount_inc_not_zero(&hm->ref))
hm = NULL;
rcu_read_unlock();
@@ -110,13 +110,13 @@ xfs_healthmon_attach(
struct xfs_healthmon *hm)
{
spin_lock(&xfs_healthmon_lock);
- if (mp->m_healthmon != NULL) {
+ if (rcu_access_pointer(mp->m_healthmon) != NULL) {
spin_unlock(&xfs_healthmon_lock);
return -EEXIST;
}
refcount_inc(&hm->ref);
- mp->m_healthmon = hm;
+ rcu_assign_pointer(mp->m_healthmon, hm);
hm->mount_cookie = (uintptr_t)mp->m_super;
spin_unlock(&xfs_healthmon_lock);
@@ -128,16 +128,29 @@ STATIC void
xfs_healthmon_detach(
struct xfs_healthmon *hm)
{
+ struct xfs_mount *mp;
+
spin_lock(&xfs_healthmon_lock);
if (hm->mount_cookie == DETACHED_MOUNT_COOKIE) {
spin_unlock(&xfs_healthmon_lock);
return;
}
- XFS_M((struct super_block *)hm->mount_cookie)->m_healthmon = NULL;
+ mp = XFS_M((struct super_block *)hm->mount_cookie);
+ rcu_assign_pointer(mp->m_healthmon, NULL);
hm->mount_cookie = DETACHED_MOUNT_COOKIE;
spin_unlock(&xfs_healthmon_lock);
+ /*
+ * Wake up any readers that might remain. This can happen if unmount
+ * races with the healthmon fd owner entering ->read_iter, having
+ * already emptied the event queue.
+ *
+ * In the ->release case there shouldn't be any readers because the
+ * only users of the waiter are read and poll.
+ */
+ wake_up_all(&hm->wait);
+
trace_xfs_healthmon_detach(hm);
xfs_healthmon_put(hm);
}
@@ -1024,13 +1037,6 @@ xfs_healthmon_release(
* process can create another health monitor file.
*/
xfs_healthmon_detach(hm);
-
- /*
- * Wake up any readers that might be left. There shouldn't be any
- * because the only users of the waiter are read and poll.
- */
- wake_up_all(&hm->wait);
-
xfs_healthmon_put(hm);
return 0;
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index dbaab4ae709f..2040a9292ee6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -106,7 +106,7 @@ xfs_inode_alloc(
mapping_set_folio_min_order(VFS_I(ip)->i_mapping,
M_IGEO(mp)->min_folio_order);
- XFS_STATS_INC(mp, vn_active);
+ XFS_STATS_INC(mp, xs_inodes_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(ip->i_ino == 0);
@@ -159,7 +159,6 @@ xfs_inode_free_callback(
ASSERT(!test_bit(XFS_LI_IN_AIL,
&ip->i_itemp->ili_item.li_flags));
xfs_inode_item_destroy(ip);
- ip->i_itemp = NULL;
}
kmem_cache_free(xfs_inode_cache, ip);
@@ -172,7 +171,10 @@ __xfs_inode_free(
/* asserts to verify all state is correct here */
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
- XFS_STATS_DEC(ip->i_mount, vn_active);
+ if (xfs_is_metadir_inode(ip))
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_meta);
+ else
+ XFS_STATS_DEC(ip->i_mount, xs_inodes_active);
call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
@@ -636,6 +638,14 @@ xfs_iget_cache_miss(
if (!ip)
return -ENOMEM;
+ /*
+ * Set XFS_INEW as early as possible so that the health code won't pass
+ * the inode to the fserror code if the ondisk inode cannot be loaded.
+ * We're going to free the xfs_inode immediately if that happens, which
+ * would lead to UAF problems.
+ */
+ xfs_iflags_set(ip, XFS_INEW);
+
error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
if (error)
goto out_destroy;
@@ -713,7 +723,6 @@ xfs_iget_cache_miss(
ip->i_udquot = NULL;
ip->i_gdquot = NULL;
ip->i_pdquot = NULL;
- xfs_iflags_set(ip, XFS_INEW);
/* insert the new inode */
spin_lock(&pag->pag_ici_lock);
@@ -2234,7 +2243,7 @@ xfs_inode_mark_reclaimable(
struct xfs_mount *mp = ip->i_mount;
bool need_inactive;
- XFS_STATS_INC(mp, vn_reclaim);
+ XFS_STATS_INC(mp, xs_inode_mark_reclaimable);
/*
* We should never get here with any of the reclaim flags already set.
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b96f262ba139..f807f8f4f705 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1357,6 +1357,8 @@ xlog_alloc_log(
if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
+ else if (mp->m_sb.sb_logsectsize > 0)
+ log->l_iclog_roundoff = mp->m_sb.sb_logsectsize;
else
log->l_iclog_roundoff = BBSIZE;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 61c71128d171..ddd4028be8d6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -345,7 +345,7 @@ typedef struct xfs_mount {
struct xfs_hooks m_dir_update_hooks;
/* Private data referring to a health monitor object. */
- struct xfs_healthmon *m_healthmon;
+ struct xfs_healthmon __rcu *m_healthmon;
} xfs_mount_t;
#define M_IGEO(mp) (&(mp)->m_ino_geo)
diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c
index 6be19fa1ebe2..64c8afb935c2 100644
--- a/fs/xfs/xfs_notify_failure.c
+++ b/fs/xfs/xfs_notify_failure.c
@@ -304,7 +304,7 @@ xfs_dax_notify_dev_failure(
error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
if (error) {
- xfs_perag_put(pag);
+ xfs_perag_rele(pag);
break;
}
@@ -340,7 +340,7 @@ xfs_dax_notify_dev_failure(
if (rtg)
xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
if (error) {
- xfs_group_put(xg);
+ xfs_group_rele(xg);
break;
}
}
diff --git a/fs/xfs/xfs_platform.h b/fs/xfs/xfs_platform.h
index 1e59bf94d1f2..59a33c60e0ca 100644
--- a/fs/xfs/xfs_platform.h
+++ b/fs/xfs/xfs_platform.h
@@ -235,6 +235,10 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
#ifdef XFS_WARN
+/*
+ * Please note that this ASSERT doesn't kill the kernel. It will if the kernel
+ * has panic_on_warn set.
+ */
#define ASSERT(expr) \
(likely(expr) ? (void)0 : asswarn(NULL, #expr, __FILE__, __LINE__))
@@ -245,6 +249,11 @@ int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
#endif /* XFS_WARN */
#endif /* DEBUG */
+/*
+ * Use this to catch metadata corruptions that are not caught by block or
+ * structure verifiers. The reason is that the verifiers check corruptions only
+ * within the scope of the object being verified.
+ */
#define XFS_IS_CORRUPT(mp, expr) \
(unlikely(expr) ? xfs_corruption_error(#expr, XFS_ERRLEVEL_LOW, (mp), \
NULL, 0, __FILE__, __LINE__, \
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 90a94a5b6f7e..153f3c378f9f 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -112,6 +112,10 @@ xfs_rtcopy_summary(
error = xfs_rtget_summary(oargs, log, bbno, &sum);
if (error)
goto out;
+ if (XFS_IS_CORRUPT(oargs->mp, sum < 0)) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
if (sum == 0)
continue;
error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
@@ -120,7 +124,6 @@ xfs_rtcopy_summary(
error = xfs_rtmodify_summary(nargs, log, bbno, sum);
if (error)
goto out;
- ASSERT(sum > 0);
}
}
error = 0;
@@ -1047,6 +1050,15 @@ xfs_growfs_rt_bmblock(
*/
xfs_trans_resv_calc(mp, &mp->m_resv);
+ /*
+ * Sync sb counters now to reflect the updated values. Lazy counters are
+ * not always updated and in order to avoid inconsistencies between
+ * frextents and rtextents, it is better to sync the counters.
+ */
+
+ if (xfs_has_lazysbcount(mp))
+ xfs_log_sb(args.tp);
+
error = xfs_trans_commit(args.tp);
if (error)
goto out_free;
@@ -1079,17 +1091,27 @@ xfs_last_rtgroup_extents(
}
/*
- * Calculate the last rbmblock currently used.
+ * This will return the bitmap block number (indexed at 0) that will be
+ * extended/modified. There are 2 cases here:
+ * 1. The size of the rtg is such that it is a multiple of
+ * xfs_rtbitmap_rtx_per_rbmblock(), i.e., an integral number of bitmap blocks
+ * are completely filled up. In this case, we should return
+ * 1 + (the last used bitmap block number).
+ * 2. The size of the rtg is not a multiple of xfs_rtbitmap_rtx_per_rbmblock().
+ * Here we will return the number of the last used bitmap block. In this
+ * case, we will modify the last used bitmap block to extend the size of the
+ * rtgroup.
*
* This also deals with the case where there were no rtextents before.
*/
static xfs_fileoff_t
-xfs_last_rt_bmblock(
+xfs_last_rt_bmblock_to_extend(
struct xfs_rtgroup *rtg)
{
struct xfs_mount *mp = rtg_mount(rtg);
xfs_rgnumber_t rgno = rtg_rgno(rtg);
xfs_fileoff_t bmbno = 0;
+ unsigned int mod = 0;
ASSERT(!mp->m_sb.sb_rgcount || rgno >= mp->m_sb.sb_rgcount - 1);
@@ -1097,9 +1119,16 @@ xfs_last_rt_bmblock(
xfs_rtxnum_t nrext = xfs_last_rtgroup_extents(mp);
/* Also fill up the previous block if not entirely full. */
- bmbno = xfs_rtbitmap_blockcount_len(mp, nrext);
- if (xfs_rtx_to_rbmword(mp, nrext) != 0)
- bmbno--;
+ /* Subtract 1 to convert to a 0-based block index */
+ bmbno = xfs_rtbitmap_blockcount_len(mp, nrext) - 1;
+ div_u64_rem(nrext, xfs_rtbitmap_rtx_per_rbmblock(mp), &mod);
+ /*
+ * mod = 0 means that all the current blocks are full. So
+ * return the next block number to be used for the rtgroup
+ * growth.
+ */
+ if (mod == 0)
+ bmbno++;
}
return bmbno;
@@ -1204,7 +1233,8 @@ xfs_growfs_rtg(
goto out_rele;
}
- for (bmbno = xfs_last_rt_bmblock(rtg); bmbno < bmblocks; bmbno++) {
+ for (bmbno = xfs_last_rt_bmblock_to_extend(rtg); bmbno < bmblocks;
+ bmbno++) {
error = xfs_growfs_rt_bmblock(rtg, nrblocks, rextsize, bmbno);
if (error)
goto out_error;
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 017db0361cd8..c13d600732c9 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -42,7 +42,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "xstrat", xfsstats_offset(xs_write_calls) },
{ "rw", xfsstats_offset(xs_attr_get) },
{ "attr", xfsstats_offset(xs_iflush_count)},
- { "icluster", xfsstats_offset(vn_active) },
+ { "icluster", xfsstats_offset(xs_inodes_active) },
{ "vnodes", xfsstats_offset(xb_get) },
{ "buf", xfsstats_offset(xs_abtb_2) },
{ "abtb2", xfsstats_offset(xs_abtc_2) },
@@ -59,7 +59,8 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)},
/* we print both series of quota information together */
{ "qm", xfsstats_offset(xs_gc_read_calls)},
- { "zoned", xfsstats_offset(__pad1)},
+ { "zoned", xfsstats_offset(xs_inodes_meta)},
+ { "metafile", xfsstats_offset(xs_xstrat_bytes)},
};
/* Loop over all stats groups */
@@ -99,16 +100,20 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
+ uint32_t xs_inodes_active, xs_inodes_meta;
int c;
- uint32_t vn_active;
xfs_notice(NULL, "Clearing xfsstats");
for_each_possible_cpu(c) {
preempt_disable();
- /* save vn_active, it's a universal truth! */
- vn_active = per_cpu_ptr(stats, c)->s.vn_active;
+ /*
+ * Save the active / meta inode counters, as they are stateful.
+ */
+ xs_inodes_active = per_cpu_ptr(stats, c)->s.xs_inodes_active;
+ xs_inodes_meta = per_cpu_ptr(stats, c)->s.xs_inodes_meta;
memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
- per_cpu_ptr(stats, c)->s.vn_active = vn_active;
+ per_cpu_ptr(stats, c)->s.xs_inodes_active = xs_inodes_active;
+ per_cpu_ptr(stats, c)->s.xs_inodes_meta = xs_inodes_meta;
preempt_enable();
}
}
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 153d2381d0a8..57c32b86c358 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -100,14 +100,14 @@ struct __xfsstats {
uint32_t xs_iflush_count;
uint32_t xs_icluster_flushcnt;
uint32_t xs_icluster_flushinode;
- uint32_t vn_active; /* # vnodes not on free lists */
- uint32_t vn_alloc; /* # times vn_alloc called */
- uint32_t vn_get; /* # times vn_get called */
- uint32_t vn_hold; /* # times vn_hold called */
- uint32_t vn_rele; /* # times vn_rele called */
- uint32_t vn_reclaim; /* # times vn_reclaim called */
- uint32_t vn_remove; /* # times vn_remove called */
- uint32_t vn_free; /* # times vn_free called */
+ uint32_t xs_inodes_active;
+ uint32_t __unused_vn_alloc;
+ uint32_t __unused_vn_get;
+ uint32_t __unused_vn_hold;
+ uint32_t xs_inode_destroy;
+ uint32_t xs_inode_destroy2; /* same as xs_inode_destroy */
+ uint32_t xs_inode_mark_reclaimable;
+ uint32_t __unused_vn_free;
uint32_t xb_get;
uint32_t xb_create;
uint32_t xb_get_locked;
@@ -142,7 +142,8 @@ struct __xfsstats {
uint32_t xs_gc_read_calls;
uint32_t xs_gc_write_calls;
uint32_t xs_gc_zone_reset_calls;
- uint32_t __pad1;
+/* Metafile counters */
+ uint32_t xs_inodes_meta;
/* Extra precision counters */
uint64_t xs_xstrat_bytes;
uint64_t xs_write_bytes;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index abc45f860a73..f8de44443e81 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -712,8 +712,8 @@ xfs_fs_destroy_inode(
trace_xfs_destroy_inode(ip);
ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- XFS_STATS_INC(ip->i_mount, vn_rele);
- XFS_STATS_INC(ip->i_mount, vn_remove);
+ XFS_STATS_INC(ip->i_mount, xs_inode_destroy);
+ XFS_STATS_INC(ip->i_mount, xs_inode_destroy2);
xfs_inode_mark_reclaimable(ip);
}
diff --git a/fs/xfs/xfs_verify_media.c b/fs/xfs/xfs_verify_media.c
index 069cd371619d..8bbd4ec567f8 100644
--- a/fs/xfs/xfs_verify_media.c
+++ b/fs/xfs/xfs_verify_media.c
@@ -122,7 +122,7 @@ xfs_verify_report_losses(
error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
if (error) {
- xfs_perag_put(pag);
+ xfs_perag_rele(pag);
break;
}
@@ -158,7 +158,7 @@ xfs_verify_report_losses(
if (rtg)
xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
if (error) {
- xfs_group_put(xg);
+ xfs_group_rele(xg);
break;
}
}
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 67e0c8f5800f..e3d19b6dc64a 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -78,7 +78,7 @@ xfs_zone_account_reclaimable(
struct xfs_rtgroup *rtg,
uint32_t freed)
{
- struct xfs_group *xg = &rtg->rtg_group;
+ struct xfs_group *xg = rtg_group(rtg);
struct xfs_mount *mp = rtg_mount(rtg);
struct xfs_zone_info *zi = mp->m_zone_info;
uint32_t used = rtg_rmap(rtg)->i_used_blocks;
@@ -759,7 +759,7 @@ xfs_zone_alloc_blocks(
trace_xfs_zone_alloc_blocks(oz, allocated, count_fsb);
- *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
+ *sector = xfs_gbno_to_daddr(rtg_group(rtg), 0);
*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector);
if (!*is_seq)
*sector += XFS_FSB_TO_BB(mp, allocated);
@@ -1080,7 +1080,7 @@ xfs_init_zone(
if (write_pointer == 0) {
/* zone is empty */
atomic_inc(&zi->zi_nr_free_zones);
- xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
+ xfs_group_set_mark(rtg_group(rtg), XFS_RTG_FREE);
iz->available += rtg_blocks(rtg);
} else if (write_pointer < rtg_blocks(rtg)) {
/* zone is open */
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 48c6cf584447..309f70098524 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -96,7 +96,6 @@ struct xfs_gc_bio {
*/
xfs_fsblock_t old_startblock;
xfs_daddr_t new_daddr;
- struct xfs_zone_scratch *scratch;
/* Are we writing to a sequential write required zone? */
bool is_seq;
@@ -627,7 +626,7 @@ xfs_zone_gc_alloc_blocks(
if (!*count_fsb)
return NULL;
- *daddr = xfs_gbno_to_daddr(&oz->oz_rtg->rtg_group, 0);
+ *daddr = xfs_gbno_to_daddr(rtg_group(oz->oz_rtg), 0);
*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *daddr);
if (!*is_seq)
*daddr += XFS_FSB_TO_BB(mp, oz->oz_allocated);
@@ -702,7 +701,7 @@ xfs_zone_gc_start_chunk(
chunk->data = data;
chunk->oz = oz;
chunk->victim_rtg = iter->victim_rtg;
- atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref);
+ atomic_inc(&rtg_group(chunk->victim_rtg)->xg_active_ref);
atomic_inc(&chunk->victim_rtg->rtg_gccount);
bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock);
@@ -779,7 +778,6 @@ xfs_zone_gc_split_write(
ihold(VFS_I(chunk->ip));
split_chunk->ip = chunk->ip;
split_chunk->is_seq = chunk->is_seq;
- split_chunk->scratch = chunk->scratch;
split_chunk->offset = chunk->offset;
split_chunk->len = split_len;
split_chunk->old_startblock = chunk->old_startblock;
@@ -788,7 +786,7 @@ xfs_zone_gc_split_write(
atomic_inc(&chunk->oz->oz_ref);
split_chunk->victim_rtg = chunk->victim_rtg;
- atomic_inc(&chunk->victim_rtg->rtg_group.xg_active_ref);
+ atomic_inc(&rtg_group(chunk->victim_rtg)->xg_active_ref);
atomic_inc(&chunk->victim_rtg->rtg_gccount);
chunk->offset += split_len;
@@ -888,7 +886,7 @@ xfs_zone_gc_finish_reset(
goto out;
}
- xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE);
+ xfs_group_set_mark(rtg_group(rtg), XFS_RTG_FREE);
atomic_inc(&zi->zi_nr_free_zones);
xfs_zoned_add_available(mp, rtg_blocks(rtg));
@@ -917,7 +915,7 @@ xfs_submit_zone_reset_bio(
XFS_STATS_INC(mp, xs_gc_zone_reset_calls);
- bio->bi_iter.bi_sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0);
+ bio->bi_iter.bi_sector = xfs_gbno_to_daddr(rtg_group(rtg), 0);
if (!bdev_zone_is_seq(bio->bi_bdev, bio->bi_iter.bi_sector)) {
/*
* Also use the bio to drive the state machine when neither