| /* SPDX-License-Identifier: GPL-2.0-or-later */ |
| /* Internal definitions for network filesystem support |
| * |
| * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved. |
| * Written by David Howells (dhowells@redhat.com) |
| */ |
| |
| #include <linux/netfs.h> |
| #include <linux/fscache.h> |
| #include <trace/events/netfs.h> |
| |
/* Prefix every pr_*() message emitted from this subsystem with "netfs: ". */
#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "netfs: " fmt
| |
| /* |
| * buffered_read.c |
| */ |
| void netfs_rreq_unlock_folios(struct netfs_io_request *rreq); |
| |
| /* |
| * direct_read.c |
| */ |
| int netfs_dio_copy_to_dest(struct netfs_io_request *rreq); |
| |
| /* |
| * io.c |
| */ |
| ssize_t netfs_begin_read(struct netfs_io_request *rreq, bool sync); |
| |
| /* |
| * main.c |
| */ |
| extern unsigned int netfs_debug; |
| extern struct list_head netfs_io_requests; |
| extern spinlock_t netfs_proc_lock; |
| |
#ifdef CONFIG_PROC_FS
/*
 * Publish an I/O request on the global netfs_io_requests list (exported via
 * procfs).  The list is RCU-protected for readers; writers serialise on
 * netfs_proc_lock.
 */
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
{
	spin_lock(&netfs_proc_lock);
	list_add_tail_rcu(&rreq->proc_link, &netfs_io_requests);
	spin_unlock(&netfs_proc_lock);
}

/*
 * Withdraw an I/O request from the procfs list, if it was ever added.  The
 * unlocked emptiness check is safe: the link is only ever attached under
 * netfs_proc_lock, and an unattached link stays empty.
 */
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq)
{
	if (list_empty(&rreq->proc_link))
		return;
	spin_lock(&netfs_proc_lock);
	list_del_rcu(&rreq->proc_link);
	spin_unlock(&netfs_proc_lock);
}
#else
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) {}
static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
#endif
| |
| /* |
| * misc.c |
| */ |
| int netfs_xa_store_and_mark(struct xarray *xa, unsigned long index, |
| struct folio *folio, bool put_mark, |
| bool pagecache_mark, gfp_t gfp_mask); |
| int netfs_add_folios_to_buffer(struct xarray *buffer, |
| struct address_space *mapping, |
| pgoff_t index, pgoff_t to, gfp_t gfp_mask); |
| int netfs_set_up_buffer(struct xarray *buffer, |
| struct address_space *mapping, |
| struct readahead_control *ractl, |
| struct folio *keep, |
| pgoff_t have_index, unsigned int have_folios); |
| void netfs_clear_buffer(struct xarray *buffer); |
| |
| /* |
| * objects.c |
| */ |
| struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, |
| struct file *file, |
| loff_t start, size_t len, |
| enum netfs_io_origin origin); |
| void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); |
| void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); |
| void netfs_put_request(struct netfs_io_request *rreq, bool was_async, |
| enum netfs_rreq_ref_trace what); |
| struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq); |
| |
| static inline void netfs_see_request(struct netfs_io_request *rreq, |
| enum netfs_rreq_ref_trace what) |
| { |
| trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what); |
| } |
| |
| /* |
| * stats.c |
| */ |
| #ifdef CONFIG_NETFS_STATS |
| extern atomic_t netfs_n_rh_dio_read; |
| extern atomic_t netfs_n_rh_readahead; |
| extern atomic_t netfs_n_rh_readpage; |
| extern atomic_t netfs_n_rh_rreq; |
| extern atomic_t netfs_n_rh_sreq; |
| extern atomic_t netfs_n_rh_download; |
| extern atomic_t netfs_n_rh_download_done; |
| extern atomic_t netfs_n_rh_download_failed; |
| extern atomic_t netfs_n_rh_download_instead; |
| extern atomic_t netfs_n_rh_read; |
| extern atomic_t netfs_n_rh_read_done; |
| extern atomic_t netfs_n_rh_read_failed; |
| extern atomic_t netfs_n_rh_zero; |
| extern atomic_t netfs_n_rh_short_read; |
| extern atomic_t netfs_n_rh_write; |
| extern atomic_t netfs_n_rh_write_begin; |
| extern atomic_t netfs_n_rh_write_done; |
| extern atomic_t netfs_n_rh_write_failed; |
| extern atomic_t netfs_n_rh_write_zskip; |
| extern atomic_t netfs_n_rh_remove_folio; |
| |
| |
| static inline void netfs_stat(atomic_t *stat) |
| { |
| atomic_inc(stat); |
| } |
| |
| static inline void netfs_stat_d(atomic_t *stat) |
| { |
| atomic_dec(stat); |
| } |
| |
| #else |
| #define netfs_stat(x) do {} while(0) |
| #define netfs_stat_d(x) do {} while(0) |
| #endif |
| |
| /* |
| * Miscellaneous functions. |
| */ |
| static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx) |
| { |
| #if IS_ENABLED(CONFIG_FSCACHE) |
| struct fscache_cookie *cookie = ctx->cache; |
| |
| return fscache_cookie_valid(cookie) && cookie->cache_priv && |
| fscache_cookie_enabled(cookie); |
| #else |
| return false; |
| #endif |
| } |
| |
| /*****************************************************************************/ |
| /* |
| * debug tracing |
| */ |
/* Unconditional debug printk, tagged with the current task's comm name. */
#define dbgprintk(FMT, ...) \
	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

/* Function entry/exit and general-purpose debug tracing helpers. */
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#ifdef __KDEBUG
/* Debugging compiled in unconditionally for this translation unit. */
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_NETFS_DEBUG)
/* Runtime-switchable debugging, gated on the netfs_debug flag
 * (presumably a module parameter — confirm in main.c).
 */
#define _enter(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kenter(FMT, ##__VA_ARGS__);	\
} while (0)

#define _leave(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kleave(FMT, ##__VA_ARGS__);	\
} while (0)

#define _debug(FMT, ...)			\
do {						\
	if (netfs_debug)			\
		kdebug(FMT, ##__VA_ARGS__);	\
} while (0)

#else
/* Debugging disabled: no_printk() emits nothing but still type-checks
 * the format arguments.
 */
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif