b/Documentation/filesystems/9p.txt
@@ -91,6 +91,7 @@ OPTIONS
0x200 = display Fid debug
0x400 = display packet debug
0x800 = display fscache tracing debug
+ 0x1000 = display xattrcache tracing debug
rfdno=n the file descriptor for reading with trans=fd
@@ -133,6 +134,8 @@ OPTIONS
cache tags for existing cache sessions can be listed at
/sys/fs/9p/caches. (applies only to cache=fscache)
+ xattrcache enable client-side caching of extended attributes
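+
+ For example (the transport and mount tag below are only illustrative):
+
+   mount -t 9p -o trans=virtio,version=9p2000.L,xattrcache testshare /mnt/9p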
+
RESOURCES
=========
@@ -11,6 +11,7 @@ obj-$(CONFIG_9P_FS) := 9p.o
v9fs.o \
fid.o \
xattr.o \
+ xattr_cache.o \
xattr_user.o \
xattr_trusted.o
@@ -38,10 +38,12 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "cache.h"
+#include "xattr_cache.h"
static DEFINE_SPINLOCK(v9fs_sessionlist_lock);
static LIST_HEAD(v9fs_sessionlist);
struct kmem_cache *v9fs_inode_cache;
+struct kmem_cache *v9fs_xattr_cache;
/*
* Option Parsing (code inspired by NFS code)
@@ -59,6 +61,8 @@ enum {
Opt_cache_loose, Opt_fscache, Opt_mmap,
/* Access options */
Opt_access, Opt_posixacl,
+ /* xattr cache */
+ Opt_xcache,
/* Error token */
Opt_err
};
@@ -78,6 +82,7 @@ static const match_table_t tokens = {
{Opt_cachetag, "cachetag=%s"},
{Opt_access, "access=%s"},
{Opt_posixacl, "posixacl"},
+ {Opt_xcache, "xattrcache"},
{Opt_err, NULL}
};
@@ -126,6 +131,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
#ifdef CONFIG_9P_FSCACHE
v9ses->cachetag = NULL;
#endif
+ v9ses->xcache = 0;
if (!opts)
return 0;
@@ -297,7 +303,9 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
"Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
#endif
break;
-
+ case Opt_xcache:
+ v9ses->xcache = 1;
+ break;
default:
continue;
}
@@ -589,6 +597,17 @@ static int v9fs_init_inode_cache(void)
return 0;
}
+static int v9fs_init_xattr_cache(void)
+{
+ v9fs_xattr_cache = kmem_cache_create("v9fs_xattr_cache",
+ sizeof(struct xattr_item),
+ 0, 0, NULL);
+ if (!v9fs_xattr_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
/**
* v9fs_destroy_inode_cache - destroy the cache of 9P inode
*
@@ -603,16 +622,30 @@ static void v9fs_destroy_inode_cache(void)
kmem_cache_destroy(v9fs_inode_cache);
}
+static void v9fs_destroy_xattr_cache(void)
+{
+ kmem_cache_destroy(v9fs_xattr_cache);
+}
+
static int v9fs_cache_register(void)
{
int ret;
ret = v9fs_init_inode_cache();
if (ret < 0)
return ret;
+
+ ret = v9fs_init_xattr_cache();
+ if (ret < 0) {
+ v9fs_destroy_inode_cache();
+ return ret;
+ }
+
#ifdef CONFIG_9P_FSCACHE
ret = fscache_register_netfs(&v9fs_cache_netfs);
- if (ret < 0)
+ if (ret < 0) {
v9fs_destroy_inode_cache();
+ v9fs_destroy_xattr_cache();
+ }
#endif
return ret;
}
@@ -620,6 +653,7 @@ static int v9fs_cache_register(void)
static void v9fs_cache_unregister(void)
{
v9fs_destroy_inode_cache();
+ v9fs_destroy_xattr_cache();
#ifdef CONFIG_9P_FSCACHE
fscache_unregister_netfs(&v9fs_cache_netfs);
#endif
@@ -101,6 +101,7 @@ struct v9fs_session_info {
unsigned short debug;
unsigned int afid;
unsigned int cache;
+ unsigned short xcache;
#ifdef CONFIG_9P_FSCACHE
char *cachetag;
struct fscache_cookie *fscache;
@@ -121,6 +122,15 @@ struct v9fs_session_info {
/* cache_validity flags */
#define V9FS_INO_INVALID_ATTR 0x01
+/**
+ * @xal_head: fast path, xattrs that have already been queried from the backend
+ * @xaf_head: slow path, xattrs that are still being looked up from the backend
+ *
+ * Lookups should be satisfied from xal_head whenever possible.
+ *
+ * When a backend query completes without error (or with -EOPNOTSUPP or
+ * -ERANGE, whose results are cached as well), the xattr_item is moved from
+ * xaf_head to xal_head; any other error marks it V9FS_XI_Removing.
+ */
struct v9fs_inode {
#ifdef CONFIG_9P_FSCACHE
spinlock_t fscache_lock;
@@ -131,6 +141,11 @@ struct v9fs_inode {
struct p9_fid *writeback_fid;
struct mutex v_mutex;
struct inode vfs_inode;
+
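+ /* fast path: xattrs already fetched from the backend */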
+ rwlock_t xal_lock;
+ struct list_head xal_head;
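+ /* slow path: xattrs currently being looked up from the backend */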
+ spinlock_t xaf_lock;
+ struct list_head xaf_head;
};
static inline struct v9fs_inode *V9FS_I(const struct inode *inode)
@@ -53,6 +53,7 @@ extern const struct file_operations v9fs_cached_file_operations_dotl;
extern const struct file_operations v9fs_mmap_file_operations;
extern const struct file_operations v9fs_mmap_file_operations_dotl;
extern struct kmem_cache *v9fs_inode_cache;
+extern struct kmem_cache *v9fs_xattr_cache;
struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_destroy_inode(struct inode *inode);
@@ -249,6 +249,12 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
mutex_init(&v9inode->v_mutex);
+
+ rwlock_init(&v9inode->xal_lock);
+ INIT_LIST_HEAD(&v9inode->xal_head);
+ spin_lock_init(&v9inode->xaf_lock);
+ INIT_LIST_HEAD(&v9inode->xaf_head);
+
return &v9inode->vfs_inode;
}
@@ -260,6 +266,8 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
static void v9fs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ v9fs_free_inode_xattr_items(inode);
kmem_cache_free(v9fs_inode_cache, V9FS_I(inode));
}
@@ -19,8 +19,10 @@
#include <net/9p/9p.h>
#include <net/9p/client.h>
+#include "v9fs.h"
#include "fid.h"
#include "xattr.h"
+#include "xattr_cache.h"
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
void *buffer, size_t buffer_size)
@@ -56,6 +58,142 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
return retval;
}
+static ssize_t __v9fs_xattr_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ struct p9_fid *fid;
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ return v9fs_fid_xattr_get(fid, name, buffer, buffer_size);
+}
+
+static ssize_t __v9fs_xattr_get_cached(struct dentry *dentry, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ ssize_t ret = 0;
+ unsigned long old_flags = V9FS_XI_FLAGS_INIT;
+ struct xattr_item *xi;
+
+ xi = v9fs_find_cached_xattr_item(dentry, name);
+ if (!xi) {
+ xi = v9fs_find_or_create_cached_xattr_item_slow(dentry, name);
+ if (IS_ERR(xi)) {
+ p9_debug(P9_DEBUG_ERROR,
+ "create fail, dentry:%s xname:%s errno:%ld\n",
+ dentry->d_name.name, name, PTR_ERR(xi));
+
+ return __v9fs_xattr_get(dentry, name,
+ buffer, buffer_size);
+ }
+ }
+
+ /*
+ * We don't know yet whether the backend has finished removing the
+ * xattr; don't wait for it, bypass the cache and query the backend.
+ */
+ if (v9fs_xif_test_removing(xi)) {
+ v9fs_xattr_item_dec_ref(dentry, xi);
+ return __v9fs_xattr_get(dentry, name, buffer, buffer_size);
+ }
+
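+ /*
+ * The first reader of a freshly created item (or one that raced with a
+ * state change and jumped back to 'again') refills the cached value
+ * from the backend under the write semaphore.
+ */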
+ if (v9fs_xif_test_clear_new(xi)) {
+again:
+ down_write(&xi->rw_sem);
+
+ if (old_flags == v9fs_xif_current_dflag(xi)) {
+ ret = __v9fs_xattr_get(dentry, name, buffer, buffer_size);
+ v9fs_xattr_item_setval(xi, buffer, buffer_size, ret);
+ if (likely(!v9fs_xif_test_removing(xi) &&
+ v9fs_xif_test_slowpath(xi)))
+ v9fs_move_xattr_item_to_fast(dentry, xi);
+
+ up_write(&xi->rw_sem);
+ v9fs_xattr_item_dec_ref(dentry, xi);
+
+ return ret;
+ } else {
+ /*
+ * another process has already refilled the data;
+ * nothing to do, fall through and read from the cache
+ */
+ up_write(&xi->rw_sem);
+ }
+ }
+
+ down_read(&xi->rw_sem);
+ /* caller only asks whether the xattr exists and how long its value is */
+ if (buffer_size == 0) {
+ /*
+ * Returns the actual length of the attribute value
+ */
+ if (v9fs_xif_test_erange(xi)) {
+ old_flags = v9fs_xif_test_erange(xi);
+ up_read(&xi->rw_sem);
+ goto again;
+ } else if (v9fs_xif_test_normal(xi) ||
+ v9fs_xif_test_nodata(xi))
+ ret = xi->xi_vlen;
+ else if (v9fs_xif_test_enotsupp(xi))
+ ret = -EOPNOTSUPP;
+
+ goto exit;
+ }
+
+ if (v9fs_xif_test_normal(xi)) {
+ if (xi->xi_vlen > buffer_size) {
+ ret = -ERANGE;
+ goto exit;
+ } else {
+ memcpy(buffer, xi->xi_value, xi->xi_vlen);
+ ret = xi->xi_vlen;
+ goto exit;
+ }
+ }
+
+ if (v9fs_xif_test_enotsupp(xi)) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ if (v9fs_xif_test_nodata(xi)) {
+ if (xi->xi_vlen > buffer_size) {
+ ret = -ERANGE;
+ goto exit;
+ } else {
+ old_flags = v9fs_xif_test_nodata(xi);
+ up_read(&xi->rw_sem);
+ goto again;
+ }
+ }
+
+ if (v9fs_xif_test_erange(xi)) {
+ if (xi->xi_vlen >= buffer_size) {
+ ret = -ERANGE;
+ goto exit;
+ } else {
+ old_flags = v9fs_xif_test_erange(xi);
+ up_read(&xi->rw_sem);
+ goto again;
+ }
+ }
+
+ if (v9fs_xif_test_removing(xi)) {
+ ret = -ENODATA;
+ goto exit;
+ }
+
+ ret = -EINVAL;
+ BUG_ON(1);
+
+exit:
+ up_read(&xi->rw_sem);
+ v9fs_xattr_item_dec_ref(dentry, xi);
+
+ return ret;
+}
/*
* v9fs_xattr_get()
@@ -70,15 +208,74 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
void *buffer, size_t buffer_size)
{
- struct p9_fid *fid;
+ struct v9fs_session_info *v9ses;
p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
name, buffer_size);
- fid = v9fs_fid_lookup(dentry);
+
+ v9ses = v9fs_dentry2v9ses(dentry);
+ /* for listxattr, @name is NULL */
+ if (v9ses->xcache && name)
+ return __v9fs_xattr_get_cached(dentry, name,
+ buffer, buffer_size);
+ else
+ return __v9fs_xattr_get(dentry, name, buffer, buffer_size);
+}
+
+static int __v9fs_xattr_set(struct dentry *dentry, const char *name,
+ const void *value, size_t value_len, int flags)
+{
+ struct p9_fid *fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid))
return PTR_ERR(fid);
+ return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
+}
- return v9fs_fid_xattr_get(fid, name, buffer, buffer_size);
+static int __v9fs_xattr_set_cached(struct dentry *dentry, const char *name,
+ const void *value, size_t value_len, int flags)
+{
+ int ret;
+ struct xattr_item *xi;
+
+ /* remove xattr item */
+ if (flags == XATTR_REPLACE && value == NULL) {
+ xi = v9fs_find_cached_xattr_item_full(dentry, name);
+ if (xi && !v9fs_xif_test_removing(xi)) {
+ down_write(&xi->rw_sem);
+ v9fs_xif_set_removing(xi);
+
+ ret = __v9fs_xattr_set(dentry, name, value,
+ value_len, flags);
+ if (ret != 0)
+ /* removal on the backend failed; keep the item cached */
+ v9fs_xif_clear_removing(xi);
+
+ goto exit;
+ } else
+ return __v9fs_xattr_set(dentry, name,
+ value, value_len, flags);
+ } else {
+ /* create or replace */
+ xi = v9fs_find_cached_xattr_item(dentry, name);
+ if (!xi)
+ xi = v9fs_find_or_create_cached_xattr_item_slow(dentry, name);
+
+ if (!IS_ERR(xi) && !v9fs_xif_test_removing(xi)) {
+ down_write(&xi->rw_sem);
+
+ ret = __v9fs_xattr_set(dentry, name, value, value_len, flags);
+ if (ret == 0)
+ v9fs_xattr_item_setval(xi, value, value_len, value_len);
+
+ goto exit;
+ } else
+ return __v9fs_xattr_set(dentry, name, value, value_len, flags);
+ }
+
+exit:
+ up_write(&xi->rw_sem);
+ v9fs_xattr_item_dec_ref(dentry, xi);
+ return ret;
}
/*
@@ -96,10 +293,13 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
int v9fs_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t value_len, int flags)
{
- struct p9_fid *fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
- return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_dentry2v9ses(dentry);
+ if (v9ses->xcache)
+ return __v9fs_xattr_set_cached(dentry, name, value, value_len, flags);
+ else
+ return __v9fs_xattr_set(dentry, name, value, value_len, flags);
}
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
@@ -18,6 +18,8 @@
#include <net/9p/9p.h>
#include <net/9p/client.h>
+#include "xattr_cache.h"
+
extern const struct xattr_handler *v9fs_xattr_handlers[];
extern struct xattr_handler v9fs_xattr_user_handler;
extern struct xattr_handler v9fs_xattr_trusted_handler;
new file mode 100644
@@ -0,0 +1,228 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include "xattr.h"
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "xattr_cache.h"
+
+static void v9fs_init_xattr_item(struct xattr_item *xi, const char *xname)
+{
+ xi->xi_name = kstrdup(xname, GFP_NOFS);
+ xi->xi_nlen = strlen(xname);
+
+ xi->xi_value = NULL;
+ xi->xi_vlen = 0;
+ xi->flags = V9FS_XI_FLAGS_INIT;
+
+ v9fs_xif_set_slowpath(xi);
+ v9fs_xif_set_new(xi);
+ atomic_set(&xi->ref, 1);
+ init_rwsem(&xi->rw_sem);
+}
+
+static struct xattr_item *v9fs_find_cached_xattr_slow_lock(
+ struct list_head *head,
+ const char *xname)
+{
+ struct xattr_item *xi = NULL;
+
+ list_for_each_entry(xi, head, xi_list) {
+ if (0 == strcmp(xname, xi->xi_name)) {
+ p9_debug(P9_DEBUG_XCACHE,
+ "found xattr from slow path, name:%s nlen:%u flags:%lu\n",
+ xname, xi->xi_nlen, xi->flags);
+
+ atomic_inc(&xi->ref);
+
+ return xi;
+ }
+ }
+
+ return NULL;
+}
+
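+/*
+ * The item may sit on either the fast (xal) or the slow (xaf) list;
+ * V9FS_XI_Slowpath tells us which lock protects it.
+ */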
+static void v9fs_free_cached_xattr_item(struct dentry *dentry,
+ struct xattr_item *xi)
+{
+ struct v9fs_inode *v9inode = V9FS_I(dentry->d_inode);
+ unsigned long flag = v9fs_xif_test_slowpath(xi);
+
+ p9_debug(P9_DEBUG_XCACHE, "free xattr item, name:%s flags:%lu\n",
+ xi->xi_name, xi->flags);
+ if (flag)
+ spin_lock(&v9inode->xaf_lock);
+ else
+ write_lock(&v9inode->xal_lock);
+
+ list_del(&xi->xi_list);
+ kfree(xi->xi_value);
+ kfree(xi->xi_name);
+ kmem_cache_free(v9fs_xattr_cache, xi);
+
+ if (flag)
+ spin_unlock(&v9inode->xaf_lock);
+ else
+ write_unlock(&v9inode->xal_lock);
+}
+
+struct xattr_item *v9fs_find_cached_xattr_item(struct dentry *dentry,
+ const char *xname)
+{
+ struct v9fs_inode *v9inode;
+ struct xattr_item *xi;
+
+ v9inode = V9FS_I(dentry->d_inode);
+
+ read_lock(&v9inode->xal_lock);
+
+ list_for_each_entry(xi, &v9inode->xal_head, xi_list) {
+ if (0 == strcmp(xname, xi->xi_name)) {
+ p9_debug(P9_DEBUG_XCACHE,
+ "found xattr from fast path name:%s nlen:%u flags:%lu\n",
+ xname, xi->xi_nlen, xi->flags);
+
+ atomic_inc(&xi->ref);
+ read_unlock(&v9inode->xal_lock);
+
+ return xi;
+ }
+ }
+
+ read_unlock(&v9inode->xal_lock);
+
+ return NULL;
+}
+
+struct xattr_item *v9fs_find_cached_xattr_item_full(
+ struct dentry *dentry,
+ const char *xname)
+{
+ struct v9fs_inode *v9inode = V9FS_I(dentry->d_inode);
+ struct xattr_item *xi;
+
+ xi = v9fs_find_cached_xattr_item(dentry, xname);
+ if (!xi) {
+ spin_lock(&v9inode->xaf_lock);
+ xi = v9fs_find_cached_xattr_slow_lock(&v9inode->xaf_head, xname);
+ spin_unlock(&v9inode->xaf_lock);
+ }
+
+ return xi;
+}
+
+struct xattr_item *v9fs_find_or_create_cached_xattr_item_slow(
+ struct dentry *dentry,
+ const char *xname)
+{
+ struct v9fs_inode *v9inode = V9FS_I(dentry->d_inode);
+ struct xattr_item *xi = NULL;
+
+ spin_lock(&v9inode->xaf_lock);
+ xi = v9fs_find_cached_xattr_slow_lock(&v9inode->xaf_head, xname);
+ if (!xi) {
+ xi = kmem_cache_alloc(v9fs_xattr_cache, GFP_NOFS);
+ if (!xi) {
+ spin_unlock(&v9inode->xaf_lock);
+ p9_debug(P9_DEBUG_XCACHE,
+ "create cached xattr item fail, name:%s\n",
+ xname);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ p9_debug(P9_DEBUG_XCACHE,
+ "create xattr item success, name:%s\n",
+ xname);
+
+ v9fs_init_xattr_item(xi, xname);
+ list_add(&xi->xi_list, &v9inode->xaf_head);
+ }
+ spin_unlock(&v9inode->xaf_lock);
+
+ return xi;
+}
+
+void v9fs_move_xattr_item_to_fast(struct dentry *dentry, struct xattr_item *xi)
+{
+ struct v9fs_inode *v9inode = V9FS_I(dentry->d_inode);
+
+ p9_debug(P9_DEBUG_XCACHE,
+ "move xattr item to fast path, name:%s flags:%lu\n",
+ xi->xi_name, xi->flags);
+
+ write_lock(&v9inode->xal_lock);
+ spin_lock(&v9inode->xaf_lock);
+
+ list_move(&xi->xi_list, &v9inode->xal_head);
+ v9fs_xif_clear_slowpath(xi);
+
+ spin_unlock(&v9inode->xaf_lock);
+ write_unlock(&v9inode->xal_lock);
+}
+
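+/*
+ * Record the result of a backend xattr_get in the cached item:
+ *   ret >= 0, buffer_size != 0: cache the value            -> Normal
+ *   ret >= 0, buffer_size == 0: cache only the length      -> Nodata
+ *   ret == -EOPNOTSUPP:         cache the error            -> Enotsupp
+ *   ret == -ERANGE:             remember the failing size  -> Erange
+ *   any other error:            mark the item for removal  -> Removing
+ */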
+int v9fs_xattr_item_setval(struct xattr_item *xi, const char *buffer,
+ size_t buffer_size, ssize_t ret)
+{
+ p9_debug(P9_DEBUG_XCACHE,
+ "setvalue, name:%s flags:%lu buffer:%s buffer_size:%zu ret:%zu\n",
+ xi->xi_name, xi->flags,
+ buffer ? buffer : "null", buffer_size, ret);
+
+ if (likely(ret >= 0)) {
+ if (buffer_size) {
+ kfree(xi->xi_value);
+
+ xi->xi_vlen = ret;
+ xi->xi_value = kstrdup(buffer, GFP_NOFS);
+ if (!xi->xi_value) {
+ v9fs_xif_set_nodata(xi);
+ p9_debug(P9_DEBUG_ERROR, "setval fail, no memory.\n");
+ return -ENOMEM;
+ } else
+ v9fs_xif_set_normal(xi);
+ } else {
+ v9fs_xif_set_nodata(xi);
+ xi->xi_vlen = ret;
+ }
+ } else if (ret == -EOPNOTSUPP) {
+ v9fs_xif_set_enotsupp(xi);
+ } else if (ret == -ERANGE) {
+ xi->xi_vlen = buffer_size;
+ v9fs_xif_set_erange(xi);
+ } else
+ v9fs_xif_set_removing(xi);
+
+ return ret;
+}
+
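+/*
+ * Drop a reference.  Items being removed are freed on the last put;
+ * everything else stays cached until the inode is evicted
+ * (see v9fs_free_inode_xattr_items()).
+ */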
+void v9fs_xattr_item_dec_ref(struct dentry *dentry, struct xattr_item *xi)
+{
+ if (v9fs_xif_test_removing(xi) && atomic_dec_and_test(&xi->ref))
+ v9fs_free_cached_xattr_item(dentry, xi);
+ else
+ atomic_dec(&xi->ref);
+}
+
+void v9fs_free_inode_xattr_items(struct inode *inode)
+{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
+ struct xattr_item *xi, *tmp;
+
+ p9_debug(P9_DEBUG_XCACHE,
+ "free all xattr items for inode, inode:%p\n", inode);
+
+ list_for_each_entry_safe(xi, tmp, &v9inode->xal_head, xi_list) {
+ if (xi->xi_vlen)
+ kfree(xi->xi_value);
+ kfree(xi->xi_name);
+ kmem_cache_free(v9fs_xattr_cache, xi);
+ }
+
+ list_for_each_entry_safe(xi, tmp, &v9inode->xaf_head, xi_list) {
+ kfree(xi->xi_value);
+ kfree(xi->xi_name);
+ kmem_cache_free(v9fs_xattr_cache, xi);
+ }
+}
new file mode 100644
@@ -0,0 +1,100 @@
+#ifndef FS_9P_XCACHE_H
+#define FS_9P_XCACHE_H
+
+struct xattr_item {
+ struct list_head xi_list;
+
+ char *xi_name;
+ u16 xi_nlen;
+ char *xi_value;
+ u16 xi_vlen;
+
+ unsigned long flags;
+ atomic_t ref;
+ struct rw_semaphore rw_sem;
+};
+
+enum v9fs_xi_flags {
+ /* xattr item value flags */
+ V9FS_XI_Normal,
+ V9FS_XI_Nodata,
+ V9FS_XI_Erange,
+ V9FS_XI_Enotsupp,
+ V9FS_XI_Bound,
+ /* xattr item flags */
+ V9FS_XI_New,
+ V9FS_XI_Removing,
+ V9FS_XI_Slowpath,
+};
+
+#define V9FS_XI_FLAGS_INIT 0UL
+
+#define V9FS_XI_FLAGS_BOUND ((1 << V9FS_XI_Bound) - 1)
+
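+/*
+ * Bits below V9FS_XI_Bound hold one mutually exclusive value state
+ * (Normal, Nodata, Erange, Enotsupp) and are updated as a group under
+ * xi->rw_sem; bits from V9FS_XI_New upwards are independent item flags
+ * manipulated with atomic bitops.
+ */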
+#define V9FS_XIF_FNS_EXCL(bit, name) \
+static inline void v9fs_xif_set_##name(struct xattr_item *xi) \
+{ \
+ xi->flags = (xi->flags & ~V9FS_XI_FLAGS_BOUND) | (1 << V9FS_XI_##bit); \
+} \
+static inline int v9fs_xif_test_##name(struct xattr_item *xi) \
+{ \
+ return (xi->flags & V9FS_XI_FLAGS_BOUND) & (1 << V9FS_XI_##bit); \
+}
+
+V9FS_XIF_FNS_EXCL(Normal, normal);
+V9FS_XIF_FNS_EXCL(Nodata, nodata);
+V9FS_XIF_FNS_EXCL(Erange, erange);
+V9FS_XIF_FNS_EXCL(Enotsupp, enotsupp);
+
+static inline unsigned long v9fs_xif_current_dflag(struct xattr_item *xi)
+{
+ return xi->flags & V9FS_XI_FLAGS_BOUND;
+}
+
+#define V9FS_XIF_FNS(bit, name) \
+static inline void v9fs_xif_set_##name(struct xattr_item *xi) \
+{ \
+ set_bit(V9FS_XI_##bit, &(xi)->flags); \
+} \
+static inline void v9fs_xif_clear_##name(struct xattr_item *xi) \
+{ \
+ clear_bit(V9FS_XI_##bit, &(xi)->flags); \
+} \
+static inline int v9fs_xif_test_##name(const struct xattr_item *xi) \
+{ \
+ return test_bit(V9FS_XI_##bit, &(xi)->flags); \
+}
+
+V9FS_XIF_FNS(New, new);
+V9FS_XIF_FNS(Removing, removing);
+V9FS_XIF_FNS(Slowpath, slowpath);
+
+#define V9FS_XIF_TAS(bit, name) \
+static inline int v9fs_xif_test_set_##name(struct xattr_item *xi) \
+{ \
+ return test_and_set_bit(V9FS_XI_##bit, &(xi)->flags); \
+} \
+static inline int v9fs_xif_test_clear_##name(struct xattr_item *xi) \
+{ \
+ return test_and_clear_bit(V9FS_XI_##bit, &(xi)->flags); \
+}
+
+V9FS_XIF_TAS(New, new);
+
+extern struct xattr_item *v9fs_find_cached_xattr_item(struct dentry *dentry,
+ const char *xname);
+extern struct xattr_item *v9fs_find_cached_xattr_item_full(
+ struct dentry *dentry,
+ const char *xname);
+extern struct xattr_item *v9fs_find_or_create_cached_xattr_item_slow(
+ struct dentry *dentry,
+ const char *xname);
+extern void v9fs_move_xattr_item_to_fast(struct dentry *dentry,
+ struct xattr_item *xi);
+extern int v9fs_xattr_item_setval(struct xattr_item *xi, const char *buffer,
+ size_t buffer_size, ssize_t ret);
+extern void v9fs_xattr_item_dec_ref(struct dentry *dentry,
+ struct xattr_item *xi);
+extern void v9fs_free_inode_xattr_items(struct inode *inode);
+
+#endif
@@ -59,6 +59,7 @@ enum p9_debug_flags {
P9_DEBUG_PKT = (1<<10),
P9_DEBUG_FSC = (1<<11),
P9_DEBUG_VPKT = (1<<12),
+ P9_DEBUG_XCACHE = (1<<13),
};
#ifdef CONFIG_NET_9P_DEBUG