--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -912,7 +912,6 @@ static struct list_head *alloc_heuristic_ws(unsigned int level)
const struct btrfs_compress_op btrfs_heuristic_compress = {
.workspace_manager = &heuristic_wsm,
- .alloc_workspace = alloc_heuristic_ws,
.free_workspace = free_heuristic_ws,
};
@@ -924,6 +923,22 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_lzo_compress,
&btrfs_zstd_compress,
};
+static struct list_head *alloc_workspace(int type, unsigned int level)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
+ case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
+ case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
+ case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
+ default:
+ /*
+ * This can't happen, the type is validated several times
+ * before we get here.
+ */
+ BUG();
+ }
+}
+
static void btrfs_init_workspace_manager(int type)
{
const struct btrfs_compress_op *ops = btrfs_compress_op[type];
@@ -941,7 +956,7 @@ static void btrfs_init_workspace_manager(int type)
* Preallocate one workspace for each compression type so we can
* guarantee forward progress in the worst case
*/
- workspace = wsm->ops->alloc_workspace(0);
+ workspace = alloc_workspace(type, 0);
if (IS_ERR(workspace)) {
pr_warn(
"BTRFS: cannot preallocate compression workspace, will try later\n");
@@ -1020,7 +1035,7 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level)
* context of btrfs_compress_bio/btrfs_compress_pages
*/
nofs_flag = memalloc_nofs_save();
- workspace = wsm->ops->alloc_workspace(level);
+ workspace = alloc_workspace(type, level);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(workspace)) {
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -124,8 +124,6 @@ struct list_head *btrfs_get_workspace(int type, unsigned int level);
void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
struct btrfs_compress_op {
- struct list_head *(*alloc_workspace)(unsigned int level);
-
void (*free_workspace)(struct list_head *workspace);
struct workspace_manager *workspace_manager;
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -484,7 +484,6 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
const struct btrfs_compress_op btrfs_lzo_compress = {
.workspace_manager = &wsm,
- .alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.max_level = 1,
.default_level = 1,
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -400,7 +400,6 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
const struct btrfs_compress_op btrfs_zlib_compress = {
.workspace_manager = &wsm,
- .alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.max_level = 9,
.default_level = BTRFS_ZLIB_DEFAULT_LEVEL,
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -708,7 +708,6 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
const struct btrfs_compress_op btrfs_zstd_compress = {
/* ZSTD uses own workspace manager */
.workspace_manager = NULL,
- .alloc_workspace = zstd_alloc_workspace,
.free_workspace = zstd_free_workspace,
.max_level = ZSTD_BTRFS_MAX_LEVEL,
.default_level = ZSTD_BTRFS_DEFAULT_LEVEL,
Replace indirect calls to alloc_workspace with a switch and direct
calls to the specific callbacks. This is mainly to get rid of the
indirect call overhead added by the Spectre vulnerability mitigations
(retpolines).

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/compression.c | 21 ++++++++++++++++++---
 fs/btrfs/compression.h |  2 --
 fs/btrfs/lzo.c         |  1 -
 fs/btrfs/zlib.c        |  1 -
 fs/btrfs/zstd.c        |  1 -
 5 files changed, 18 insertions(+), 8 deletions(-)
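For reference, below is a minimal standalone C sketch of the
transformation this patch performs: replacing an indirect call through
an ops table with a switch on a validated type and direct calls. All
demo_* names are hypothetical stand-ins for the btrfs callbacks, not
actual btrfs code.

#include <stdio.h>
#include <stdlib.h>

enum demo_type { DEMO_NONE, DEMO_ZLIB, DEMO_LZO, DEMO_ZSTD };

/* Stand-ins for the per-algorithm workspace allocators. */
static int demo_none_alloc(unsigned int level) { (void)level; return 0; }
static int demo_zlib_alloc(unsigned int level) { return 10 + (int)level; }
static int demo_lzo_alloc(unsigned int level)  { (void)level; return 20; }
static int demo_zstd_alloc(unsigned int level) { return 30 + (int)level; }

/*
 * Before: an ops table holding a function pointer; every call site
 * does ops->alloc(level), an indirect branch that retpolines make
 * expensive.
 */
struct demo_ops {
	int (*alloc)(unsigned int level);
};

/*
 * After: one helper that switches on a validated type and makes
 * direct calls, leaving no indirect branch for the mitigations to
 * rewrite.
 */
static int demo_alloc(enum demo_type type, unsigned int level)
{
	switch (type) {
	case DEMO_NONE: return demo_none_alloc(level);
	case DEMO_ZLIB: return demo_zlib_alloc(level);
	case DEMO_LZO:  return demo_lzo_alloc(level);
	case DEMO_ZSTD: return demo_zstd_alloc(level);
	default:
		/* Unreachable when the type was validated earlier. */
		abort();
	}
}

int main(void)
{
	struct demo_ops ops = { .alloc = demo_zlib_alloc };

	printf("indirect: %d\n", ops.alloc(2));             /* prints 12 */
	printf("direct:   %d\n", demo_alloc(DEMO_ZSTD, 3)); /* prints 33 */
	return 0;
}

The default case mirrors the BUG() in the patch: the type is validated
before dispatch, so the branch is unreachable and the helper adds no
new failure mode.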