From patchwork Tue Dec 7 15:09:25 2021
X-Patchwork-Submitter: Arnd Bergmann
X-Patchwork-Id: 12662057
From: Arnd Bergmann
To: linux-kernel@vger.kernel.org
Cc: Arnd Bergmann , Al Viro , Andrew Morton , Guenter Roeck , Kees Cook ,
 Linus Torvalds , Masahiro Yamada , Matthew Wilcox , Nathan Chancellor ,
 Nick Desaulniers , Stephen Rothwell , kernel test robot , Ingo Molnar ,
 Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng ,
 Thomas Gleixner , Tejun Heo , kernelci@groups.io,
 linux-fsdevel@vger.kernel.org, linux-kbuild@vger.kernel.org,
 llvm@lists.linux.dev
Subject: [RFC 1/3] headers: add more types to linux/types.h
Date: Tue, 7 Dec 2021 16:09:25 +0100
Message-Id: <20211207150927.3042197-2-arnd@kernel.org>
In-Reply-To: <20211207150927.3042197-1-arnd@kernel.org>
References: <20211207150927.3042197-1-arnd@kernel.org>
List-ID: 
X-Mailing-List: linux-kbuild@vger.kernel.org

From: Arnd Bergmann

There are a couple of types for atomics, uid, timespec, and isolate_mode_t
that are used in other central header files like linux/fs.h to define larger
data structures. The headers that traditionally define these in turn include
other headers recursively, which adds considerable bloat to the preprocessed
source files and requires rebuilding large parts of the kernel for any change
to indirectly included headers.

Moving these into linux/types.h means we can build on top of them with a much
smaller set of indirect includes.
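[ Editorial illustration, not part of the patch: after this change, a header
  that only needs to embed these types in its own structure layout can get
  them from linux/types.h alone, instead of pulling in linux/atomic.h,
  linux/uidgid.h or linux/time64.h. The structure below is made up. ]

#include <linux/types.h>

struct example_record {				/* hypothetical */
	kuid_t			owner;		/* now defined in linux/types.h */
	atomic64_t		refcnt;		/* no linux/atomic.h needed for the layout */
	struct timespec64	mtime;		/* no linux/time64.h needed either */
};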
Signed-off-by: Arnd Bergmann --- arch/arc/include/asm/atomic64-arcv2.h | 4 -- arch/arm/include/asm/atomic.h | 4 -- arch/x86/include/asm/atomic64_32.h | 4 -- include/asm-generic/atomic64.h | 4 -- include/linux/atomic/atomic-long.h | 4 +- include/linux/ktime.h | 3 - include/linux/list_bl.h | 7 --- include/linux/list_lru.h | 15 ----- include/linux/llist.h | 8 --- include/linux/mmzone.h | 3 - include/linux/plist.h | 10 --- include/linux/time64.h | 13 ---- include/linux/types.h | 90 ++++++++++++++++++++++++++- include/linux/uidgid.h | 9 --- include/linux/uuid.h | 6 -- 15 files changed, 89 insertions(+), 95 deletions(-) diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h index c5a8010fdc97..25f7ac390e57 100644 --- a/arch/arc/include/asm/atomic64-arcv2.h +++ b/arch/arc/include/asm/atomic64-arcv2.h @@ -8,10 +8,6 @@ #ifndef _ASM_ARC_ATOMIC64_ARCV2_H #define _ASM_ARC_ATOMIC64_ARCV2_H -typedef struct { - s64 __aligned(8) counter; -} atomic64_t; - #define ATOMIC64_INIT(a) { (a) } static inline s64 arch_atomic64_read(const atomic64_t *v) diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index db8512d9a918..df41a46a46e7 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -243,10 +243,6 @@ ATOMIC_OPS(xor, ^=, eor) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) #ifndef CONFIG_GENERIC_ATOMIC64 -typedef struct { - s64 counter; -} atomic64_t; - #define ATOMIC64_INIT(i) { (i) } #ifdef CONFIG_ARM_LPAE diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 5efd01b548d1..c74f58a719da 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -8,10 +8,6 @@ /* An 64bit atomic type */ -typedef struct { - s64 __aligned(8) counter; -} atomic64_t; - #define ATOMIC64_INIT(val) { (val) } #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) 
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index 100d24b02e52..a824626346a3 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h @@ -9,10 +9,6 @@ #define _ASM_GENERIC_ATOMIC64_H #include -typedef struct { - s64 counter; -} atomic64_t; - #define ATOMIC64_INIT(i) { (i) } extern s64 generic_atomic64_read(const atomic64_t *v); diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index 800b8c35992d..82a6971600c3 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -7,15 +7,13 @@ #define _LINUX_ATOMIC_LONG_H #include -#include +#include #ifdef CONFIG_64BIT -typedef atomic64_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) #define atomic_long_cond_read_acquire atomic64_cond_read_acquire #define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed #else -typedef atomic_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) #define atomic_long_cond_read_acquire atomic_cond_read_acquire #define atomic_long_cond_read_relaxed atomic_cond_read_relaxed diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 73f20deb497d..4bd945df3446 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -25,9 +25,6 @@ #include #include -/* Nanosecond scalar representation for kernel time values */ -typedef s64 ktime_t; - /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value * @secs: seconds to set diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index ae1b541446c9..d3d6038d748b 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -31,13 +31,6 @@ #endif -struct hlist_bl_head { - struct hlist_bl_node *first; -}; - -struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; -}; #define INIT_HLIST_BL_HEAD(ptr) \ ((ptr)->first = NULL) diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 1b5fceb565df..a2d175f0c3d5 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -25,12 +25,6 @@ enum lru_status { internally, but has to return locked. */ }; -struct list_lru_one { - struct list_head list; - /* may become negative during memcg reparenting */ - long nr_items; -}; - struct list_lru_memcg { struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ @@ -49,15 +43,6 @@ struct list_lru_node { long nr_items; } ____cacheline_aligned_in_smp; -struct list_lru { - struct list_lru_node *node; -#ifdef CONFIG_MEMCG_KMEM - struct list_head list; - int shrinker_id; - bool memcg_aware; -#endif -}; - void list_lru_destroy(struct list_lru *lru); int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key, struct shrinker *shrinker); diff --git a/include/linux/llist.h b/include/linux/llist.h index 85bda2d02d65..99cc3c30f79c 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -53,14 +53,6 @@ #include #include -struct llist_head { - struct llist_node *first; -}; - -struct llist_node { - struct llist_node *next; -}; - #define LLIST_HEAD_INIT(name) { NULL } #define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 58e744b78c2c..852fb61a0817 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -331,9 +331,6 @@ struct lruvec { /* Isolate unevictable pages */ #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) -/* LRU Isolation modes. 
*/ -typedef unsigned __bitwise isolate_mode_t; - enum zone_watermarks { WMARK_MIN, WMARK_LOW, diff --git a/include/linux/plist.h b/include/linux/plist.h index 0f352c1d3c80..7e236c166191 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -79,16 +79,6 @@ #include -struct plist_head { - struct list_head node_list; -}; - -struct plist_node { - int prio; - struct list_head prio_list; - struct list_head node_list; -}; - /** * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name diff --git a/include/linux/time64.h b/include/linux/time64.h index 81b9686a2079..7e323c97ca6c 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -5,21 +5,8 @@ #include #include -typedef __s64 time64_t; -typedef __u64 timeu64_t; - #include -struct timespec64 { - time64_t tv_sec; /* seconds */ - long tv_nsec; /* nanoseconds */ -}; - -struct itimerspec64 { - struct timespec64 it_interval; - struct timespec64 it_value; -}; - /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define TIME64_MIN (-TIME64_MAX - 1) diff --git a/include/linux/types.h b/include/linux/types.h index ac825ad90e44..390492c2a8a2 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -46,6 +46,14 @@ typedef __kernel_old_gid_t old_gid_t; typedef __kernel_loff_t loff_t; #endif +typedef struct { + uid_t val; +} kuid_t; + +typedef struct { + gid_t val; +} kgid_t; + /* * The following typedefs are also protected by individual ifdefs for * historical reasons: @@ -169,10 +177,14 @@ typedef struct { #define ATOMIC_INIT(i) { (i) } -#ifdef CONFIG_64BIT typedef struct { - s64 counter; + s64 __aligned(8) counter; } atomic64_t; + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#else +typedef atomic_t atomic_long_t; #endif struct list_head { @@ -187,6 +199,47 @@ struct hlist_node { struct hlist_node *next, **pprev; }; +struct hlist_bl_head { + struct hlist_bl_node *first; +}; + +struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; +}; + +struct llist_head { + struct llist_node *first; +}; + +struct llist_node { + struct llist_node *next; +}; + +struct list_lru_one { + struct list_head list; + /* may become negative during memcg reparenting */ + long nr_items; +}; + +struct list_lru { + struct list_lru_node *node; +#ifdef CONFIG_MEMCG_KMEM + struct list_head list; + int shrinker_id; + bool memcg_aware; +#endif +}; + +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + struct ustat { __kernel_daddr_t f_tfree; #ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE @@ -231,5 +284,38 @@ typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); typedef int (*cmp_func_t)(const void *a, const void *b); +#define UUID_SIZE 16 +typedef struct { + __u8 b[UUID_SIZE]; +} uuid_t; + +/* LRU Isolation modes. 
*/ +typedef unsigned __bitwise isolate_mode_t; + +enum pid_type +{ + PIDTYPE_PID, + PIDTYPE_TGID, + PIDTYPE_PGID, + PIDTYPE_SID, + PIDTYPE_MAX, +}; + +typedef __s64 time64_t; +typedef __u64 timeu64_t; + +struct timespec64 { + time64_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; + +struct itimerspec64 { + struct timespec64 it_interval; + struct timespec64 it_value; +}; + +/* Nanosecond scalar representation for kernel time values */ +typedef s64 ktime_t; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h index b0542cd11aeb..1f6e849173be 100644 --- a/include/linux/uidgid.h +++ b/include/linux/uidgid.h @@ -18,15 +18,6 @@ struct user_namespace; extern struct user_namespace init_user_ns; -typedef struct { - uid_t val; -} kuid_t; - - -typedef struct { - gid_t val; -} kgid_t; - #define KUIDT_INIT(value) (kuid_t){ value } #define KGIDT_INIT(value) (kgid_t){ value } diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 8cdc0d3567cd..4c078613facd 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -11,12 +11,6 @@ #include #include -#define UUID_SIZE 16 - -typedef struct { - __u8 b[UUID_SIZE]; -} uuid_t; - #define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ ((uuid_t) \ {{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ From patchwork Tue Dec 7 15:09:26 2021 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Arnd Bergmann X-Patchwork-Id: 12662059 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 47E93C433EF for ; Tue, 7 Dec 2021 15:10:27 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S238777AbhLGPNz (ORCPT ); Tue, 7 Dec 2021 10:13:55 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:54604 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S238716AbhLGPNh (ORCPT ); Tue, 7 Dec 2021 10:13:37 -0500 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E19AFC061D72; Tue, 7 Dec 2021 07:10:06 -0800 (PST) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id 67BF1B817F8; Tue, 7 Dec 2021 15:10:05 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1CDE9C341D2; Tue, 7 Dec 2021 15:09:58 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1638889804; bh=FYa5JWso4Ayhhd4BQ27/FnDrynmPXfLG/fOT+65DJ0w=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=I9gzWhvK/FYT2PYEG178pAkC16VSobPxyBczNZxEKldDeSXLSpmwtG0Q+/W2O5InQ P5fSM3kWT2flbgBi41bVhiGaFYzaxvGTXCpSlv7P6TMSgkvl1c/SbPWLm9QGZ0G74Q Vs2QE3X/HMlVXtQLxAsshpJitizMrlz8LN6sSMOzCFs6muyZ4+Js/2gFxiHynHL2Vs eiIoehTvNbno6BwJp8DPCPiiUT5xnVPcgXe1ielq6y9wO/HnNczYnB8/D2lS9OPvd7 FzsXsddPtMiC32sQLlPPYhh/5mEBCwh3tsuJC48oWfiD0DjG4uN4nj77kWLegSz51n drHJxG0SBdpeg== From: Arnd Bergmann To: linux-kernel@vger.kernel.org Cc: Arnd Bergmann , Al Viro , Andrew Morton , Guenter Roeck , Kees Cook , Linus Torvalds , Masahiro Yamada , Matthew Wilcox , Nathan Chancellor , Nick Desaulniers , Stephen Rothwell , kernel test 
robot , Ingo Molnar , Peter Zijlstra , Will Deacon , Waiman Long ,
 Boqun Feng , Thomas Gleixner , Tejun Heo , kernelci@groups.io,
 linux-fsdevel@vger.kernel.org, linux-kbuild@vger.kernel.org,
 llvm@lists.linux.dev
Subject: [RFC 2/3] headers: introduce linux/struct_types.h
Date: Tue, 7 Dec 2021 16:09:26 +0100
Message-Id: <20211207150927.3042197-3-arnd@kernel.org>
In-Reply-To: <20211207150927.3042197-1-arnd@kernel.org>
References: <20211207150927.3042197-1-arnd@kernel.org>
List-ID: 
X-Mailing-List: linux-kbuild@vger.kernel.org

From: Arnd Bergmann

Deeply nested recursive header includes cause a large compile-time overhead
for parsing an excessive number of macros and inline functions, and they
force rebuilding large parts of the kernel whenever a minor header gets
modified.

Most of the indirect header inclusions are done in order to import type
definitions for embedding structures within larger structures, but most files
that reference the larger structures do not care about the interfaces that
operate on the embedded structures.

Working towards a cleaner header structure, start by moving the most commonly
embedded structures into a single header file that itself has only a minimum
set of indirect includes. At the moment, this includes structures for

 - locking
 - timers
 - work queues
 - waitqueues
 - rcu
 - xarray
 - kobject
 - bio_vec

With these, we can build most of the high-level structures (mm, task, device,
skbuff, ...) while needing very few other headers, and in particular avoiding
the inclusion of large headers like linux/mm.h, linux/sched.h or
linux/spinlock.h that are fairly expensive to parse.

Once there is consensus on the basic approach, we can start with patches that
replace excessive indirect includes from other headers and just use this one
instead where possible. There are some tradeoffs to consider regarding how
far we take this one, as it is a departure from how we've always done it, but
it's also the most promising approach in my mind. I have stopped short of
including 'struct device' along with the rest, since that seemed a little too
big, but it is also a structure that gets embedded in many places. On the
other hand, separating the xarray, kobject and bio_vec structures from their
headers did seem to be clear wins. Alternatively, those could be moved into
newly added individual headers.
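[ Editorial illustration, not part of the patch: a header that only embeds
  these structures can include the new linux/struct_types.h for their layout
  instead of linux/mutex.h, linux/workqueue.h, linux/wait.h and linux/kref.h.
  The structure below is made up; a .c file that actually calls mutex_lock()
  or queue_work() still includes the full headers for those declarations. ]

#include <linux/struct_types.h>

struct example_device_state {			/* hypothetical */
	struct mutex		lock;		/* layout comes from struct_types.h */
	struct work_struct	refresh_work;
	wait_queue_head_t	waitq;
	struct kref		ref;
};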
Signed-off-by: Arnd Bergmann --- arch/alpha/include/asm/spinlock_types.h | 2 +- arch/arm/include/asm/spinlock_types.h | 2 +- arch/arm64/include/asm/spinlock_types.h | 2 +- arch/csky/include/asm/spinlock_types.h | 2 +- arch/hexagon/include/asm/spinlock_types.h | 2 +- arch/ia64/include/asm/spinlock_types.h | 2 +- .../include/asm/simple_spinlock_types.h | 2 +- arch/powerpc/include/asm/spinlock_types.h | 2 +- arch/riscv/include/asm/spinlock_types.h | 2 +- arch/s390/include/asm/spinlock_types.h | 2 +- arch/sh/include/asm/spinlock_types.h | 2 +- arch/xtensa/include/asm/spinlock_types.h | 2 +- include/linux/bitops.h | 6 - include/linux/bits.h | 6 + include/linux/bvec.h | 18 - include/linux/completion.h | 17 - include/linux/cpumask.h | 3 - include/linux/hrtimer.h | 32 -- include/linux/kobject.h | 18 - include/linux/kref.h | 4 - include/linux/mutex.h | 51 -- include/linux/osq_lock.h | 8 - include/linux/percpu-rwsem.h | 11 - include/linux/pid.h | 9 - include/linux/rcu_sync.h | 9 - include/linux/rcuwait.h | 12 - include/linux/refcount.h | 12 - include/linux/rtmutex.h | 8 +- include/linux/rwbase_rt.h | 5 - include/linux/rwlock_types.h | 19 - include/linux/rwsem.h | 40 -- include/linux/seqlock.h | 31 -- include/linux/spinlock_types.h | 21 - include/linux/spinlock_types_raw.h | 21 +- include/linux/spinlock_types_up.h | 2 +- include/linux/struct_types.h | 483 ++++++++++++++++++ include/linux/swait.h | 12 - include/linux/timer.h | 16 +- include/linux/timerqueue.h | 12 +- include/linux/wait.h | 29 -- include/linux/workqueue.h | 27 - include/linux/xarray.h | 23 - 42 files changed, 506 insertions(+), 483 deletions(-) create mode 100644 include/linux/struct_types.h diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 1d5716bc060b..7cd2515169cf 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef _ALPHA_SPINLOCK_TYPES_H #define _ALPHA_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 5976958647fe..104ebab73185 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h index 18782f0c4721..f2bba81671e0 100644 --- a/arch/arm64/include/asm/spinlock_types.h +++ b/arch/arm64/include/asm/spinlock_types.h @@ -5,7 +5,7 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) +#if !defined(__LINUX_STRUCT_TYPES_H) && !defined(__ASM_SPINLOCK_H) # error "please don't include this file directly" #endif diff --git a/arch/csky/include/asm/spinlock_types.h b/arch/csky/include/asm/spinlock_types.h index 8ff0f6ff3a00..bb1fb62426aa 100644 --- a/arch/csky/include/asm/spinlock_types.h +++ b/arch/csky/include/asm/spinlock_types.h @@ -3,7 +3,7 @@ #ifndef __ASM_CSKY_SPINLOCK_TYPES_H #define __ASM_CSKY_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/hexagon/include/asm/spinlock_types.h 
b/arch/hexagon/include/asm/spinlock_types.h index 19d233497ba5..086f15d76833 100644 --- a/arch/hexagon/include/asm/spinlock_types.h +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -8,7 +8,7 @@ #ifndef _ASM_SPINLOCK_TYPES_H #define _ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 6e345fefcdca..3bbddc8b04d3 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef _ASM_IA64_SPINLOCK_TYPES_H #define _ASM_IA64_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h index 0f3cdd8faa95..4afc9cb919c5 100644 --- a/arch/powerpc/include/asm/simple_spinlock_types.h +++ b/arch/powerpc/include/asm/simple_spinlock_types.h @@ -2,7 +2,7 @@ #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index c5d742f18021..2264b4047799 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H #define _ASM_POWERPC_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h index f398e7638dd6..e3c2fec0fa41 100644 --- a/arch/riscv/include/asm/spinlock_types.h +++ b/arch/riscv/include/asm/spinlock_types.h @@ -6,7 +6,7 @@ #ifndef _ASM_RISCV_SPINLOCK_TYPES_H #define _ASM_RISCV_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index a2bbfd7df85f..1f48a5f0e591 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index e82369f286a2..8ec6d822057b 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef __ASM_SH_SPINLOCK_TYPES_H #define __ASM_SH_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h index 64c9389254f1..33e43b619392 100644 --- a/arch/xtensa/include/asm/spinlock_types.h +++ b/arch/xtensa/include/asm/spinlock_types.h @@ -2,7 +2,7 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) +#if !defined(__LINUX_STRUCT_TYPES_H) && !defined(__ASM_SPINLOCK_H) # error "please don't include this file directly" #endif diff --git 
a/include/linux/bitops.h b/include/linux/bitops.h index 7aaed501f768..d7dbc762c45f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -15,12 +15,6 @@ # define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n))) #endif -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) -#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) -#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) -#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) -#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) - extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w); diff --git a/include/linux/bits.h b/include/linux/bits.h index 87d112650dfb..bae942e9427e 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -13,6 +13,12 @@ #define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) #define BITS_PER_BYTE 8 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long)) +#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64)) +#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) +#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) + /* * Create a contiguous bitmask starting at bit position @l and ending at * position @h. For example diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 35c25dff651a..74863e68e115 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -17,24 +17,6 @@ struct page; -/** - * struct bio_vec - a contiguous range of physical memory addresses - * @bv_page: First page associated with the address range. - * @bv_len: Number of bytes in the address range. - * @bv_offset: Start of the address range relative to the start of @bv_page. - * - * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: - * - * nth_page(@bv_page, n) == @bv_page + n - * - * This holds because page_is_mergeable() checks the above property. - */ -struct bio_vec { - struct page *bv_page; - unsigned int bv_len; - unsigned int bv_offset; -}; - struct bvec_iter { sector_t bi_sector; /* device address in 512 byte sectors */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 51d9ab079629..29917878e650 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -11,23 +11,6 @@ #include -/* - * struct completion - structure used to maintain state for a "completion" - * - * This is the opaque structure used to maintain the state for a "completion". - * Completions currently use a FIFO to queue threads that have to wait for - * the "completion" event. - * - * See also: complete(), wait_for_completion() (and friends _timeout, - * _interruptible, _interruptible_timeout, and _killable), init_completion(), - * reinit_completion(), and macros DECLARE_COMPLETION(), - * DECLARE_COMPLETION_ONSTACK(). - */ -struct completion { - unsigned int done; - struct swait_queue_head wait; -}; - #define init_completion_map(x, m) init_completion(x) static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 64dae70d31f5..d0333b7b958e 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -13,9 +13,6 @@ #include #include -/* Don't assign or return these: may not be this big! 
*/ -typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; - /** * cpumask_bits - get the bits in a cpumask * @maskp: the struct cpumask * diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 0ee140176f10..a60d7eda9709 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -94,38 +94,6 @@ enum hrtimer_restart { #define HRTIMER_STATE_INACTIVE 0x00 #define HRTIMER_STATE_ENQUEUED 0x01 -/** - * struct hrtimer - the basic hrtimer structure - * @node: timerqueue node, which also manages node.expires, - * the absolute expiry time in the hrtimers internal - * representation. The time is related to the clock on - * which the timer is based. Is setup by adding - * slack to the _softexpires value. For non range timers - * identical to _softexpires. - * @_softexpires: the absolute earliest expiry time of the hrtimer. - * The time which was given as expiry time when the timer - * was armed. - * @function: timer expiry callback function - * @base: pointer to the timer base (per cpu and per clock) - * @state: state information (See bit values above) - * @is_rel: Set if the timer was armed relative - * @is_soft: Set if hrtimer will be expired in soft interrupt context. - * @is_hard: Set if hrtimer will be expired in hard interrupt context - * even on RT. - * - * The hrtimer structure must be initialized by hrtimer_init() - */ -struct hrtimer { - struct timerqueue_node node; - ktime_t _softexpires; - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - u8 state; - u8 is_rel; - u8 is_soft; - u8 is_hard; -}; - /** * struct hrtimer_sleeper - simple sleeper structure * @timer: embedded timer structure diff --git a/include/linux/kobject.h b/include/linux/kobject.h index c740062b4b1a..9d33cf8a468f 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -61,24 +61,6 @@ enum kobject_action { KOBJ_UNBIND, }; -struct kobject { - const char *name; - struct list_head entry; - struct kobject *parent; - struct kset *kset; - struct kobj_type *ktype; - struct kernfs_node *sd; /* sysfs directory entry */ - struct kref kref; -#ifdef CONFIG_DEBUG_KOBJECT_RELEASE - struct delayed_work release; -#endif - unsigned int state_initialized:1; - unsigned int state_in_sysfs:1; - unsigned int state_add_uevent_sent:1; - unsigned int state_remove_uevent_sent:1; - unsigned int uevent_suppress:1; -}; - extern __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...); extern __printf(2, 0) diff --git a/include/linux/kref.h b/include/linux/kref.h index d32e21a2538c..d926b2ec2d14 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -16,10 +16,6 @@ #include #include -struct kref { - refcount_t refcount; -}; - #define KREF_INIT(n) { .refcount = REFCOUNT_INIT(n), } /** diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 8f226d460f51..e8e0a352128b 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -31,50 +31,6 @@ #endif #ifndef CONFIG_PREEMPT_RT - -/* - * Simple, straightforward mutexes with strict semantics: - * - * - only one task can hold the mutex at a time - * - only the owner can unlock the mutex - * - multiple unlocks are not permitted - * - recursive locking is not permitted - * - a mutex object must be initialized via the API - * - a mutex object must not be initialized via memset or copying - * - task may not exit with mutex held - * - memory areas where held locks reside must not be freed - * - held mutexes must not be reinitialized - * - mutexes may not be used in hardware or 
software interrupt - * contexts such as tasklets and timers - * - * These semantics are fully enforced when DEBUG_MUTEXES is - * enabled. Furthermore, besides enforcing the above rules, the mutex - * debugging code also implements a number of additional features - * that make lock debugging easier and faster: - * - * - uses symbolic names of mutexes, whenever they are printed in debug output - * - point-of-acquire tracking, symbolic lookup of function names - * - list of all locks held in the system, printout of them - * - owner tracking - * - detects self-recursing locks and prints out all relevant info - * - detects multi-task circular deadlocks and prints out all affected - * locks and tasks (and only those tasks) - */ -struct mutex { - atomic_long_t owner; - raw_spinlock_t wait_lock; -#ifdef CONFIG_MUTEX_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* Spinner MCS lock */ -#endif - struct list_head wait_list; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -132,13 +88,6 @@ extern bool mutex_is_locked(struct mutex *lock); */ #include -struct mutex { - struct rt_mutex_base rtmutex; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #define __MUTEX_INITIALIZER(mutexname) \ { \ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h index 5581dbd3bd34..782136e5fcf6 100644 --- a/include/linux/osq_lock.h +++ b/include/linux/osq_lock.h @@ -12,14 +12,6 @@ struct optimistic_spin_node { int cpu; /* encoded CPU # + 1 value */ }; -struct optimistic_spin_queue { - /* - * Stores an encoded value of the CPU # of the tail node in the queue. - * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL. - */ - atomic_t tail; -}; - #define OSQ_UNLOCKED_VAL (0) /* Init macro and function. */ diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 5fda40f97fe9..b9f611997124 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -9,17 +9,6 @@ #include #include -struct percpu_rw_semaphore { - struct rcu_sync rss; - unsigned int __percpu *read_count; - struct rcuwait writer; - wait_queue_head_t waiters; - atomic_t block; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }, #else diff --git a/include/linux/pid.h b/include/linux/pid.h index 343abf22092e..5ca6e1efaeeb 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -6,15 +6,6 @@ #include #include -enum pid_type -{ - PIDTYPE_PID, - PIDTYPE_TGID, - PIDTYPE_PGID, - PIDTYPE_SID, - PIDTYPE_MAX, -}; - /* * What is struct pid? * diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index 0027d4c8087c..c4f8a765481e 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -13,15 +13,6 @@ #include #include -/* Structure to mediate between updaters and fastpath-using readers. */ -struct rcu_sync { - int gp_state; - int gp_count; - wait_queue_head_t gp_wait; - - struct rcu_head cb_head; -}; - /** * rcu_sync_is_idle() - Are readers permitted to use their fastpaths? 
* @rsp: Pointer to rcu_sync structure to use for synchronization diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 61c56cca95c4..4e8fdf939dd8 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -5,18 +5,6 @@ #include #include -/* - * rcuwait provides a way of blocking and waking up a single - * task in an rcu-safe manner. - * - * The only time @task is non-nil is when a user is blocked (or - * checking if it needs to) on a condition, and reset as soon as we - * know that the condition has succeeded and are awoken. - */ -struct rcuwait { - struct task_struct __rcu *task; -}; - #define __RCUWAIT_INITIALIZER(name) \ { .task = NULL, } diff --git a/include/linux/refcount.h b/include/linux/refcount.h index b8a6e387f8f9..63c7b8c9e4e8 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -100,18 +100,6 @@ struct mutex; -/** - * typedef refcount_t - variant of atomic_t specialized for reference counts - * @refs: atomic_t counter field - * - * The counter saturates at REFCOUNT_SATURATED and will not move once - * there. This avoids wrapping the counter and causing 'spurious' - * use-after-free bugs. - */ -typedef struct refcount_struct { - atomic_t refs; -} refcount_t; - #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } #define REFCOUNT_MAX INT_MAX #define REFCOUNT_SATURATED (INT_MIN / 2) diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 9deedfeec2b1..0b071bd6805d 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -15,17 +15,11 @@ #include #include -#include +#include #include extern int max_lock_depth; /* for sysctl */ -struct rt_mutex_base { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; -}; - #define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ { \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h index 1d264dd08625..7a0bec89974e 100644 --- a/include/linux/rwbase_rt.h +++ b/include/linux/rwbase_rt.h @@ -8,11 +8,6 @@ #define READER_BIAS (1U << 31) #define WRITER_BIAS (1U << 30) -struct rwbase_rt { - atomic_t readers; - struct rt_mutex_base rtmutex; -}; - #define __RWBASE_INITIALIZER(name) \ { \ .readers = ATOMIC_INIT(READER_BIAS), \ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 1948442e7750..8675a7b10688 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -22,17 +22,6 @@ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). */ -typedef struct { - arch_rwlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - #define RWLOCK_MAGIC 0xdeaf1eed #ifdef CONFIG_DEBUG_SPINLOCK @@ -54,14 +43,6 @@ typedef struct { #include -typedef struct { - struct rwbase_rt rwbase; - atomic_t readers; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} rwlock_t; - #define __RWLOCK_RT_INITIALIZER(name) \ { \ .rwbase = __RWBASE_INITIALIZER(name), \ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f9348769e558..462cda939c4e 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -32,39 +32,6 @@ #include #endif -/* - * For an uncontended rwsem, count and owner are the only fields a task - * needs to touch when acquiring the rwsem. 
So they are put next to each - * other to increase the chance that they will share the same cacheline. - * - * In a contended rwsem, the owner is likely the most frequently accessed - * field in the structure as the optimistic waiter that holds the osq lock - * will spin on owner. For an embedded rwsem, other hot fields in the - * containing structure should be moved further away from the rwsem to - * reduce the chance that they will share the same cacheline causing - * cacheline bouncing problem. - */ -struct rw_semaphore { - atomic_long_t count; - /* - * Write owner or one of the read owners as well flags regarding - * the current state of the rwsem. Can be used as a speculative - * check to see if the write owner is running on the cpu. - */ - atomic_long_t owner; -#ifdef CONFIG_RWSEM_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* spinner MCS lock */ -#endif - raw_spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_RWSEMS - void *magic; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { @@ -125,13 +92,6 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) #include -struct rw_semaphore { - struct rwbase_rt rwbase; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - #define __RWSEM_INITIALIZER(name) \ { \ .rwbase = __RWBASE_INITIALIZER(name), \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 37ded6b8fee6..622cba2a45ff 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -38,37 +38,6 @@ */ #define KCSAN_SEQLOCK_REGION_MAX 1000 -/* - * Sequence counters (seqcount_t) - * - * This is the raw counting mechanism, without any writer protection. - * - * Write side critical sections must be serialized and non-preemptible. - * - * If readers can be invoked from hardirq or softirq contexts, - * interrupts or bottom halves must also be respectively disabled before - * entering the write section. - * - * This mechanism can't be used if the protected data contains pointers, - * as the writer can invalidate a pointer that a reader is following. - * - * If the write serialization mechanism is one of the common kernel - * locking primitives, use a sequence counter with associated lock - * (seqcount_LOCKNAME_t) instead. - * - * If it's desired to automatically handle the sequence counter writer - * serialization and non-preemptibility requirements, use a sequential - * lock (seqlock_t) instead. 
- * - * See Documentation/locking/seqlock.rst - */ -typedef struct seqcount { - unsigned sequence; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} seqcount_t; - static inline void __seqcount_init(seqcount_t *s, const char *name, struct lock_class_key *key) { diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 2dfa35ffec76..c1f481d334ae 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -14,20 +14,6 @@ #ifndef CONFIG_PREEMPT_RT /* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ -typedef struct spinlock { - union { - struct raw_spinlock rlock; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) - struct { - u8 __padding[LOCK_PADSIZE]; - struct lockdep_map dep_map; - }; -#endif - }; -} spinlock_t; - #define ___SPIN_LOCK_INITIALIZER(lockname) \ { \ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ @@ -47,13 +33,6 @@ typedef struct spinlock { /* PREEMPT_RT kernels map spinlock to rt_mutex */ #include -typedef struct spinlock { - struct rt_mutex_base lock; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} spinlock_t; - #define __SPIN_LOCK_UNLOCKED(name) \ { \ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h index 91cb36b65a17..f6a1737f2ab8 100644 --- a/include/linux/spinlock_types_raw.h +++ b/include/linux/spinlock_types_raw.h @@ -1,26 +1,7 @@ #ifndef __LINUX_SPINLOCK_TYPES_RAW_H #define __LINUX_SPINLOCK_TYPES_RAW_H -#include - -#if defined(CONFIG_SMP) -# include -#else -# include -#endif - -#include - -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; +#include #define SPINLOCK_MAGIC 0xdead4ead diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index c09b6407ae1b..5b0cf1d86105 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -1,7 +1,7 @@ #ifndef __LINUX_SPINLOCK_TYPES_UP_H #define __LINUX_SPINLOCK_TYPES_UP_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#ifndef __LINUX_STRUCT_TYPES_H # error "please don't include this file directly" #endif diff --git a/include/linux/struct_types.h b/include/linux/struct_types.h new file mode 100644 index 000000000000..5a06849fd347 --- /dev/null +++ b/include/linux/struct_types.h @@ -0,0 +1,483 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_STRUCT_TYPES_H +#define __LINUX_STRUCT_TYPES_H +/* + * This header includes data structures that build on top of + * the plain types from linux/types.h and that are commonly + * embedded within other structures in the kernel. + * + * By keeping these in one place that has a minimum set of + * indirect includes, we can avoid deeply nested include + * hierarchies that slow down the build and cause frequent + * recompiles after header changes. + * + * Be careful about including further headers here. + */ + +#include +#include +#include +#include +#include + +#if defined(CONFIG_SMP) +# include +#else +# include +#endif + +/** + * typedef refcount_t - variant of atomic_t specialized for reference counts + * @refs: atomic_t counter field + * + * The counter saturates at REFCOUNT_SATURATED and will not move once + * there. This avoids wrapping the counter and causing 'spurious' + * use-after-free bugs. 
+ */ +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +struct kref { + refcount_t refcount; +}; + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +struct task_struct; +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +#ifndef CONFIG_PREEMPT_RT +/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; +#else +/* PREEMPT_RT kernels map spinlock to rt_mutex */ +typedef struct spinlock { + struct rt_mutex_base lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; +#endif + +#ifndef CONFIG_PREEMPT_RT +typedef struct { + arch_rwlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; +#else +struct rwbase_rt { + atomic_t readers; + struct rt_mutex_base rtmutex; +}; + +typedef struct { + struct rwbase_rt rwbase; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; +#endif + +struct optimistic_spin_queue { + /* + * Stores an encoded value of the CPU # of the tail node in the queue. + * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL. + */ + atomic_t tail; +}; + +#ifndef CONFIG_PREEMPT_RT +/* + * Simple, straightforward mutexes with strict semantics: + * + * - only one task can hold the mutex at a time + * - only the owner can unlock the mutex + * - multiple unlocks are not permitted + * - recursive locking is not permitted + * - a mutex object must be initialized via the API + * - a mutex object must not be initialized via memset or copying + * - task may not exit with mutex held + * - memory areas where held locks reside must not be freed + * - held mutexes must not be reinitialized + * - mutexes may not be used in hardware or software interrupt + * contexts such as tasklets and timers + * + * These semantics are fully enforced when DEBUG_MUTEXES is + * enabled. 
Furthermore, besides enforcing the above rules, the mutex + * debugging code also implements a number of additional features + * that make lock debugging easier and faster: + * + * - uses symbolic names of mutexes, whenever they are printed in debug output + * - point-of-acquire tracking, symbolic lookup of function names + * - list of all locks held in the system, printout of them + * - owner tracking + * - detects self-recursing locks and prints out all relevant info + * - detects multi-task circular deadlocks and prints out all affected + * locks and tasks (and only those tasks) + */ +struct mutex { + atomic_long_t owner; + raw_spinlock_t wait_lock; +#ifdef CONFIG_MUTEX_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* Spinner MCS lock */ +#endif + struct list_head wait_list; +#ifdef CONFIG_DEBUG_MUTEXES + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; +#else +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#endif + +/* + * Sequence counters (seqcount_t) + * + * This is the raw counting mechanism, without any writer protection. + * + * Write side critical sections must be serialized and non-preemptible. + * + * If readers can be invoked from hardirq or softirq contexts, + * interrupts or bottom halves must also be respectively disabled before + * entering the write section. + * + * This mechanism can't be used if the protected data contains pointers, + * as the writer can invalidate a pointer that a reader is following. + * + * If the write serialization mechanism is one of the common kernel + * locking primitives, use a sequence counter with associated lock + * (seqcount_LOCKNAME_t) instead. + * + * If it's desired to automatically handle the sequence counter writer + * serialization and non-preemptibility requirements, use a sequential + * lock (seqlock_t) instead. 
+ * + * See Documentation/locking/seqlock.rst + */ +typedef struct seqcount { + unsigned sequence; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} seqcount_t; + +typedef struct wait_queue_entry wait_queue_entry_t; + +typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); +int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); + +/* wait_queue_entry::flags */ +#define WQ_FLAG_EXCLUSIVE 0x01 +#define WQ_FLAG_WOKEN 0x02 +#define WQ_FLAG_BOOKMARK 0x04 +#define WQ_FLAG_CUSTOM 0x08 +#define WQ_FLAG_DONE 0x10 +#define WQ_FLAG_PRIORITY 0x20 + +/* + * A single wait-queue entry structure: + */ +struct wait_queue_entry { + unsigned int flags; + void *private; + wait_queue_func_t func; + struct list_head entry; +}; + +struct wait_queue_head { + spinlock_t lock; + struct list_head head; +}; +typedef struct wait_queue_head wait_queue_head_t; + +struct swait_queue_head { + raw_spinlock_t lock; + struct list_head task_list; +}; + +struct swait_queue { + struct task_struct *task; + struct list_head task_list; +}; + +struct timer_list { + /* + * All fields that change during normal runtime grouped to the + * same cacheline + */ + struct hlist_node entry; + unsigned long expires; + void (*function)(struct timer_list *); + u32 flags; + +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +struct timerqueue_node { + struct rb_node node; + ktime_t expires; +}; + +struct timerqueue_head { + struct rb_root_cached rb_root; +}; + +/** + * struct hrtimer - the basic hrtimer structure + * @node: timerqueue node, which also manages node.expires, + * the absolute expiry time in the hrtimers internal + * representation. The time is related to the clock on + * which the timer is based. Is setup by adding + * slack to the _softexpires value. For non range timers + * identical to _softexpires. + * @_softexpires: the absolute earliest expiry time of the hrtimer. + * The time which was given as expiry time when the timer + * was armed. + * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and per clock) + * @state: state information (See bit values above) + * @is_rel: Set if the timer was armed relative + * @is_soft: Set if hrtimer will be expired in soft interrupt context. + * @is_hard: Set if hrtimer will be expired in hard interrupt context + * even on RT. + * + * The hrtimer structure must be initialized by hrtimer_init() + */ +struct hrtimer { + struct timerqueue_node node; + ktime_t _softexpires; + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; + u8 is_rel; + u8 is_soft; + u8 is_hard; +}; + + +/* + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * reinit_completion(), and macros DECLARE_COMPLETION(), + * DECLARE_COMPLETION_ONSTACK(). 
+ */ +struct completion { + unsigned int done; + struct swait_queue_head wait; +}; + +struct work_struct; +typedef void (*work_func_t)(struct work_struct *work); +struct work_struct { + atomic_long_t data; + struct list_head entry; + work_func_t func; +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif +}; + +struct delayed_work { + struct work_struct work; + struct timer_list timer; + + /* target workqueue and CPU ->timer uses to queue ->work */ + struct workqueue_struct *wq; + int cpu; +}; + +struct rcu_work { + struct work_struct work; + struct rcu_head rcu; + + /* target workqueue ->rcu uses to queue ->work */ + struct workqueue_struct *wq; +}; + +/* Structure to mediate between updaters and fastpath-using readers. */ +struct rcu_sync { + int gp_state; + int gp_count; + wait_queue_head_t gp_wait; + struct rcu_head cb_head; +}; + +/* + * rcuwait provides a way of blocking and waking up a single + * task in an rcu-safe manner. + * + * The only time @task is non-nil is when a user is blocked (or + * checking if it needs to) on a condition, and reset as soon as we + * know that the condition has succeeded and are awoken. + */ +struct rcuwait { + struct task_struct __rcu *task; +}; + +#ifndef CONFIG_PREEMPT_RT +/* + * For an uncontended rwsem, count and owner are the only fields a task + * needs to touch when acquiring the rwsem. So they are put next to each + * other to increase the chance that they will share the same cacheline. + * + * In a contended rwsem, the owner is likely the most frequently accessed + * field in the structure as the optimistic waiter that holds the osq lock + * will spin on owner. For an embedded rwsem, other hot fields in the + * containing structure should be moved further away from the rwsem to + * reduce the chance that they will share the same cacheline causing + * cacheline bouncing problem. + */ +struct rw_semaphore { + atomic_long_t count; + /* + * Write owner or one of the read owners as well flags regarding + * the current state of the rwsem. Can be used as a speculative + * check to see if the write owner is running on the cpu. + */ + atomic_long_t owner; +#ifdef CONFIG_RWSEM_SPIN_ON_OWNER + struct optimistic_spin_queue osq; /* spinner MCS lock */ +#endif + raw_spinlock_t wait_lock; + struct list_head wait_list; +#ifdef CONFIG_DEBUG_RWSEMS + void *magic; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; +#else +struct rw_semaphore { + struct rwbase_rt rwbase; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; +#endif + +struct percpu_rw_semaphore { + struct rcu_sync rss; + unsigned int __percpu *read_count; + struct rcuwait writer; + wait_queue_head_t waiters; + atomic_t block; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +/** + * struct xarray - The anchor of the XArray. + * @xa_lock: Lock that protects the contents of the XArray. + * + * To use the xarray, define it statically or embed it in your data structure. + * It is a very small data structure, so it does not usually make sense to + * allocate it separately and keep a pointer to it in your data structure. + * + * You may use the xa_lock to protect your own data structures as well. + */ +/* + * If all of the entries in the array are NULL, @xa_head is a NULL pointer. + * If the only non-NULL entry in the array is at index 0, @xa_head is that + * entry. If any other entry in the array is non-NULL, @xa_head points + * to an @xa_node. 
+ */ +struct xarray { + spinlock_t xa_lock; +/* private: The rest of the data structure is not to be used directly. */ + gfp_t xa_flags; + void __rcu * xa_head; +}; + +/** + * struct bio_vec - a contiguous range of physical memory addresses + * @bv_page: First page associated with the address range. + * @bv_len: Number of bytes in the address range. + * @bv_offset: Start of the address range relative to the start of @bv_page. + * + * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: + * + * nth_page(@bv_page, n) == @bv_page + n + * + * This holds because page_is_mergeable() checks the above property. + */ +struct bio_vec { + struct page *bv_page; + unsigned int bv_len; + unsigned int bv_offset; +}; +typedef struct bio_vec skb_frag_t; + +/* Don't assign or return these: may not be this big! */ +typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; + +struct kobject { + const char *name; + struct list_head entry; + struct kobject *parent; + struct kset *kset; + struct kobj_type *ktype; + struct kernfs_node *sd; /* sysfs directory entry */ + struct kref kref; +#ifdef CONFIG_DEBUG_KOBJECT_RELEASE + struct delayed_work release; +#endif + unsigned int state_initialized:1; + unsigned int state_in_sysfs:1; + unsigned int state_add_uevent_sent:1; + unsigned int state_remove_uevent_sent:1; + unsigned int uevent_suppress:1; +}; + +#endif /* __LINUX_STRUCT_TYPES_H */ diff --git a/include/linux/swait.h b/include/linux/swait.h index 6a8c22b8c2a5..d7798752922d 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -38,18 +38,6 @@ * wait queues in most cases. */ -struct task_struct; - -struct swait_queue_head { - raw_spinlock_t lock; - struct list_head task_list; -}; - -struct swait_queue { - struct task_struct *task; - struct list_head task_list; -}; - #define __SWAITQUEUE_INITIALIZER(name) { \ .task = current, \ .task_list = LIST_HEAD_INIT((name).task_list), \ diff --git a/include/linux/timer.h b/include/linux/timer.h index fda13c9d1256..a562663d4947 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,21 +7,7 @@ #include #include #include - -struct timer_list { - /* - * All fields that change during normal runtime grouped to the - * same cacheline - */ - struct hlist_node entry; - unsigned long expires; - void (*function)(struct timer_list *); - u32 flags; - -#ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; -#endif -}; +#include #ifdef CONFIG_LOCKDEP /* diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 93884086f392..d2a4e03ec3ec 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -4,17 +4,7 @@ #include #include - - -struct timerqueue_node { - struct rb_node node; - ktime_t expires; -}; - -struct timerqueue_head { - struct rb_root_cached rb_root; -}; - +#include extern bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node); diff --git a/include/linux/wait.h b/include/linux/wait.h index 2d0df57c9902..020bc1cf89a5 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -11,35 +11,6 @@ #include #include -typedef struct wait_queue_entry wait_queue_entry_t; - -typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); -int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key); - -/* wait_queue_entry::flags */ -#define WQ_FLAG_EXCLUSIVE 0x01 -#define WQ_FLAG_WOKEN 0x02 -#define WQ_FLAG_BOOKMARK 0x04 -#define WQ_FLAG_CUSTOM 0x08 -#define WQ_FLAG_DONE 0x10 -#define 
WQ_FLAG_PRIORITY 0x20 - -/* - * A single wait-queue entry structure: - */ -struct wait_queue_entry { - unsigned int flags; - void *private; - wait_queue_func_t func; - struct list_head entry; -}; - -struct wait_queue_head { - spinlock_t lock; - struct list_head head; -}; -typedef struct wait_queue_head wait_queue_head_t; - struct task_struct; /* diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 7fee9b6cfede..4e4eba8de156 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -18,7 +18,6 @@ struct workqueue_struct; struct work_struct; -typedef void (*work_func_t)(struct work_struct *work); void delayed_work_timer_fn(struct timer_list *t); /* @@ -94,36 +93,10 @@ enum { WORKER_DESC_LEN = 24, }; -struct work_struct { - atomic_long_t data; - struct list_head entry; - work_func_t func; -#ifdef CONFIG_LOCKDEP - struct lockdep_map lockdep_map; -#endif -}; - #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) #define WORK_DATA_STATIC_INIT() \ ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)) -struct delayed_work { - struct work_struct work; - struct timer_list timer; - - /* target workqueue and CPU ->timer uses to queue ->work */ - struct workqueue_struct *wq; - int cpu; -}; - -struct rcu_work { - struct work_struct work; - struct rcu_head rcu; - - /* target workqueue ->rcu uses to queue ->work */ - struct workqueue_struct *wq; -}; - /** * struct workqueue_attrs - A struct for workqueue attributes. * diff --git a/include/linux/xarray.h b/include/linux/xarray.h index a91e3d90df8a..4f1e55074ef0 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -275,29 +275,6 @@ enum xa_lock_type { #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) #define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) -/** - * struct xarray - The anchor of the XArray. - * @xa_lock: Lock that protects the contents of the XArray. - * - * To use the xarray, define it statically or embed it in your data structure. - * It is a very small data structure, so it does not usually make sense to - * allocate it separately and keep a pointer to it in your data structure. - * - * You may use the xa_lock to protect your own data structures as well. - */ -/* - * If all of the entries in the array are NULL, @xa_head is a NULL pointer. - * If the only non-NULL entry in the array is at index 0, @xa_head is that - * entry. If any other entry in the array is non-NULL, @xa_head points - * to an @xa_node. - */ -struct xarray { - spinlock_t xa_lock; -/* private: The rest of the data structure is not to be used directly. 
*/ - gfp_t xa_flags; - void __rcu * xa_head; -}; - #define XARRAY_INIT(name, flags) { \ .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ .xa_flags = flags, \ From patchwork Tue Dec 7 15:09:27 2021 X-Patchwork-Submitter: Arnd Bergmann X-Patchwork-Id: 12662061 From: Arnd Bergmann To: linux-kernel@vger.kernel.org Cc: Arnd Bergmann , Al Viro , Andrew Morton , Guenter Roeck , Kees Cook , Linus Torvalds , Masahiro Yamada , Matthew Wilcox , Nathan Chancellor , Nick Desaulniers , Stephen Rothwell , kernel test robot , Ingo Molnar , Peter Zijlstra , Will Deacon , Waiman Long , Boqun Feng , Thomas Gleixner , Tejun Heo , kernelci@groups.io, linux-fsdevel@vger.kernel.org, linux-kbuild@vger.kernel.org, llvm@lists.linux.dev Subject: [RFC 3/3] headers: repurpose linux/fs_types.h Date: Tue, 7 Dec 2021 16:09:27 +0100 Message-Id: <20211207150927.3042197-4-arnd@kernel.org> X-Mailer: git-send-email 2.29.2 In-Reply-To: <20211207150927.3042197-1-arnd@kernel.org> References: <20211207150927.3042197-1-arnd@kernel.org> From: Arnd Bergmann linux/fs_types.h traditionally describes the types of file systems we deal with, but the file name could also be interpreted to refer to data types used for interacting with file systems, similar to linux/spinlock_types.h or linux/mm_types.h. Splitting out the data type definitions from the generic header helps avoid excessive indirect include hierarchies, so steal this file name and repurpose it to contain the definitions for file, inode, address_space, super_block, file_lock, quota and filename, along with their respective callback operations, moving them out of linux/fs.h.
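For illustration, a downstream header that embeds one of these objects, rather than only passing a pointer to it, can now pull in the small header instead of all of linux/fs.h. The "foo" subsystem below is made up and only sketches the intended usage; it is not part of this series:

	#ifndef _FOO_REQUEST_H
	#define _FOO_REQUEST_H

	#include <linux/fs_types.h>	/* struct kiocb, struct iattr */

	struct foo_request {
		struct kiocb	iocb;	/* embedded, so the complete type
					 * definition is needed, not just a
					 * forward declaration */
		struct iattr	attr;
	};

	#endif /* _FOO_REQUEST_H */

Headers that only ever use pointers to these structures can keep relying on plain forward declarations and do not need to include either file.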
The preprocessed linux/fs_types.h is now about 50KB, compared to over 1MB for the traditional linux/fs.h, and can be included from most other headers that currently rely on type definitions from linux/fs.h. Signed-off-by: Arnd Bergmann --- include/linux/fs.h | 1151 +---------------------------------- include/linux/fs_types.h | 1225 +++++++++++++++++++++++++++++++++++++- include/linux/quota.h | 29 - 3 files changed, 1227 insertions(+), 1178 deletions(-) diff --git a/include/linux/fs.h b/include/linux/fs.h index 9617dea24978..dca9a2c073c5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2,6 +2,7 @@ #ifndef _LINUX_FS_H #define _LINUX_FS_H +#include #include #include #include @@ -36,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -82,203 +82,11 @@ extern void __init files_maxfiles_init(void); extern unsigned long get_max_files(void); extern unsigned int sysctl_nr_open; -typedef __kernel_rwf_t rwf_t; - -struct buffer_head; -typedef int (get_block_t)(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create); -typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, - ssize_t bytes, void *private); - -#define MAY_EXEC 0x00000001 -#define MAY_WRITE 0x00000002 -#define MAY_READ 0x00000004 -#define MAY_APPEND 0x00000008 -#define MAY_ACCESS 0x00000010 -#define MAY_OPEN 0x00000020 -#define MAY_CHDIR 0x00000040 -/* called from RCU mode, don't block */ -#define MAY_NOT_BLOCK 0x00000080 - -/* - * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond - * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() - */ - -/* file is open for reading */ -#define FMODE_READ ((__force fmode_t)0x1) -/* file is open for writing */ -#define FMODE_WRITE ((__force fmode_t)0x2) -/* file is seekable */ -#define FMODE_LSEEK ((__force fmode_t)0x4) -/* file can be accessed using pread */ -#define FMODE_PREAD ((__force fmode_t)0x8) -/* file can be accessed using pwrite */ -#define FMODE_PWRITE ((__force fmode_t)0x10) -/* File is opened for execution with sys_execve / sys_uselib */ -#define FMODE_EXEC ((__force fmode_t)0x20) -/* File is opened with O_NDELAY (only set for block devices) */ -#define FMODE_NDELAY ((__force fmode_t)0x40) -/* File is opened with O_EXCL (only set for block devices) */ -#define FMODE_EXCL ((__force fmode_t)0x80) -/* File is opened using open(.., 3, ..) and is writeable only for ioctls - (specialy hack for floppy.c) */ -#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) -/* 32bit hashes as llseek() offset (for directories) */ -#define FMODE_32BITHASH ((__force fmode_t)0x200) -/* 64bit hashes as llseek() offset (for directories) */ -#define FMODE_64BITHASH ((__force fmode_t)0x400) - -/* - * Don't update ctime and mtime. - * - * Currently a special hack for the XFS open_by_handle ioctl, but we'll - * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. - */ -#define FMODE_NOCMTIME ((__force fmode_t)0x800) - -/* Expect random access pattern */ -#define FMODE_RANDOM ((__force fmode_t)0x1000) - -/* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ -#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) - -/* File is opened with O_PATH; almost nothing can be done with it */ -#define FMODE_PATH ((__force fmode_t)0x4000) - -/* File needs atomic accesses to f_pos */ -#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) -/* Write access to underlying fs */ -#define FMODE_WRITER ((__force fmode_t)0x10000) -/* Has read method(s) */ -#define FMODE_CAN_READ ((__force fmode_t)0x20000) -/* Has write method(s) */ -#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) - -#define FMODE_OPENED ((__force fmode_t)0x80000) -#define FMODE_CREATED ((__force fmode_t)0x100000) - -/* File is stream-like */ -#define FMODE_STREAM ((__force fmode_t)0x200000) - -/* File was opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) - -/* File is capable of returning -EAGAIN if I/O will block */ -#define FMODE_NOWAIT ((__force fmode_t)0x8000000) - -/* File represents mount that needs unmounting */ -#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) - -/* File does not contribute to nr_files count */ -#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) - -/* File supports async buffered reads */ -#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) - -/* - * Attribute flags. These should be or-ed together to figure out what - * has been changed! - */ -#define ATTR_MODE (1 << 0) -#define ATTR_UID (1 << 1) -#define ATTR_GID (1 << 2) -#define ATTR_SIZE (1 << 3) -#define ATTR_ATIME (1 << 4) -#define ATTR_MTIME (1 << 5) -#define ATTR_CTIME (1 << 6) -#define ATTR_ATIME_SET (1 << 7) -#define ATTR_MTIME_SET (1 << 8) -#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ -#define ATTR_KILL_SUID (1 << 11) -#define ATTR_KILL_SGID (1 << 12) -#define ATTR_FILE (1 << 13) -#define ATTR_KILL_PRIV (1 << 14) -#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ -#define ATTR_TIMES_SET (1 << 16) -#define ATTR_TOUCH (1 << 17) - -/* - * Whiteout is represented by a char device. The following constants define the - * mode and device number to use. - */ -#define WHITEOUT_MODE 0 -#define WHITEOUT_DEV 0 - -/* - * This is the Inode Attributes structure, used for notify_change(). It - * uses the above definitions as flags, to know which values have changed. - * Also, in this manner, a Filesystem can look at only the values it cares - * about. Basically, these are the attributes that the VFS layer can - * request to change from the FS layer. - * - * Derek Atkins 94-10-20 - */ -struct iattr { - unsigned int ia_valid; - umode_t ia_mode; - kuid_t ia_uid; - kgid_t ia_gid; - loff_t ia_size; - struct timespec64 ia_atime; - struct timespec64 ia_mtime; - struct timespec64 ia_ctime; - - /* - * Not an attribute, but an auxiliary info for filesystems wanting to - * implement an ftruncate() like method. NOTE: filesystem should - * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). - */ - struct file *ia_file; -}; - /* * Includes for diskquotas. */ #include -/* - * Maximum number of layers of fs stack. Needs to be limited to - * prevent kernel stack overflow - */ -#define FILESYSTEM_MAX_STACK_DEPTH 2 - -/** - * enum positive_aop_returns - aop return codes with specific semantics - * - * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has - * completed, that the page is still locked, and - * should be considered active. 
The VM uses this hint - * to return the page to the active list -- it won't - * be a candidate for writeback again in the near - * future. Other callers must be careful to unlock - * the page if they get this return. Returned by - * writepage(); - * - * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has - * unlocked it and the page might have been truncated. - * The caller should back up to acquiring a new page and - * trying again. The aop will be taking reasonable - * precautions not to livelock. If the caller held a page - * reference, it should drop it before retrying. Returned - * by readpage(). - * - * address_space_operation functions return these large constants to indicate - * special semantics to the caller. These are much larger than the bytes in a - * page to allow for functions that return the number of bytes operated on in a - * given page. - */ - -enum positive_aop_returns { - AOP_WRITEPAGE_ACTIVATE = 0x80000, - AOP_TRUNCATED_PAGE = 0x80001, -}; - -#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ -#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct - * helper code (eg buffer layer) - * to clear GFP_FS from alloc */ - /* * oh the beauties of C type declarations. */ @@ -287,130 +95,11 @@ struct address_space; struct writeback_control; struct readahead_control; -/* - * Write life time hint values. - * Stored in struct inode as u8. - */ -enum rw_hint { - WRITE_LIFE_NOT_SET = 0, - WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, - WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, - WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, - WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, - WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, -}; - -/* Match RWF_* bits to IOCB bits */ -#define IOCB_HIPRI (__force int) RWF_HIPRI -#define IOCB_DSYNC (__force int) RWF_DSYNC -#define IOCB_SYNC (__force int) RWF_SYNC -#define IOCB_NOWAIT (__force int) RWF_NOWAIT -#define IOCB_APPEND (__force int) RWF_APPEND - -/* non-RWF related bits - start at 16 */ -#define IOCB_EVENTFD (1 << 16) -#define IOCB_DIRECT (1 << 17) -#define IOCB_WRITE (1 << 18) -/* iocb->ki_waitq is valid */ -#define IOCB_WAITQ (1 << 19) -#define IOCB_NOIO (1 << 20) -/* can use bio alloc cache */ -#define IOCB_ALLOC_CACHE (1 << 21) - -struct kiocb { - struct file *ki_filp; - - /* The 'ki_filp' pointer is shared in a union for aio */ - randomized_struct_fields_start - - loff_t ki_pos; - void (*ki_complete)(struct kiocb *iocb, long ret); - void *private; - int ki_flags; - u16 ki_hint; - u16 ki_ioprio; /* See linux/ioprio.h */ - struct wait_page_queue *ki_waitq; /* for async buffered IO */ - randomized_struct_fields_end -}; - static inline bool is_sync_kiocb(struct kiocb *kiocb) { return kiocb->ki_complete == NULL; } -/* - * "descriptor" for what we're up to with a read. - * This allows us to use the same read code yet - * have multiple different users of the data that - * we read from a file. - * - * The simplest case just copies the data to user - * mode. - */ -typedef struct { - size_t written; - size_t count; - union { - char __user *buf; - void *data; - } arg; - int error; -} read_descriptor_t; - -typedef int (*read_actor_t)(read_descriptor_t *, struct page *, - unsigned long, unsigned long); - -struct address_space_operations { - int (*writepage)(struct page *page, struct writeback_control *wbc); - int (*readpage)(struct file *, struct page *); - - /* Write back some dirty pages from this mapping. */ - int (*writepages)(struct address_space *, struct writeback_control *); - - /* Set a page dirty. 
Return true if this dirtied it */ - int (*set_page_dirty)(struct page *page); - - /* - * Reads in the requested pages. Unlike ->readpage(), this is - * PURELY used for read-ahead!. - */ - int (*readpages)(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); - void (*readahead)(struct readahead_control *); - - int (*write_begin)(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata); - int (*write_end)(struct file *, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); - - /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ - sector_t (*bmap)(struct address_space *, sector_t); - void (*invalidatepage) (struct page *, unsigned int, unsigned int); - int (*releasepage) (struct page *, gfp_t); - void (*freepage)(struct page *); - ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); - /* - * migrate the contents of a page to the specified target. If - * migrate_mode is MIGRATE_ASYNC, it must not block. - */ - int (*migratepage) (struct address_space *, - struct page *, struct page *, enum migrate_mode); - bool (*isolate_page)(struct page *, isolate_mode_t); - void (*putback_page)(struct page *); - int (*launder_page) (struct page *); - int (*is_partially_uptodate) (struct page *, unsigned long, - unsigned long); - void (*is_dirty_writeback) (struct page *, bool *, bool *); - int (*error_remove_page)(struct address_space *, struct page *); - - /* swapfile support */ - int (*swap_activate)(struct swap_info_struct *sis, struct file *file, - sector_t *span); - void (*swap_deactivate)(struct file *file); -}; - extern const struct address_space_operations empty_aops; /* @@ -425,60 +114,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -/** - * struct address_space - Contents of a cacheable, mappable object. - * @host: Owner, either the inode or the block_device. - * @i_pages: Cached pages. - * @invalidate_lock: Guards coherency between page cache contents and - * file offset->disk block mappings in the filesystem during invalidates. - * It is also used to block modification of page cache contents through - * memory mappings. - * @gfp_mask: Memory allocation flags to use for allocating pages. - * @i_mmap_writable: Number of VM_SHARED mappings. - * @nr_thps: Number of THPs in the pagecache (non-shmem only). - * @i_mmap: Tree of private and shared mappings. - * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. - * @nrpages: Number of page entries, protected by the i_pages lock. - * @writeback_index: Writeback starts here. - * @a_ops: Methods. - * @flags: Error bits and flags (AS_*). - * @wb_err: The most recent error which has occurred. - * @private_lock: For use by the owner of the address_space. - * @private_list: For use by the owner of the address_space. - * @private_data: For use by the owner of the address_space. 
- */ -struct address_space { - struct inode *host; - struct xarray i_pages; - struct rw_semaphore invalidate_lock; - gfp_t gfp_mask; - atomic_t i_mmap_writable; -#ifdef CONFIG_READ_ONLY_THP_FOR_FS - /* number of thp, only for non-shmem files */ - atomic_t nr_thps; -#endif - struct rb_root_cached i_mmap; - struct rw_semaphore i_mmap_rwsem; - unsigned long nrpages; - pgoff_t writeback_index; - const struct address_space_operations *a_ops; - unsigned long flags; - errseq_t wb_err; - spinlock_t private_lock; - struct list_head private_list; - void *private_data; -} __attribute__((aligned(sizeof(long)))) __randomize_layout; - /* - * On most architectures that alignment is already the case; but - * must be enforced here for CRIS, to let the least significant bit - * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. - */ - -/* XArray tags, for tagging dirty and writeback pages in the pagecache. */ -#define PAGECACHE_TAG_DIRTY XA_MARK_0 -#define PAGECACHE_TAG_WRITEBACK XA_MARK_1 -#define PAGECACHE_TAG_TOWRITE XA_MARK_2 - /* * Returns true if any of the pages in the mapping are marked with the tag. */ @@ -569,9 +204,8 @@ static inline void mapping_allow_writable(struct address_space *mapping) /* * Use sequence counter to get consistent i_size on 32-bit processors. */ -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#ifdef __NEED_I_SIZE_ORDERED #include -#define __NEED_I_SIZE_ORDERED #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) #else #define i_size_ordered_init(inode) do { } while (0) @@ -606,123 +240,6 @@ is_uncached_acl(struct posix_acl *acl) struct fsnotify_mark_connector; -/* - * Keep mostly read-only and often accessed (especially for - * the RCU path lookup and 'stat' data) fields at the beginning - * of the 'struct inode' - */ -struct inode { - umode_t i_mode; - unsigned short i_opflags; - kuid_t i_uid; - kgid_t i_gid; - unsigned int i_flags; - -#ifdef CONFIG_FS_POSIX_ACL - struct posix_acl *i_acl; - struct posix_acl *i_default_acl; -#endif - - const struct inode_operations *i_op; - struct super_block *i_sb; - struct address_space *i_mapping; - -#ifdef CONFIG_SECURITY - void *i_security; -#endif - - /* Stat data, not accessed from path walking */ - unsigned long i_ino; - /* - * Filesystems may only read i_nlink directly. 
They shall use the - * following functions for modification: - * - * (set|clear|inc|drop)_nlink - * inode_(inc|dec)_link_count - */ - union { - const unsigned int i_nlink; - unsigned int __i_nlink; - }; - dev_t i_rdev; - loff_t i_size; - struct timespec64 i_atime; - struct timespec64 i_mtime; - struct timespec64 i_ctime; - spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ - unsigned short i_bytes; - u8 i_blkbits; - u8 i_write_hint; - blkcnt_t i_blocks; - -#ifdef __NEED_I_SIZE_ORDERED - seqcount_t i_size_seqcount; -#endif - - /* Misc */ - unsigned long i_state; - struct rw_semaphore i_rwsem; - - unsigned long dirtied_when; /* jiffies of first dirtying */ - unsigned long dirtied_time_when; - - struct hlist_node i_hash; - struct list_head i_io_list; /* backing dev IO list */ -#ifdef CONFIG_CGROUP_WRITEBACK - struct bdi_writeback *i_wb; /* the associated cgroup wb */ - - /* foreign inode detection, see wbc_detach_inode() */ - int i_wb_frn_winner; - u16 i_wb_frn_avg_time; - u16 i_wb_frn_history; -#endif - struct list_head i_lru; /* inode LRU list */ - struct list_head i_sb_list; - struct list_head i_wb_list; /* backing dev writeback list */ - union { - struct hlist_head i_dentry; - struct rcu_head i_rcu; - }; - atomic64_t i_version; - atomic64_t i_sequence; /* see futex */ - atomic_t i_count; - atomic_t i_dio_count; - atomic_t i_writecount; -#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) - atomic_t i_readcount; /* struct files open RO */ -#endif - union { - const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ - void (*free_inode)(struct inode *); - }; - struct file_lock_context *i_flctx; - struct address_space i_data; - struct list_head i_devices; - union { - struct pipe_inode_info *i_pipe; - struct cdev *i_cdev; - char *i_link; - unsigned i_dir_seq; - }; - - __u32 i_generation; - -#ifdef CONFIG_FSNOTIFY - __u32 i_fsnotify_mask; /* all events this inode cares about */ - struct fsnotify_mark_connector __rcu *i_fsnotify_marks; -#endif - -#ifdef CONFIG_FS_ENCRYPTION - struct fscrypt_info *i_crypt_info; -#endif - -#ifdef CONFIG_FS_VERITY - struct fsverity_info *i_verity_info; -#endif - - void *i_private; /* fs or device private pointer */ -} __randomize_layout; - struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); static inline unsigned int i_blocksize(const struct inode *node) @@ -918,32 +435,6 @@ static inline unsigned imajor(const struct inode *inode) return MAJOR(inode->i_rdev); } -struct fown_struct { - rwlock_t lock; /* protects pid, uid, euid fields */ - struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ - enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ - kuid_t uid, euid; /* uid/euid of process setting the owner */ - int signum; /* posix.1b rt signal to be delivered on IO */ -}; - -/** - * struct file_ra_state - Track a file's readahead state. - * @start: Where the most recent readahead started. - * @size: Number of pages read in the most recent readahead. - * @async_size: Start next readahead when this many pages are left. - * @ra_pages: Maximum size of a readahead request. - * @mmap_miss: How many mmap accesses missed in the page cache. - * @prev_pos: The last byte in the most recent read request. - */ -struct file_ra_state { - pgoff_t start; - unsigned int size; - unsigned int async_size; - unsigned int ra_pages; - unsigned int mmap_miss; - loff_t prev_pos; -}; - /* * Check if @index falls in the readahead windows. 
*/ @@ -953,54 +444,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) index < ra->start + ra->size); } -struct file { - union { - struct llist_node fu_llist; - struct rcu_head fu_rcuhead; - } f_u; - struct path f_path; - struct inode *f_inode; /* cached value */ - const struct file_operations *f_op; - - /* - * Protects f_ep, f_flags. - * Must not be taken from IRQ context. - */ - spinlock_t f_lock; - enum rw_hint f_write_hint; - atomic_long_t f_count; - unsigned int f_flags; - fmode_t f_mode; - struct mutex f_pos_lock; - loff_t f_pos; - struct fown_struct f_owner; - const struct cred *f_cred; - struct file_ra_state f_ra; - - u64 f_version; -#ifdef CONFIG_SECURITY - void *f_security; -#endif - /* needed for tty driver, and maybe others */ - void *private_data; - -#ifdef CONFIG_EPOLL - /* Used by fs/eventpoll.c to link all the hooks to this file */ - struct hlist_head *f_ep; -#endif /* #ifdef CONFIG_EPOLL */ - struct address_space *f_mapping; - errseq_t f_wb_err; - errseq_t f_sb_err; /* for syncfs */ -} __randomize_layout - __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ - -struct file_handle { - __u32 handle_bytes; - int handle_type; - /* file identifier */ - unsigned char f_handle[]; -}; - static inline struct file *get_file(struct file *f) { atomic_long_inc(&f->f_count); @@ -1011,68 +454,6 @@ static inline struct file *get_file(struct file *f) #define get_file_rcu(x) get_file_rcu_many((x), 1) #define file_count(x) atomic_long_read(&(x)->f_count) -#define MAX_NON_LFS ((1UL<<31) - 1) - -/* Page cache limit. The filesystems should put that into their s_maxbytes - limits, otherwise bad things can happen in VM. */ -#if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) -#elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) -#endif - -#define FL_POSIX 1 -#define FL_FLOCK 2 -#define FL_DELEG 4 /* NFSv4 delegation */ -#define FL_ACCESS 8 /* not trying to lock, just looking */ -#define FL_EXISTS 16 /* when unlocking, test for existence */ -#define FL_LEASE 32 /* lease held on this file */ -#define FL_CLOSE 64 /* unlock on close */ -#define FL_SLEEP 128 /* A blocking lock */ -#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ -#define FL_UNLOCK_PENDING 512 /* Lease is being broken */ -#define FL_OFDLCK 1024 /* lock is "owned" by struct file */ -#define FL_LAYOUT 2048 /* outstanding pNFS layout */ -#define FL_RECLAIM 4096 /* reclaiming from a reboot server */ - -#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) - -/* - * Special return value from posix_lock_file() and vfs_lock_file() for - * asynchronous locking. 
- */ -#define FILE_LOCK_DEFERRED 1 - -/* legacy typedef, should eventually be removed */ -typedef void *fl_owner_t; - -struct file_lock; - -struct file_lock_operations { - void (*fl_copy_lock)(struct file_lock *, struct file_lock *); - void (*fl_release_private)(struct file_lock *); -}; - -struct lock_manager_operations { - fl_owner_t (*lm_get_owner)(fl_owner_t); - void (*lm_put_owner)(fl_owner_t); - void (*lm_notify)(struct file_lock *); /* unblock callback */ - int (*lm_grant)(struct file_lock *, int); - bool (*lm_break)(struct file_lock *); - int (*lm_change)(struct file_lock *, int, struct list_head *); - void (*lm_setup)(struct file_lock *, void **); - bool (*lm_breaker_owns_lease)(struct file_lock *); -}; - -struct lock_manager { - struct list_head list; - /* - * NFSv4 and up also want opens blocked during the grace period; - * NLM doesn't care: - */ - bool block_opens; -}; - struct net; void locks_start_grace(struct net *, struct lock_manager *); void locks_end_grace(struct lock_manager *); @@ -1082,68 +463,6 @@ bool opens_in_grace(struct net *); /* that will die - we need it for nfs_lock_info */ #include -/* - * struct file_lock represents a generic "file lock". It's used to represent - * POSIX byte range locks, BSD (flock) locks, and leases. It's important to - * note that the same struct is used to represent both a request for a lock and - * the lock itself, but the same object is never used for both. - * - * FIXME: should we create a separate "struct lock_request" to help distinguish - * these two uses? - * - * The varous i_flctx lists are ordered by: - * - * 1) lock owner - * 2) lock range start - * 3) lock range end - * - * Obviously, the last two criteria only matter for POSIX locks. - */ -struct file_lock { - struct file_lock *fl_blocker; /* The lock, that is blocking us */ - struct list_head fl_list; /* link into file_lock_context */ - struct hlist_node fl_link; /* node in global lists */ - struct list_head fl_blocked_requests; /* list of requests with - * ->fl_blocker pointing here - */ - struct list_head fl_blocked_member; /* node in - * ->fl_blocker->fl_blocked_requests - */ - fl_owner_t fl_owner; - unsigned int fl_flags; - unsigned char fl_type; - unsigned int fl_pid; - int fl_link_cpu; /* what cpu's list is this on? 
*/ - wait_queue_head_t fl_wait; - struct file *fl_file; - loff_t fl_start; - loff_t fl_end; - - struct fasync_struct * fl_fasync; /* for lease break notifications */ - /* for lease breaks: */ - unsigned long fl_break_time; - unsigned long fl_downgrade_time; - - const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ - const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ - union { - struct nfs_lock_info nfs_fl; - struct nfs4_lock_info nfs4_fl; - struct { - struct list_head link; /* link in AFS vnode's pending_locks list */ - int state; /* state of grant or error if -ve */ - unsigned int debug_id; - } afs; - } fl_u; -} __randomize_layout; - -struct file_lock_context { - spinlock_t flc_lock; - struct list_head flc_flock; - struct list_head flc_posix; - struct list_head flc_lease; -}; - /* The following constant reflects the upper bound of the file/locking space */ #ifndef OFFSET_MAX #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) @@ -1351,17 +670,6 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) return locks_lock_inode_wait(locks_inode(filp), fl); } -struct fasync_struct { - rwlock_t fa_lock; - int magic; - int fa_fd; - struct fasync_struct *fa_next; /* singly linked list */ - struct file *fa_file; - struct rcu_head fa_rcu; -}; - -#define FASYNC_MAGIC 0x4601 - /* SMP safe fasync helpers: */ extern int fasync_helper(int, struct file *, int, struct fasync_struct **); extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); @@ -1378,221 +686,6 @@ extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); -/* - * sb->s_flags. Note that these mirror the equivalent MS_* flags where - * represented in both. - */ -#define SB_RDONLY 1 /* Mount read-only */ -#define SB_NOSUID 2 /* Ignore suid and sgid bits */ -#define SB_NODEV 4 /* Disallow access to device special files */ -#define SB_NOEXEC 8 /* Disallow program execution */ -#define SB_SYNCHRONOUS 16 /* Writes are synced at once */ -#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */ -#define SB_DIRSYNC 128 /* Directory modifications are synchronous */ -#define SB_NOATIME 1024 /* Do not update access times. 
*/ -#define SB_NODIRATIME 2048 /* Do not update directory access times */ -#define SB_SILENT 32768 -#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ -#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ -#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ -#define SB_I_VERSION (1<<23) /* Update inode I_version field */ -#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ - -/* These sb flags are internal to the kernel */ -#define SB_SUBMOUNT (1<<26) -#define SB_FORCE (1<<27) -#define SB_NOSEC (1<<28) -#define SB_BORN (1<<29) -#define SB_ACTIVE (1<<30) -#define SB_NOUSER (1<<31) - -/* These flags relate to encoding and casefolding */ -#define SB_ENC_STRICT_MODE_FL (1 << 0) - -#define sb_has_strict_encoding(sb) \ - (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL) - -/* - * Umount options - */ - -#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */ -#define MNT_DETACH 0x00000002 /* Just detach from the tree */ -#define MNT_EXPIRE 0x00000004 /* Mark for expiry */ -#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ -#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ - -/* sb->s_iflags */ -#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ -#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ -#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */ -#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */ - -/* sb->s_iflags to limit user namespace mounts */ -#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ -#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 -#define SB_I_UNTRUSTED_MOUNTER 0x00000040 - -#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ -#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ - -/* Possible states of 'frozen' field */ -enum { - SB_UNFROZEN = 0, /* FS is unfrozen */ - SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */ - SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */ - SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop - * internal threads if needed) */ - SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */ -}; - -#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1) - -struct sb_writers { - int frozen; /* Is sb frozen? 
*/ - wait_queue_head_t wait_unfrozen; /* wait for thaw */ - struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; -}; - -struct super_block { - struct list_head s_list; /* Keep this first */ - dev_t s_dev; /* search index; _not_ kdev_t */ - unsigned char s_blocksize_bits; - unsigned long s_blocksize; - loff_t s_maxbytes; /* Max file size */ - struct file_system_type *s_type; - const struct super_operations *s_op; - const struct dquot_operations *dq_op; - const struct quotactl_ops *s_qcop; - const struct export_operations *s_export_op; - unsigned long s_flags; - unsigned long s_iflags; /* internal SB_I_* flags */ - unsigned long s_magic; - struct dentry *s_root; - struct rw_semaphore s_umount; - int s_count; - atomic_t s_active; -#ifdef CONFIG_SECURITY - void *s_security; -#endif - const struct xattr_handler **s_xattr; -#ifdef CONFIG_FS_ENCRYPTION - const struct fscrypt_operations *s_cop; - struct key *s_master_keys; /* master crypto keys in use */ -#endif -#ifdef CONFIG_FS_VERITY - const struct fsverity_operations *s_vop; -#endif -#ifdef CONFIG_UNICODE - struct unicode_map *s_encoding; - __u16 s_encoding_flags; -#endif - struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ - struct list_head s_mounts; /* list of mounts; _not_ for fs use */ - struct block_device *s_bdev; - struct backing_dev_info *s_bdi; - struct mtd_info *s_mtd; - struct hlist_node s_instances; - unsigned int s_quota_types; /* Bitmask of supported quota types */ - struct quota_info s_dquot; /* Diskquota specific options */ - - struct sb_writers s_writers; - - /* - * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and - * s_fsnotify_marks together for cache efficiency. They are frequently - * accessed and rarely modified. - */ - void *s_fs_info; /* Filesystem private info */ - - /* Granularity of c/m/atime in ns (cannot be worse than a second) */ - u32 s_time_gran; - /* Time limits for c/m/atime in seconds */ - time64_t s_time_min; - time64_t s_time_max; -#ifdef CONFIG_FSNOTIFY - __u32 s_fsnotify_mask; - struct fsnotify_mark_connector __rcu *s_fsnotify_marks; -#endif - - char s_id[32]; /* Informational name */ - uuid_t s_uuid; /* UUID */ - - unsigned int s_max_links; - fmode_t s_mode; - - /* - * The next field is for VFS *only*. No filesystems have any business - * even looking at it. You had been warned. - */ - struct mutex s_vfs_rename_mutex; /* Kludge */ - - /* - * Filesystem subtype. If non-empty the filesystem type field - * in /proc/mounts will be "type.subtype" - */ - const char *s_subtype; - - const struct dentry_operations *s_d_op; /* default d_op for dentries */ - - /* - * Saved pool identifier for cleancache (-1 means none) - */ - int cleancache_poolid; - - struct shrinker s_shrink; /* per-sb shrinker handle */ - - /* Number of inodes with nlink == 0 but still referenced */ - atomic_long_t s_remove_count; - - /* - * Number of inode/mount/sb objects that are being watched, note that - * inodes objects are currently double-accounted. - */ - atomic_long_t s_fsnotify_connectors; - - /* Being remounted read-only */ - int s_readonly_remount; - - /* per-sb errseq_t for reporting writeback errors via syncfs */ - errseq_t s_wb_err; - - /* AIO completions deferred from interrupt context */ - struct workqueue_struct *s_dio_done_wq; - struct hlist_head s_pins; - - /* - * Owning user namespace and default context in which to - * interpret filesystem uids, gids, quotas, device nodes, - * xattrs and security labels. 
- */ - struct user_namespace *s_user_ns; - - /* - * The list_lru structure is essentially just a pointer to a table - * of per-node lru lists, each of which has its own spinlock. - * There is no need to put them into separate cachelines. - */ - struct list_lru s_dentry_lru; - struct list_lru s_inode_lru; - struct rcu_head rcu; - struct work_struct destroy_work; - - struct mutex s_sync_lock; /* sync serialisation lock */ - - /* - * Indicates how deep in a filesystem stack this SB is - */ - int s_stack_depth; - - /* s_inode_list_lock protects s_inodes */ - spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; - struct list_head s_inodes; /* all inodes */ - - spinlock_t s_inode_wblist_lock; - struct list_head s_inodes_wb; /* writeback inodes */ -} __randomize_layout; - static inline struct user_namespace *i_user_ns(const struct inode *inode) { return inode->i_sb->s_user_ns; @@ -1869,28 +962,6 @@ int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *); int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *, struct inode **); -/** - * struct renamedata - contains all information required for renaming - * @old_mnt_userns: old user namespace of the mount the inode was found from - * @old_dir: parent of source - * @old_dentry: source - * @new_mnt_userns: new user namespace of the mount the inode was found from - * @new_dir: parent of destination - * @new_dentry: destination - * @delegated_inode: returns an inode needing a delegation break - * @flags: rename flags - */ -struct renamedata { - struct user_namespace *old_mnt_userns; - struct inode *old_dir; - struct dentry *old_dentry; - struct user_namespace *new_mnt_userns; - struct inode *new_dir; - struct dentry *new_dentry; - struct inode **delegated_inode; - unsigned int flags; -} __randomize_layout; - int vfs_rename(struct renamedata *); static inline int vfs_whiteout(struct user_namespace *mnt_userns, @@ -1927,146 +998,6 @@ void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode, const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); -/* - * This is the "filldir" function type, used by readdir() to let - * the kernel specify what kind of dirent layout it wants to have. - * This allows the kernel to read directories into kernel space or - * to have different dirent layouts depending on the binary type. - */ -struct dir_context; -typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, - unsigned); - -struct dir_context { - filldir_t actor; - loff_t pos; -}; - -/* - * These flags let !MMU mmap() govern direct device mapping vs immediate - * copying more easily for MAP_PRIVATE, especially for ROM filesystems. - * - * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) - * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) - * NOMMU_MAP_READ: Can be mapped for reading - * NOMMU_MAP_WRITE: Can be mapped for writing - * NOMMU_MAP_EXEC: Can be mapped for execution - */ -#define NOMMU_MAP_COPY 0x00000001 -#define NOMMU_MAP_DIRECT 0x00000008 -#define NOMMU_MAP_READ VM_MAYREAD -#define NOMMU_MAP_WRITE VM_MAYWRITE -#define NOMMU_MAP_EXEC VM_MAYEXEC - -#define NOMMU_VMFLAGS \ - (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) - -/* - * These flags control the behavior of the remap_file_range function pointer. - * If it is called with len == 0 that means "remap to end of source file". - * See Documentation/filesystems/vfs.rst for more details about this call. - * - * REMAP_FILE_DEDUP: only remap if contents identical (i.e. 
deduplicate) - * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request - */ -#define REMAP_FILE_DEDUP (1 << 0) -#define REMAP_FILE_CAN_SHORTEN (1 << 1) - -/* - * These flags signal that the caller is ok with altering various aspects of - * the behavior of the remap operation. The changes must be made by the - * implementation; the vfs remap helper functions can take advantage of them. - * Flags in this category exist to preserve the quirky behavior of the hoisted - * btrfs clone/dedupe ioctls. - */ -#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) - -struct iov_iter; - -struct file_operations { - struct module *owner; - loff_t (*llseek) (struct file *, loff_t, int); - ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); - ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); - ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); - ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, - unsigned int flags); - int (*iterate) (struct file *, struct dir_context *); - int (*iterate_shared) (struct file *, struct dir_context *); - __poll_t (*poll) (struct file *, struct poll_table_struct *); - long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); - long (*compat_ioctl) (struct file *, unsigned int, unsigned long); - int (*mmap) (struct file *, struct vm_area_struct *); - unsigned long mmap_supported_flags; - int (*open) (struct inode *, struct file *); - int (*flush) (struct file *, fl_owner_t id); - int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, loff_t, loff_t, int datasync); - int (*fasync) (int, struct file *, int); - int (*lock) (struct file *, int, struct file_lock *); - ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); - unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - int (*check_flags)(int); - int (*flock) (struct file *, int, struct file_lock *); - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); - ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); - int (*setlease)(struct file *, long, struct file_lock **, void **); - long (*fallocate)(struct file *file, int mode, loff_t offset, - loff_t len); - void (*show_fdinfo)(struct seq_file *m, struct file *f); -#ifndef CONFIG_MMU - unsigned (*mmap_capabilities)(struct file *); -#endif - ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, - loff_t, size_t, unsigned int); - loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t len, unsigned int remap_flags); - int (*fadvise)(struct file *, loff_t, loff_t, int); -} __randomize_layout; - -struct inode_operations { - struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); - const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); - int (*permission) (struct user_namespace *, struct inode *, int); - struct posix_acl * (*get_acl)(struct inode *, int, bool); - - int (*readlink) (struct dentry *, char __user *,int); - - int (*create) (struct user_namespace *, struct inode *,struct dentry *, - umode_t, bool); - int (*link) (struct dentry *,struct inode *,struct dentry *); - int (*unlink) (struct inode *,struct dentry *); - int (*symlink) (struct user_namespace *, struct inode *,struct dentry *, - const char *); - int (*mkdir) (struct 
user_namespace *, struct inode *,struct dentry *, - umode_t); - int (*rmdir) (struct inode *,struct dentry *); - int (*mknod) (struct user_namespace *, struct inode *,struct dentry *, - umode_t,dev_t); - int (*rename) (struct user_namespace *, struct inode *, struct dentry *, - struct inode *, struct dentry *, unsigned int); - int (*setattr) (struct user_namespace *, struct dentry *, - struct iattr *); - int (*getattr) (struct user_namespace *, const struct path *, - struct kstat *, u32, unsigned int); - ssize_t (*listxattr) (struct dentry *, char *, size_t); - int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, - u64 len); - int (*update_time)(struct inode *, struct timespec64 *, int); - int (*atomic_open)(struct inode *, struct dentry *, - struct file *, unsigned open_flag, - umode_t create_mode); - int (*tmpfile) (struct user_namespace *, struct inode *, - struct dentry *, umode_t); - int (*set_acl)(struct user_namespace *, struct inode *, - struct posix_acl *, int); - int (*fileattr_set)(struct user_namespace *mnt_userns, - struct dentry *dentry, struct fileattr *fa); - int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); -} ____cacheline_aligned; - static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, struct iov_iter *iter) { @@ -2107,41 +1038,6 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, struct file *dst_file, loff_t dst_pos, loff_t len, unsigned int remap_flags); - -struct super_operations { - struct inode *(*alloc_inode)(struct super_block *sb); - void (*destroy_inode)(struct inode *); - void (*free_inode)(struct inode *); - - void (*dirty_inode) (struct inode *, int flags); - int (*write_inode) (struct inode *, struct writeback_control *wbc); - int (*drop_inode) (struct inode *); - void (*evict_inode) (struct inode *); - void (*put_super) (struct super_block *); - int (*sync_fs)(struct super_block *sb, int wait); - int (*freeze_super) (struct super_block *); - int (*freeze_fs) (struct super_block *); - int (*thaw_super) (struct super_block *); - int (*unfreeze_fs) (struct super_block *); - int (*statfs) (struct dentry *, struct kstatfs *); - int (*remount_fs) (struct super_block *, int *, char *); - void (*umount_begin) (struct super_block *); - - int (*show_options)(struct seq_file *, struct dentry *); - int (*show_devname)(struct seq_file *, struct dentry *); - int (*show_path)(struct seq_file *, struct dentry *); - int (*show_stats)(struct seq_file *, struct dentry *); -#ifdef CONFIG_QUOTA - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); - struct dquot **(*get_dquots)(struct inode *); -#endif - long (*nr_cached_objects)(struct super_block *, - struct shrink_control *); - long (*free_cached_objects)(struct super_block *, - struct shrink_control *); -}; - /* * Inode flags - they have no relation to superblock flags now */ @@ -2430,36 +1326,6 @@ extern int file_modified(struct file *file); int sync_inode_metadata(struct inode *inode, int wait); -struct file_system_type { - const char *name; - int fs_flags; -#define FS_REQUIRES_DEV 1 -#define FS_BINARY_MOUNTDATA 2 -#define FS_HAS_SUBTYPE 4 -#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ -#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ -#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. 
*/ -#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ - int (*init_fs_context)(struct fs_context *); - const struct fs_parameter_spec *parameters; - struct dentry *(*mount) (struct file_system_type *, int, - const char *, void *); - void (*kill_sb) (struct super_block *); - struct module *owner; - struct file_system_type * next; - struct hlist_head fs_supers; - - struct lock_class_key s_lock_key; - struct lock_class_key s_umount_key; - struct lock_class_key s_vfs_rename_key; - struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; - - struct lock_class_key i_lock_key; - struct lock_class_key i_mutex_key; - struct lock_class_key invalidate_lock_key; - struct lock_class_key i_mutex_dir_key; -}; - #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) extern struct dentry *mount_bdev(struct file_system_type *fs_type, @@ -2631,16 +1497,6 @@ static inline int break_layout(struct inode *inode, bool wait) #endif /* CONFIG_FILE_LOCKING */ /* fs/open.c */ -struct audit_names; -struct filename { - const char *name; /* pointer to actual string */ - const __user char *uptr; /* original userland pointer */ - int refcnt; - struct audit_names *aname; - const char iname[]; -}; -static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); - static inline struct user_namespace *file_mnt_user_ns(struct file *file) { return mnt_user_ns(file->f_path.mnt); @@ -2686,6 +1542,7 @@ static inline struct file *file_clone_open(struct file *file) } extern int filp_close(struct file *, fl_owner_t id); +static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); extern struct filename *getname_flags(const char __user *, int, int *); extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); @@ -3027,7 +1884,7 @@ ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); extern struct file * open_exec(const char *); - + /* fs/dcache.c -- generic fs support functions */ extern bool is_subdir(struct dentry *, struct dentry *); extern bool path_is_under(const struct path *, const struct path *); diff --git a/include/linux/fs_types.h b/include/linux/fs_types.h index 54816791196f..3e6e21265eaf 100644 --- a/include/linux/fs_types.h +++ b/include/linux/fs_types.h @@ -3,8 +3,1229 @@ #define _LINUX_FS_TYPES_H /* - * This is a header for the common implementation of dirent - * to fs on-disk file type conversion. Although the fs on-disk + * Common data structures and type definitions for file systems + * and related objects. + * + * Only add headers for other data structures in here when + * absolutely required, and include this file instead of + * linux/fs.h from other headers, to keep the include hierarchy + * flat. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct address_space; +struct buffer_head; +struct delayed_call; +struct fiemap_extent_info; +struct fileattr; +struct fs_context; +struct inode; +struct io_comp_batch; +struct iov_iter; +struct kiocb; +struct kstat; +struct kstatfs; +struct page; +struct poll_table_struct; +struct readahead_control; +struct seq_file; +struct swap_info_struct; +struct vm_area_struct; +struct writeback_control; + +typedef __kernel_rwf_t rwf_t; + +typedef int (get_block_t)(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create); +typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, + ssize_t bytes, void *private); + +#define MAY_EXEC 0x00000001 +#define MAY_WRITE 0x00000002 +#define MAY_READ 0x00000004 +#define MAY_APPEND 0x00000008 +#define MAY_ACCESS 0x00000010 +#define MAY_OPEN 0x00000020 +#define MAY_CHDIR 0x00000040 +/* called from RCU mode, don't block */ +#define MAY_NOT_BLOCK 0x00000080 + +/* + * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond + * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open() + */ + +/* file is open for reading */ +#define FMODE_READ ((__force fmode_t)0x1) +/* file is open for writing */ +#define FMODE_WRITE ((__force fmode_t)0x2) +/* file is seekable */ +#define FMODE_LSEEK ((__force fmode_t)0x4) +/* file can be accessed using pread */ +#define FMODE_PREAD ((__force fmode_t)0x8) +/* file can be accessed using pwrite */ +#define FMODE_PWRITE ((__force fmode_t)0x10) +/* File is opened for execution with sys_execve / sys_uselib */ +#define FMODE_EXEC ((__force fmode_t)0x20) +/* File is opened with O_NDELAY (only set for block devices) */ +#define FMODE_NDELAY ((__force fmode_t)0x40) +/* File is opened with O_EXCL (only set for block devices) */ +#define FMODE_EXCL ((__force fmode_t)0x80) +/* File is opened using open(.., 3, ..) and is writeable only for ioctls + (specialy hack for floppy.c) */ +#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) +/* 32bit hashes as llseek() offset (for directories) */ +#define FMODE_32BITHASH ((__force fmode_t)0x200) +/* 64bit hashes as llseek() offset (for directories) */ +#define FMODE_64BITHASH ((__force fmode_t)0x400) + +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. + */ +#define FMODE_NOCMTIME ((__force fmode_t)0x800) + +/* Expect random access pattern */ +#define FMODE_RANDOM ((__force fmode_t)0x1000) + +/* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ +#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) + +/* File is opened with O_PATH; almost nothing can be done with it */ +#define FMODE_PATH ((__force fmode_t)0x4000) + +/* File needs atomic accesses to f_pos */ +#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) +/* Write access to underlying fs */ +#define FMODE_WRITER ((__force fmode_t)0x10000) +/* Has read method(s) */ +#define FMODE_CAN_READ ((__force fmode_t)0x20000) +/* Has write method(s) */ +#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) + +#define FMODE_OPENED ((__force fmode_t)0x80000) +#define FMODE_CREATED ((__force fmode_t)0x100000) + +/* File is stream-like */ +#define FMODE_STREAM ((__force fmode_t)0x200000) + +/* File was opened by fanotify and shouldn't generate fanotify events */ +#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) + +/* File is capable of returning -EAGAIN if I/O will block */ +#define FMODE_NOWAIT ((__force fmode_t)0x8000000) + +/* File represents mount that needs unmounting */ +#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) + +/* File does not contribute to nr_files count */ +#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) + +/* File supports async buffered reads */ +#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) + +/* + * Attribute flags. These should be or-ed together to figure out what + * has been changed! + */ +#define ATTR_MODE (1 << 0) +#define ATTR_UID (1 << 1) +#define ATTR_GID (1 << 2) +#define ATTR_SIZE (1 << 3) +#define ATTR_ATIME (1 << 4) +#define ATTR_MTIME (1 << 5) +#define ATTR_CTIME (1 << 6) +#define ATTR_ATIME_SET (1 << 7) +#define ATTR_MTIME_SET (1 << 8) +#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */ +#define ATTR_KILL_SUID (1 << 11) +#define ATTR_KILL_SGID (1 << 12) +#define ATTR_FILE (1 << 13) +#define ATTR_KILL_PRIV (1 << 14) +#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */ +#define ATTR_TIMES_SET (1 << 16) +#define ATTR_TOUCH (1 << 17) + +/* + * Whiteout is represented by a char device. The following constants define the + * mode and device number to use. + */ +#define WHITEOUT_MODE 0 +#define WHITEOUT_DEV 0 + +/* + * This is the Inode Attributes structure, used for notify_change(). It + * uses the above definitions as flags, to know which values have changed. + * Also, in this manner, a Filesystem can look at only the values it cares + * about. Basically, these are the attributes that the VFS layer can + * request to change from the FS layer. + * + * Derek Atkins 94-10-20 + */ +struct iattr { + unsigned int ia_valid; + umode_t ia_mode; + kuid_t ia_uid; + kgid_t ia_gid; + loff_t ia_size; + struct timespec64 ia_atime; + struct timespec64 ia_mtime; + struct timespec64 ia_ctime; + + /* + * Not an attribute, but an auxiliary info for filesystems wanting to + * implement an ftruncate() like method. NOTE: filesystem should + * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). + */ + struct file *ia_file; +}; + +/* + * Write life time hint values. + * Stored in struct inode as u8. 
+ */ +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, + WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, + WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, + WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, + WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, +}; + +/* Match RWF_* bits to IOCB bits */ +#define IOCB_HIPRI (__force int) RWF_HIPRI +#define IOCB_DSYNC (__force int) RWF_DSYNC +#define IOCB_SYNC (__force int) RWF_SYNC +#define IOCB_NOWAIT (__force int) RWF_NOWAIT +#define IOCB_APPEND (__force int) RWF_APPEND + +/* non-RWF related bits - start at 16 */ +#define IOCB_EVENTFD (1 << 16) +#define IOCB_DIRECT (1 << 17) +#define IOCB_WRITE (1 << 18) +/* iocb->ki_waitq is valid */ +#define IOCB_WAITQ (1 << 19) +#define IOCB_NOIO (1 << 20) +/* can use bio alloc cache */ +#define IOCB_ALLOC_CACHE (1 << 21) + +struct kiocb { + struct file *ki_filp; + + /* The 'ki_filp' pointer is shared in a union for aio */ + randomized_struct_fields_start + + loff_t ki_pos; + void (*ki_complete)(struct kiocb *iocb, long ret); + void *private; + int ki_flags; + u16 ki_hint; + u16 ki_ioprio; /* See linux/ioprio.h */ + struct wait_page_queue *ki_waitq; /* for async buffered IO */ + randomized_struct_fields_end +}; + +/* + * Maximum number of layers of fs stack. Needs to be limited to + * prevent kernel stack overflow + */ +#define FILESYSTEM_MAX_STACK_DEPTH 2 + +/** + * enum positive_aop_returns - aop return codes with specific semantics + * + * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has + * completed, that the page is still locked, and + * should be considered active. The VM uses this hint + * to return the page to the active list -- it won't + * be a candidate for writeback again in the near + * future. Other callers must be careful to unlock + * the page if they get this return. Returned by + * writepage(); + * + * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has + * unlocked it and the page might have been truncated. + * The caller should back up to acquiring a new page and + * trying again. The aop will be taking reasonable + * precautions not to livelock. If the caller held a page + * reference, it should drop it before retrying. Returned + * by readpage(). + * + * address_space_operation functions return these large constants to indicate + * special semantics to the caller. These are much larger than the bytes in a + * page to allow for functions that return the number of bytes operated on in a + * given page. + */ + +enum positive_aop_returns { + AOP_WRITEPAGE_ACTIVATE = 0x80000, + AOP_TRUNCATED_PAGE = 0x80001, +}; + +#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ + +/* + * "descriptor" for what we're up to with a read. + * This allows us to use the same read code yet + * have multiple different users of the data that + * we read from a file. + * + * The simplest case just copies the data to user + * mode. + */ +typedef struct { + size_t written; + size_t count; + union { + char __user *buf; + void *data; + } arg; + int error; +} read_descriptor_t; + +typedef int (*read_actor_t)(read_descriptor_t *, struct page *, + unsigned long, unsigned long); + +struct address_space_operations { + int (*writepage)(struct page *page, struct writeback_control *wbc); + int (*readpage)(struct file *, struct page *); + + /* Write back some dirty pages from this mapping. 
*/ + int (*writepages)(struct address_space *, struct writeback_control *); + + /* Set a page dirty. Return true if this dirtied it */ + int (*set_page_dirty)(struct page *page); + + /* + * Reads in the requested pages. Unlike ->readpage(), this is + * PURELY used for read-ahead!. + */ + int (*readpages)(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + void (*readahead)(struct readahead_control *); + + int (*write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); + int (*write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ + sector_t (*bmap)(struct address_space *, sector_t); + void (*invalidatepage) (struct page *, unsigned int, unsigned int); + int (*releasepage) (struct page *, gfp_t); + void (*freepage)(struct page *); + ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); + /* + * migrate the contents of a page to the specified target. If + * migrate_mode is MIGRATE_ASYNC, it must not block. + */ + int (*migratepage) (struct address_space *, + struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); + int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, unsigned long, + unsigned long); + void (*is_dirty_writeback) (struct page *, bool *, bool *); + int (*error_remove_page)(struct address_space *, struct page *); + + /* swapfile support */ + int (*swap_activate)(struct swap_info_struct *sis, struct file *file, + sector_t *span); + void (*swap_deactivate)(struct file *file); +}; + +/** + * struct address_space - Contents of a cacheable, mappable object. + * @host: Owner, either the inode or the block_device. + * @i_pages: Cached pages. + * @invalidate_lock: Guards coherency between page cache contents and + * file offset->disk block mappings in the filesystem during invalidates. + * It is also used to block modification of page cache contents through + * memory mappings. + * @gfp_mask: Memory allocation flags to use for allocating pages. + * @i_mmap_writable: Number of VM_SHARED mappings. + * @nr_thps: Number of THPs in the pagecache (non-shmem only). + * @i_mmap: Tree of private and shared mappings. + * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable. + * @nrpages: Number of page entries, protected by the i_pages lock. + * @writeback_index: Writeback starts here. + * @a_ops: Methods. + * @flags: Error bits and flags (AS_*). + * @wb_err: The most recent error which has occurred. + * @private_lock: For use by the owner of the address_space. + * @private_list: For use by the owner of the address_space. + * @private_data: For use by the owner of the address_space. 
+ */ +struct address_space { + struct inode *host; + struct xarray i_pages; + struct rw_semaphore invalidate_lock; + gfp_t gfp_mask; + atomic_t i_mmap_writable; +#ifdef CONFIG_READ_ONLY_THP_FOR_FS + /* number of thp, only for non-shmem files */ + atomic_t nr_thps; +#endif + struct rb_root_cached i_mmap; + struct rw_semaphore i_mmap_rwsem; + unsigned long nrpages; + pgoff_t writeback_index; + const struct address_space_operations *a_ops; + unsigned long flags; + errseq_t wb_err; + spinlock_t private_lock; + struct list_head private_list; + void *private_data; +} __attribute__((aligned(sizeof(long)))) __randomize_layout; + /* + * On most architectures that alignment is already the case; but + * must be enforced here for CRIS, to let the least significant bit + * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. + */ + +/* XArray tags, for tagging dirty and writeback pages in the pagecache. */ +#define PAGECACHE_TAG_DIRTY XA_MARK_0 +#define PAGECACHE_TAG_WRITEBACK XA_MARK_1 +#define PAGECACHE_TAG_TOWRITE XA_MARK_2 + +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) +#define __NEED_I_SIZE_ORDERED +#endif + +/* + * Keep mostly read-only and often accessed (especially for + * the RCU path lookup and 'stat' data) fields at the beginning + * of the 'struct inode' + */ +struct inode { + umode_t i_mode; + unsigned short i_opflags; + kuid_t i_uid; + kgid_t i_gid; + unsigned int i_flags; + +#ifdef CONFIG_FS_POSIX_ACL + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; +#endif + + const struct inode_operations *i_op; + struct super_block *i_sb; + struct address_space *i_mapping; + +#ifdef CONFIG_SECURITY + void *i_security; +#endif + + /* Stat data, not accessed from path walking */ + unsigned long i_ino; + /* + * Filesystems may only read i_nlink directly. 
They shall use the + * following functions for modification: + * + * (set|clear|inc|drop)_nlink + * inode_(inc|dec)_link_count + */ + union { + const unsigned int i_nlink; + unsigned int __i_nlink; + }; + dev_t i_rdev; + loff_t i_size; + struct timespec64 i_atime; + struct timespec64 i_mtime; + struct timespec64 i_ctime; + spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ + unsigned short i_bytes; + u8 i_blkbits; + u8 i_write_hint; + blkcnt_t i_blocks; + +#ifdef __NEED_I_SIZE_ORDERED + seqcount_t i_size_seqcount; +#endif + + /* Misc */ + unsigned long i_state; + struct rw_semaphore i_rwsem; + + unsigned long dirtied_when; /* jiffies of first dirtying */ + unsigned long dirtied_time_when; + + struct hlist_node i_hash; + struct list_head i_io_list; /* backing dev IO list */ +#ifdef CONFIG_CGROUP_WRITEBACK + struct bdi_writeback *i_wb; /* the associated cgroup wb */ + + /* foreign inode detection, see wbc_detach_inode() */ + int i_wb_frn_winner; + u16 i_wb_frn_avg_time; + u16 i_wb_frn_history; +#endif + struct list_head i_lru; /* inode LRU list */ + struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ + union { + struct hlist_head i_dentry; + struct rcu_head i_rcu; + }; + atomic64_t i_version; + atomic64_t i_sequence; /* see futex */ + atomic_t i_count; + atomic_t i_dio_count; + atomic_t i_writecount; +#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING) + atomic_t i_readcount; /* struct files open RO */ +#endif + union { + const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ + void (*free_inode)(struct inode *); + }; + struct file_lock_context *i_flctx; + struct address_space i_data; + struct list_head i_devices; + union { + struct pipe_inode_info *i_pipe; + struct cdev *i_cdev; + char *i_link; + unsigned i_dir_seq; + }; + + __u32 i_generation; + +#ifdef CONFIG_FSNOTIFY + __u32 i_fsnotify_mask; /* all events this inode cares about */ + struct fsnotify_mark_connector __rcu *i_fsnotify_marks; +#endif + +#ifdef CONFIG_FS_ENCRYPTION + struct fscrypt_info *i_crypt_info; +#endif + +#ifdef CONFIG_FS_VERITY + struct fsverity_info *i_verity_info; +#endif + + void *i_private; /* fs or device private pointer */ +} __randomize_layout; + +struct fown_struct { + rwlock_t lock; /* protects pid, uid, euid fields */ + struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ + enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ + kuid_t uid, euid; /* uid/euid of process setting the owner */ + int signum; /* posix.1b rt signal to be delivered on IO */ +}; + +/** + * struct file_ra_state - Track a file's readahead state. + * @start: Where the most recent readahead started. + * @size: Number of pages read in the most recent readahead. + * @async_size: Start next readahead when this many pages are left. + * @ra_pages: Maximum size of a readahead request. + * @mmap_miss: How many mmap accesses missed in the page cache. + * @prev_pos: The last byte in the most recent read request. + */ +struct file_ra_state { + pgoff_t start; + unsigned int size; + unsigned int async_size; + unsigned int ra_pages; + unsigned int mmap_miss; + loff_t prev_pos; +}; + +struct file { + union { + struct llist_node fu_llist; + struct rcu_head fu_rcuhead; + } f_u; + struct path f_path; + struct inode *f_inode; /* cached value */ + const struct file_operations *f_op; + + /* + * Protects f_ep, f_flags. + * Must not be taken from IRQ context. 
+ */
+ spinlock_t f_lock;
+ enum rw_hint f_write_hint;
+ atomic_long_t f_count;
+ unsigned int f_flags;
+ fmode_t f_mode;
+ struct mutex f_pos_lock;
+ loff_t f_pos;
+ struct fown_struct f_owner;
+ const struct cred *f_cred;
+ struct file_ra_state f_ra;
+
+ u64 f_version;
+#ifdef CONFIG_SECURITY
+ void *f_security;
+#endif
+ /* needed for tty driver, and maybe others */
+ void *private_data;
+
+#ifdef CONFIG_EPOLL
+ /* Used by fs/eventpoll.c to link all the hooks to this file */
+ struct hlist_head *f_ep;
+#endif /* #ifdef CONFIG_EPOLL */
+ struct address_space *f_mapping;
+ errseq_t f_wb_err;
+ errseq_t f_sb_err; /* for syncfs */
+} __randomize_layout
+ __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+
+struct file_handle {
+ __u32 handle_bytes;
+ int handle_type;
+ /* file identifier */
+ unsigned char f_handle[];
+};
+
+#define MAX_NON_LFS ((1UL<<31) - 1)
+
+/* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
+#endif
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
+
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+/* legacy typedef, should eventually be removed */
+typedef void *fl_owner_t;
+
+struct file_lock;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
+};
+
+struct lock_manager {
+ struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
+};
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */ +struct file_lock { + struct file_lock *fl_blocker; /* The lock, that is blocking us */ + struct list_head fl_list; /* link into file_lock_context */ + struct hlist_node fl_link; /* node in global lists */ + struct list_head fl_blocked_requests; /* list of requests with + * ->fl_blocker pointing here + */ + struct list_head fl_blocked_member; /* node in + * ->fl_blocker->fl_blocked_requests + */ + fl_owner_t fl_owner; + unsigned int fl_flags; + unsigned char fl_type; + unsigned int fl_pid; + int fl_link_cpu; /* what cpu's list is this on? */ + wait_queue_head_t fl_wait; + struct file *fl_file; + loff_t fl_start; + loff_t fl_end; + + struct fasync_struct * fl_fasync; /* for lease break notifications */ + /* for lease breaks: */ + unsigned long fl_break_time; + unsigned long fl_downgrade_time; + + const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ + const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ + union { + struct nfs_lock_info nfs_fl; + struct nfs4_lock_info nfs4_fl; + struct { + struct list_head link; /* link in AFS vnode's pending_locks list */ + int state; /* state of grant or error if -ve */ + unsigned int debug_id; + } afs; + } fl_u; +} __randomize_layout; + +struct file_lock_context { + spinlock_t flc_lock; + struct list_head flc_flock; + struct list_head flc_posix; + struct list_head flc_lease; +}; + +struct fasync_struct { + rwlock_t fa_lock; + int magic; + int fa_fd; + struct fasync_struct *fa_next; /* singly linked list */ + struct file *fa_file; + struct rcu_head fa_rcu; +}; + +#define FASYNC_MAGIC 0x4601 + +/* + * sb->s_flags. Note that these mirror the equivalent MS_* flags where + * represented in both. + */ +#define SB_RDONLY 1 /* Mount read-only */ +#define SB_NOSUID 2 /* Ignore suid and sgid bits */ +#define SB_NODEV 4 /* Disallow access to device special files */ +#define SB_NOEXEC 8 /* Disallow program execution */ +#define SB_SYNCHRONOUS 16 /* Writes are synced at once */ +#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define SB_DIRSYNC 128 /* Directory modifications are synchronous */ +#define SB_NOATIME 1024 /* Do not update access times. 
*/
+#define SB_NODIRATIME 2048 /* Do not update directory access times */
+#define SB_SILENT 32768
+#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define SB_I_VERSION (1<<23) /* Update inode I_version field */
+#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_SUBMOUNT (1<<26)
+#define SB_FORCE (1<<27)
+#define SB_NOSEC (1<<28)
+#define SB_BORN (1<<29)
+#define SB_ACTIVE (1<<30)
+#define SB_NOUSER (1<<31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+
+/*
+ * Umount options
+ */
+
+#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
+#define MNT_DETACH 0x00000002 /* Just detach from the tree */
+#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
+
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
+
+/* sb->s_iflags to limit user namespace mounts */
+#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
+ * internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ int frozen; /* Is sb frozen?
*/ + wait_queue_head_t wait_unfrozen; /* wait for thaw */ + struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS]; +}; + +/* quota data structures for superblock */ +#define MAXQUOTAS 3 +#define USRQUOTA 0 /* element used for user quotas */ +#define GRPQUOTA 1 /* element used for group quotas */ +#define PRJQUOTA 2 /* element used for project quotas */ + +typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ +typedef long long qsize_t; /* Type in which we store sizes */ + +/* + * Data for one quotafile kept in memory + */ +struct quota_format_type; + +struct mem_dqinfo { + struct quota_format_type *dqi_format; + int dqi_fmt_id; /* Id of the dqi_format - used when turning + * quotas on after remount RW */ + struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */ + unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */ + unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */ + unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */ + qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */ + qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */ + void *dqi_priv; +}; + + +struct quota_info { + unsigned int flags; /* Flags for diskquotas on this device */ + struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */ + struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ + struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ + const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ +}; + +struct super_block { + struct list_head s_list; /* Keep this first */ + dev_t s_dev; /* search index; _not_ kdev_t */ + unsigned char s_blocksize_bits; + unsigned long s_blocksize; + loff_t s_maxbytes; /* Max file size */ + struct file_system_type *s_type; + const struct super_operations *s_op; + const struct dquot_operations *dq_op; + const struct quotactl_ops *s_qcop; + const struct export_operations *s_export_op; + unsigned long s_flags; + unsigned long s_iflags; /* internal SB_I_* flags */ + unsigned long s_magic; + struct dentry *s_root; + struct rw_semaphore s_umount; + int s_count; + atomic_t s_active; +#ifdef CONFIG_SECURITY + void *s_security; +#endif + const struct xattr_handler **s_xattr; +#ifdef CONFIG_FS_ENCRYPTION + const struct fscrypt_operations *s_cop; + struct key *s_master_keys; /* master crypto keys in use */ +#endif +#ifdef CONFIG_FS_VERITY + const struct fsverity_operations *s_vop; +#endif +#ifdef CONFIG_UNICODE + struct unicode_map *s_encoding; + __u16 s_encoding_flags; +#endif + struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ + struct list_head s_mounts; /* list of mounts; _not_ for fs use */ + struct block_device *s_bdev; + struct backing_dev_info *s_bdi; + struct mtd_info *s_mtd; + struct hlist_node s_instances; + unsigned int s_quota_types; /* Bitmask of supported quota types */ + struct quota_info s_dquot; /* Diskquota specific options */ + + struct sb_writers s_writers; + + /* + * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and + * s_fsnotify_marks together for cache efficiency. They are frequently + * accessed and rarely modified. 
+ */ + void *s_fs_info; /* Filesystem private info */ + + /* Granularity of c/m/atime in ns (cannot be worse than a second) */ + u32 s_time_gran; + /* Time limits for c/m/atime in seconds */ + time64_t s_time_min; + time64_t s_time_max; +#ifdef CONFIG_FSNOTIFY + __u32 s_fsnotify_mask; + struct fsnotify_mark_connector __rcu *s_fsnotify_marks; +#endif + + char s_id[32]; /* Informational name */ + uuid_t s_uuid; /* UUID */ + + unsigned int s_max_links; + fmode_t s_mode; + + /* + * The next field is for VFS *only*. No filesystems have any business + * even looking at it. You had been warned. + */ + struct mutex s_vfs_rename_mutex; /* Kludge */ + + /* + * Filesystem subtype. If non-empty the filesystem type field + * in /proc/mounts will be "type.subtype" + */ + const char *s_subtype; + + const struct dentry_operations *s_d_op; /* default d_op for dentries */ + + /* + * Saved pool identifier for cleancache (-1 means none) + */ + int cleancache_poolid; + + struct shrinker s_shrink; /* per-sb shrinker handle */ + + /* Number of inodes with nlink == 0 but still referenced */ + atomic_long_t s_remove_count; + + /* + * Number of inode/mount/sb objects that are being watched, note that + * inodes objects are currently double-accounted. + */ + atomic_long_t s_fsnotify_connectors; + + /* Being remounted read-only */ + int s_readonly_remount; + + /* per-sb errseq_t for reporting writeback errors via syncfs */ + errseq_t s_wb_err; + + /* AIO completions deferred from interrupt context */ + struct workqueue_struct *s_dio_done_wq; + struct hlist_head s_pins; + + /* + * Owning user namespace and default context in which to + * interpret filesystem uids, gids, quotas, device nodes, + * xattrs and security labels. + */ + struct user_namespace *s_user_ns; + + /* + * The list_lru structure is essentially just a pointer to a table + * of per-node lru lists, each of which has its own spinlock. + * There is no need to put them into separate cachelines. + */ + struct list_lru s_dentry_lru; + struct list_lru s_inode_lru; + struct rcu_head rcu; + struct work_struct destroy_work; + + struct mutex s_sync_lock; /* sync serialisation lock */ + + /* + * Indicates how deep in a filesystem stack this SB is + */ + int s_stack_depth; + + /* s_inode_list_lock protects s_inodes */ + spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; + struct list_head s_inodes; /* all inodes */ + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; /* writeback inodes */ +} __randomize_layout; + +/** + * struct renamedata - contains all information required for renaming + * @old_mnt_userns: old user namespace of the mount the inode was found from + * @old_dir: parent of source + * @old_dentry: source + * @new_mnt_userns: new user namespace of the mount the inode was found from + * @new_dir: parent of destination + * @new_dentry: destination + * @delegated_inode: returns an inode needing a delegation break + * @flags: rename flags + */ +struct renamedata { + struct user_namespace *old_mnt_userns; + struct inode *old_dir; + struct dentry *old_dentry; + struct user_namespace *new_mnt_userns; + struct inode *new_dir; + struct dentry *new_dentry; + struct inode **delegated_inode; + unsigned int flags; +} __randomize_layout; + +/* + * This is the "filldir" function type, used by readdir() to let + * the kernel specify what kind of dirent layout it wants to have. + * This allows the kernel to read directories into kernel space or + * to have different dirent layouts depending on the binary type. 
+ */ +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + +struct dir_context { + filldir_t actor; + loff_t pos; +}; + +/* + * These flags let !MMU mmap() govern direct device mapping vs immediate + * copying more easily for MAP_PRIVATE, especially for ROM filesystems. + * + * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) + * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) + * NOMMU_MAP_READ: Can be mapped for reading + * NOMMU_MAP_WRITE: Can be mapped for writing + * NOMMU_MAP_EXEC: Can be mapped for execution + */ +#define NOMMU_MAP_COPY 0x00000001 +#define NOMMU_MAP_DIRECT 0x00000008 +#define NOMMU_MAP_READ VM_MAYREAD +#define NOMMU_MAP_WRITE VM_MAYWRITE +#define NOMMU_MAP_EXEC VM_MAYEXEC + +#define NOMMU_VMFLAGS \ + (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) + +/* + * These flags control the behavior of the remap_file_range function pointer. + * If it is called with len == 0 that means "remap to end of source file". + * See Documentation/filesystems/vfs.rst for more details about this call. + * + * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate) + * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request + */ +#define REMAP_FILE_DEDUP (1 << 0) +#define REMAP_FILE_CAN_SHORTEN (1 << 1) + +/* + * These flags signal that the caller is ok with altering various aspects of + * the behavior of the remap operation. The changes must be made by the + * implementation; the vfs remap helper functions can take advantage of them. + * Flags in this category exist to preserve the quirky behavior of the hoisted + * btrfs clone/dedupe ioctls. + */ +#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) + +struct iov_iter; + +struct file_operations { + struct module *owner; + loff_t (*llseek) (struct file *, loff_t, int); + ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); + ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); + ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, + unsigned int flags); + int (*iterate) (struct file *, struct dir_context *); + int (*iterate_shared) (struct file *, struct dir_context *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); + long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); + long (*compat_ioctl) (struct file *, unsigned int, unsigned long); + int (*mmap) (struct file *, struct vm_area_struct *); + unsigned long mmap_supported_flags; + int (*open) (struct inode *, struct file *); + int (*flush) (struct file *, fl_owner_t id); + int (*release) (struct inode *, struct file *); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); + int (*fasync) (int, struct file *, int); + int (*lock) (struct file *, int, struct file_lock *); + ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); + unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); + int (*check_flags)(int); + int (*flock) (struct file *, int, struct file_lock *); + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); + ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + int (*setlease)(struct file *, long, struct file_lock **, void **); + long (*fallocate)(struct file *file, int mode, loff_t offset, + loff_t len); + void 
(*show_fdinfo)(struct seq_file *m, struct file *f); +#ifndef CONFIG_MMU + unsigned (*mmap_capabilities)(struct file *); +#endif + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, + loff_t, size_t, unsigned int); + loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t len, unsigned int remap_flags); + int (*fadvise)(struct file *, loff_t, loff_t, int); +} __randomize_layout; + +struct inode_operations { + struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); + const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); + int (*permission) (struct user_namespace *, struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int, bool); + + int (*readlink) (struct dentry *, char __user *,int); + + int (*create) (struct user_namespace *, struct inode *,struct dentry *, + umode_t, bool); + int (*link) (struct dentry *,struct inode *,struct dentry *); + int (*unlink) (struct inode *,struct dentry *); + int (*symlink) (struct user_namespace *, struct inode *,struct dentry *, + const char *); + int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *, + umode_t); + int (*rmdir) (struct inode *,struct dentry *); + int (*mknod) (struct user_namespace *, struct inode *,struct dentry *, + umode_t,dev_t); + int (*rename) (struct user_namespace *, struct inode *, struct dentry *, + struct inode *, struct dentry *, unsigned int); + int (*setattr) (struct user_namespace *, struct dentry *, + struct iattr *); + int (*getattr) (struct user_namespace *, const struct path *, + struct kstat *, u32, unsigned int); + ssize_t (*listxattr) (struct dentry *, char *, size_t); + int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, + u64 len); + int (*update_time)(struct inode *, struct timespec64 *, int); + int (*atomic_open)(struct inode *, struct dentry *, + struct file *, unsigned open_flag, + umode_t create_mode); + int (*tmpfile) (struct user_namespace *, struct inode *, + struct dentry *, umode_t); + int (*set_acl)(struct user_namespace *, struct inode *, + struct posix_acl *, int); + int (*fileattr_set)(struct user_namespace *mnt_userns, + struct dentry *dentry, struct fileattr *fa); + int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); +} ____cacheline_aligned; + +struct super_operations { + struct inode *(*alloc_inode)(struct super_block *sb); + void (*destroy_inode)(struct inode *); + void (*free_inode)(struct inode *); + + void (*dirty_inode) (struct inode *, int flags); + int (*write_inode) (struct inode *, struct writeback_control *wbc); + int (*drop_inode) (struct inode *); + void (*evict_inode) (struct inode *); + void (*put_super) (struct super_block *); + int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); + int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); + int (*unfreeze_fs) (struct super_block *); + int (*statfs) (struct dentry *, struct kstatfs *); + int (*remount_fs) (struct super_block *, int *, char *); + void (*umount_begin) (struct super_block *); + + int (*show_options)(struct seq_file *, struct dentry *); + int (*show_devname)(struct seq_file *, struct dentry *); + int (*show_path)(struct seq_file *, struct dentry *); + int (*show_stats)(struct seq_file *, struct dentry *); +#ifdef CONFIG_QUOTA + ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); + ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, 
loff_t); + struct dquot **(*get_dquots)(struct inode *); +#endif + long (*nr_cached_objects)(struct super_block *, + struct shrink_control *); + long (*free_cached_objects)(struct super_block *, + struct shrink_control *); +}; + +struct file_system_type { + const char *name; + int fs_flags; +#define FS_REQUIRES_DEV 1 +#define FS_BINARY_MOUNTDATA 2 +#define FS_HAS_SUBTYPE 4 +#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ +#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ +#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ +#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ + int (*init_fs_context)(struct fs_context *); + const struct fs_parameter_spec *parameters; + struct dentry *(*mount) (struct file_system_type *, int, + const char *, void *); + void (*kill_sb) (struct super_block *); + struct module *owner; + struct file_system_type * next; + struct hlist_head fs_supers; + + struct lock_class_key s_lock_key; + struct lock_class_key s_umount_key; + struct lock_class_key s_vfs_rename_key; + struct lock_class_key s_writers_key[SB_FREEZE_LEVELS]; + + struct lock_class_key i_lock_key; + struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; + struct lock_class_key i_mutex_dir_key; +}; + +struct audit_names; +struct filename { + const char *name; /* pointer to actual string */ + const __user char *uptr; /* original userland pointer */ + int refcnt; + struct audit_names *aname; + const char iname[]; +}; + +/* + * This is the common implementation of dirent to fs + * on-disk file type conversion. Although the fs on-disk * bits are specific to every file system, in practice, many * file systems use the exact same on-disk format to describe * the lower 3 file type bits that represent the 7 POSIX file diff --git a/include/linux/quota.h b/include/linux/quota.h index 18ebd39c9487..b105e92290dd 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -62,9 +62,6 @@ enum quota_type { #define QTYPE_MASK_GRP (1 << GRPQUOTA) #define QTYPE_MASK_PRJ (1 << PRJQUOTA) -typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ -typedef long long qsize_t; /* Type in which we store sizes */ - struct kqid { /* Type in which we store the quota identifier */ union { kuid_t uid; @@ -214,24 +211,6 @@ struct mem_dqblk { time64_t dqb_itime; /* time limit for excessive inode use */ }; -/* - * Data for one quotafile kept in memory - */ -struct quota_format_type; - -struct mem_dqinfo { - struct quota_format_type *dqi_format; - int dqi_fmt_id; /* Id of the dqi_format - used when turning - * quotas on after remount RW */ - struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */ - unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */ - unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */ - unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */ - qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */ - qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */ - void *dqi_priv; -}; - struct super_block; /* Mask for flags passed to userspace */ @@ -516,14 +495,6 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev, } #endif /* CONFIG_QUOTA_NETLINK_INTERFACE */ -struct quota_info { - unsigned int flags; /* Flags for diskquotas on this device */ - struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */ - struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ - struct mem_dqinfo 
info[MAXQUOTAS]; /* Information for each quota type */
- const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
-};
-
 int register_quota_format(struct quota_format_type *fmt);
 void unregister_quota_format(struct quota_format_type *fmt);
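
As a rough illustration of how the struct file_system_type declaration quoted in the hunk above is typically consumed, a minimal sketch follows. This is not part of the patch: the "examplefs" name, its fill_super stub, and the choice of mount_nodev()/kill_anon_super() are illustrative assumptions only.

#include <linux/fs.h>
#include <linux/module.h>

/* Illustrative stub only: a real filesystem would set up s_op and the
 * root inode/dentry here instead of failing. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	return -ENOSYS;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

/* Hypothetical filesystem type built from the declarations above. */
static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_anon_super,
};

static int __init examplefs_init(void)
{
	return register_filesystem(&examplefs_type);
}

static void __exit examplefs_exit(void)
{
	unregister_filesystem(&examplefs_type);
}

module_init(examplefs_init);
module_exit(examplefs_exit);
MODULE_LICENSE("GPL");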