@@ -44,7 +44,7 @@
#include "perf_event.h"
-struct x86_pmu x86_pmu __read_mostly;
+struct x86_pmu x86_pmu __asi_not_sensitive_readmostly;
static struct pmu pmu;
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -2685,7 +2685,7 @@ static int x86_pmu_filter_match(struct perf_event *event)
return 1;
}
-static struct pmu pmu = {
+static struct pmu pmu __asi_not_sensitive = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -189,7 +189,7 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
EVENT_CONSTRAINT_END
};
-static struct event_constraint intel_skl_event_constraints[] = {
+static struct event_constraint intel_skl_event_constraints[] __asi_not_sensitive = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
@@ -280,7 +280,7 @@ static int msr_event_add(struct perf_event *event, int flags)
return 0;
}
-static struct pmu pmu_msr = {
+static struct pmu pmu_msr __asi_not_sensitive = {
.task_ctx_nr = perf_sw_context,
.attr_groups = attr_groups,
.event_init = msr_event_init,
@@ -1020,7 +1020,7 @@ static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
}
struct pmu *x86_get_pmu(unsigned int cpu);
-extern struct x86_pmu x86_pmu __read_mostly;
+extern struct x86_pmu x86_pmu __asi_not_sensitive_readmostly;
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
@@ -1542,8 +1542,8 @@ struct kvm_arch_async_pf {
extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
-extern bool __read_mostly allow_smaller_maxphyaddr;
-extern bool __read_mostly enable_apicv;
+extern bool __asi_not_sensitive_readmostly allow_smaller_maxphyaddr;
+extern bool __asi_not_sensitive_readmostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP(func) \
@@ -31,7 +31,7 @@
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
-int __read_mostly alternatives_patched;
+int __asi_not_sensitive alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);
@@ -46,7 +46,7 @@ static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
-u64 x86_spec_ctrl_base;
+u64 x86_spec_ctrl_base __asi_not_sensitive;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
@@ -116,7 +116,7 @@ static struct resource bss_resource = {
struct cpuinfo_x86 new_cpu_data;
/* Common CPU data for all CPUs */
-struct cpuinfo_x86 boot_cpu_data __read_mostly;
+struct cpuinfo_x86 boot_cpu_data __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp;
@@ -133,7 +133,7 @@ struct ist_info ist_info;
#endif
#else
-struct cpuinfo_x86 boot_cpu_data __read_mostly;
+struct cpuinfo_x86 boot_cpu_data __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif
@@ -257,7 +257,7 @@ static int __init nonmi_ipi_setup(char *str)
__setup("nonmi_ipi", nonmi_ipi_setup);
-struct smp_ops smp_ops = {
+struct smp_ops smp_ops __asi_not_sensitive = {
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
@@ -30,10 +30,10 @@
#include <asm/i8259.h>
#include <asm/uv/uv.h>
-unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
+unsigned int __asi_not_sensitive_readmostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
-unsigned int __read_mostly tsc_khz;
+unsigned int __asi_not_sensitive_readmostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);
#define KHZ 1000
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(tsc_khz);
/*
* TSC can be unstable due to cpufreq or due to unsynced TSCs
*/
-static int __read_mostly tsc_unstable;
+static int __asi_not_sensitive_readmostly tsc_unstable;
static unsigned int __initdata tsc_early_khz;
static DEFINE_STATIC_KEY_FALSE(__use_tsc);
@@ -1146,7 +1146,7 @@ static struct clocksource clocksource_tsc_early = {
* this one will immediately take over. We will only register if TSC has
* been found good.
*/
-static struct clocksource clocksource_tsc = {
+static struct clocksource clocksource_tsc __asi_not_sensitive = {
.name = "tsc",
.rating = 300,
.read = read_tsc,
@@ -60,7 +60,7 @@
#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32
-static bool lapic_timer_advance_dynamic __read_mostly;
+static bool lapic_timer_advance_dynamic __asi_not_sensitive_readmostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
@@ -33,7 +33,7 @@ u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
-u64 __read_mostly shadow_acc_track_mask;
+u64 __asi_not_sensitive_readmostly shadow_acc_track_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
@@ -151,7 +151,7 @@ extern u64 __read_mostly shadow_me_mask;
* shadow_acc_track_mask is the set of bits to be cleared in non-accessed
* pages.
*/
-extern u64 __read_mostly shadow_acc_track_mask;
+extern u64 __asi_not_sensitive_readmostly shadow_acc_track_mask;
/*
* This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
@@ -138,7 +138,7 @@ struct fixed_mtrr_segment {
int range_start;
};
-static struct fixed_mtrr_segment fixed_seg_table[] = {
+static struct fixed_mtrr_segment fixed_seg_table[] __asi_not_sensitive = {
/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
{
.start = 0x0,
@@ -6,13 +6,13 @@
#include "lapic.h"
-extern bool __read_mostly enable_vpid;
-extern bool __read_mostly flexpriority_enabled;
-extern bool __read_mostly enable_ept;
-extern bool __read_mostly enable_unrestricted_guest;
-extern bool __read_mostly enable_ept_ad_bits;
-extern bool __read_mostly enable_pml;
-extern int __read_mostly pt_mode;
+extern bool __asi_not_sensitive_readmostly enable_vpid;
+extern bool __asi_not_sensitive_readmostly flexpriority_enabled;
+extern bool __asi_not_sensitive_readmostly enable_ept;
+extern bool __asi_not_sensitive_readmostly enable_unrestricted_guest;
+extern bool __asi_not_sensitive_readmostly enable_ept_ad_bits;
+extern bool __asi_not_sensitive_readmostly enable_pml;
+extern int __asi_not_sensitive_readmostly pt_mode;
#define PT_MODE_SYSTEM 0
#define PT_MODE_HOST_GUEST 1
@@ -78,29 +78,29 @@ static const struct x86_cpu_id vmx_cpu_id[] = {
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
#endif
-bool __read_mostly enable_vpid = 1;
+bool __asi_not_sensitive_readmostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
-static bool __read_mostly enable_vnmi = 1;
+static bool __asi_not_sensitive_readmostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
-bool __read_mostly flexpriority_enabled = 1;
+bool __asi_not_sensitive_readmostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
-bool __read_mostly enable_ept = 1;
+bool __asi_not_sensitive_readmostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);
-bool __read_mostly enable_unrestricted_guest = 1;
+bool __asi_not_sensitive_readmostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
enable_unrestricted_guest, bool, S_IRUGO);
-bool __read_mostly enable_ept_ad_bits = 1;
+bool __asi_not_sensitive_readmostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
-static bool __read_mostly emulate_invalid_guest_state = true;
+static bool __asi_not_sensitive_readmostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
-static bool __read_mostly fasteoi = 1;
+static bool __asi_not_sensitive_readmostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
module_param(enable_apicv, bool, S_IRUGO);
@@ -110,13 +110,13 @@ module_param(enable_apicv, bool, S_IRUGO);
* VMX and be a hypervisor for its own guests. If nested=0, guests may not
* use VMX instructions.
*/
-static bool __read_mostly nested = 1;
+static bool __asi_not_sensitive_readmostly nested = 1;
module_param(nested, bool, S_IRUGO);
-bool __read_mostly enable_pml = 1;
+bool __asi_not_sensitive_readmostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
-static bool __read_mostly dump_invalid_vmcs = 0;
+static bool __asi_not_sensitive_readmostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);
#define MSR_BITMAP_MODE_X2APIC 1
@@ -125,13 +125,13 @@ module_param(dump_invalid_vmcs, bool, 0644);
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
-static int __read_mostly cpu_preemption_timer_multi;
-static bool __read_mostly enable_preemption_timer = 1;
+static int __asi_not_sensitive_readmostly cpu_preemption_timer_multi;
+static bool __asi_not_sensitive_readmostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif
-extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __asi_not_sensitive_readmostly allow_smaller_maxphyaddr;
module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
@@ -202,7 +202,7 @@ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
/* Default is SYSTEM mode, 1 for host-guest mode */
-int __read_mostly pt_mode = PT_MODE_SYSTEM;
+int __asi_not_sensitive_readmostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
@@ -421,7 +421,7 @@ static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
-struct vmcs_config vmcs_config;
+struct vmcs_config vmcs_config __asi_not_sensitive;
struct vmx_capability vmx_capability;
#define VMX_SEGMENT_FIELD(seg) \
@@ -453,7 +453,7 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
vmx->segment_cache.bitmask = 0;
}
-static unsigned long host_idt_base;
+static unsigned long host_idt_base __asi_not_sensitive;
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
@@ -5549,7 +5549,8 @@ static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
* to be done to userspace and return 0.
*/
-static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu)
+	__asi_not_sensitive = {
[EXIT_REASON_EXCEPTION_NMI] = handle_exception_nmi,
[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
[EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
@@ -123,7 +123,7 @@ static int sync_regs(struct kvm_vcpu *vcpu);
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
-struct kvm_x86_ops kvm_x86_ops __read_mostly;
+struct kvm_x86_ops kvm_x86_ops __asi_not_sensitive_readmostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
#define KVM_X86_OP(func) \
@@ -148,17 +148,17 @@ module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);
-bool __read_mostly kvm_has_tsc_control;
+bool __asi_not_sensitive_readmostly kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
-u32 __read_mostly kvm_max_guest_tsc_khz;
+u32 __asi_not_sensitive_readmostly kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
-u8 __read_mostly kvm_tsc_scaling_ratio_frac_bits;
+u8 __asi_not_sensitive_readmostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
-u64 __read_mostly kvm_max_tsc_scaling_ratio;
+u64 __asi_not_sensitive_readmostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
-u64 __read_mostly kvm_default_tsc_scaling_ratio;
+u64 __asi_not_sensitive_readmostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
-bool __read_mostly kvm_has_bus_lock_exit;
+bool __asi_not_sensitive_readmostly kvm_has_bus_lock_exit;
EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
@@ -171,20 +171,20 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
* advancement entirely. Any other value is used as-is and disables adaptive
* tuning, i.e. allows privileged userspace to set an exact advancement time.
*/
-static int __read_mostly lapic_timer_advance_ns = -1;
+static int __asi_not_sensitive_readmostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
-static bool __read_mostly vector_hashing = true;
+static bool __asi_not_sensitive_readmostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);
-bool __read_mostly enable_vmware_backdoor = false;
+bool __asi_not_sensitive_readmostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
-static bool __read_mostly force_emulation_prefix = false;
+static bool __asi_not_sensitive_readmostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);
-int __read_mostly pi_inject_timer = -1;
+int __asi_not_sensitive_readmostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
/*
@@ -216,13 +216,14 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);
-bool __read_mostly allow_smaller_maxphyaddr = 0;
+bool __asi_not_sensitive_readmostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
-bool __read_mostly enable_apicv = true;
+bool __asi_not_sensitive_readmostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);
-u64 __read_mostly host_xss;
+/* TODO(oweisse): how dangerous is this variable, from a security standpoint? */
+u64 __asi_not_sensitive_readmostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
u64 __read_mostly supported_xss;
EXPORT_SYMBOL_GPL(supported_xss);
@@ -292,7 +293,7 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-u64 __read_mostly host_xcr0;
+u64 __asi_not_sensitive_readmostly host_xcr0;
u64 __read_mostly supported_xcr0;
EXPORT_SYMBOL_GPL(supported_xcr0);
@@ -2077,7 +2078,7 @@ struct pvclock_gtod_data {
u64 wall_time_sec;
};
-static struct pvclock_gtod_data pvclock_gtod_data;
+static struct pvclock_gtod_data pvclock_gtod_data __asi_not_sensitive;
static void update_pvclock_gtod(struct timekeeper *tk)
{
@@ -17,8 +17,8 @@
#undef pr_fmt
#define pr_fmt(fmt) "ASI: " fmt
-static struct asi_class asi_class[ASI_MAX_NUM];
-static DEFINE_SPINLOCK(asi_class_lock);
+static struct asi_class asi_class[ASI_MAX_NUM] __asi_not_sensitive;
+static DEFINE_SPINLOCK(asi_class_lock __asi_not_sensitive);
DEFINE_PER_CPU_ALIGNED(struct asi_state, asi_cpu_state);
EXPORT_PER_CPU_SYMBOL_GPL(asi_cpu_state);
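
For orientation: the __asi_not_sensitive* annotations used throughout these
hunks are assumed to be section attributes, grouping "not sensitive" data into
dedicated sections that stay mapped while ASI's restricted page tables are
active. Below is a minimal sketch of plausible definitions; the config symbol,
the section names, and the linker-script entries they would need are
assumptions, not quotes from this series.

/* Sketch only: illustrative definitions, not the series' actual macros. */
#include <linux/cache.h>	/* __read_mostly */
#include <linux/compiler.h>	/* __section() */

#ifdef CONFIG_ADDRESS_SPACE_ISOLATION	/* assumed config name */
#define __asi_not_sensitive \
        __section(".data..asi_not_sensitive")
#define __asi_not_sensitive_readmostly \
        __section(".data..asi_not_sensitive_readmostly")
#else
#define __asi_not_sensitive
#define __asi_not_sensitive_readmostly	__read_mostly
#endif

The pattern in the surrounding hunks presumably follows from this: definitions
carry the annotation, while extern declarations (debug_locks, jiffies_64,
prof_on, rcu_scheduler_active, tick_do_timer_cpu) simply drop __read_mostly so
the declaration and the annotated definition cannot disagree about section
attributes.
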
@@ -7,8 +7,8 @@
struct task_struct;
-extern int debug_locks __read_mostly;
-extern int debug_locks_silent __read_mostly;
+extern int debug_locks;
+extern int debug_locks_silent;
static __always_inline int __debug_locks_off(void)
@@ -76,8 +76,8 @@ extern int register_refined_jiffies(long clock_tick_rate);
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
-extern u64 __cacheline_aligned_in_smp jiffies_64;
-extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
+extern u64 jiffies_64;
+extern unsigned long volatile __jiffy_arch_data jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
@@ -117,7 +117,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
struct blocking_notifier_head name = \
BLOCKING_NOTIFIER_INIT(name)
#define RAW_NOTIFIER_HEAD(name) \
- struct raw_notifier_head name = \
+ struct raw_notifier_head name __asi_not_sensitive = \
RAW_NOTIFIER_INIT(name)
#ifdef CONFIG_TREE_SRCU
@@ -38,7 +38,7 @@ enum profile_type {
#ifdef CONFIG_PROFILING
-extern int prof_on __read_mostly;
+extern int prof_on;
/* init basic kernel profiler */
int profile_init(void);
@@ -84,7 +84,7 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);
@@ -308,6 +308,8 @@ static inline int rcu_read_lock_any_held(void)
#ifdef CONFIG_PROVE_RCU
+/* TODO(oweisse): ASI - we might want to switch ".data.unlikely" to some
+ * other section that will be mapped into the ASI restricted address space. */
/**
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
@@ -62,7 +62,7 @@ static inline void rcu_irq_exit_check_preempt(void) { }
void exit_rcu(void);
void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_SYSCTL_H
#include <linux/types.h>
+#include <asm/asi.h>
struct ctl_table;
@@ -123,7 +123,7 @@ extern void radix_tree_init(void);
* operations which are not allowed with IRQ disabled are allowed while the
* flag is set.
*/
-bool early_boot_irqs_disabled __read_mostly;
+bool early_boot_irqs_disabled __asi_not_sensitive;
enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
@@ -162,7 +162,8 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
/* the default hierarchy */
-struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
+struct cgroup_root cgrp_dfl_root __asi_not_sensitive =
+ { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);
/*
@@ -755,7 +756,7 @@ EXPORT_SYMBOL_GPL(of_css);
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
-struct css_set init_css_set = {
+struct css_set init_css_set __asi_not_sensitive = {
.refcount = REFCOUNT_INIT(1),
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
@@ -2581,26 +2581,26 @@ const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
-struct cpumask __cpu_possible_mask __read_mostly
+struct cpumask __cpu_possible_mask __asi_not_sensitive_readmostly
= {CPU_BITS_ALL};
#else
-struct cpumask __cpu_possible_mask __read_mostly;
+struct cpumask __cpu_possible_mask __asi_not_sensitive_readmostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);
-struct cpumask __cpu_online_mask __read_mostly;
+struct cpumask __cpu_online_mask __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(__cpu_online_mask);
-struct cpumask __cpu_present_mask __read_mostly;
+struct cpumask __cpu_present_mask __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(__cpu_present_mask);
-struct cpumask __cpu_active_mask __read_mostly;
+struct cpumask __cpu_active_mask __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(__cpu_active_mask);
-struct cpumask __cpu_dying_mask __read_mostly;
+struct cpumask __cpu_dying_mask __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(__cpu_dying_mask);
-atomic_t __num_online_cpus __read_mostly;
+atomic_t __num_online_cpus __asi_not_sensitive_readmostly;
EXPORT_SYMBOL(__num_online_cpus);
void init_cpu_present(const struct cpumask *src)
@@ -9651,7 +9651,7 @@ static int perf_swevent_init(struct perf_event *event)
return 0;
}
-static struct pmu perf_swevent = {
+static struct pmu perf_swevent __asi_not_sensitive = {
.task_ctx_nr = perf_sw_context,
.capabilities = PERF_PMU_CAP_NO_NMI,
@@ -9800,7 +9800,7 @@ static int perf_tp_event_init(struct perf_event *event)
return 0;
}
-static struct pmu perf_tracepoint = {
+static struct pmu perf_tracepoint __asi_not_sensitive = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_tp_event_init,
@@ -13,7 +13,7 @@
#include <linux/kthread.h>
/* total number of freezing conditions in effect */
-atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+atomic_t __asi_not_sensitive system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);
/* indicate whether PM freezing is in effect, protected by
@@ -64,7 +64,7 @@
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
-int prove_locking = 1;
+int prove_locking __asi_not_sensitive = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
@@ -186,8 +186,8 @@ unsigned long nr_zapped_classes;
#ifndef CONFIG_DEBUG_LOCKDEP
static
#endif
-struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
-static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS] __asi_not_sensitive;
+static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS) __asi_not_sensitive;
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
@@ -389,7 +389,7 @@ static struct hlist_head classhash_table[CLASSHASH_SIZE];
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
-static struct hlist_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE] __asi_not_sensitive;
/*
* the id of held_lock
@@ -599,7 +599,7 @@ u64 lockdep_stack_hash_count(void)
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
-unsigned int max_lockdep_depth;
+unsigned int max_lockdep_depth __asi_not_sensitive;
#ifdef CONFIG_DEBUG_LOCKDEP
/*
@@ -3225,8 +3225,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
return 0;
}
-struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
-static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS] __asi_not_sensitive;
+static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS) __asi_not_sensitive;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
unsigned long nr_zapped_lock_chains;
unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */
@@ -56,7 +56,7 @@ int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;
-int panic_timeout = CONFIG_PANIC_TIMEOUT;
+int panic_timeout __asi_not_sensitive = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
#define PANIC_PRINT_TASK_INFO 0x00000001
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(ignore_console_lock_warning);
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
*/
-int oops_in_progress;
+int oops_in_progress __asi_not_sensitive;
EXPORT_SYMBOL(oops_in_progress);
/*
@@ -2001,7 +2001,7 @@ static u8 *__printk_recursion_counter(void)
local_irq_restore(flags); \
} while (0)
-int printk_delay_msec __read_mostly;
+int printk_delay_msec __asi_not_sensitive_readmostly;
static inline void printk_delay(void)
{
@@ -44,10 +44,10 @@ static atomic_t *prof_buffer;
static unsigned long prof_len;
static unsigned short int prof_shift;
-int prof_on __read_mostly;
+int prof_on __asi_not_sensitive_readmostly;
EXPORT_SYMBOL_GPL(prof_on);
-static cpumask_var_t prof_cpu_mask;
+static cpumask_var_t prof_cpu_mask __asi_not_sensitive;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
@@ -82,7 +82,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
.cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
#endif
};
-static struct rcu_state rcu_state = {
+static struct rcu_state rcu_state __asi_not_sensitive = {
.level = { &rcu_state.node[0] },
.gp_state = RCU_GP_IDLE,
.gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
@@ -98,7 +98,7 @@ static struct rcu_state rcu_state = {
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
-static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
+static __asi_not_sensitive bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
#ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444);
#endif
@@ -125,7 +125,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
* transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
* is fully initialized, including all of its kthreads having been spawned.
*/
-int rcu_scheduler_active __read_mostly;
+int rcu_scheduler_active __asi_not_sensitive;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
* early boot to take responsibility for these callbacks, but one step at
* a time.
*/
-static int rcu_scheduler_fully_active __read_mostly;
+static int rcu_scheduler_fully_active __asi_not_sensitive;
static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
unsigned long gps, unsigned long flags);
@@ -470,7 +470,7 @@ module_param(qovld, long, 0444);
static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
-static bool rcu_kick_kthreads;
+static bool rcu_kick_kthreads __asi_not_sensitive;
static int rcu_divisor = 7;
module_param(rcu_divisor, int, 0644);
@@ -243,7 +243,7 @@ core_initcall(rcu_set_runtime_mode);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map = {
+struct lockdep_map rcu_lock_map __asi_not_sensitive = {
.name = "rcu_read_lock",
.key = &rcu_lock_key,
.wait_type_outer = LD_WAIT_FREE,
@@ -494,7 +494,7 @@ EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
-int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
+int rcu_cpu_stall_suppress __asi_not_sensitive_readmostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
@@ -84,7 +84,7 @@ static int __sched_clock_stable_early = 1;
/*
* We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
*/
-__read_mostly u64 __sched_clock_offset;
+__asi_not_sensitive u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;
struct sched_clock_data {
@@ -76,9 +76,9 @@ __read_mostly int sysctl_resched_latency_warn_once = 1;
* Limited because this is done with IRQs disabled.
*/
#ifdef CONFIG_PREEMPT_RT
-const_debug unsigned int sysctl_sched_nr_migrate = 8;
+unsigned int sysctl_sched_nr_migrate __asi_not_sensitive_readmostly = 8;
#else
-const_debug unsigned int sysctl_sched_nr_migrate = 32;
+unsigned int sysctl_sched_nr_migrate __asi_not_sensitive_readmostly = 32;
#endif
/*
@@ -9254,7 +9254,7 @@ int in_sched_functions(unsigned long addr)
* Default task group.
* Every task in system belongs to this group at bootup.
*/
-struct task_group root_task_group;
+struct task_group root_task_group __asi_not_sensitive;
LIST_HEAD(task_groups);
/* Cacheline aligned slab cache for task_group */
@@ -50,7 +50,7 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca)
}
static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
-static struct cpuacct root_cpuacct = {
+static struct cpuacct root_cpuacct __asi_not_sensitive = {
.cpustat = &kernel_cpustat,
.cpuusage = &root_cpuacct_cpuusage,
};
@@ -19,7 +19,7 @@
*/
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
-static int sched_clock_irqtime;
+static int __asi_not_sensitive sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{
@@ -35,7 +35,7 @@
*
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_latency = 6000000ULL;
+__asi_not_sensitive unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
@@ -90,7 +90,7 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+unsigned int sysctl_sched_migration_cost __asi_not_sensitive_readmostly = 500000UL;
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
@@ -57,7 +57,7 @@
/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
-unsigned long calc_load_update;
+unsigned long calc_load_update __asi_not_sensitive;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */
@@ -14,7 +14,7 @@ static const u64 max_rt_runtime = MAX_BW;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
-struct rt_bandwidth def_rt_bandwidth;
+struct rt_bandwidth def_rt_bandwidth __asi_not_sensitive;
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
@@ -2379,8 +2379,8 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
-extern const_debug unsigned int sysctl_sched_nr_migrate;
-extern const_debug unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_migration_cost;
#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
@@ -1070,7 +1070,7 @@ static int __init maxcpus(char *str)
early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
-unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
+unsigned int nr_cpu_ids __asi_not_sensitive = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
@@ -56,7 +56,8 @@ DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif
-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS]
+__asi_not_sensitive ____cacheline_aligned;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -706,7 +706,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
* High resolution timer enabled ?
*/
static bool hrtimer_hres_enabled __read_mostly = true;
-unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+unsigned int hrtimer_resolution __asi_not_sensitive = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);
/*
@@ -40,7 +40,13 @@ static struct clocksource clocksource_jiffies = {
.max_cycles = 10,
};
-__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
+/*
+ * TODO(oweisse): __cacheline_aligned_in_smp expands to
+ * __section__(".data..cacheline_aligned"), which is at odds with
+ * __asi_not_sensitive. Consider __attribute__((__aligned__(SMP_CACHE_BYTES))).
+ */
+/* __cacheline_aligned_in_smp */
+__asi_not_sensitive DEFINE_RAW_SPINLOCK(jiffies_lock);
__cacheline_aligned_in_smp seqcount_raw_spinlock_t jiffies_seq =
SEQCNT_RAW_SPINLOCK_ZERO(jiffies_seq, &jiffies_lock);
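
A minimal sketch of the alternative the TODO above points at: keep the
cacheline alignment with an explicit aligned attribute (so no conflicting
__section placement is implied) and let the ASI annotation choose the section.
SMP_CACHE_BYTES is the kernel's existing cacheline-size define; the helper
macro name is an assumption, not something this series defines.

/* Sketch only: alignment without the .data..cacheline_aligned section,
 * so it can be combined with an ASI section annotation.
 */
#include <linux/cache.h>	/* SMP_CACHE_BYTES */

#define __asi_cacheline_aligned \
        __attribute__((__aligned__(SMP_CACHE_BYTES)))

/* e.g., mirroring the jiffies_lock line above:
 *
 *	__asi_not_sensitive __asi_cacheline_aligned
 *	DEFINE_RAW_SPINLOCK(jiffies_lock);
 */
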
@@ -31,13 +31,13 @@
/* USER_HZ period (usecs): */
-unsigned long tick_usec = USER_TICK_USEC;
+unsigned long tick_usec __asi_not_sensitive = USER_TICK_USEC;
/* SHIFTED_HZ period (nsecs): */
-unsigned long tick_nsec;
+unsigned long tick_nsec __asi_not_sensitive;
-static u64 tick_length;
-static u64 tick_length_base;
+static u64 tick_length __asi_not_sensitive;
+static u64 tick_length_base __asi_not_sensitive;
#define SECS_PER_DAY 86400
#define MAX_TICKADJ 500LL /* usecs */
@@ -54,36 +54,36 @@ static u64 tick_length_base;
*
* (TIME_ERROR prevents overwriting the CMOS clock)
*/
-static int time_state = TIME_OK;
+static int time_state __asi_not_sensitive = TIME_OK;
/* clock status bits: */
-static int time_status = STA_UNSYNC;
+static int time_status __asi_not_sensitive = STA_UNSYNC;
/* time adjustment (nsecs): */
-static s64 time_offset;
+static s64 time_offset __asi_not_sensitive;
/* pll time constant: */
-static long time_constant = 2;
+static long time_constant __asi_not_sensitive = 2;
/* maximum error (usecs): */
-static long time_maxerror = NTP_PHASE_LIMIT;
+static long time_maxerror __asi_not_sensitive = NTP_PHASE_LIMIT;
/* estimated error (usecs): */
-static long time_esterror = NTP_PHASE_LIMIT;
+static long time_esterror __asi_not_sensitive = NTP_PHASE_LIMIT;
/* frequency offset (scaled nsecs/secs): */
-static s64 time_freq;
+static s64 time_freq __asi_not_sensitive;
/* time at last adjustment (secs): */
-static time64_t time_reftime;
+static time64_t time_reftime __asi_not_sensitive;
-static long time_adjust;
+static long time_adjust __asi_not_sensitive;
/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
-static s64 ntp_tick_adj;
+static s64 ntp_tick_adj __asi_not_sensitive;
/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
-static time64_t ntp_next_leap_sec = TIME64_MAX;
+static time64_t ntp_next_leap_sec __asi_not_sensitive = TIME64_MAX;
#ifdef CONFIG_NTP_PPS
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
* CPU which handles the tick and protected by jiffies_lock. There is
* no requirement to write hold the jiffies seqcount for it.
*/
-ktime_t tick_next_period;
+ktime_t tick_next_period __asi_not_sensitive;
/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -47,7 +47,7 @@ ktime_t tick_next_period;
* at it will take over and keep the time keeping alive. The handover
* procedure also covers cpu hotplug.
*/
-int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
+int tick_do_timer_cpu __asi_not_sensitive_readmostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
/*
* tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
@@ -15,7 +15,7 @@
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern int tick_do_timer_cpu __read_mostly;
+extern int tick_do_timer_cpu;
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
@@ -49,7 +49,7 @@ struct tick_sched *tick_get_tick_sched(int cpu)
* jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
* consistent view of jiffies and last_jiffies_update.
*/
-static ktime_t last_jiffies_update;
+static ktime_t last_jiffies_update __asi_not_sensitive;
/*
* Must be called with interrupts disabled !
@@ -39,7 +39,7 @@ enum timekeeping_adv_mode {
TK_ADV_FREQ
};
-DEFINE_RAW_SPINLOCK(timekeeper_lock);
+__asi_not_sensitive DEFINE_RAW_SPINLOCK(timekeeper_lock);
/*
* The most important data for readout fits into a single 64 byte
@@ -48,14 +48,14 @@ DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct {
seqcount_raw_spinlock_t seq;
struct timekeeper timekeeper;
-} tk_core ____cacheline_aligned = {
+} tk_core ____cacheline_aligned __asi_not_sensitive = {
.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};
-static struct timekeeper shadow_timekeeper;
+static struct timekeeper shadow_timekeeper __asi_not_sensitive;
/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
+int __asi_not_sensitive_readmostly timekeeping_suspended;
/**
* struct tk_fast - NMI safe timekeeper
@@ -72,7 +72,7 @@ struct tk_fast {
};
/* Suspend-time cycles value for halted fast timekeeper. */
-static u64 cycles_at_suspend;
+static u64 cycles_at_suspend __asi_not_sensitive;
static u64 dummy_clock_read(struct clocksource *cs)
{
@@ -26,7 +26,7 @@ extern void update_process_times(int user);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
-extern raw_spinlock_t jiffies_lock;
+extern __asi_not_sensitive raw_spinlock_t jiffies_lock;
extern seqcount_raw_spinlock_t jiffies_seq;
#define CS_NAME_LEN 32
@@ -56,7 +56,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
-__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+__visible u64 jiffies_64 __asi_not_sensitive ____cacheline_aligned = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_export);
* The global_trace is the descriptor that holds the top-level tracing
* buffers for the live tracing.
*/
-static struct trace_array global_trace = {
+static struct trace_array global_trace __asi_not_sensitive = {
.trace_flags = TRACE_DEFAULT_FLAGS,
};
@@ -16,8 +16,8 @@
#define RECORD_CMDLINE 1
#define RECORD_TGID 2
-static int sched_cmdline_ref;
-static int sched_tgid_ref;
+static int sched_cmdline_ref __asi_not_sensitive;
+static int sched_tgid_ref __asi_not_sensitive;
static DEFINE_MUTEX(sched_register_mutex);
static void
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
+#include <asm/asi.h>
/*
* We want to turn all lock-debugging facilities on/off at once,
@@ -22,7 +23,7 @@
* that would just muddy the log. So we report the first one and
* shut up after that.
*/
-int debug_locks __read_mostly = 1;
+int debug_locks __asi_not_sensitive_readmostly = 1;
EXPORT_SYMBOL_GPL(debug_locks);
/*
@@ -30,7 +31,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
* 'silent failure': nothing is printed to the console when
* a locking bug is detected.
*/
-int debug_locks_silent __read_mostly;
+int debug_locks_silent __asi_not_sensitive_readmostly;
EXPORT_SYMBOL_GPL(debug_locks_silent);
/*
@@ -152,7 +152,7 @@ static int __init disable_randmaps(char *s)
}
__setup("norandmaps", disable_randmaps);
-unsigned long zero_pfn __read_mostly;
+unsigned long zero_pfn __asi_not_sensitive;
EXPORT_SYMBOL(zero_pfn);
unsigned long highest_memmap_pfn __read_mostly;
@@ -183,7 +183,7 @@ unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
int percpu_pagelist_high_fraction;
-gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
+gfp_t gfp_allowed_mask __asi_not_sensitive_readmostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);
@@ -24,10 +24,10 @@
* 1) mem_section - memory sections, mem_map's for valid memory
*/
#ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section **mem_section;
+struct mem_section **mem_section __asi_not_sensitive;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
- ____cacheline_internodealigned_in_smp;
+ ____cacheline_internodealigned_in_smp __asi_not_sensitive;
#endif
EXPORT_SYMBOL(mem_section);
@@ -3497,7 +3497,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
return 0;
}
-static struct file_operations kvm_vcpu_fops = {
+static struct file_operations kvm_vcpu_fops __asi_not_sensitive = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
.mmap = kvm_vcpu_mmap,