@@ -61,9 +61,6 @@ CFLAGS-$(perfc_arrays) += -DPERF_ARRAYS
CFLAGS-$(lock_profile) += -DLOCK_PROFILE
CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
-ifneq ($(max_phys_cpus),)
-CFLAGS-y += -DMAX_PHYS_CPUS=$(max_phys_cpus)
-endif
ifneq ($(max_phys_irqs),)
CFLAGS-y += -DMAX_PHYS_IRQS=$(max_phys_irqs)
endif
@@ -12,8 +12,8 @@ struct smp_enable_ops {
int (*prepare_cpu)(int);
};
-static paddr_t cpu_release_addr[NR_CPUS];
-static struct smp_enable_ops smp_enable_ops[NR_CPUS];
+static paddr_t cpu_release_addr[CONFIG_NR_CPUS];
+static struct smp_enable_ops smp_enable_ops[CONFIG_NR_CPUS];
static int __init smp_spin_table_cpu_up(int cpu)
{
@@ -5,14 +5,14 @@
#include <xen/mm.h>
#include <xen/rcupdate.h>
-unsigned long __per_cpu_offset[NR_CPUS];
+unsigned long __per_cpu_offset[CONFIG_NR_CPUS];
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
#define PERCPU_ORDER (get_order_from_bytes(__per_cpu_data_end-__per_cpu_start))
void __init percpu_init_areas(void)
{
unsigned int cpu;
- for ( cpu = 1; cpu < NR_CPUS; cpu++ )
+ for ( cpu = 1; cpu < CONFIG_NR_CPUS; cpu++ )
__per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}
@@ -36,10 +36,10 @@ cpumask_t cpu_online_map;
cpumask_t cpu_present_map;
cpumask_t cpu_possible_map;
-struct cpuinfo_arm cpu_data[NR_CPUS];
+struct cpuinfo_arm cpu_data[CONFIG_NR_CPUS];
/* CPU logical map: map xen cpuid to an MPIDR */
-u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
+u32 __cpu_logical_map[CONFIG_NR_CPUS] = { [0 ... CONFIG_NR_CPUS-1] = MPIDR_INVALID };
/* Fake one node for now. See also include/asm-arm/numa.h */
nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };
@@ -99,9 +99,9 @@ void __init smp_init_cpus(void)
struct dt_device_node *cpu;
unsigned int i, j;
unsigned int cpuidx = 1;
- static u32 tmp_map[NR_CPUS] __initdata =
+ static u32 tmp_map[CONFIG_NR_CPUS] __initdata =
{
- [0 ... NR_CPUS - 1] = MPIDR_INVALID
+ [0 ... CONFIG_NR_CPUS - 1] = MPIDR_INVALID
};
bool_t bootcpu_valid = 0;
int rc;
@@ -209,12 +209,12 @@ void __init smp_init_cpus(void)
else
i = cpuidx++;
- if ( cpuidx > NR_CPUS )
+ if ( cpuidx > CONFIG_NR_CPUS )
{
printk(XENLOG_WARNING
"DT /cpu %u node greater than max cores %u, capping them\n",
- cpuidx, NR_CPUS);
- cpuidx = NR_CPUS;
+ cpuidx, CONFIG_NR_CPUS);
+ cpuidx = CONFIG_NR_CPUS;
break;
}
@@ -122,7 +122,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
* cpus_possible_map more accurately, to permit
- * to not preallocating memory for all NR_CPUS
+ * to not preallocating memory for all CONFIG_NR_CPUS
* when we use CPU hotplug.
*/
mp_register_lapic(processor->local_apic_id, enabled, 0);
@@ -152,7 +152,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
* cpus_possible_map more accurately, to permit
- * to not preallocating memory for all NR_CPUS
+ * to not preallocating memory for all CONFIG_NR_CPUS
* when we use CPU hotplug.
*/
mp_register_lapic(processor->id, enabled, 0);
@@ -110,7 +110,7 @@ integer_param("max_cstate", max_cstate);
static bool_t __read_mostly local_apic_timer_c2_ok;
boolean_param("lapic_timer_c2_ok", local_apic_timer_c2_ok);
-struct acpi_processor_power *__read_mostly processor_powers[NR_CPUS];
+struct acpi_processor_power *__read_mostly processor_powers[CONFIG_NR_CPUS];
struct hw_residencies
{
@@ -51,7 +51,7 @@ enum {
#define INTEL_MSR_RANGE (0xffffull)
-struct acpi_cpufreq_data *cpufreq_drv_data[NR_CPUS];
+struct acpi_cpufreq_data *cpufreq_drv_data[CONFIG_NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -71,8 +71,8 @@ void __init set_nr_cpu_ids(unsigned int max_cpus)
{
if (!max_cpus)
max_cpus = num_processors + disabled_cpus;
- if (max_cpus > NR_CPUS)
- max_cpus = NR_CPUS;
+ if (max_cpus > CONFIG_NR_CPUS)
+ max_cpus = CONFIG_NR_CPUS;
else if (!max_cpus)
max_cpus = 1;
printk(XENLOG_INFO "SMP: Allowing %u CPUs (%d hotplug CPUs)\n",
@@ -82,8 +82,8 @@ void __init set_nr_cpu_ids(unsigned int max_cpus)
#ifndef nr_cpumask_bits
nr_cpumask_bits = (max_cpus + (BITS_PER_LONG - 1)) &
~(BITS_PER_LONG - 1);
- printk(XENLOG_DEBUG "NR_CPUS:%u nr_cpumask_bits:%u\n",
- NR_CPUS, nr_cpumask_bits);
+ printk(XENLOG_DEBUG "CONFIG_NR_CPUS:%u nr_cpumask_bits:%u\n",
+ CONFIG_NR_CPUS, nr_cpumask_bits);
#endif
}
@@ -151,7 +151,7 @@ static int MP_processor_info_x(struct mpc_config_processor *m,
set_apicid(apicid, &phys_cpu_present_map);
if (num_processors >= nr_cpu_ids) {
- printk(KERN_WARNING "WARNING: NR_CPUS limit of %u reached."
+ printk(KERN_WARNING "WARNING: CONFIG_NR_CPUS limit of %u reached."
" Processor ignored.\n", nr_cpu_ids);
return -ENOSPC;
}
@@ -144,7 +144,7 @@ static void __init wait_for_nmis(void *p)
int __init check_nmi_watchdog (void)
{
- static unsigned int __initdata prev_nmi_count[NR_CPUS];
+ static unsigned int __initdata prev_nmi_count[CONFIG_NR_CPUS];
int cpu;
bool_t ok = 1;
@@ -36,8 +36,8 @@ static typeof(*memnodemap) _memnodemap[64];
unsigned long memnodemapsize;
u8 *memnodemap;
-nodeid_t cpu_to_node[NR_CPUS] __read_mostly = {
- [0 ... NR_CPUS-1] = NUMA_NO_NODE
+nodeid_t cpu_to_node[CONFIG_NR_CPUS] __read_mostly = {
+ [0 ... CONFIG_NR_CPUS-1] = NUMA_NO_NODE
};
/*
* Keep BIOS's CPU2node information, should not be used for memory allocaion
@@ -32,8 +32,8 @@ struct op_counter_config counter_config[OP_MAX_COUNTER];
struct op_ibs_config ibs_config;
struct op_x86_model_spec const *__read_mostly model;
-static struct op_msrs cpu_msrs[NR_CPUS];
-static unsigned long saved_lvtpc[NR_CPUS];
+static struct op_msrs cpu_msrs[CONFIG_NR_CPUS];
+static unsigned long saved_lvtpc[CONFIG_NR_CPUS];
static char *cpu_type;
@@ -5,7 +5,7 @@
#include <xen/mm.h>
#include <xen/rcupdate.h>
-unsigned long __per_cpu_offset[NR_CPUS];
+unsigned long __per_cpu_offset[CONFIG_NR_CPUS];
/*
* Force uses of per_cpu() with an invalid area to attempt to access the
@@ -19,7 +19,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
void __init percpu_init_areas(void)
{
unsigned int cpu;
- for ( cpu = 1; cpu < NR_CPUS; cpu++ )
+ for ( cpu = 1; cpu < CONFIG_NR_CPUS; cpu++ )
__per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}
@@ -63,10 +63,10 @@ unsigned int __read_mostly nr_sockets;
cpumask_t **__read_mostly socket_cpumask;
static cpumask_t *secondary_socket_cpumask;
-struct cpuinfo_x86 cpu_data[NR_CPUS];
+struct cpuinfo_x86 cpu_data[CONFIG_NR_CPUS];
-u32 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
- { [0 ... NR_CPUS-1] = BAD_APICID };
+u32 x86_cpu_to_apicid[CONFIG_NR_CPUS] __read_mostly =
+ { [0 ... CONFIG_NR_CPUS-1] = BAD_APICID };
static int cpu_error;
static enum cpu_state {
@@ -79,7 +79,7 @@ static enum cpu_state {
} cpu_state;
#define set_cpu_state(state) do { mb(); cpu_state = (state); } while (0)
-void *stack_base[NR_CPUS];
+void *stack_base[CONFIG_NR_CPUS];
static void smp_store_cpu_info(int id)
{
@@ -734,7 +734,7 @@ static int cpu_smpboot_alloc(unsigned int cpu)
if ( gdt == NULL )
goto oom;
memcpy(gdt, boot_cpu_gdt_table, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
- BUILD_BUG_ON(NR_CPUS > 0x10000);
+ BUILD_BUG_ON(CONFIG_NR_CPUS > 0x10000);
gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
per_cpu(compat_gdt_table, cpu) = gdt = alloc_xenheap_pages(order, memflags);
@@ -100,7 +100,7 @@ DEFINE_PER_CPU_READ_MOSTLY(struct desc_struct *, compat_gdt_table);
idt_entry_t idt_table[IDT_ENTRIES];
/* Pointer to the IDT of every CPU. */
-idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
+idt_entry_t *idt_tables[CONFIG_NR_CPUS] __read_mostly;
void (*ioemul_handle_quirk)(
u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs);
@@ -218,7 +218,7 @@ SECTIONS
}
ASSERT(__image_base__ > XEN_VIRT_START ||
- _end <= XEN_VIRT_END - NR_CPUS * PAGE_SIZE,
+ _end <= XEN_VIRT_END - CONFIG_NR_CPUS * PAGE_SIZE,
"Xen image overlaps stubs area")
#ifdef CONFIG_KEXEC
@@ -29,7 +29,7 @@ static unsigned int core_parking_power(unsigned int event);
static unsigned int core_parking_performance(unsigned int event);
static uint32_t cur_idle_nums;
-static unsigned int core_parking_cpunum[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
+static unsigned int core_parking_cpunum[CONFIG_NR_CPUS] = {[0 ... CONFIG_NR_CPUS-1] = -1};
static struct core_parking_policy {
char name[30];
@@ -118,8 +118,8 @@ static unsigned int core_parking_power(unsigned int event)
{
case CORE_PARKING_INCREMENT:
{
- int core_tmp, core_weight = NR_CPUS + 1;
- int sibling_tmp, sibling_weight = NR_CPUS + 1;
+ int core_tmp, core_weight = CONFIG_NR_CPUS + 1;
+ int sibling_tmp, sibling_weight = CONFIG_NR_CPUS + 1;
cpumask_t core_candidate_map, sibling_candidate_map;
cpumask_clear(&core_candidate_map);
cpumask_clear(&sibling_candidate_map);
@@ -6,15 +6,15 @@
#include <xen/sched.h>
#include <xen/stop_machine.h>
-unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
+unsigned int __read_mostly nr_cpu_ids = CONFIG_NR_CPUS;
#ifndef nr_cpumask_bits
unsigned int __read_mostly nr_cpumask_bits
- = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
+ = BITS_TO_LONGS(CONFIG_NR_CPUS) * BITS_PER_LONG;
#endif
/*
* cpu_bit_bitmap[] is a special, "compressed" data structure that
- * represents all NR_CPUS bits binary values of 1<<nr.
+ * represents all CONFIG_NR_CPUS bits binary values of 1<<nr.
*
* It is used by cpumask_of() to get a constant address to a CPU
* mask value that has a single bit set only.
@@ -26,7 +26,7 @@ unsigned int __read_mostly nr_cpumask_bits
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
-const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(CONFIG_NR_CPUS)] = {
MASK_DECLARE_8(0), MASK_DECLARE_8(8),
MASK_DECLARE_8(16), MASK_DECLARE_8(24),
@@ -68,7 +68,7 @@ domid_t hardware_domid __read_mostly;
integer_param("hardware_dom", hardware_domid);
#endif
-struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
+struct vcpu *idle_vcpu[CONFIG_NR_CPUS] __read_mostly;
vcpu_info_t dummy_vcpu_info;
@@ -30,7 +30,7 @@ const CHAR16 *__read_mostly efi_fw_vendor;
const EFI_RUNTIME_SERVICES *__read_mostly efi_rs;
#ifndef CONFIG_ARM /* TODO - disabled until implemented on ARM */
static DEFINE_SPINLOCK(efi_rs_lock);
-static unsigned int efi_rs_on_cpu = NR_CPUS;
+static unsigned int efi_rs_on_cpu = CONFIG_NR_CPUS;
#endif
UINTN __read_mostly efi_memmap_size;
@@ -103,7 +103,7 @@ void efi_rs_leave(unsigned long cr3)
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
irq_exit();
- efi_rs_on_cpu = NR_CPUS;
+ efi_rs_on_cpu = CONFIG_NR_CPUS;
spin_unlock(&efi_rs_lock);
stts();
}
@@ -60,7 +60,7 @@ struct gdb_cpu_info
atomic_t ack;
};
-static struct gdb_cpu_info gdb_cpu[NR_CPUS];
+static struct gdb_cpu_info gdb_cpu[CONFIG_NR_CPUS];
static atomic_t gdb_smp_paused_count;
static void gdb_smp_pause(void);
@@ -132,7 +132,7 @@ static int rcu_barrier_action(void *_cpu_count)
int rcu_barrier(void)
{
atomic_t cpu_count = ATOMIC_INIT(0);
- return stop_machine_run(rcu_barrier_action, &cpu_count, NR_CPUS);
+ return stop_machine_run(rcu_barrier_action, &cpu_count, CONFIG_NR_CPUS);
}
/* Is batch a before batch b ? */
@@ -223,9 +223,9 @@ struct csched2_private {
struct list_head sdom; /* Used mostly for dump keyhandler. */
- int runq_map[NR_CPUS];
+ int runq_map[CONFIG_NR_CPUS];
cpumask_t active_queues; /* Queues which may have active cpus */
- struct csched2_runqueue_data rqd[NR_CPUS];
+ struct csched2_runqueue_data rqd[CONFIG_NR_CPUS];
int load_window_shift;
};
@@ -18,7 +18,7 @@
#include <xen/softirq.h>
#ifndef __ARCH_IRQ_STAT
-irq_cpustat_t irq_stat[NR_CPUS];
+irq_cpustat_t irq_stat[CONFIG_NR_CPUS];
#endif
static softirq_handler softirq_handlers[NR_SOFTIRQS];
@@ -246,7 +246,7 @@ int _spin_trylock_recursive(spinlock_t *lock)
unsigned int cpu = smp_processor_id();
/* Don't allow overflow of recurse_cpu field. */
- BUILD_BUG_ON(NR_CPUS > 0xfffu);
+ BUILD_BUG_ON(CONFIG_NR_CPUS > 0xfffu);
check_lock(&lock->debug);
@@ -112,7 +112,7 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
spin_debug_disable();
stopmachine_set_state(STOPMACHINE_INVOKE);
- if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
+ if ( (cpu == smp_processor_id()) || (cpu == CONFIG_NR_CPUS) )
stopmachine_data.fn_result = (*fn)(data);
stopmachine_wait_state();
ret = stopmachine_data.fn_result;
@@ -150,7 +150,7 @@ static void stopmachine_action(unsigned long cpu)
break;
case STOPMACHINE_INVOKE:
if ( (stopmachine_data.fn_cpu == smp_processor_id()) ||
- (stopmachine_data.fn_cpu == NR_CPUS) )
+ (stopmachine_data.fn_cpu == CONFIG_NR_CPUS) )
stopmachine_data.fn_result =
stopmachine_data.fn(stopmachine_data.fn_data);
break;
@@ -96,7 +96,7 @@ static struct notifier_block cpu_nfb = {
static uint32_t calc_tinfo_first_offset(void)
{
- int offset_in_bytes = offsetof(struct t_info, mfn_offset[NR_CPUS]);
+ int offset_in_bytes = offsetof(struct t_info, mfn_offset[CONFIG_NR_CPUS]);
return fit_to_type(uint32_t, offset_in_bytes);
}
@@ -169,7 +169,7 @@ static int calculate_tbuf_size(unsigned int pages, uint16_t t_info_first_offset)
static int alloc_trace_bufs(unsigned int pages)
{
int i, cpu;
- /* Start after a fixed-size array of NR_CPUS */
+ /* Start after a fixed-size array of CONFIG_NR_CPUS */
uint32_t *t_info_mfn_list;
uint16_t t_info_first_offset;
uint16_t offset;
@@ -32,7 +32,7 @@
#include <public/sysctl.h>
struct cpufreq_driver *cpufreq_driver;
-struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
+struct processor_pminfo *__read_mostly processor_pminfo[CONFIG_NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);
DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
@@ -32,7 +32,7 @@ struct acpi_cpufreq_data {
unsigned int arch_cpu_flags;
};
-extern struct acpi_cpufreq_data *cpufreq_drv_data[NR_CPUS];
+extern struct acpi_cpufreq_data *cpufreq_drv_data[CONFIG_NR_CPUS];
struct cpufreq_cpuinfo {
unsigned int max_freq;
@@ -40,7 +40,7 @@ struct processor_pminfo {
struct processor_performance perf;
};
-extern struct processor_pminfo *processor_pminfo[NR_CPUS];
+extern struct processor_pminfo *processor_pminfo[CONFIG_NR_CPUS];
struct px_stat {
uint8_t total; /* total Px states */
@@ -45,12 +45,6 @@
#define OPT_CONSOLE_STR "dtuart"
-#ifdef MAX_PHYS_CPUS
-#define NR_CPUS MAX_PHYS_CPUS
-#else
-#define NR_CPUS 128
-#endif
-
#ifdef CONFIG_ARM_64
#define MAX_VIRT_CPUS 128
#else
@@ -14,7 +14,7 @@
#endif
extern char __per_cpu_start[], __per_cpu_data_end[];
-extern unsigned long __per_cpu_offset[NR_CPUS];
+extern unsigned long __per_cpu_offset[CONFIG_NR_CPUS];
void percpu_init_areas(void);
/* Separate out the type, so (int[3], foo) works. */
@@ -142,9 +142,9 @@ struct acpi_sleep_info {
#endif /* CONFIG_ACPI_SLEEP */
-#define MAX_MADT_ENTRIES MAX(256, 2 * NR_CPUS)
+#define MAX_MADT_ENTRIES MAX(256, 2 * CONFIG_NR_CPUS)
extern u32 x86_acpiid_to_apicid[];
-#define MAX_LOCAL_APIC MAX(256, 4 * NR_CPUS)
+#define MAX_LOCAL_APIC MAX(256, 4 * CONFIG_NR_CPUS)
#define INVALID_ACPIID (-1U)
@@ -60,12 +60,6 @@
#define OPT_CONSOLE_STR "vga"
-#ifdef MAX_PHYS_CPUS
-#define NR_CPUS MAX_PHYS_CPUS
-#else
-#define NR_CPUS 256
-#endif
-
/* Linkage for x86 */
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
@@ -14,7 +14,7 @@
#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
#define MAX_MPC_ENTRY 1024
-#define MAX_APICS MAX(256, 4 * NR_CPUS)
+#define MAX_APICS MAX(256, 4 * CONFIG_NR_CPUS)
struct intel_mp_floating
{
@@ -9,7 +9,7 @@ typedef u8 nodeid_t;
extern int srat_rev;
-extern nodeid_t cpu_to_node[NR_CPUS];
+extern nodeid_t cpu_to_node[CONFIG_NR_CPUS];
extern cpumask_t node_to_cpumask[];
#define cpu_to_node(cpu) (cpu_to_node[cpu])
@@ -3,7 +3,7 @@
#ifndef __ASSEMBLY__
extern char __per_cpu_start[], __per_cpu_data_end[];
-extern unsigned long __per_cpu_offset[NR_CPUS];
+extern unsigned long __per_cpu_offset[CONFIG_NR_CPUS];
void percpu_init_areas(void);
#endif
@@ -37,11 +37,11 @@
* void cpumask_shift_right(dst, src, n) Shift right
* void cpumask_shift_left(dst, src, n) Shift left
*
- * int cpumask_first(mask) Number lowest set bit, or NR_CPUS
- * int cpumask_next(cpu, mask) Next cpu past 'cpu', or NR_CPUS
- * int cpumask_last(mask) Number highest set bit, or NR_CPUS
- * int cpumask_any(mask) Any cpu in mask, or NR_CPUS
- * int cpumask_cycle(cpu, mask) Next cpu cycling from 'cpu', or NR_CPUS
+ * int cpumask_first(mask) Number lowest set bit, or CONFIG_NR_CPUS
+ * int cpumask_next(cpu, mask) Next cpu past 'cpu', or CONFIG_NR_CPUS
+ * int cpumask_last(mask) Number highest set bit, or CONFIG_NR_CPUS
+ * int cpumask_any(mask) Any cpu in mask, or CONFIG_NR_CPUS
+ * int cpumask_cycle(cpu, mask) Next cpu cycling from 'cpu', or CONFIG_NR_CPUS
*
* const cpumask_t *cpumask_of(cpu) Return cpumask with bit 'cpu' set
* unsigned long *cpumask_bits(mask) Array of unsigned long's in mask
@@ -59,7 +59,7 @@
* int cpu_possible(cpu) Is some cpu possible?
* int cpu_present(cpu) Is some cpu present (can schedule)?
*
- * int any_online_cpu(mask) First online cpu in mask, or NR_CPUS
+ * int any_online_cpu(mask) First online cpu in mask, or CONFIG_NR_CPUS
*
* for_each_possible_cpu(cpu) for-loop cpu over cpu_possible_map
* for_each_online_cpu(cpu) for-loop cpu over cpu_online_map
@@ -79,16 +79,16 @@
#include <xen/kernel.h>
#include <xen/random.h>
-typedef struct cpumask{ DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask{ DECLARE_BITMAP(bits, CONFIG_NR_CPUS); } cpumask_t;
extern unsigned int nr_cpu_ids;
-#if NR_CPUS > 4 * BITS_PER_LONG
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+#if CONFIG_NR_CPUS > 4 * BITS_PER_LONG
+/* Assuming CONFIG_NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
extern unsigned int nr_cpumask_bits;
#else
-# define nr_cpumask_bits (BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG)
+# define nr_cpumask_bits (BITS_TO_LONGS(CONFIG_NR_CPUS) * BITS_PER_LONG)
#endif
/* verify cpu argument to cpumask_* operators */
@@ -292,7 +292,7 @@ static inline unsigned int cpumask_any(const cpumask_t *srcp)
* appropriately offset.
*/
extern const unsigned long
- cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(CONFIG_NR_CPUS)];
static inline const cpumask_t *cpumask_of(unsigned int cpu)
{
@@ -330,7 +330,7 @@ static inline int cpulist_scnprintf(char *buf, int len,
*
* free_cpumask_var(tmpmask);
*/
-#if NR_CPUS > 2 * BITS_PER_LONG
+#if CONFIG_NR_CPUS > 2 * BITS_PER_LONG
#include <xen/xmalloc.h>
typedef cpumask_t *cpumask_var_t;
@@ -370,20 +370,20 @@ static inline void free_cpumask_var(cpumask_var_t mask)
}
#endif
-#if NR_CPUS > 1
+#if CONFIG_NR_CPUS > 1
#define for_each_cpu(cpu, mask) \
for ((cpu) = cpumask_first(mask); \
(cpu) < nr_cpu_ids; \
(cpu) = cpumask_next(cpu, mask))
-#else /* NR_CPUS == 1 */
+#else /* CONFIG_NR_CPUS == 1 */
#define for_each_cpu(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
-#endif /* NR_CPUS */
+#endif /* CONFIG_NR_CPUS */
/*
* The following particular system cpumasks and operations manage
* possible, present and online cpus. Each of them is a fixed size
- * bitmap of size NR_CPUS.
+ * bitmap of size CONFIG_NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
* cpu_possible_map - has bit 'cpu' set iff cpu is populatable
@@ -395,7 +395,7 @@ static inline void free_cpumask_var(cpumask_var_t mask)
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #endif
*
- * In either case, NR_CPUS is fixed at compile time, as the static
+ * In either case, CONFIG_NR_CPUS is fixed at compile time, as the static
* size of these bitmaps. The cpu_possible_map is fixed at boot
* time, as the set of CPU id's that it is possible might ever
* be plugged in at anytime during the life of that system boot.
@@ -405,7 +405,7 @@ static inline void free_cpumask_var(cpumask_var_t mask)
* for scheduling.
*
* If HOTPLUG is enabled, then cpu_possible_map is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
+ * all CONFIG_NR_CPUS bits set, otherwise it is just the set of CPUs that
* ACPI reports present at boot.
*
* If HOTPLUG is enabled, then cpu_present_map varies dynamically,
@@ -416,7 +416,7 @@ static inline void free_cpumask_var(cpumask_var_t mask)
* hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP arch's (CONFIG_NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_maps are placebos. Changing them
* will have no useful affect on the following num_*_cpus()
@@ -440,7 +440,7 @@ extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;
-#if NR_CPUS > 1
+#if CONFIG_NR_CPUS > 1
#define num_online_cpus() cpumask_weight(&cpu_online_map)
#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
#define num_present_cpus() cpumask_weight(&cpu_present_map)
@@ -467,7 +467,7 @@ struct domain
extern spinlock_t domlist_update_lock;
extern rcu_read_lock_t domlist_read_lock;
-extern struct vcpu *idle_vcpu[NR_CPUS];
+extern struct vcpu *idle_vcpu[CONFIG_NR_CPUS];
#define is_idle_domain(d) ((d)->domain_id == DOMID_IDLE)
#define is_idle_vcpu(v) (is_idle_domain((v)->domain))
@@ -69,6 +69,6 @@ void smp_send_call_function_mask(const cpumask_t *mask);
int alloc_cpu_id(void);
-extern void *stack_base[NR_CPUS];
+extern void *stack_base[CONFIG_NR_CPUS];
#endif /* __XEN_SMP_H__ */
@@ -5,7 +5,7 @@
* stop_machine_run: freeze the machine on all CPUs and run this function
* @fn: the function to run
* @data: the data ptr for the @fn()
- * @cpu: the cpu to run @fn() on (or all, if @cpu == NR_CPUS).
+ * @cpu: the cpu to run @fn() on (or all, if @cpu == CONFIG_NR_CPUS).
*
* Description: This causes every other cpu to enter a safe point, with
* each of which disables interrupts, and finally interrupts are disabled
This converts the usage of NR_CPUS / MAX_PHYS_CPUS to Kconfig as CONFIG_NR_CPUS. This should be mostly mechanical except for removing the old max_phys_cpus make variable (which defined MAX_PHYS_CPUS). CC: Ian Campbell <ian.campbell@citrix.com> CC: Stefano Stabellini <stefano.stabellini@citrix.com> CC: Jan Beulich <jbeulich@suse.com> CC: Keir Fraser <keir@xen.org> CC: Andrew Cooper <andrew.cooper3@citrix.com> CC: Liu Jinsong <jinsong.liu@alibaba-inc.com> CC: George Dunlap <george.dunlap@eu.citrix.com> CC: Dario Faggioli <dario.faggioli@citrix.com> Signed-off-by: Doug Goldstein <cardoe@cardoe.com> --- xen/Rules.mk | 3 --- xen/arch/arm/arm64/smpboot.c | 4 ++-- xen/arch/arm/percpu.c | 4 ++-- xen/arch/arm/smpboot.c | 14 +++++------ xen/arch/x86/acpi/boot.c | 4 ++-- xen/arch/x86/acpi/cpu_idle.c | 2 +- xen/arch/x86/acpi/cpufreq/cpufreq.c | 2 +- xen/arch/x86/mpparse.c | 10 ++++---- xen/arch/x86/nmi.c | 2 +- xen/arch/x86/numa.c | 4 ++-- xen/arch/x86/oprofile/nmi_int.c | 4 ++-- xen/arch/x86/percpu.c | 4 ++-- xen/arch/x86/smpboot.c | 10 ++++---- xen/arch/x86/traps.c | 2 +- xen/arch/x86/xen.lds.S | 2 +- xen/common/core_parking.c | 6 ++--- xen/common/cpu.c | 8 +++---- xen/common/domain.c | 2 +- xen/common/efi/runtime.c | 4 ++-- xen/common/gdbstub.c | 2 +- xen/common/rcupdate.c | 2 +- xen/common/sched_credit2.c | 4 ++-- xen/common/softirq.c | 2 +- xen/common/spinlock.c | 2 +- xen/common/stop_machine.c | 4 ++-- xen/common/trace.c | 4 ++-- xen/drivers/cpufreq/utility.c | 2 +- xen/include/acpi/cpufreq/cpufreq.h | 2 +- xen/include/acpi/cpufreq/processor_perf.h | 2 +- xen/include/asm-arm/config.h | 6 ----- xen/include/asm-arm/percpu.h | 2 +- xen/include/asm-x86/acpi.h | 4 ++-- xen/include/asm-x86/config.h | 6 ----- xen/include/asm-x86/mpspec_def.h | 2 +- xen/include/asm-x86/numa.h | 2 +- xen/include/asm-x86/percpu.h | 2 +- xen/include/xen/cpumask.h | 40 +++++++++++++++---------------- xen/include/xen/sched.h | 2 +- xen/include/xen/smp.h | 2 +- xen/include/xen/stop_machine.h | 2 +- 40 files changed, 86 
insertions(+), 101 deletions(-)