@@ -1344,7 +1344,7 @@ static void ept_dump_p2m_table(unsigned char key)
c ?: ept_entry->ipat ? '!' : ' ');
if ( !(record_counter++ % 100) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
}
unmap_domain_page(table);
}
@@ -418,7 +418,7 @@ static void dump_numa(unsigned char key)
printk("Memory location of each domain:\n");
for_each_domain ( d )
{
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
printk("Domain %u (total: %u):\n", d->domain_id, domain_tot_pages(d));
@@ -462,7 +462,7 @@ static void dump_numa(unsigned char key)
for ( j = 0; j < d->max_vcpus; j++ )
{
if ( !(j & 0x3f) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
if ( vnuma->vcpu_to_vnode[j] == i )
{
@@ -263,7 +263,7 @@ static void dump_domains(unsigned char key)
{
unsigned int i;
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
printk("General information for domain %u:\n", d->domain_id);
printk(" refcnt=%d dying=%d pause_count=%d\n",
@@ -307,7 +307,7 @@ static void dump_domains(unsigned char key)
for_each_sched_unit_vcpu ( unit, v )
{
if ( !(v->vcpu_id & 0x3f) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
printk(" VCPU%d: CPU%d [has=%c] poll=%d "
"upcall_pend=%02x upcall_mask=%02x ",
@@ -337,7 +337,7 @@ static void dump_domains(unsigned char key)
for_each_vcpu ( d, v )
{
if ( !(v->vcpu_id & 0x3f) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
printk("Notifying guest %d:%d (virq %d, port %d)\n",
d->domain_id, v->vcpu_id,
@@ -25,7 +25,7 @@ static softirq_handler softirq_handlers[NR_SOFTIRQS];
static DEFINE_PER_CPU(cpumask_t, batch_mask);
static DEFINE_PER_CPU(unsigned int, batching);
-static void __do_softirq(unsigned long ignore_mask)
+static void __do_softirq(unsigned long ignore_mask, bool rcu_allowed)
{
unsigned int i, cpu;
unsigned long pending;
@@ -38,7 +38,7 @@ static void __do_softirq(unsigned long ignore_mask)
*/
cpu = smp_processor_id();
- if ( rcu_pending(cpu) )
+ if ( rcu_allowed && rcu_pending(cpu) )
rcu_check_callbacks(cpu);
if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
@@ -55,13 +55,22 @@ void process_pending_softirqs(void)
{
ASSERT(!in_irq() && local_irq_is_enabled());
/* Do not enter scheduler as it can preempt the calling context. */
- __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ));
+ __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ),
+ true);
+}
+
+void process_pending_softirqs_norcu(void)
+{
+ ASSERT(!in_irq() && local_irq_is_enabled());
+ /* Do not enter scheduler as it can preempt the calling context. */
+ __do_softirq((1ul << SCHEDULE_SOFTIRQ) | (1ul << SCHED_SLAVE_SOFTIRQ) |
+ (1ul << RCU_SOFTIRQ), false);
}
void do_softirq(void)
{
ASSERT_NOT_IN_ATOMIC();
- __do_softirq(0);
+ __do_softirq(0, true);
}
void open_softirq(int nr, softirq_handler handler)
@@ -587,7 +587,7 @@ static void amd_dump_p2m_table_level(struct page_info* pg, int level,
struct amd_iommu_pte *pde = &table_vaddr[index];
if ( !(index % 2) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
if ( !pde->pr )
continue;
@@ -2646,7 +2646,7 @@ static void vtd_dump_p2m_table_level(paddr_t pt_maddr, int level, paddr_t gpa,
for ( i = 0; i < PTE_NUM; i++ )
{
if ( !(i % 2) )
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
pte = &pt_vaddr[i];
if ( !dma_pte_present(*pte) )
@@ -321,13 +321,13 @@ void vpci_dump_msi(void)
* holding the lock.
*/
printk("unable to print all MSI-X entries: %d\n", rc);
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
continue;
}
}
spin_unlock(&pdev->vpci->lock);
- process_pending_softirqs();
+ process_pending_softirqs_norcu();
}
}
rcu_read_unlock(&domlist_read_lock);
@@ -37,7 +37,9 @@ void cpu_raise_softirq_batch_finish(void);
* Process pending softirqs on this CPU. This should be called periodically
* when performing work that prevents softirqs from running in a timely manner.
* Use this instead of do_softirq() when you do not want to be preempted.
+ * The norcu variant is to be used while holding an rcu_read_lock().
*/
void process_pending_softirqs(void);
+void process_pending_softirqs_norcu(void);
#endif /* __XEN_SOFTIRQ_H__ */
Some keyhandlers are calling process_pending_softirqs() while holding an rcu_read_lock(). This is wrong, as process_pending_softirqs() might activate RCU calls which should not happen inside an rcu_read_lock(). For that purpose add process_pending_softirqs_norcu() which will not do any RCU activity and use this for keyhandlers. Signed-off-by: Juergen Gross <jgross@suse.com> --- V3: - add RCU_SOFTIRQ to ignore in process_pending_softirqs_norcu() (Roger Pau Monné) --- xen/arch/x86/mm/p2m-ept.c | 2 +- xen/arch/x86/numa.c | 4 ++-- xen/common/keyhandler.c | 6 +++--- xen/common/softirq.c | 17 +++++++++++++---- xen/drivers/passthrough/amd/pci_amd_iommu.c | 2 +- xen/drivers/passthrough/vtd/iommu.c | 2 +- xen/drivers/vpci/msi.c | 4 ++-- xen/include/xen/softirq.h | 2 ++ 8 files changed, 25 insertions(+), 14 deletions(-)