@@ -105,7 +105,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
if ( page_get_owner(page) == d )
return;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/*
* The incremented type count pins as writable or read-only.
@@ -136,7 +136,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
page_list_add_tail(page, &d->xenpage_list);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
int xenmem_add_to_physmap_one(
@@ -212,7 +212,7 @@ void dump_pageframe_info(struct domain *d)
{
unsigned long total[MASK_EXTR(PGT_type_mask, PGT_type_mask) + 1] = {};
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
unsigned int index = MASK_EXTR(page->u.inuse.type_info,
@@ -231,13 +231,13 @@ void dump_pageframe_info(struct domain *d)
_p(mfn_x(page_to_mfn(page))),
page->count_info, page->u.inuse.type_info);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
if ( is_hvm_domain(d) )
p2m_pod_dump_data(d);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->xenpage_list )
{
@@ -253,7 +253,7 @@ void dump_pageframe_info(struct domain *d)
page->count_info, page->u.inuse.type_info);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
void update_guest_memory_policy(struct vcpu *v,
@@ -2448,10 +2448,10 @@ int domain_relinquish_resources(struct domain *d)
d->arch.auto_unmask = 0;
}
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_splice(&d->arch.relmem_list, &d->page_list);
INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
PROGRESS(xen):
@@ -482,7 +482,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), INVALID_M2P_ENTRY);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/* The incremented type count pins as writable or read-only. */
page->u.inuse.type_info =
@@ -502,7 +502,7 @@ void share_xen_page_with_guest(struct page_info *page, struct domain *d,
page_list_add_tail(page, &d->xenpage_list);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
}
void make_cr3(struct vcpu *v, mfn_t mfn)
@@ -3597,11 +3597,11 @@ long do_mmuext_op(
{
bool drop_ref;
- spin_lock(&pg_owner->page_alloc_lock);
+ nrspin_lock(&pg_owner->page_alloc_lock);
drop_ref = (pg_owner->is_dying &&
test_and_clear_bit(_PGT_pinned,
&page->u.inuse.type_info));
- spin_unlock(&pg_owner->page_alloc_lock);
+ nrspin_unlock(&pg_owner->page_alloc_lock);
if ( drop_ref )
{
pin_drop:
@@ -4424,7 +4424,7 @@ int steal_page(
* that it might be upon return from alloc_domheap_pages with
* MEMF_no_owner set.
*/
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
BUG_ON(page->u.inuse.type_info & (PGT_count_mask | PGT_locked |
PGT_pinned));
@@ -4436,7 +4436,7 @@ int steal_page(
if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
drop_dom_ref = true;
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( unlikely(drop_dom_ref) )
put_domain(d);
@@ -740,11 +740,11 @@ static int page_make_private(struct domain *d, struct page_info *page)
if ( !get_page(page, dom_cow) )
return -EINVAL;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
if ( d->is_dying )
{
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
return -EBUSY;
}
@@ -752,7 +752,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
if ( page->u.inuse.type_info != expected_type )
{
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
return -EEXIST;
}
@@ -769,7 +769,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
if ( domain_adjust_tot_pages(d, 1) == 1 )
get_knownalive_domain(d);
page_list_add_tail(page, &d->page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
put_page(page);
@@ -27,7 +27,7 @@
static always_inline void lock_page_alloc(struct p2m_domain *p2m)
{
page_alloc_mm_pre_lock(p2m->domain);
- spin_lock(&(p2m->domain->page_alloc_lock));
+ nrspin_lock(&(p2m->domain->page_alloc_lock));
page_alloc_mm_post_lock(p2m->domain,
p2m->domain->arch.page_alloc_unlock_level);
}
@@ -35,7 +35,7 @@ static always_inline void lock_page_alloc(struct p2m_domain *p2m)
static inline void unlock_page_alloc(struct p2m_domain *p2m)
{
page_alloc_mm_unlock(p2m->domain->arch.page_alloc_unlock_level);
- spin_unlock(&(p2m->domain->page_alloc_lock));
+ nrspin_unlock(&(p2m->domain->page_alloc_lock));
}
/*
@@ -2228,7 +2228,7 @@ void audit_p2m(struct domain *d,
/* Audit part two: walk the domain's page allocation list, checking
* the m2p entries. */
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
mfn = mfn_x(page_to_mfn(page));
@@ -2280,7 +2280,7 @@ void audit_p2m(struct domain *d,
P2M_PRINTK("OK: mfn=%#lx, gfn=%#lx, p2mfn=%#lx\n",
mfn, gfn, mfn_x(p2mfn));
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
pod_unlock(p2m);
p2m_unlock(p2m);
@@ -205,14 +205,14 @@ static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
continue;
printk("MACing Domain %u\n", d->domain_id);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each(page, &d->page_list)
{
void *pg = __map_domain_page(page);
vmac_update(pg, PAGE_SIZE, &ctx);
unmap_domain_page(pg);
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( is_iommu_enabled(d) && is_vtd )
{
@@ -621,14 +621,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
uint64_t new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT - 10);
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
/*
* NB. We removed a check that new_max >= current tot_pages; this means
* that the domain will now be allowed to "ratchet" down to new_max. In
* the meantime, while tot > max, all new allocations are disallowed.
*/
d->max_pages = min(new_max, (uint64_t)(typeof(d->max_pages))-1);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
break;
}
@@ -2376,7 +2376,7 @@ gnttab_transfer(
mfn = page_to_mfn(page);
}
- spin_lock(&e->page_alloc_lock);
+ nrspin_lock(&e->page_alloc_lock);
/*
* Check that 'e' will accept the page and has reservation
@@ -2387,7 +2387,7 @@ gnttab_transfer(
unlikely(domain_tot_pages(e) >= e->max_pages) ||
unlikely(!(e->tot_pages + 1)) )
{
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
if ( e->is_dying )
gdprintk(XENLOG_INFO, "Transferee d%d is dying\n",
@@ -2411,7 +2411,7 @@ gnttab_transfer(
* safely drop the lock and re-acquire it later to add page to the
* pagelist.
*/
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
okay = gnttab_prepare_for_transfer(e, d, gop.ref);
/*
@@ -2427,9 +2427,9 @@ gnttab_transfer(
* Need to grab this again to safely free our "reserved"
* page in the page total
*/
- spin_lock(&e->page_alloc_lock);
+ nrspin_lock(&e->page_alloc_lock);
drop_dom_ref = !domain_adjust_tot_pages(e, -1);
- spin_unlock(&e->page_alloc_lock);
+ nrspin_unlock(&e->page_alloc_lock);
if ( okay /* i.e. e->is_dying due to the surrounding if() */ )
gdprintk(XENLOG_INFO, "Transferee d%d is now dying\n",
@@ -770,10 +770,10 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
(1UL << in_chunk_order)) -
(j * (1UL << exch.out.extent_order)));
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
drop_dom_ref = (dec_count &&
!domain_adjust_tot_pages(d, -dec_count));
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( drop_dom_ref )
put_domain(d);
@@ -719,13 +719,13 @@ static void cf_check dump_numa(unsigned char key)
memset(page_num_node, 0, sizeof(page_num_node));
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_for_each ( page, &d->page_list )
{
i = page_to_nid(page);
page_num_node[i]++;
}
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
for_each_online_node ( i )
printk(" Node %u: %u\n", i, page_num_node[i]);
@@ -514,7 +514,7 @@ int domain_set_outstanding_pages(struct domain *d, unsigned long pages)
* must always take the global heap_lock rather than only in the much
* rarer case that d->outstanding_pages is non-zero
*/
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
spin_lock(&heap_lock);
/* pages==0 means "unset" the claim. */
@@ -560,7 +560,7 @@ int domain_set_outstanding_pages(struct domain *d, unsigned long pages)
out:
spin_unlock(&heap_lock);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return ret;
}
@@ -2346,7 +2346,7 @@ int assign_pages(
int rc = 0;
unsigned int i;
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
if ( unlikely(d->is_dying) )
{
@@ -2428,7 +2428,7 @@ int assign_pages(
}
out:
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return rc;
}
@@ -2907,9 +2907,9 @@ mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
ASSERT_ALLOC_CONTEXT();
/* Acquire a page from reserved page list(resv_page_list). */
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page = page_list_remove_head(&d->resv_page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
if ( unlikely(!page) )
return INVALID_MFN;
@@ -2928,9 +2928,9 @@ mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
*/
unprepare_staticmem_pages(page, 1, false);
fail:
- spin_lock(&d->page_alloc_lock);
+ nrspin_lock(&d->page_alloc_lock);
page_list_add_tail(page, &d->resv_page_list);
- spin_unlock(&d->page_alloc_lock);
+ nrspin_unlock(&d->page_alloc_lock);
return INVALID_MFN;
}
#endif
@@ -368,9 +368,9 @@ long read_console_ring(struct xen_sysctl_readconsole *op)
if ( op->clear )
{
- spin_lock_irq(&console_lock);
+ nrspin_lock_irq(&console_lock);
conringc = p - c > conring_size ? p - conring_size : c;
- spin_unlock_irq(&console_lock);
+ nrspin_unlock_irq(&console_lock);
}
op->count = sofar;
@@ -638,7 +638,7 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer,
if ( is_hardware_domain(cd) )
{
/* Use direct console output as it could be interactive */
- spin_lock_irq(&console_lock);
+ nrspin_lock_irq(&console_lock);
console_serial_puts(kbuf, kcount);
video_puts(kbuf, kcount);
@@ -659,7 +659,7 @@ static long guest_console_write(XEN_GUEST_HANDLE_PARAM(char) buffer,
tasklet_schedule(&notify_dom0_con_ring_tasklet);
}
- spin_unlock_irq(&console_lock);
+ nrspin_unlock_irq(&console_lock);
}
else
{
@@ -1026,9 +1026,9 @@ void __init console_init_preirq(void)
pv_console_set_rx_handler(serial_rx);
/* HELLO WORLD --- start-of-day banner text. */
- spin_lock(&console_lock);
+ nrspin_lock(&console_lock);
__putstr(xen_banner());
- spin_unlock(&console_lock);
+ nrspin_unlock(&console_lock);
printk("Xen version %d.%d%s (%s@%s) (%s) %s %s\n",
xen_major_version(), xen_minor_version(), xen_extra_version(),
xen_compile_by(), xen_compile_domain(), xen_compiler(),
@@ -1065,13 +1065,13 @@ void __init console_init_ring(void)
}
opt_conring_size = PAGE_SIZE << order;
- spin_lock_irqsave(&console_lock, flags);
+ nrspin_lock_irqsave(&console_lock, flags);
for ( i = conringc ; i != conringp; i++ )
ring[i & (opt_conring_size - 1)] = conring[i & (conring_size - 1)];
conring = ring;
smp_wmb(); /* Allow users of console_force_unlock() to see larger buffer. */
conring_size = opt_conring_size;
- spin_unlock_irqrestore(&console_lock, flags);
+ nrspin_unlock_irqrestore(&console_lock, flags);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
}
@@ -102,6 +102,9 @@ struct lock_profile_qhead {
};
#define LOCK_PROFILE_(lockname) { .name = #lockname, .ptr.lock = &(lockname), }
+#define RLOCK_PROFILE_(lockname) { .name = #lockname, \
+ .ptr.rlock = &(lockname), \
+ .is_rlock = true, }
#define LOCK_PROFILE_PTR_(name) \
static struct lock_profile * const lock_profile__##name \
__used_section(".lockprofile.data") = \
@@ -118,10 +121,10 @@ struct lock_profile_qhead {
LOCK_PROFILE_PTR_(l)
#define DEFINE_RSPINLOCK(l) \
rspinlock_t l = SPIN_LOCK_UNLOCKED_(NULL); \
- static struct lock_profile lock_profile_data__##l = LOCK_PROFILE_(l); \
+ static struct lock_profile lock_profile_data__##l = RLOCK_PROFILE_(l); \
LOCK_PROFILE_PTR_(l)
-#define spin_lock_init_prof__(s, l, locktype) \
+#define spin_lock_init_prof__(s, l, lockptr, locktype, isr) \
do { \
struct lock_profile *prof; \
prof = xzalloc(struct lock_profile); \
@@ -134,13 +137,16 @@ struct lock_profile_qhead {
break; \
} \
prof->name = #l; \
- prof->ptr.lock = &(s)->l; \
+ prof->ptr.lockptr = &(s)->l; \
+ prof->is_rlock = isr; \
prof->next = (s)->profile_head.elem_q; \
(s)->profile_head.elem_q = prof; \
} while( 0 )
-#define spin_lock_init_prof(s, l) spin_lock_init_prof__(s, l, spinlock_t)
-#define rspin_lock_init_prof(s, l) spin_lock_init_prof__(s, l, rspinlock_t)
+#define spin_lock_init_prof(s, l) \
+ spin_lock_init_prof__(s, l, lock, spinlock_t, false)
+#define rspin_lock_init_prof(s, l) \
+ spin_lock_init_prof__(s, l, rlock, rspinlock_t, true)
void _lock_profile_register_struct(
int32_t type, struct lock_profile_qhead *qhead, int32_t idx);
@@ -274,7 +280,10 @@ static always_inline void spin_lock_if(bool condition, spinlock_t *l)
* reentered recursively on the same CPU. All critical regions that may form
* part of a recursively-nested set must be protected by these forms. If there
* are any critical regions that cannot form part of such a set, they can use
- * standard spin_[un]lock().
+ * nrspin_[un]lock().
+ * The nrspin_[un]lock() forms behave like normal spin_[un]lock() calls, but
+ * operate on rspinlock_t locks. For a given lock, nrspin_lock() and
+ * rspin_lock() callers block each other, even on the same CPU.
*/
bool _rspin_trylock(rspinlock_t *lock);
void _rspin_lock(rspinlock_t *lock);
@@ -298,4 +307,12 @@ static always_inline void rspin_lock(rspinlock_t *lock)
#define rspin_unlock(l) _rspin_unlock(l)
#define rspin_unlock_irqrestore(l, f) _rspin_unlock_irqrestore(l, f)
+#define nrspin_trylock(l) spin_trylock(l)
+#define nrspin_lock(l) spin_lock(l)
+#define nrspin_unlock(l) spin_unlock(l)
+#define nrspin_lock_irq(l) spin_lock_irq(l)
+#define nrspin_unlock_irq(l) spin_unlock_irq(l)
+#define nrspin_lock_irqsave(l, f) spin_lock_irqsave(l, f)
+#define nrspin_unlock_irqrestore(l, f) spin_unlock_irqrestore(l, f)
+
#endif /* __SPINLOCK_H__ */
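
For illustration only (not part of the patch): a minimal sketch, assuming d->page_alloc_lock is already an rspinlock_t, of how the non-recursive nrspin_*() wrappers and the recursive rspin_*() calls are meant to coexist on the same lock. The two functions below are hypothetical callers invented for this example; only the wrappers, d->page_alloc_lock and page_list_add_tail() come from the code above.

/* Hypothetical caller: this path is never re-entered on the same CPU,
 * so the plain, non-recursive wrapper is sufficient. */
static void example_nonrecursive_path(struct domain *d, struct page_info *pg)
{
    nrspin_lock(&d->page_alloc_lock);
    page_list_add_tail(pg, &d->page_list);
    nrspin_unlock(&d->page_alloc_lock);
}

/* Hypothetical caller: this path may nest on the same CPU, so it keeps the
 * recursive form.  For a given lock, rspin_lock() and nrspin_lock() callers
 * still serialise against each other, even on the same CPU. */
static void example_recursive_path(struct domain *d)
{
    rspin_lock(&d->page_alloc_lock);
    /* ... region that may call back into code taking the lock again ... */
    rspin_unlock(&d->page_alloc_lock);
}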