@@ -652,7 +652,7 @@ static void __init setup_low_mem_virq(void)
static void check_low_mem_virq(void)
{
unsigned long avail_pages = total_avail_pages +
- (tmem_enabled() ? tmem_freeable_pages() : 0) - outstanding_claims;
+ tmem_freeable_pages() - outstanding_claims;
if ( unlikely(avail_pages <= low_mem_virq_th) )
{
@@ -738,7 +738,7 @@ static struct page_info *alloc_heap_pages(
* Others try tmem pools then fail. This is a workaround until all
* post-dom0-creation-multi-page allocations can be eliminated.
*/
- if ( tmem_enabled() && ((order == 0) || (order >= 9)) &&
+ if ( ((order == 0) || (order >= 9)) &&
(total_avail_pages <= midsize_alloc_zone_pages) &&
tmem_freeable_pages() )
goto try_tmem;
@@ -2837,6 +2837,9 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags)
unsigned long tmem_freeable_pages(void)
{
+ if ( !tmem_enabled() )
+ return 0;
+
return tmem_page_list_pages + _atomic_read(freeable_page_count);
}
Most callers of tmem_freeable_pages() checked whether tmem was enabled
before calling tmem_freeable_pages(), but not all of them did. This
seemed like an oversight; to avoid similar situations in the future,
move the tmem_enabled() check into tmem_freeable_pages() itself.

Signed-off-by: Doug Goldstein <cardoe@cardoe.com>
---
CC: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 xen/common/page_alloc.c | 4 ++--
 xen/common/tmem.c       | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)