@@ -20,13 +20,26 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
if (unlikely(page_group_by_mobility_disabled))
- return MIGRATE_UNMOVABLE;
+ goto unmovable;
+
+ /* Only unmovable/unreclaimable pages can be nonsensitive right now. */
+ VM_WARN_ONCE((gfp_flags & GFP_MOVABLE_MASK) && !(gfp_flags & __GFP_SENSITIVE),
+ "%pGg", &gfp_flags);
switch (gfp_flags & GFP_MOVABLE_MASK) {
- case __GFP_RECLAIMABLE: return MIGRATE_RECLAIMABLE;
- case __GFP_MOVABLE: return MIGRATE_MOVABLE;
- default: return MIGRATE_UNMOVABLE;
+ case __GFP_RECLAIMABLE:
+ return MIGRATE_RECLAIMABLE;
+ case __GFP_MOVABLE:
+ return MIGRATE_MOVABLE;
+ default:
+ break;
}
+
+unmovable:
+ if (gfp_flags & __GFP_SENSITIVE)
+ return MIGRATE_UNMOVABLE_SENSITIVE;
+ else
+ return MIGRATE_UNMOVABLE_NONSENSITIVE;
}
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
@@ -46,7 +46,19 @@
#define PAGE_ALLOC_COSTLY_ORDER 3
enum migratetype {
- MIGRATE_UNMOVABLE,
+ /*
+ * All movable pages are sensitive for ASI. Unmovable pages might be
+ * either; the migratetype reflects whether they are mapped into the
+ * global-nonsensitive address space.
+ *
+ * TODO: what about HIGHATOMIC/RECLAIMABLE?
+ */
+ MIGRATE_UNMOVABLE_SENSITIVE,
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+ MIGRATE_UNMOVABLE_NONSENSITIVE,
+#else
+ MIGRATE_UNMOVABLE_NONSENSITIVE = MIGRATE_UNMOVABLE_SENSITIVE,
+#endif
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
@@ -89,6 +101,11 @@ static inline bool is_migrate_movable(int mt)
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
+static inline bool is_migrate_unmovable(int mt)
+{
+ return mt == MIGRATE_UNMOVABLE_SENSITIVE || mt == MIGRATE_UNMOVABLE_NONSENSITIVE;
+}
+
/*
* Check whether a migratetype can be merged with another migratetype.
*
@@ -1127,7 +1127,7 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
if (mhp_off_inaccessible)
page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
- move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
+ move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE_SENSITIVE);
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn + i);
@@ -418,8 +418,9 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
void set_pageblock_migratetype(struct page *page, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled &&
- migratetype < MIGRATE_PCPTYPES))
- migratetype = MIGRATE_UNMOVABLE;
+ migratetype < MIGRATE_PCPTYPES &&
+ migratetype != MIGRATE_UNMOVABLE_NONSENSITIVE))
+ migratetype = MIGRATE_UNMOVABLE_SENSITIVE;
set_pfnblock_flags_mask(page, (unsigned long)migratetype,
page_to_pfn(page), MIGRATETYPE_MASK);
@@ -1610,10 +1611,14 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
*
* The other migratetypes do not have fallbacks.
*/
-static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
- [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
- [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
+static const int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
+ [MIGRATE_UNMOVABLE_SENSITIVE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+#ifdef CONFIG_MITIGATION_ADDRESS_SPACE_ISOLATION
+ /* TODO: Cannot fallback from nonsensitive */
+ [MIGRATE_UNMOVABLE_NONSENSITIVE] = { -1 },
+#endif
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE_SENSITIVE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE_SENSITIVE, MIGRATE_MOVABLE },
};
#ifdef CONFIG_CMA
@@ -1893,7 +1898,7 @@ static bool should_try_claim_block(unsigned int order, int start_mt)
* allocation size. Later movable allocations can always steal from this
* block, which is less problematic.
*/
- if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
+ if (start_mt == MIGRATE_RECLAIMABLE || is_migrate_unmovable(start_mt))
return true;
if (page_group_by_mobility_disabled)
@@ -1929,6 +1934,9 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
*claim_block = false;
for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
fallback_mt = fallbacks[migratetype][i];
+ if (fallback_mt < 0)
+ return fallback_mt;
+
if (free_area_empty(area, fallback_mt))
continue;
@@ -141,15 +141,16 @@ static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask
static void show_migration_types(unsigned char type)
{
static const char types[MIGRATE_TYPES] = {
- [MIGRATE_UNMOVABLE] = 'U',
- [MIGRATE_MOVABLE] = 'M',
- [MIGRATE_RECLAIMABLE] = 'E',
- [MIGRATE_HIGHATOMIC] = 'H',
+ [MIGRATE_UNMOVABLE_SENSITIVE] = 'S',
+ [MIGRATE_UNMOVABLE_NONSENSITIVE] = 'N',
+ [MIGRATE_MOVABLE] = 'M',
+ [MIGRATE_RECLAIMABLE] = 'E',
+ [MIGRATE_HIGHATOMIC] = 'H',
#ifdef CONFIG_CMA
- [MIGRATE_CMA] = 'C',
+ [MIGRATE_CMA] = 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
- [MIGRATE_ISOLATE] = 'I',
+ [MIGRATE_ISOLATE] = 'I',
#endif
};
char tmp[MIGRATE_TYPES + 1];
When ASI is compiled in, create two separate unmovable migratetypes. MIGRATE_UNMOVABLE_NONSENSITIVE represents blocks that are mapped into ASI's restricted address space. MIGRATE_UNMOVABLE becomes MIGRATE_UNMOVABLE_SENSITIVE. All other migratetypes retain their original meaning and gain the additional implication that the pageblock is not ASI-mapped. In future extensions it's likely that more migratetypes will need to support different sensitivities; if and when that happens a more invasive change will be needed but for now this should allow developing the necessary allocator logic for flipping sensitivities by modifying the ASI page tables. For builds with ASI disabled, the two new migratetypes are aliases for one another. Some code needs to be aware of this aliasing (for example, the 'fallbacks' array needs an ifdef for the entries that would otherwise alias) while other code doesn't (for example, set_pageblock_migratetype() just works regardless). Since there is now a migratetype below MIGRATE_PCPTYPES with no fallbacks, the 'fallbacks' arrays are no longer all the same size, so make them be terminated by a -1 instead of having a fixed size. On non-ASI builds, the new 'if (fallback_mt < 0)' in find_suitable_fallback() is provably always false and can be eliminated by the compiler. Clang 20 seems to be smart enough to do this regardless, but add a 'const' qualifier to the arrays to try and increase confidence anyway. Signed-off-by: Brendan Jackman <jackmanb@google.com> --- include/linux/gfp.h | 21 +++++++++++++++++---- include/linux/mmzone.h | 19 ++++++++++++++++++- mm/memory_hotplug.c | 2 +- mm/page_alloc.c | 22 +++++++++++++++------- mm/show_mem.c | 13 +++++++------ 5 files changed, 58 insertions(+), 19 deletions(-)