diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a951de920e20..090df56a808e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -22,8 +22,6 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
 	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
 	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
-	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
-		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7bae701e3fc2..a9896d9b9bf2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -51,9 +51,6 @@ enum migratetype {
 	MIGRATE_RECLAIMABLE,
 	/* the number of types that have fallbacks */
 	MIGRATE_FALLBACKS,
-	/* the number of types on the pcp lists */
-	MIGRATE_PCPTYPES = MIGRATE_FALLBACKS,
-	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -65,8 +62,13 @@ enum migratetype {
 	 * pageblocks to MIGRATE_CMA which can be done by
 	 * __free_pageblock_cma() function.
 	 */
-	MIGRATE_CMA,
+	MIGRATE_CMA = MIGRATE_FALLBACKS,
+	MIGRATE_PCPTYPES,
+#else
+	/* the number of types on the pcp lists */
+	MIGRATE_PCPTYPES = MIGRATE_FALLBACKS,
 #endif
+	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e283cd52323b..9f2f4a7b3b3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -204,6 +204,16 @@ static inline bool cma_redirect_restricted(void)
 	return static_key_enabled(&restrict_cma_redirect);
 }
 
+/*
+ * Return true if CMA has a pcplist. We use the PCP list for CMA only if
+ * this returns true. For now, rather than define a new flag, reuse the
+ * restrict_cma_redirect flag itself to select this behavior.
+ */
+static inline bool cma_has_pcplist(void)
+{
+	return static_key_enabled(&restrict_cma_redirect);
+}
+
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 /*
  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
@@ -294,10 +304,10 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
-	"HighAtomic",
 #ifdef CONFIG_CMA
 	"CMA",
 #endif
+	"HighAtomic",
 #ifdef CONFIG_MEMORY_ISOLATION
 	"Isolate",
 #endif
@@ -544,6 +554,13 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
 {
 	bool __maybe_unused movable;
 
+	/*
+	 * We shouldn't get here for MIGRATE_CMA if those pages don't
+	 * have their own pcp list. For instance, free_unref_page() sets
+	 * pcpmigratetype to MIGRATE_MOVABLE.
+	 */
+	VM_BUG_ON(!cma_has_pcplist() && migratetype == MIGRATE_CMA);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	if (order > PAGE_ALLOC_COSTLY_ORDER) {
 		VM_BUG_ON(order != HPAGE_PMD_ORDER);
@@ -2716,19 +2733,21 @@ void free_unref_page(struct page *page, unsigned int order)
 		return;
 
 	/*
-	 * We only track unmovable, reclaimable and movable on pcp lists.
+	 * We only track unmovable, reclaimable and movable on pcp lists,
+	 * plus CMA when the restrict_cma_redirect flag is set.
 	 * Place ISOLATE pages on the isolated list because they are being
 	 * offlined but treat HIGHATOMIC and CMA as movable pages so we can
 	 * get those areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
+	if (unlikely(migratetype > MIGRATE_RECLAIMABLE)) {
 		if (unlikely(is_migrate_isolate(migratetype))) {
 			free_one_page(page_zone(page), page, pfn, order, FPI_NONE);
 			return;
 		}
-		migratetype = MIGRATE_MOVABLE;
+		if (!cma_has_pcplist() || migratetype != MIGRATE_CMA)
+			migratetype = MIGRATE_MOVABLE;
 	}
 
 	zone = page_zone(page);
@@ -2831,8 +2850,10 @@ void free_unref_folios(struct folio_batch *folios)
 		 * Non-isolated types over MIGRATE_PCPTYPES get added
 		 * to the MIGRATE_MOVABLE pcp list.
 		 */
-		if (unlikely(migratetype >= MIGRATE_PCPTYPES))
-			migratetype = MIGRATE_MOVABLE;
+		if (unlikely(migratetype > MIGRATE_RECLAIMABLE)) {
+			if (!cma_has_pcplist() || migratetype != MIGRATE_CMA)
+				migratetype = MIGRATE_MOVABLE;
+		}
 
 		trace_mm_page_free_batched(&folio->page);
 		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
@@ -3094,8 +3115,12 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 	if (cma_redirect_restricted() && alloc_flags & ALLOC_CMA) {
 		struct page *page;
+		int cma_migratetype;
 
-		page = ___rmqueue_pcplist(zone, order, get_cma_migrate_type(),
+		/* Use CMA pcp list */
+		cma_migratetype = get_cma_migrate_type();
+		list = &pcp->lists[order_to_pindex(cma_migratetype, order)];
+		page = ___rmqueue_pcplist(zone, order, cma_migratetype,
 					  alloc_flags, pcp, list);
 		if (page)
 			return page;
 	}
@@ -3269,6 +3294,14 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+			/*
+			 * Note that this check is needed only
+			 * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+			 */
+			if (mt == MIGRATE_CMA)
+				continue;
+#endif
 			if (!free_area_empty(area, mt))
 				return true;
 		}
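
As a review aid, not part of the patch: a minimal standalone C sketch of the migratetype layout the mmzone.h hunk above produces. The MIGRATE_* names and the conditional ordering mirror that hunk; MIGRATE_TYPES is simplified here (the real enum continues with MIGRATE_ISOLATE and friends), and the asserts are illustrative rather than taken from the series. Build with and without -DCONFIG_CMA to compare the two layouts.

/*
 * migratetype_sketch.c - review-only sketch, not kernel code.
 * Under CONFIG_CMA, MIGRATE_CMA aliases MIGRATE_FALLBACKS and gains a
 * pcp list (MIGRATE_CMA < MIGRATE_PCPTYPES); without CONFIG_CMA,
 * MIGRATE_PCPTYPES collapses to MIGRATE_FALLBACKS. Either way,
 * MIGRATE_HIGHATOMIC is the first type without a pcp list.
 */
#include <stdio.h>

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	/* the number of types that have fallbacks */
	MIGRATE_FALLBACKS,
#ifdef CONFIG_CMA
	MIGRATE_CMA = MIGRATE_FALLBACKS,
	MIGRATE_PCPTYPES,
#else
	/* the number of types on the pcp lists */
	MIGRATE_PCPTYPES = MIGRATE_FALLBACKS,
#endif
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
	MIGRATE_TYPES	/* simplified: MIGRATE_ISOLATE etc. omitted */
};

#ifdef CONFIG_CMA
/* this is why __zone_watermark_ok() must now skip mt == MIGRATE_CMA */
_Static_assert(MIGRATE_CMA < MIGRATE_PCPTYPES,
	       "CMA sits inside the pcp range");
#endif
_Static_assert(MIGRATE_HIGHATOMIC == MIGRATE_PCPTYPES,
	       "HIGHATOMIC is the first migratetype without a pcp list");

int main(void)
{
	printf("FALLBACKS=%d PCPTYPES=%d HIGHATOMIC=%d\n",
	       MIGRATE_FALLBACKS, MIGRATE_PCPTYPES, MIGRATE_HIGHATOMIC);
	return 0;
}

$ gcc -std=c11 migratetype_sketch.c && ./a.out
FALLBACKS=3 PCPTYPES=3 HIGHATOMIC=3
$ gcc -std=c11 -DCONFIG_CMA migratetype_sketch.c && ./a.out
FALLBACKS=3 PCPTYPES=4 HIGHATOMIC=4

The reordering keeps every migratetype that owns pcp lists contiguous below MIGRATE_PCPTYPES, so order_to_pindex() and the pcp array sizing need no special cases: CMA simply joins that range when cma_has_pcplist() is true.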