内核中常用的分配物理内存页面的接口alloc_pages,用于分配一个或者多个连续的物理页面,分配的页面个数只能是2的整数次幂。
alloc_pages有两个参数,一个是分配掩码gfp_mask,另一个是分配阶数order.
include/linux/gfp.h
#define alloc_pages(gfp_mask, order) alloc_pages_node(numa_node_id(), gfp_mask, order)
分配掩码是非常重要的参数,它同样定义在gfp.h头文件中。
/* Plain integer GFP bitmasks. Do not use this directly. */
/* The triple-underscore ___GFP_xxx macros below are the raw bit values;
 * callers use the type-checked double-underscore __GFP_xxx wrappers. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_WAIT 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
#define ___GFP_COLD 0x100u
#define ___GFP_NOWARN 0x200u
#define ___GFP_REPEAT 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
#define ___GFP_MEMALLOC 0x2000u
#define ___GFP_COMP 0x4000u
#define ___GFP_ZERO 0x8000u
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
/* NOTE(review): bit 0x100000u is skipped here — this matches the quoted kernel
 * version, but verify against the exact include/linux/gfp.h being described */
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
分配掩码在内核中分成两类,一类为zone modifiers,另一类为action modifiers。
zone modifiers指定从哪个zone中分配所需的页面。zone modifiers由分配掩码的最低4位定义,
分别是__GFP_DMA,__GFP_HIGHMEM,__GFP_DMA32和__GFP_MOVABLE。
typedef unsigned __bitwise__ gfp_t;
/*
* GFP bitmasks..
*
* Zone modifiers (see linux/mmzone.h - low three bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
*/
/* Zone modifiers: the low bits of gfp_t select which zone to allocate from */
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
/* All four zone-selector bits together (lowest 4 bits of the mask) */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
action modifiers并不限制从哪个内存区域中分配内存,但会改变分配行为,其定义如下:
/*
 * Action modifiers - doesn't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt _might_ fail.
 * This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures. This modifier is deprecated and no new
 * users should be added.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 *
 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
 * mechanism or reclaimed
 */
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
 * This takes precedence over the
 * __GFP_MEMALLOC flag if both are
 * set
 */
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
下面以GFP_KERNEL为例,在理想情况下alloc_pages是如何分配出物理内存。
pages = alloc_pages(GFP_KERNEL,order);
GFP_KERNEL分配掩码定义在gfp.h头文件中,是一个分配掩码的组合。
常用的分配掩码组合如下:
/*This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | __GFP_NOMEMALLOC | __GFP_NORETRY | \
__GFP_NOWARN | __GFP_NO_KSWAPD)
/* The allocation mask currently uses 25 bits */
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
所以GFP_KERNEL分配掩码包含了__GFP_WAIT,__GFP_IO和__GFP_FS这3个标志位,换算成十六进制是0xd0。
#define alloc_pages(gfp_mask, order) alloc_pages_node(numa_node_id(), gfp_mask, order)
/*
 * Allocate 2^order contiguous pages from node 'nid'; a negative nid means
 * "no preference" and is replaced with the current node's id.
 */
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,unsigned int order)
{
/* Unknown node is current node */
if (nid < 0)
nid = numa_node_id();
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
/*
 * Thin wrapper: allocate without an explicit nodemask by delegating to
 * __alloc_pages_nodemask() with a NULL mask (no node filtering).
 */
static inline struct page * __alloc_pages(gfp_t gfp_mask, unsigned int order,struct zonelist *zonelist)
{
	nodemask_t *no_node_filter = NULL;

	return __alloc_pages_nodemask(gfp_mask, order, zonelist, no_node_filter);
}
alloc_pages最终调用__alloc_pages_nodemask,它是伙伴系统的核心函数。
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
/* Core entry point of the buddy allocator: decode gfp_mask into an
 * alloc_context, try the freelist fast path, then fall back to the
 * slowpath. Returns the allocated page or NULL. */
struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,struct zonelist *zonelist, nodemask_t *nodemask)
{
struct zoneref *preferred_zoneref;
struct page *page = NULL;
unsigned int cpuset_mems_cookie;
/* ALLOC_WMARK_LOW: the fast path checks zones against the low watermark */
int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
/* Bundle of parameters threaded through the whole allocation path */
struct alloc_context ac = {
/* highest zone index permitted by gfp_mask (see gfp_zone()) */
.high_zoneidx = gfp_zone(gfp_mask),
.nodemask = nodemask,
/* migrate type derived from gfp_mask (see gfpflags_to_migratetype()) */
.migratetype = gfpflags_to_migratetype(gfp_mask),
};
gfp_mask &= gfp_allowed_mask;
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT);
/* fault injection, controlled by CONFIG_FAIL_PAGE_ALLOC */
if (should_fail_alloc_page(gfp_mask, order)) /* injected failure: return NULL directly */
return NULL;
/*
 * Check the zones suitable for the gfp_mask contain at least one
 * valid zone. It's possible to have an empty zonelist as a result
 * of GFP_THISNODE and a memoryless node
 */
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
/* We set it here, as __alloc_pages_slowpath might have changed it */
ac.zonelist = zonelist;
/* The preferred zone is used for statistics later */
/* first zoneref in the zonelist whose zone idx <= high_zoneidx */
preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
ac.nodemask ? : &cpuset_current_mems_allowed, /* GNU "a ?: c" extension: yields a itself when a is non-NULL, otherwise c */
&ac.preferred_zone);
if (!ac.preferred_zone) /* no matching zoneref found */
goto out;
ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); /* zoneref->zone_idx */
/* First allocation attempt */
alloc_mask = gfp_mask|__GFP_HARDWALL;
/* fast path: take pages straight from the free lists */
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
if (unlikely(!page)) {
/*
 * Runtime PM, block IO and its error handling path
 * can deadlock because I/O on the device might not
 * complete.
 */
alloc_mask = memalloc_noio_flags(gfp_mask);
page = __alloc_pages_slowpath(alloc_mask, order, &ac);
}
if (kmemcheck_enabled && page)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
out:
/*
 * When updating a task's mems_allowed, it is possible to race with
 * parallel threads in such a way that an allocation can fail while
 * the mask is being updated. If a page allocation is about to fail,
 * check if the cpuset changed during allocation and if so, retry.
 */
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
struct alloc_context数据结构是伙伴系统分配函数中用于保存相关参数的数据结构。
mm/internal.h
/* Parameter bundle passed through the buddy allocator's allocation path. */
struct alloc_context {
struct zonelist *zonelist; /* zonelist being scanned (set in __alloc_pages_nodemask) */
nodemask_t *nodemask; /* optional node filter; NULL means no filtering */
struct zone *preferred_zone; /* first suitable zone, also used for statistics */
int classzone_idx; /* zone index of the preferred zoneref */
int migratetype; /* from gfpflags_to_migratetype(gfp_mask) */
enum zone_type high_zoneidx; /* highest allowed zone index, from gfp_zone(gfp_mask) */
};
gfp_zone从分配掩码中计算出zone的zoneidx,并存放在high_zoneidx成员中。
/* Decode the zone-selector bits of a gfp mask into a zone index. */
static inline enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
/* low 4 zone-selector bits decide which zone to allocate from */
int bit = (__force int) (flags & GFP_ZONEMASK);
/* look up the zone index in the packed GFP_ZONE_TABLE */
z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1);
/* BUG if this combination of zone bits is invalid */
VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
return z;
}
gfp_zone会用到GFP_ZONEMASK,GFP_ZONE_TABLE和ZONES_SHIFT等宏,它们定义如下:
/*
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
* zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
* and there are 16 of them to cover all possible combinations of
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
*
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
* But GFP_MOVABLE is not only a zone specifier but also an allocation policy.
* Therefore __GFP_MOVABLE plus another zone selector is valid.
* Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
*
* bit result
* =================
* 0x0 => NORMAL
* 0x1 => DMA or NORMAL
* 0x2 => HIGHMEM or NORMAL
* 0x3 => BAD (DMA+HIGHMEM)
* 0x4 => DMA32 or DMA or NORMAL
* 0x5 => BAD (DMA+DMA32)
* 0x6 => BAD (HIGHMEM+DMA32)
* 0x7 => BAD (HIGHMEM+DMA32+DMA)
* 0x8 => NORMAL (MOVABLE+0)
* 0x9 => DMA or NORMAL (MOVABLE+DMA)
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
* 0xb => BAD (MOVABLE+HIGHMEM+DMA)
* 0xc => DMA32 (MOVABLE+DMA32)
* 0xd => BAD (MOVABLE+DMA32+DMA)
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
* ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
#if 16 * ZONES_SHIFT > BITS_PER_LONG
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif
/* Entry shift amounts are bit * ZONES_SHIFT, for bit in {0,1,2,4} (plain zone
 * bits) and {8,9,0xa,0xc} (the same bits combined with ___GFP_MOVABLE) */
#define GFP_ZONE_TABLE ( \
(ZONE_NORMAL << 0 * ZONES_SHIFT) \
| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
)
/*
* GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
* __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
* entry starting with bit 0. Bit is set if the combination is not
* allowed.
*/
/* Bits set for the invalid combinations: 0x3, 0x5, 0x6, 0x7, 0xb, 0xd, 0xe, 0xf */
#define GFP_ZONE_BAD ( \
1 << (___GFP_DMA | ___GFP_HIGHMEM) \
| 1 << (___GFP_DMA | ___GFP_DMA32) \
| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)
include/linux/nodemask.h
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
#define MAX_NUMNODES (1 << NODES_SHIFT) /* 1 — assumes NODES_SHIFT == 0 (single-node); TODO confirm */
#define ZONES_SHIFT 2
/* OPT_ZONE_*: when a zone type is not configured, fall back to ZONE_NORMAL
 * so the GFP_ZONE_TABLE entry still resolves to a valid zone. */
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif
#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif
#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif
GFP_ZONEMASK是分配掩码的低4位,在ARM Vexpress平台中,只有ZONE_NORMAL和ZONE_HIGHMEM这两个zone,但是计算__MAX_NR_ZONES需要加上
ZONE_MOVABLE,所以__MAX_NR_ZONES等于3,ZONES_SHIFT值等于2,所以GFP_ZONE_TABLE计算结果为0x200010。
在上述代码中以GFP_KERNEL分配掩码(0xd0)为参数带入gfp_zone()中,最终结果为0,即high_zoneidx为0。
此外,gfpflags_to_migratetype()把gfp_mask分配掩码转换为MIGRATE_TYPES类型。例如分配掩码为GFP_KERNEL,那么MIGRATE_TYPES类型是MIGRATE_UNMOVABLE;
如果分配掩码为GFP_HIGHUSER_MOVABLE,那么MIGRATE_TYPES类型是MIGRATE_MOVABLE。
/* This mask makes up all the page movable related flags */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
/* Convert GFP flags to their corresponding migrate type */
static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
/* setting both movable and reclaimable at once triggers a warning */
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
/* movable lands in bit 1 (MIGRATE_MOVABLE == 2, 0b10); reclaimable in bit 0 (MIGRATE_RECLAIMABLE == 1, 0b01) */
return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
注意,MIGRATE_UNMOVABLE的值为0,如果在gfp分配掩码中未明确使用__GFP_MOVABLE的话,默认就是UNMOVABLE类型。
下面看下页面的迁移类型
/* Page migrate types */
enum {
/*
 * Unmovable: pages with a fixed position in memory that can never be
 * migrated elsewhere, e.g. memory used by the kernel itself (including
 * DMA buffers). GFP_KERNEL allocations fall into this class.
 */
MIGRATE_UNMOVABLE,
/*
 * Reclaimable: pages that cannot be moved directly but can be reclaimed,
 * because their contents can be re-read or regenerated later; the
 * typical example is page cache mapped from a file.
 */
MIGRATE_RECLAIMABLE,
/*
 * Movable: pages that can be migrated at will, typically user-space
 * pages such as malloc() memory and mmap anonymous pages; these are
 * safe to migrate.
 */
MIGRATE_MOVABLE, /* 2 (0b10) */
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_RESERVE = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/* CONFIG_CMA=y */
/*
 * MIGRATE_CMA migration type is designed to mimic the way
 * ZONE_MOVABLE works. Only movable pages can be allocated
 * from MIGRATE_CMA pageblocks and page allocator never
 * implicitly change migration type of MIGRATE_CMA pageblock.
 *
 * The way to use it is to change migratetype of a range of
 * pageblocks to MIGRATE_CMA which can be done by
 * __free_pageblock_cma() function. What is important though
 * is that a range of pageblocks must be aligned to
 * MAX_ORDER_NR_PAGES should biggest page be bigger then
 * a single pageblock.
 */
MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
继续分析__alloc_pages_nodemask,首先get_page_from_freelist会去尝试分配物理页面,如果这里分配失败,就会调用__alloc_pages_slowpath,
该函数将处理很多特殊的场景。这里假设在理想情况下get_page_from_freelist能分配成功。
/*
 * get_page_from_freelist goes through the zonelist trying to allocate a page.
 *
 * Walks every zone in ac->zonelist whose index is <= ac->high_zoneidx,
 * applies cpuset/fairness/dirty-limit/watermark constraints, and hands the
 * first eligible zone to buffered_rmqueue(). Returns the allocated page,
 * or NULL when no zone can satisfy the request (caller enters the slowpath).
 */
static struct page * get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,const struct alloc_context *ac)
{
	struct zonelist *zonelist = ac->zonelist;
	struct zoneref *z;
	struct page *page = NULL;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;	/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
				(gfp_mask & __GFP_WRITE);
	int nr_fair_skipped = 0;
	bool zonelist_rescan;

zonelist_scan:
	zonelist_rescan = false;

	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
	/* iterate over every zoneref with zone idx <= high_zoneidx */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
								ac->nodemask) {
		unsigned long mark;

		/* skip zones the zonelist cache marked full recently */
		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed(zone, gfp_mask))
				continue;
		/*
		 * Distribute pages in proportion to the individual
		 * zone size to ensure fair page aging. The zone a
		 * page was allocated in should have no effect on the
		 * time the page has in memory before being reclaimed.
		 */
		if (alloc_flags & ALLOC_FAIR) {
			if (!zone_local(ac->preferred_zone, zone))
				break;
			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
				nr_fair_skipped++;
				continue;
			}
		}
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a zone that is within its dirty
		 * limit, such that no single zone holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the zone's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * This may look like it could increase pressure on
		 * lower zones by failing allocations in higher zones
		 * before they are full. But the pages that do spill
		 * over are limited as the lower zones are protected
		 * by this very same mechanism. It should not become
		 * a practical burden to them.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-zone dirty limit in the slowpath
		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * zones are together not big enough to reach the
		 * global limit. The proper fix for these situations
		 * will require awareness of zones in the
		 * dirty-throttling and the flusher threads.
		 */
		if (consider_zone_dirty && !zone_dirty_ok(zone))
			continue;

		/* read the watermark selected by alloc_flags (WMARK_LOW on the fast path) */
		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		/* does this zone have enough free pages above the watermark? */
		if (!zone_watermark_ok(zone, order, mark,
				       ac->classzone_idx, alloc_flags)) {
			int ret;

			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (IS_ENABLED(CONFIG_NUMA) &&
					!did_zlc_setup && nr_online_nodes > 1) {
				/*
				 * we do zlc_setup if there are multiple nodes
				 * and before considering the first zone allowed
				 * by the cpuset.
				 */
				allowednodes = zlc_setup(zonelist, alloc_flags);
				zlc_active = 1;
				did_zlc_setup = 1;
			}

			if (zone_reclaim_mode == 0 ||
			    !zone_allows_reclaim(ac->preferred_zone, zone))
				goto this_zone_full;

			/*
			 * As we may have just activated ZLC, check if the first
			 * eligible zone has failed zone_reclaim recently.
			 */
			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
				!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;

			/* free pages are below WMARK_LOW: try to reclaim some from this zone */
			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
						ac->classzone_idx, alloc_flags))
					goto try_this_zone;

				/*
				 * NOTE(review): the source text was garbled from
				 * here to the buffered_rmqueue() call below;
				 * restored from Linux v4.0 mm/page_alloc.c —
				 * verify against the original file.
				 *
				 * Failed to reclaim enough to meet watermark.
				 * Only mark the zone full if checking the min
				 * watermark or if we failed to reclaim just
				 * 1<<order pages or else the page allocator
				 * fastpath will prematurely mark zones full
				 * when the watermark is between the low and
				 * min watermarks.
				 */
				if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
				    ret == ZONE_RECLAIM_SOME)
					goto this_zone_full;

				continue;
			}
		}

try_this_zone:
		page = buffered_rmqueue(ac->preferred_zone, zone, order,
						gfp_mask, ac->migratetype);
		if (page) {
			/* validate the new pages; this also checks __GFP_COMP, which
			 * slab sets when allocating compound pages from the buddy system */
			if (prep_new_page(page, order, gfp_mask, alloc_flags))
				goto try_this_zone;
			return page;
		}
this_zone_full:
		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
			zlc_mark_zone_full(zonelist, z);
	}

	/*
	 * The first pass makes sure allocations are spread fairly within the
	 * local node. However, the local node might have free pages left
	 * after the fairness batches are exhausted, and remote zones haven't
	 * even been considered yet. Try once more without fairness, and
	 * include remote zones now, before entering the slowpath and waking
	 * kswapd: prefer spilling to a remote zone over swapping locally.
	 */
	if (alloc_flags & ALLOC_FAIR) {
		alloc_flags &= ~ALLOC_FAIR;
		if (nr_fair_skipped) {
			zonelist_rescan = true;
			reset_alloc_batches(ac->preferred_zone);
		}
		if (nr_online_nodes > 1)
			zonelist_rescan = true;
	}

	if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		zonelist_rescan = true;
	}

	if (zonelist_rescan)
		goto zonelist_scan;

	return NULL;
}
#define set_page_private(page, v) ((page)->private = (v))
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define ___GFP_COMP 0x4000u
/*
 * Prepare a freshly allocated block of 2^order pages for use.
 * Returns 1 if any page fails the sanity checks (caller retries), 0 on success.
 */
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,int alloc_flags)
{
int i;
/* sanity-check every one of the 2^order pages */
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (unlikely(check_new_page(p)))
return 1;
}
/* clear page->private */
set_page_private(page, 0);
set_page_refcounted(page);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
kasan_alloc_pages(page, order);
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
/* slab passes __GFP_COMP when allocating from the buddy system, so a
 * multi-page block is tied together as a compound page here */
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
set_page_owner(page, order, gfp_flags);
/*
 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
 * allocate the page. The expectation is that the caller is taking
 * steps that will free more memory. The caller should avoid the page
 * being used for !PFMEMALLOC purposes.
 */
page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
return 0;
}
/*
 * Turn 2^order pages into one compound page: the first page becomes the
 * head, every other page becomes a tail pointing back at the head.
 */
void prep_compound_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
set_compound_page_dtor(page, free_compound_page);
set_compound_order(page, order);
__SetPageHead(page); /* mark the first page as the compound head */
/* set up every page after the head */
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
set_page_count(p, 0); /* tail pages start with refcount 0 */
p->first_page = page; /* every tail page can find its head page */
/* Make sure p->first_page is always valid for PageTail() */
smp_wmb();
__SetPageTail(p); /* every page except the head is marked as a tail, not just the last one */
}
}
get_page_from_freelist首先需要判断可以从哪个zone来分配内存。for_each_zone_zonelist_nodemask宏扫描内存节点中的zonelist去查找合适分配内存的zone;
for_each_zone_zonelist_nodemask首先通过first_zones_zonelist从给定的zoneidx开始查找,这个给定的zoneidx就是highidx,之前通过gfp_zone转换而来的。
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
* @zone - The current zone in the iterator
* @z - The current pointer within zonelist->zones being iterated
* @zlist - The zonelist being iterated
* @highidx - The zone index of the highest zone to return
* @nodemask - Nodemask allowed by the allocator
*
* This iterator iterates though all zones at or below a given zone index and
* within a given nodemask
*/
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
/* start at the first zoneref whose zone idx <= highidx */ \
for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
zone; \
/* then advance to the next matching zoneref in the zonelist */ \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
first_zones_zonelist会调用next_zones_zonelis来计算zoneref,最后返回zone数据结构。
/**
* first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
* @zonelist - The zonelist to search for a suitable zone
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
* @zone - The first suitable zone found is returned via this parameter
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,enum zone_type highest_zoneidx,
nodemask_t *nodes,struct zone **zone)
{
/* find the first zoneref whose zone idx <= highest_zoneidx */
struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,highest_zoneidx, nodes);
/* hand the matching zone back through *zone (NULL when none matched) */
*zone = zonelist_zone(z);
return z;
}
/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *next_zones_zonelist(struct zoneref *z,enum zone_type highest_zoneidx,nodemask_t *nodes)
{
/*
 * Find the next suitable zone to use for the allocation.
 * Only filter based on nodemask if it's set
 */
if (likely(nodes == NULL))
while (zonelist_zone_idx(z) > highest_zoneidx) /* skip until zone idx <= highest_zoneidx */
z++;
else
while (zonelist_zone_idx(z) > highest_zoneidx ||
(z->zone && !zref_in_nodemask(z, nodes)))
z++;
/* cursor now points at the first matching zoneref */
return z;
}
/* Resolve a zoneref cursor to its zone; a NULL zone ends zonelist iteration
 * (see the loop condition in for_each_zone_zonelist_nodemask). */
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
}
/* Return the zone index cached in the zoneref (no need to dereference ->zone). */
static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
计算zone的核心函数在next_zones_zonelist中,这里highest_zoneidx是gfp_zone通过分配掩码转换而来。
zonelist有一个zoneref数组,zoneref数据结构里有一个成员zone指针会指向zone数据结构,还有一个zone_index成员指向zone的编号。
zone会初始化这个数组,具体在build_zonelists_node中。
/*
 * Populate zonelist->_zonerefs[] with the populated zones of one node,
 * starting at index nr_zones; returns the updated count of zonerefs.
 */
static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,int nr_zones)
{
struct zone *zone;
enum zone_type zone_type = MAX_NR_ZONES; /* 3 on this configuration */
/* walk the node's zones from highest to lowest, so higher zones land first in _zonerefs[] */
do {
zone_type--;
zone = pgdat->node_zones + zone_type;
if (populated_zone(zone)) { /* only populated zones get a zoneref */
zoneref_set_zone(zone,&zonelist->_zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
} while (zone_type);
return nr_zones;
}
在ARM Vexpress平台中,zone类型,zoneref[]数组和zoneidx的关系如下:
ZONE_HIGHMEM _zonerefs[0]->zone_index=1
ZONE_NORMAL _zonerefs[1]->zone_index=0
zoneref[0]表示ZONE_HIGHMEM,其zone的编号zone_index值为1;zoneref[1]表示ZONE_NORMAL,其zone的编号zone_index为0。
也就是说,基于zone的设计思想是:分配物理页面时会优先考虑ZONE_HIGHMEM,因为ZONE_HIGHMEM在zonelist中排在ZONE_NORMAL前面。
gfp_zone(GFP_KERNEL)返回值为0,即highest_zoneidx为0,而zonelist的第一个zone是ZONE_HIGHMEM,其zone编号zone_index值为1。
因此在next_zones_zonelist中,z++,最终first_zones_zonelist会返回ZONE_NORMAL。
再举一个例子,分配掩码为GFP_HIGHUSER_MOVABLE,其包含了__GFP_HIGHMEM,那么next_zones_zonelist会返回哪个zone呢?
GFP_HIGHUSER_MOVABLE的值为0x200da,gfp_zone(GFP_HIGHUSER_MOVABLE)返回值为2,即highest_zoneidx为2,
而zonelist的第一个zoneref为ZONE_HIGHMEM,其zone编号zone_index为1。
在first_zones_zonelist中,由于第一个zone的zone_index值小于highest_zoneidx,因此会返回ZONE_HIGHMEM。
在for_each_zone_zonelist_nodemask中next_zones_zonelist(++z,highidx,nodemask)返回ZONE_NORMAL。
因此会遍历ZONE_HIGHMEM和ZONE_NORMAL这两个zone,但是会先遍历ZONE_HIGHMEM,然后才是ZONE_NORMAL。
要正确理解for_each_zone_zonelist_nodemask这个宏的行为,需要理解如下两个方面:
highest_zoneidx是怎么计算出来的,即如何解析分配掩码,这是gfp_zone的职责。
每个内存节点有一个struct pglist_data数据结构,其成员node_zonelists是一个struct zonelist数据结构,其成员 struct zoneref _zonerefs[]数组描述zone.
其中ZONE_HIGHMEM排在前面,并且_zonerefs[0]->zone_index=1,ZONE_NORMAL排在后面,且_zonerefs[1]->zone_index=0。
上述这些设计让人感觉有些复杂,但是这是正确理解以zone为基础的物理页面分配机制的基础。
/* Ordered list of candidate zones for allocation, one per memory node. */
struct zonelist {
struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; /* zone references, in allocation-preference order */
#ifdef CONFIG_NUMA
struct zonelist_cache zlcache; // optional ...
#endif
};
/* One entry of zonelist->_zonerefs[]: a zone plus its cached index. */
struct zoneref {
struct zone *zone; /* Pointer to actual zone */
int zone_idx; /* zone_idx(zoneref->zone) */
};
zone数据结构中有一个成员watermark记录各种水位的情况。系统中定义了3种水位,分别是WMARK_MIN,WMARK_LOW和WMARK_HIGH。
watermark水位的计算在__setup_per_zone_wmarks中(见本章开始位置)。
include/linux/mmzone.h
/* Indexes into zone->watermark[]: the three per-zone watermarks. */
enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
NR_WMARK
};
mm/internal.h
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN WMARK_MIN
#define ALLOC_WMARK_LOW WMARK_LOW
#define ALLOC_WMARK_HIGH WMARK_HIGH
#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */
/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
/* Convenience accessors for a zone's three watermark values */
#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
/*
 * Does zone @z have at least @mark free pages available for an order-@order
 * allocation under @alloc_flags? Convenience wrapper that feeds the zone's
 * current NR_FREE_PAGES count into __zone_watermark_ok().
 */
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,int classzone_idx, int alloc_flags)
{
	long nr_free = zone_page_state(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, nr_free);
}
/* Return true if free pages are above 'mark'. This takes into account the order of the allocation. */
static bool __zone_watermark_ok(struct zone *z, unsigned int order,unsigned long mark,
int classzone_idx, int alloc_flags,long free_pages)
{
/* free_pages may go negative - that's OK */
/* 'mark' is the watermark that the post-allocation free count is compared against */
long min = mark;
int o;
long free_cma = 0;
/* discount the pages this allocation would consume (all but one) */
free_pages -= (1 << order) - 1;
if (alloc_flags & ALLOC_HIGH)
min -= min / 2;
if (alloc_flags & ALLOC_HARDER)
min -= min / 4;
#ifdef CONFIG_CMA
/* If allocation can't use CMA areas don't use free CMA pages */
if (!(alloc_flags & ALLOC_CMA))
free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif
/* fail when free pages fall to the watermark plus the zone's lowmem_reserve */
if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
return false;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
/* free pages held at orders below the request cannot satisfy it, so discount them */
free_pages -= z->free_area[o].nr_free << o;
/* Require fewer higher order pages to be free */
min >>= 1; /* min /= 2 */
if (free_pages <= min)
return false;
}
return true;
}
当前zone的空闲页面低于WMARK_LOW水位值,调用zone_reclaim来回收页面;
我们假设zone_watermark_ok判断空闲页面充沛,接下来调用buffered_rmqueue从伙伴系统中分配物理页面。
/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline struct page *buffered_rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
gfp_t gfp_flags, int migratetype)
{
unsigned long flags;
struct page *page;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
/* order-0: take a page from the per-CPU page lists (zone->pageset) */
struct per_cpu_pages *pcp;
struct list_head *list;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
/* refill the per-CPU list in a batch from the buddy free lists */
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
migratetype, cold);
if (unlikely(list_empty(list)))
goto failed;
}
/* cold requests take from the tail, hot (cache-warm) from the head */
if (cold)
page = list_entry(list->prev, struct page, lru);
else
page = list_entry(list->next, struct page, lru);
list_del(&page->lru);
pcp->count--;
} else {
if (unlikely(gfp_flags & __GFP_NOFAIL)) {
/*
 * __GFP_NOFAIL is not to be used in new code.
 *
 * All __GFP_NOFAIL callers should be fixed so that they
 * properly detect and handle allocation failures.
 *
 * We most definitely don't want callers attempting to
 * allocate greater than order-1 page units with
 * __GFP_NOFAIL.
 */
WARN_ON_ONCE(order > 1);
}
spin_lock_irqsave(&zone->lock, flags);
/* order > 0: allocate straight from the buddy free lists */
page = __rmqueue(zone, order, migratetype);
spin_unlock(&zone->lock);
if (!page)
goto failed;
__mod_zone_freepage_state(zone, -(1 << order),
get_freepage_migratetype(page));
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
!test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
return page;
failed:
local_irq_restore(flags);
return NULL;
}
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,int migratetype)
{
unsigned int current_order;
struct free_area *area;
struct page *page;
/* Find a page of the appropriate size in the preferred list */
for (current_order = order; current_order < MAX_ORDER; ++current_order) {
/* scan from the requested order up to MAX_ORDER; allocate from the
 * first free_list[migratetype] that has pages available */
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
continue; /* nothing free at this order */
page = list_entry(area->free_list[migratetype].next,struct page, lru);
list_del(&page->lru);
rmv_page_order(page);
area->nr_free--;
/*
 * If the block found is larger than requested, expand() hands the
 * surplus halves back to the lower-order free lists of this
 * migratetype; for an exact fit it does nothing.
 */
expand(zone, page, order, current_order, area, migratetype);
set_freepage_migratetype(page, migratetype);
return page; /* success */
}
return NULL; /* no block large enough for this migratetype */
}
/*
 * Split a high-order buddy block down to the requested order, returning
 * each unused upper half to the matching lower-order free list.
 */
static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area,int migratetype)
{
/* low: the order requested; high: the order of the block actually taken (high >= low) */
unsigned long size = 1 << high;
while (high > low) {
area--; /* free_area of the next lower order */
high--; /* now working at one order lower */
size >>= 1; /* page count of each half */
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
debug_guardpage_enabled() &&
high < debug_guardpage_minorder()) {
/*
 * Mark as guard pages (or page), that will allow to
 * merge back to allocator when buddy will be freed.
 * Corresponding page table entries will not be touched,
 * pages will stay not present in virtual address space
 */
set_page_guard(zone, &page[size], high, migratetype);
continue;
}
/* hang the surplus upper half on this order's free_list[migratetype] */
list_add(&page[size].lru, &area->free_list[migratetype]);
area->nr_free++;
set_page_order(&page[size], high);
}
}