android/lowmemorykiller: Selectively count free CMA pages

In certain memory configurations there can be a large number of
CMA pages which are not suitable for satisfying certain memory
requests.

This large number of unsuitable pages can prevent the
lowmemorykiller from killing any tasks, because the
lowmemorykiller counts all free pages.

To ensure that the lowmemorykiller evaluates free memory
correctly, count the free CMA pages only if they are suitable
for satisfying the memory request.

Change-Id: I7f06d53e2d8cfe7439e5561fe6e5209ce73b1c90
CRs-fixed: 437016
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Author:    Liam Mark <lmark@codeaurora.org>, 2013-03-27 12:34:51 -07:00
Committer: Jeevan Shriram
commit 92c1fefed5 (parent e4c1c94265)
3 changed files with 86 additions and 14 deletions
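
Before the diff, a rough illustration of the check being added: the patch asks
whether an allocation's migrate type can ever be satisfied from MIGRATE_CMA,
either directly or through the page allocator's fallback lists. The user-space
model below is a sketch only; its fallback table is a made-up stand-in for the
kernel's fallbacks[] array, and can_use_cma() mirrors (but is not) the patch's
can_use_cma_pages().

#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_CMA,
	MIGRATE_TYPES		/* doubles as the list terminator */
};

/* Made-up fallback lists, each delimited by MIGRATE_TYPES. */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA, MIGRATE_RECLAIMABLE,
				  MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_CMA]         = { MIGRATE_TYPES },
};

static int can_use_cma(int mtype)
{
	int i;

	if (mtype == MIGRATE_CMA)
		return 1;
	for (i = 0; fallbacks[mtype][i] != MIGRATE_TYPES; i++)
		if (fallbacks[mtype][i] == MIGRATE_CMA)
			return 1;
	return 0;
}

int main(void)
{
	/* Movable requests may dip into CMA pages; unmovable ones may not. */
	printf("movable:   %d\n", can_use_cma(MIGRATE_MOVABLE));	/* 1 */
	printf("unmovable: %d\n", can_use_cma(MIGRATE_UNMOVABLE));	/* 0 */
	return 0;
}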

drivers/staging/android/lowmemorykiller.c

@@ -107,16 +107,47 @@ static int test_task_flag(struct task_struct *p, int flag)
 
 static DEFINE_MUTEX(scan_mutex);
 
+int can_use_cma_pages(gfp_t gfp_mask)
+{
+	int can_use = 0;
+	int mtype = gfpflags_to_migratetype(gfp_mask);
+	int i = 0;
+	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+	if (is_migrate_cma(mtype)) {
+		can_use = 1;
+	} else {
+		for (i = 0;; i++) {
+			int fallbacktype = mtype_fallbacks[i];
+
+			if (is_migrate_cma(fallbacktype)) {
+				can_use = 1;
+				break;
+			}
+
+			if (fallbacktype == MIGRATE_TYPES)
+				break;
+		}
+	}
+	return can_use;
+}
+
 void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
-					int *other_free, int *other_file)
+					int *other_free, int *other_file,
+					int use_cma_pages)
 {
 	struct zone *zone;
 	struct zoneref *zoneref;
 	int zone_idx;
 
 	for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
-		if ((zone_idx = zonelist_zone_idx(zoneref)) == ZONE_MOVABLE)
+		zone_idx = zonelist_zone_idx(zoneref);
+		if (zone_idx == ZONE_MOVABLE) {
+			if (!use_cma_pages)
+				*other_free -=
+				    zone_page_state(zone, NR_FREE_CMA_PAGES);
 			continue;
+		}
 
 		if (zone_idx > classzone_idx) {
 			if (other_free != NULL)
@@ -127,12 +158,22 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
 						       NR_FILE_PAGES)
 					      - zone_page_state(zone, NR_SHMEM);
 		} else if (zone_idx < classzone_idx) {
-			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0))
-				*other_free -=
-					zone->lowmem_reserve[classzone_idx];
-			else
+			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
+				if (!use_cma_pages) {
+					*other_free -= min(
+					  zone->lowmem_reserve[classzone_idx] +
+					  zone_page_state(
+					    zone, NR_FREE_CMA_PAGES),
+					  zone_page_state(
+					    zone, NR_FREE_PAGES));
+				} else {
+					*other_free -=
+					  zone->lowmem_reserve[classzone_idx];
+				}
+			} else {
 				*other_free -=
 					zone_page_state(zone, NR_FREE_PAGES);
+			}
 		}
 	}
 }
@@ -144,12 +185,14 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
 	struct zonelist *zonelist;
 	enum zone_type high_zoneidx, classzone_idx;
 	unsigned long balance_gap;
+	int use_cma_pages;
 
 	gfp_mask = sc->gfp_mask;
 	zonelist = node_zonelist(0, gfp_mask);
 	high_zoneidx = gfp_zone(gfp_mask);
 	first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
 	classzone_idx = zone_idx(preferred_zone);
+	use_cma_pages = can_use_cma_pages(gfp_mask);
 
 	balance_gap = min(low_wmark_pages(preferred_zone),
 			  (preferred_zone->present_pages +
@@ -161,22 +204,38 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
 			   balance_gap, 0, 0))) {
 		if (lmk_fast_run)
 			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-				       other_file);
+				       other_file, use_cma_pages);
 		else
 			tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-				       NULL);
+				       NULL, use_cma_pages);
 
-		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0))
-			*other_free -=
-				  preferred_zone->lowmem_reserve[_ZONE];
-		else
+		if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+			if (!use_cma_pages) {
+				*other_free -= min(
+				  preferred_zone->lowmem_reserve[_ZONE]
+				  + zone_page_state(
+				    preferred_zone, NR_FREE_CMA_PAGES),
+				  zone_page_state(
+				    preferred_zone, NR_FREE_PAGES));
+			} else {
+				*other_free -=
+				  preferred_zone->lowmem_reserve[_ZONE];
+			}
+		} else {
 			*other_free -= zone_page_state(preferred_zone,
 						      NR_FREE_PAGES);
+		}
 
 		lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
 			     "ofree %d, %d\n", *other_free, *other_file);
 	} else {
 		tune_lmk_zone_param(zonelist, classzone_idx, other_free,
-			       other_file);
+			       other_file, use_cma_pages);
+
+		if (!use_cma_pages) {
+			*other_free -=
+			  zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+		}
 
 		lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
 			     "%d\n", *other_free, *other_file);

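The effect of the tuning is easiest to see with numbers. The figures below are
invented for illustration; only the subtraction itself comes from the patch.

	/* A zone with a large CMA carve-out, and a request that cannot use CMA. */
	int free_pages    = 50000;	/* zone_page_state(zone, NR_FREE_PAGES)     */
	int free_cma      = 30000;	/* zone_page_state(zone, NR_FREE_CMA_PAGES) */
	int use_cma_pages = 0;		/* can_use_cma_pages() said no              */
	int other_free    = free_pages;

	if (!use_cma_pages)
		other_free -= free_cma;	/* 20000 pages are genuinely usable */

Without the subtraction, the lowmemorykiller would compare its minfree
thresholds against all 50000 free pages and might never kill, even though only
20000 of them can satisfy the request.
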
include/linux/mmzone.h

@ -63,6 +63,14 @@ enum {
MIGRATE_TYPES MIGRATE_TYPES
}; };
/*
* Returns a list which contains the migrate types on to which
* an allocation falls back when the free list for the migrate
* type mtype is depleted.
* The end of the list is delimited by the type MIGRATE_TYPES.
*/
extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA #ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page); bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)

mm/page_alloc.c

@@ -1466,6 +1466,11 @@ static int fallbacks[MIGRATE_TYPES][4] = {
 #endif
 };
 
+int *get_migratetype_fallbacks(int mtype)
+{
+	return fallbacks[mtype];
+}
+
 #ifdef CONFIG_CMA
 static struct page *__rmqueue_cma_fallback(struct zone *zone,
 				unsigned int order)
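
A note on the accessor: exporting get_migratetype_fallbacks() keeps
fallbacks[] static to mm/page_alloc.c while letting the lowmemorykiller reuse
the allocator's own fallback policy. Callers walk the returned list until the
MIGRATE_TYPES delimiter, as can_use_cma_pages() does above; a minimal sketch
of the pattern:

	int *fb = get_migratetype_fallbacks(mtype);
	int i;

	for (i = 0; fb[i] != MIGRATE_TYPES; i++) {
		if (is_migrate_cma(fb[i]))
			break;	/* this allocation can fall back to CMA */
	}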