Merge "msm: kgsl: Allow mempools to configure from the device tree"

Authored by Linux Build Service Account on 2016-11-07 22:29:02 -08:00; committed by Gerrit - the friendly Code Review server
commit 21a4717e4f
4 changed files with 143 additions and 48 deletions


@ -155,6 +155,23 @@ GPU Quirks:
- qcom,gpu-quirk-dp2clockgating-disable:
Disable RB sampler data path clock gating optimization
KGSL Memory Pools:
- qcom,gpu-mempools: Container for a set of GPU mempools. Multiple pools
  can be defined within qcom,gpu-mempools. Each mempool defines a page
  size (pool order), the number of pages reserved at init time, and
  whether allocation from system memory is allowed.
  Properties:
  - compatible: Must be "qcom,gpu-mempools".
  - qcom,mempool-max-pages: Maximum number of pages held across all mempools.
    If not defined there is no limit.
  - qcom,gpu-mempool: Defines a single mempool.
    Properties:
    - reg: Index of the pool (0 = lowest pool order).
    - qcom,mempool-page-size: Size, in bytes, of the pages held in this pool.
    - qcom,mempool-reserved: Number of pages reserved for this pool at init time.
    - qcom,mempool-allocate: Boolean; if present, this pool may allocate pages from
      system memory once its reserved pages are exhausted.
The following properties are optional as collecting data via coresight might
not be supported for every chipset. The documentation for coresight
properties can be found in:
@ -222,6 +239,40 @@ Example of A330 GPU in MSM8916:
coresight-child-list = <&funnel_in0>;
coresight-child-ports = <5>;
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells = <1>;
#size-cells = <0>;
compatible = "qcom,gpu-mempools";
/* 4K Page Pool configuration */
qcom,gpu-mempool@0 {
reg = <0>;
qcom,mempool-page-size = <4096>;
qcom,mempool-reserved = <2048>;
qcom,mempool-allocate;
};
/* 8K Page Pool configuration */
qcom,gpu-mempool@1 {
reg = <1>;
qcom,mempool-page-size = <8192>;
qcom,mempool-reserved = <1024>;
qcom,mempool-allocate;
};
/* 64K Page Pool configuration */
qcom,gpu-mempool@2 {
reg = <2>;
qcom,mempool-page-size = <65536>;
qcom,mempool-reserved = <256>;
};
/* 1M Page Pool configuration */
qcom,gpu-mempool@3 {
reg = <3>;
qcom,mempool-page-size = <1048576>;
qcom,mempool-reserved = <32>;
};
};
/* Power levels */
qcom,gpu-pwrlevels-bins {
#address-cells = <1>;


@ -4527,6 +4527,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status)
goto error_close_mmu;
/* Initialize the memory pools */
kgsl_init_page_pools(device->pdev);
status = kgsl_allocate_global(device, &device->memstore,
KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
@ -4581,9 +4584,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
/* Initialize common sysfs entries */
kgsl_pwrctrl_init_sysfs(device);
/* Initialize the memory pools */
kgsl_init_page_pools();
return 0;
error_free_memstore:


@ -21,6 +21,10 @@
#include "kgsl_device.h"
#include "kgsl_pool.h"
#define KGSL_MAX_POOLS 4
#define KGSL_MAX_POOL_ORDER 8
#define KGSL_MAX_RESERVED_PAGES 4096
/**
* struct kgsl_page_pool - Structure to hold information for the pool
* @pool_order: Page order describing the size of the page
@ -40,41 +44,10 @@ struct kgsl_page_pool {
struct list_head page_list;
};
static struct kgsl_page_pool kgsl_pools[] = {
{
.pool_order = 0,
.reserved_pages = 2048,
.allocation_allowed = true,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[0].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[0].page_list),
},
#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
{
.pool_order = 1,
.reserved_pages = 1024,
.allocation_allowed = true,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[1].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[1].page_list),
},
{
.pool_order = 4,
.reserved_pages = 256,
.allocation_allowed = false,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[2].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[2].page_list),
},
{
.pool_order = 8,
.reserved_pages = 32,
.allocation_allowed = false,
.list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[3].list_lock),
.page_list = LIST_HEAD_INIT(kgsl_pools[3].page_list),
},
#endif
};
#define KGSL_NUM_POOLS ARRAY_SIZE(kgsl_pools)
static struct kgsl_page_pool kgsl_pools[KGSL_MAX_POOLS];
static int kgsl_num_pools;
static int kgsl_pool_max_pages;
/* Returns KGSL pool corresponding to input page order */
static struct kgsl_page_pool *
@ -82,7 +55,7 @@ _kgsl_get_pool_from_order(unsigned int order)
{
int i;
for (i = 0; i < KGSL_NUM_POOLS; i++) {
for (i = 0; i < kgsl_num_pools; i++) {
if (kgsl_pools[i].pool_order == order)
return &kgsl_pools[i];
}
@ -154,7 +127,7 @@ static int kgsl_pool_size_total(void)
int i;
int total = 0;
for (i = 0; i < KGSL_NUM_POOLS; i++)
for (i = 0; i < kgsl_num_pools; i++)
total += kgsl_pool_size(&kgsl_pools[i]);
return total;
}
@ -207,7 +180,7 @@ kgsl_pool_reduce(unsigned int target_pages, bool exit)
total_pages = kgsl_pool_size_total();
for (i = (KGSL_NUM_POOLS - 1); i >= 0; i--) {
for (i = (kgsl_num_pools - 1); i >= 0; i--) {
pool = &kgsl_pools[i];
/*
@ -300,7 +273,7 @@ static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
for (i = 0; i < KGSL_NUM_POOLS; i++)
for (i = 0; i < kgsl_num_pools; i++)
if (order == kgsl_pools[i].pool_order)
return i;
@ -384,10 +357,13 @@ void kgsl_pool_free_page(struct page *page)
page_order = compound_order(page);
pool = _kgsl_get_pool_from_order(page_order);
if (pool != NULL) {
_kgsl_pool_add_page(pool, page);
return;
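/* Pool the page only while total pooled pages stay under qcom,mempool-max-pages (0 = no limit) */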
if (!kgsl_pool_max_pages ||
(kgsl_pool_size_total() < kgsl_pool_max_pages)) {
pool = _kgsl_get_pool_from_order(page_order);
if (pool != NULL) {
_kgsl_pool_add_page(pool, page);
return;
}
}
/* Give back to system as not added to pool */
@ -398,7 +374,7 @@ static void kgsl_pool_reserve_pages(void)
{
int i, j;
for (i = 0; i < KGSL_NUM_POOLS; i++) {
for (i = 0; i < kgsl_num_pools; i++) {
struct page *page;
for (j = 0; j < kgsl_pools[i].reserved_pages; j++) {
@ -445,8 +421,76 @@ static struct shrinker kgsl_pool_shrinker = {
.batch = 0,
};
void kgsl_init_page_pools(void)
static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
bool allocation_allowed)
{
#ifdef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
if (order > 0) {
pr_info("%s: Pool order:%d not supported\n", __func__, order);
return;
}
#endif
if ((order > KGSL_MAX_POOL_ORDER) ||
(reserved_pages > KGSL_MAX_RESERVED_PAGES))
return;
kgsl_pools[kgsl_num_pools].pool_order = order;
kgsl_pools[kgsl_num_pools].reserved_pages = reserved_pages;
kgsl_pools[kgsl_num_pools].allocation_allowed = allocation_allowed;
spin_lock_init(&kgsl_pools[kgsl_num_pools].list_lock);
INIT_LIST_HEAD(&kgsl_pools[kgsl_num_pools].page_list);
kgsl_num_pools++;
}
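A minimal sketch, not part of this change, of how the new kgsl_pool_config() helper maps onto the hard-coded table removed above, assuming a 4K base page size (orders and reserved-page counts taken from the old static kgsl_pools[] array):
/* sketch only: equivalent of the removed static pool table */
kgsl_pool_config(0, 2048, true);	/* 4K pages, order 0, allocation allowed */
kgsl_pool_config(1, 1024, true);	/* 8K pages, order 1, allocation allowed */
kgsl_pool_config(4, 256, false);	/* 64K pages, order 4 */
kgsl_pool_config(8, 32, false);		/* 1M pages, order 8 */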
static void kgsl_of_parse_mempools(struct device_node *node)
{
struct device_node *child;
unsigned int page_size, reserved_pages;
bool allocation_allowed;
for_each_child_of_node(node, child) {
unsigned int index;
/* Reset per child: a missing qcom,mempool-reserved property means 0 */
reserved_pages = 0;
if (of_property_read_u32(child, "reg", &index))
return;
if (index >= KGSL_MAX_POOLS)
continue;
if (of_property_read_u32(child, "qcom,mempool-page-size",
&page_size))
return;
of_property_read_u32(child, "qcom,mempool-reserved",
&reserved_pages);
allocation_allowed = of_property_read_bool(child,
"qcom,mempool-allocate");
kgsl_pool_config(ilog2(page_size >> PAGE_SHIFT), reserved_pages,
allocation_allowed);
}
}
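A worked example of the ilog2(page_size >> PAGE_SHIFT) conversion used above, assuming 4K base pages (PAGE_SHIFT = 12); the results match the pools in the device-tree example and the removed static table:
/* sketch only: qcom,mempool-page-size -> pool order */
/* 4096    >> 12 = 1,   ilog2(1)   = 0  -> order-0 (4K)  pool */
/* 8192    >> 12 = 2,   ilog2(2)   = 1  -> order-1 (8K)  pool */
/* 65536   >> 12 = 16,  ilog2(16)  = 4  -> order-4 (64K) pool */
/* 1048576 >> 12 = 256, ilog2(256) = 8  -> order-8 (1M)  pool */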
static void kgsl_of_get_mempools(struct device_node *parent)
{
struct device_node *node;
node = of_find_compatible_node(parent, NULL, "qcom,gpu-mempools");
if (node != NULL) {
/* Get Max pages limit for mempool */
of_property_read_u32(node, "qcom,mempool-max-pages",
&kgsl_pool_max_pages);
kgsl_of_parse_mempools(node);
}
}
void kgsl_init_page_pools(struct platform_device *pdev)
{
/* Get GPU mempools data and configure pools */
kgsl_of_get_mempools(pdev->dev.of_node);
/* Reserve the appropriate number of pages for each pool */
kgsl_pool_reserve_pages();


@ -35,7 +35,7 @@ kgsl_gfp_mask(unsigned int page_order)
void kgsl_pool_free_sgt(struct sg_table *sgt);
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
void kgsl_init_page_pools(void);
void kgsl_init_page_pools(struct platform_device *pdev);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
unsigned int pages_len, unsigned int *align);