Merge "drm/msm: Fix drm_mm bottom_up search"

Linux Build Service Account authored 2017-06-08 05:13:44 -07:00; committed by Gerrit - the friendly Code Review server
commit 2bf7a89b5a
3 changed files with 104 additions and 9 deletions

drivers/gpu/drm/drm_mm.c

@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/rbtree.h>
/**
* DOC: Overview
@@ -74,7 +75,8 @@
* allocations and avoiding too much fragmentation. This means free space
* searches are O(num_holes). Given that all the fancy features drm_mm supports
* something better would be fairly complex and since gfx thrashing is a fairly
* steep cliff not a real concern. Removing a node again is O(1).
* steep cliff not a real concern. Removing a node again is O(1). With the
* rbtree tracking free holes by start address, finding the lowest hole for a
* bottom-up search is O(log(num_holes)).
*
* drm_mm supports a few features: Alignment and range restrictions can be
* supplied. Further more every &drm_mm_node has a color value (which is just an
@@ -170,6 +172,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
static void
rb_insert_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
{
struct rb_node **new = &(mm->holes_tree.rb_node);
struct rb_node *parent = NULL;
struct drm_mm_node *cur;
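/* Descend to a leaf position, ordering holes by start address
* (__drm_mm_hole_node_start() is the end of the node the hole follows). */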
while (*new) {
parent = *new;
cur = rb_entry(parent, struct drm_mm_node, hole_node);
if (__drm_mm_hole_node_start(hole_node)
< __drm_mm_hole_node_start(cur))
new = &parent->rb_left;
else
new = &parent->rb_right;
}
rb_link_node(&hole_node->hole_node, parent, new);
rb_insert_color(&hole_node->hole_node, &mm->holes_tree);
}
static void rb_erase_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
{
rb_erase(&hole_node->hole_node, &mm->holes_tree);
}
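For orientation: the tree is keyed on hole start address, so the lowest-address hole is simply the leftmost tree node, which is what the bottom-up search exploits. A minimal standalone sketch of that lookup, with a hypothetical struct hole standing in for drm_mm_node:

    #include <linux/rbtree.h>

    struct hole {
            struct rb_node rb;      /* plays the role of drm_mm_node.hole_node */
            u64 start;              /* sort key: where the free range begins */
    };

    /* Lowest-starting hole, or NULL when no holes exist: rb_first()
     * follows left children down to the tree minimum. */
    static struct hole *first_hole(struct rb_root *root)
    {
            struct rb_node *node = rb_first(root);

            return node ? rb_entry(node, struct hole, rb) : NULL;
    }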
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
u64 size, unsigned alignment,
@@ -209,6 +237,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
rb_erase_hole_node(hole_node, mm);
}
node->start = adj_start;
@@ -226,6 +255,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
node->hole_follows = 0;
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
rb_insert_hole_node(node, mm);
node->hole_follows = 1;
}
}
@@ -283,11 +313,13 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
if (node->start == hole_start) {
hole->hole_follows = 0;
list_del(&hole->hole_stack);
rb_erase_hole_node(hole, mm);
}
node->hole_follows = 0;
if (end != hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
rb_insert_hole_node(node, mm);
node->hole_follows = 1;
}
@@ -373,6 +405,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
rb_erase_hole_node(hole_node, mm);
}
node->start = adj_start;
@@ -393,6 +426,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
node->hole_follows = 0;
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
rb_insert_hole_node(node, mm);
node->hole_follows = 1;
}
}
@@ -465,6 +499,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
BUG_ON(__drm_mm_hole_node_start(node) ==
__drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
rb_erase_hole_node(node, mm);
} else
BUG_ON(__drm_mm_hole_node_start(node) !=
__drm_mm_hole_node_end(node));
@@ -473,6 +508,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
list_add(&prev_node->hole_stack, &mm->hole_stack);
rb_insert_hole_node(prev_node, mm);
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
@@ -499,6 +535,46 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
return end >= start + size;
}
static struct drm_mm_node *get_first_hole(const struct drm_mm *mm,
enum drm_mm_search_flags flags)
{
if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
/* The leftmost tree node is the lowest-address hole; the tree
* is empty when no holes exist, so guard against NULL before
* rb_entry(), which would otherwise yield a bogus pointer. */
struct rb_node *node = rb_first(&mm->holes_tree);

return node ? rb_entry(node, struct drm_mm_node, hole_node) : NULL;
} else if (flags & DRM_MM_SEARCH_BELOW) {
return list_entry(mm->hole_stack.prev,
struct drm_mm_node, hole_stack);
} else {
return list_entry(mm->hole_stack.next,
struct drm_mm_node, hole_stack);
}
}

static struct drm_mm_node *get_next_hole(struct drm_mm_node *entry,
enum drm_mm_search_flags flags)
{
if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
/* rb_next() returns NULL past the last hole; as above, never
* feed NULL to rb_entry(). */
struct rb_node *node = rb_next(&entry->hole_node);

return node ? rb_entry(node, struct drm_mm_node, hole_node) : NULL;
} else if (flags & DRM_MM_SEARCH_BELOW) {
return list_entry(entry->hole_stack.prev,
struct drm_mm_node, hole_stack);
} else {
return list_entry(entry->hole_stack.next,
struct drm_mm_node, hole_stack);
}
}

static bool drm_mm_hole_traversal_condition(const struct drm_mm *mm,
struct drm_mm_node *entry, enum drm_mm_search_flags flags)
{
/* The rbtree walk terminates on NULL; the list walk terminates when
* it wraps back around to the hole_stack sentinel. */
if (flags & DRM_MM_SEARCH_BOTTOM_UP)
return entry != NULL;
else
return &entry->hole_stack != &mm->hole_stack;
}
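These three helpers generalize the __drm_mm_for_each_hole() iteration that the search functions below used to rely on: DRM_MM_SEARCH_DEFAULT walks hole_stack front to back (most recently added hole first), DRM_MM_SEARCH_BELOW walks it back to front, and DRM_MM_SEARCH_BOTTOM_UP walks holes_tree in ascending start-address order. The rewritten search loops reduce to this skeleton (identifiers as in the patch):

    for (entry = get_first_hole(mm, flags);
         drm_mm_hole_traversal_condition(mm, entry, flags);
         entry = get_next_hole(entry, flags)) {
            /* evaluate the free range
             * [drm_mm_hole_node_start(entry), drm_mm_hole_node_end(entry)) */
    }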
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
u64 size,
unsigned alignment,
@@ -516,9 +592,14 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
u64 hole_size = adj_end - adj_start;
for (entry = get_first_hole(mm, flags);
drm_mm_hole_traversal_condition(mm, entry, flags);
entry = get_next_hole(entry, flags)) {
u64 hole_size;
adj_start = drm_mm_hole_node_start(entry);
adj_end = drm_mm_hole_node_end(entry);
hole_size = adj_end - adj_start;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
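Unlike the removed macro, which loaded adj_start/adj_end as a side effect of its loop condition, the open-coded loop assigns them explicitly at the top of each iteration; color_adjust() may then shrink that range in place before it is tested for a fit. Sketched per-hole flow (illustrative only):

    /*
     * adj_start = drm_mm_hole_node_start(entry);        full hole bounds
     * adj_end   = drm_mm_hole_node_end(entry);
     * color_adjust(entry, color, &adj_start, &adj_end); may shrink range
     * check_free_hole(adj_start, adj_end, size, alignment);
     */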
@@ -560,9 +641,14 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
flags & DRM_MM_SEARCH_BELOW) {
u64 hole_size = adj_end - adj_start;
for (entry = get_first_hole(mm, flags);
drm_mm_hole_traversal_condition(mm, entry, flags);
entry = get_next_hole(entry, flags)) {
u64 hole_size;
adj_start = drm_mm_hole_node_start(entry);
adj_end = drm_mm_hole_node_end(entry);
hole_size = adj_end - adj_start;
if (adj_start < start)
adj_start = start;
@@ -613,6 +699,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
old->allocated = 0;
new->allocated = 1;
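/* old and new cover the same range, so the hole sort key (start address)
* is unchanged; rb_replace_node() can swap the nodes without rebalancing. */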
if (old->hole_follows)
rb_replace_node(&old->hole_node, &new->hole_node,
&old->mm->holes_tree);
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -847,8 +938,9 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
mm->interval_tree = RB_ROOT;
mm->color_adjust = NULL;
mm->holes_tree = RB_ROOT;
rb_insert_hole_node(&mm->head_node, mm);
}
EXPORT_SYMBOL(drm_mm_init);
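Putting the pieces together, a minimal usage sketch (sizes and names are illustrative, assuming a kernel with this patch applied):

    struct drm_mm mm;
    struct drm_mm_node node = {};

    /* Manage the range [0, 4096) in whatever units the caller uses. */
    drm_mm_init(&mm, 0, 4096);

    /* Ask for 16 units, placed in the lowest hole that fits. */
    if (drm_mm_insert_node(&mm, &node, 16, 0, DRM_MM_SEARCH_BOTTOM_UP))
            pr_err("no hole large enough\n");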

drivers/gpu/drm/msm/msm_gem_vma.c

@@ -88,7 +88,7 @@ static int allocate_iova(struct msm_gem_address_space *aspace,
return 0;
}
ret = drm_mm_insert_node(&aspace->mm, &vma->node,
size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_DEFAULT);
size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_BOTTOM_UP);
spin_unlock(&aspace->lock);
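This one-line change is the consumer of the new flag: with DRM_MM_SEARCH_DEFAULT, allocate_iova() visited holes in hole_stack order, so a recently freed high hole could be handed out ahead of lower address space, while DRM_MM_SEARCH_BOTTOM_UP always takes the lowest fitting iova. Hypothetical state, in pages:

    /*
     * Two free holes, B freed first, then A:
     *   hole_stack order : A [0x400,0x800), B [0x000,0x100)
     *   holes_tree order : B [0x000,0x100), A [0x400,0x800)
     * DRM_MM_SEARCH_DEFAULT would place a 0x100-page node at 0x400;
     * DRM_MM_SEARCH_BOTTOM_UP places it at 0x000.
     */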

include/drm/drm_mm.h

@@ -49,6 +49,7 @@ enum drm_mm_search_flags {
DRM_MM_SEARCH_DEFAULT = 0,
DRM_MM_SEARCH_BEST = 1 << 0,
DRM_MM_SEARCH_BELOW = 1 << 1,
DRM_MM_SEARCH_BOTTOM_UP = 1 << 2,
};
enum drm_mm_allocator_flags {
@@ -63,6 +64,7 @@ struct drm_mm_node {
struct list_head node_list;
struct list_head hole_stack;
struct rb_node rb;
struct rb_node hole_node;
unsigned hole_follows : 1;
unsigned scanned_block : 1;
unsigned scanned_prev_free : 1;
@@ -84,6 +86,7 @@ struct drm_mm {
struct drm_mm_node head_node;
/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
struct rb_root interval_tree;
struct rb_root holes_tree;
unsigned int scan_check_range : 1;
unsigned scan_alignment;