Merge branch 'drm-mm-cleanup' into drm-next
* drm-mm-cleanup:
  radeon: move blit functions to radeon_asic.h
  radeon: kill decls for inline functions
  radeon: consolidate asic-specific function decls for r600 & later
  drm/radeon: kill radeon_bo->gobj pointer
  drm/radeon: introduce gem_to_radeon_bo helper
  drm/radeon: embed struct drm_gem_object
  drm: mm: add helper to unwind scan state
  drm: mm: add api for embedding struct drm_mm_node
  drm: mm: extract node insert helper functions
  drm: mm: track free areas implicitly
  drm/nouveau: don't munge in drm_mm internals
commit 63871f89d1
27 changed files with 501 additions and 432 deletions
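The series is easiest to read with the new embedded-node API in mind. The sketch below is illustrative only and is not part of the commit: it shows how a driver-side object could carry its own struct drm_mm_node and use the drm_mm_insert_node()/drm_mm_remove_node() entry points added here, instead of the older drm_mm_get_block()-style calls that allocate a node internally. The my_obj structure and the surrounding helper functions are hypothetical.

/* Hypothetical driver object embedding a drm_mm_node (sketch only). */
struct my_obj {
	struct drm_mm_node node;	/* embedded, no separate kmalloc by drm_mm */
	/* ... driver-specific fields ... */
};

static int my_obj_bind(struct drm_mm *mm, struct my_obj *obj,
		       unsigned long size, unsigned alignment)
{
	/* The preallocated node must be cleared before insertion. */
	memset(&obj->node, 0, sizeof(obj->node));
	return drm_mm_insert_node(mm, &obj->node, size, alignment);
}

static void my_obj_unbind(struct my_obj *obj)
{
	drm_mm_remove_node(&obj->node);
}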
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	else {
 		child =
 		    list_entry(mm->unused_nodes.next,
-			       struct drm_mm_node, free_stack);
-		list_del(&child->free_stack);
+			       struct drm_mm_node, node_list);
+		list_del(&child->node_list);
 		--mm->num_unused;
 	}
 	spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->free_stack, &mm->unused_nodes);
+		list_add_tail(&node->node_list, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-				   unsigned long start,
-				   unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(mm, atomic);
-	if (unlikely(child == NULL))
-		return -ENOMEM;
-
-	child->free = 1;
-	child->size = size;
-	child->start = start;
-	child->mm = mm;
-
-	list_add_tail(&child->node_list, &mm->node_list);
-	list_add_tail(&child->free_stack, &mm->free_stack);
-
-	return 0;
+	return hole_node->start + hole_node->size;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size,
-						 int atomic)
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(parent->mm, atomic);
-	if (unlikely(child == NULL))
-		return NULL;
-
-	INIT_LIST_HEAD(&child->free_stack);
-
-	child->size = size;
-	child->start = parent->start;
-	child->mm = parent->mm;
-
-	list_add_tail(&child->node_list, &parent->node_list);
-	INIT_LIST_HEAD(&child->free_stack);
-
-	parent->size -= size;
-	parent->start += size;
-	return child;
+	struct drm_mm_node *next_node =
+		list_entry(hole_node->node_list.next, struct drm_mm_node,
+			   node_list);
+
+	return next_node->start;
 }
 
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+				 struct drm_mm_node *node,
+				 unsigned long size, unsigned alignment)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (alignment)
+		tmp = hole_start % alignment;
+
+	if (!tmp) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	} else
+		wasted = alignment - tmp;
+
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
+
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
 					     int atomic)
 {
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
+	struct drm_mm_node *node;
 
-	if (alignment)
-		tmp = node->start % alignment;
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
 
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
-
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
-
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+	drm_mm_insert_helper(hole_node, node, size, alignment);
 
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
+
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long start, unsigned long end)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (hole_start < start)
+		wasted += start - hole_start;
+	if (alignment)
+		tmp = (hole_start + wasted) % alignment;
+
+	if (tmp)
+		wasted += alignment - tmp;
+
+	if (!wasted) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	}
+
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
+
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
 						    unsigned long size,
 						    unsigned alignment,
 						    unsigned long start,
 						    unsigned long end,
 						    int atomic)
 {
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-	unsigned wasted = 0;
+	struct drm_mm_node *node;
 
-	if (node->start < start)
-		wasted += start - node->start;
-	if (alignment)
-		tmp = ((node->start + wasted) % alignment);
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
 
-	if (tmp)
-		wasted += alignment - tmp;
-	if (wasted) {
-		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
-
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
-
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
 
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
  */
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+						start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+	       || node->scanned_next_free);
+
+	prev_node =
+	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+	if (node->hole_follows) {
+		BUG_ON(drm_mm_hole_node_start(node)
+		       == drm_mm_hole_node_end(node));
+		list_del(&node->hole_stack);
+	} else
+		BUG_ON(drm_mm_hole_node_start(node)
+		       != drm_mm_hole_node_end(node));
+
+	if (!prev_node->hole_follows) {
+		prev_node->hole_follows = 1;
+		list_add(&prev_node->hole_stack, &mm->hole_stack);
+	} else
+		list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
-void drm_mm_put_block(struct drm_mm_node *cur)
 {
-	struct drm_mm *mm = cur->mm;
-	struct list_head *cur_head = &cur->node_list;
-	struct list_head *root_head = &mm->node_list;
-	struct drm_mm_node *prev_node = NULL;
-	struct drm_mm_node *next_node;
+	struct drm_mm *mm = node->mm;
 
-	int merged = 0;
+	drm_mm_remove_node(node);
 
-	BUG_ON(cur->scanned_block || cur->scanned_prev_free
-	       || cur->scanned_next_free);
-
-	if (cur_head->prev != root_head) {
-		prev_node =
-		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
-		if (prev_node->free) {
-			prev_node->size += cur->size;
-			merged = 1;
-		}
-	}
-	if (cur_head->next != root_head) {
-		next_node =
-		    list_entry(cur_head->next, struct drm_mm_node, node_list);
-		if (next_node->free) {
-			if (merged) {
-				prev_node->size += next_node->size;
-				list_del(&next_node->node_list);
-				list_del(&next_node->free_stack);
-				spin_lock(&mm->unused_lock);
-				if (mm->num_unused < MM_UNUSED_TARGET) {
-					list_add(&next_node->free_stack,
-						 &mm->unused_nodes);
-					++mm->num_unused;
-				} else
-					kfree(next_node);
-				spin_unlock(&mm->unused_lock);
-			} else {
-				next_node->size += cur->size;
-				next_node->start = cur->start;
-				merged = 1;
-			}
-		}
-	}
-	if (!merged) {
-		cur->free = 1;
-		list_add(&cur->free_stack, &mm->free_stack);
-	} else {
-		list_del(&cur->node_list);
-		spin_lock(&mm->unused_lock);
-		if (mm->num_unused < MM_UNUSED_TARGET) {
-			list_add(&cur->free_stack, &mm->unused_nodes);
-			++mm->num_unused;
-		} else
-			kfree(cur);
-		spin_unlock(&mm->unused_lock);
-	}
+	spin_lock(&mm->unused_lock);
+	if (mm->num_unused < MM_UNUSED_TARGET) {
+		list_add(&node->node_list, &mm->unused_nodes);
+		++mm->num_unused;
+	} else
+		kfree(node);
+	spin_unlock(&mm->unused_lock);
 }
 
 EXPORT_SYMBOL(drm_mm_put_block);
 
 static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		if (!check_free_hole(entry->start, entry->start + entry->size,
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		BUG_ON(!entry->hole_follows);
+		if (!check_free_hole(drm_mm_hole_node_start(entry),
+				     drm_mm_hole_node_end(entry),
 				     size, alignment))
 			continue;
 
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		unsigned long adj_start = entry->start < start ?
-			start : entry->start;
-		unsigned long adj_end = entry->start + entry->size > end ?
-			end : entry->start + entry->size;
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+			start : drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+			end : drm_mm_hole_node_end(entry);
 
+		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -375,6 +425,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->node_list, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
 /**
  * Initializa lru scanning.
  *
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
 	mm->scan_hit_start = 0;
 	mm->scan_hit_size = 0;
 	mm->scan_check_range = 0;
+	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan);
 
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
 	mm->scan_start = start;
 	mm->scan_end = end;
 	mm->scan_check_range = 1;
+	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 int drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct list_head *prev_free, *next_free;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
+	unsigned long hole_start, hole_end;
 	unsigned long adj_start;
 	unsigned long adj_end;
 
 	mm->scanned_blocks++;
 
-	prev_free = next_free = NULL;
-
-	BUG_ON(node->free);
+	BUG_ON(node->scanned_block);
 	node->scanned_block = 1;
-	node->free = 1;
-
-	if (node->node_list.prev != &mm->node_list) {
-		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
-				       node_list);
-
-		if (prev_node->free) {
-			list_del(&prev_node->node_list);
-
-			node->start = prev_node->start;
-			node->size += prev_node->size;
-
-			prev_node->scanned_prev_free = 1;
-
-			prev_free = &prev_node->free_stack;
-		}
-	}
-
-	if (node->node_list.next != &mm->node_list) {
-		next_node = list_entry(node->node_list.next, struct drm_mm_node,
-				       node_list);
-
-		if (next_node->free) {
-			list_del(&next_node->node_list);
-
-			node->size += next_node->size;
-
-			next_node->scanned_next_free = 1;
-
-			next_free = &next_node->free_stack;
-		}
-	}
-
-	/* The free_stack list is not used for allocated objects, so these two
-	 * pointers can be abused (as long as no allocations in this memory
-	 * manager happens). */
-	node->free_stack.prev = prev_free;
-	node->free_stack.next = next_free;
 
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
+
+	node->scanned_preceeds_hole = prev_node->hole_follows;
+	prev_node->hole_follows = 1;
+	list_del(&node->node_list);
+	node->node_list.prev = &prev_node->node_list;
+	node->node_list.next = &mm->prev_scanned_node->node_list;
+	mm->prev_scanned_node = node;
+
+	hole_start = drm_mm_hole_node_start(prev_node);
+	hole_end = drm_mm_hole_node_end(prev_node);
 	if (mm->scan_check_range) {
-		adj_start = node->start < mm->scan_start ?
-			mm->scan_start : node->start;
-		adj_end = node->start + node->size > mm->scan_end ?
-			mm->scan_end : node->start + node->size;
+		adj_start = hole_start < mm->scan_start ?
+			mm->scan_start : hole_start;
+		adj_end = hole_end > mm->scan_end ?
+			mm->scan_end : hole_end;
 	} else {
-		adj_start = node->start;
-		adj_end = node->start + node->size;
+		adj_start = hole_start;
+		adj_end = hole_end;
 	}
 
 	if (check_free_hole(adj_start , adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
-		mm->scan_hit_start = node->start;
-		mm->scan_hit_size = node->size;
+		mm->scan_hit_start = hole_start;
+		mm->scan_hit_size = hole_end;
 
 		return 1;
 	}
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
 int drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
 
 	mm->scanned_blocks--;
 
 	BUG_ON(!node->scanned_block);
 	node->scanned_block = 0;
-	node->free = 0;
 
-	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
-			       free_stack);
-	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
-			       free_stack);
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
 
-	if (prev_node) {
-		BUG_ON(!prev_node->scanned_prev_free);
-		prev_node->scanned_prev_free = 0;
-
-		list_add_tail(&prev_node->node_list, &node->node_list);
-
-		node->start = prev_node->start + prev_node->size;
-		node->size -= prev_node->size;
-	}
-
-	if (next_node) {
-		BUG_ON(!next_node->scanned_next_free);
-		next_node->scanned_next_free = 0;
-
-		list_add(&next_node->node_list, &node->node_list);
-
-		node->size -= next_node->size;
-	}
-
-	INIT_LIST_HEAD(&node->free_stack);
+	prev_node->hole_follows = node->scanned_preceeds_hole;
+	INIT_LIST_HEAD(&node->node_list);
+	list_add(&node->node_list, &prev_node->node_list);
 
 	/* Only need to check for containement because start&size for the
 	 * complete resulting free block (not just the desired part) is
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->node_list;
+	struct list_head *head = &mm->head_node.node_list;
 
 	return (head->next->next == head);
 }
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-	INIT_LIST_HEAD(&mm->node_list);
-	INIT_LIST_HEAD(&mm->free_stack);
+	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
 	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size, 0);
+	/* Clever trick to avoid a special case in the free hole tracking. */
+	INIT_LIST_HEAD(&mm->head_node.node_list);
+	INIT_LIST_HEAD(&mm->head_node.hole_stack);
+	mm->head_node.hole_follows = 1;
+	mm->head_node.scanned_block = 0;
+	mm->head_node.scanned_prev_free = 0;
+	mm->head_node.scanned_next_free = 0;
+	mm->head_node.mm = mm;
+	mm->head_node.start = start + size;
+	mm->head_node.size = start - mm->head_node.start;
+	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct list_head *bnode = mm->free_stack.next;
-	struct drm_mm_node *entry;
-	struct drm_mm_node *next;
+	struct drm_mm_node *entry, *next;
 
-	entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
-	if (entry->node_list.next != &mm->node_list ||
-	    entry->free_stack.next != &mm->free_stack) {
+	if (!list_empty(&mm->head_node.node_list)) {
 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
 		return;
 	}
 
-	list_del(&entry->free_stack);
-	list_del(&entry->node_list);
-	kfree(entry);
-
 	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
-		list_del(&entry->free_stack);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+		list_del(&entry->node_list);
 		kfree(entry);
 		--mm->num_unused;
 	}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
 
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+			prefix, hole_start, hole_end,
+			hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
 			prefix, entry->start, entry->start + entry->size,
-			entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+			entry->size);
+		total_used += entry->size;
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+			hole_size = hole_end - hole_start;
+			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+				prefix, hole_start, hole_end,
+				hole_size);
+			total_free += hole_size;
+		}
 	}
-	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+	total = total_free + total_used;
+
+	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
 		total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
 
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+				hole_start, hole_end, hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+				entry->start, entry->start + entry->size,
+				entry->size);
+		total_used += entry->size;
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(&mm->head_node);
+			hole_end = drm_mm_hole_node_end(&mm->head_node);
+			hole_size = hole_end - hole_start;
+			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+					hole_start, hole_end, hole_size);
+			total_free += hole_size;
+		}
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+	total = total_free + total_used;
+
+	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
@@ -909,7 +909,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 }
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 	kfree(chan);
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
 	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
 
-	if (dev_priv->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&dev_priv->ramin_heap))
 		drm_mm_takedown(&dev_priv->ramin_heap);
 
 	dev_priv->engine.instmem.priv = NULL;
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
 		return;
 
 	nouveau_vm_ref(NULL, &chan->vm, NULL);
-	if (chan->ramin_heap.free_stack.next)
+	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);
 	kfree(chan);
@@ -1030,7 +1030,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 	 * just update base pointers
 	 */
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -1145,7 +1145,7 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -1191,7 +1191,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 	}
 
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -1308,7 +1308,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -572,7 +572,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	obj_size += evergreen_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("evergreen failed to allocate shader\n");
@@ -2728,7 +2728,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
 
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT,
 				     &rdev->ih.ring_obj);
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "radeon.h"
 #include "radeon_reg.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 #define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 			     &rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
@@ -26,6 +26,7 @@
 #include "drmP.h"
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 
 /*
@@ -258,8 +258,9 @@ struct radeon_bo {
 	int surface_reg;
 	/* Constant after initialization */
 	struct radeon_device *rdev;
-	struct drm_gem_object *gobj;
+	struct drm_gem_object gem_base;
 };
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
 struct radeon_bo_list {
 	struct ttm_validate_buffer tv;
@@ -1197,19 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
-			u64 src_gpu_addr, u64 dst_gpu_addr,
-			int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
-			     u64 src_gpu_addr, u64 dst_gpu_addr,
-			     int size_bytes);
-
 static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	if (reg < rdev->rmmio_size)
@ -1460,59 +1448,12 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
|
||||||
extern int radeon_resume_kms(struct drm_device *dev);
|
extern int radeon_resume_kms(struct drm_device *dev);
|
||||||
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
|
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
|
||||||
|
|
||||||
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
|
/*
|
||||||
extern bool r600_card_posted(struct radeon_device *rdev);
|
* r600 functions used by radeon_encoder.c
|
||||||
extern void r600_cp_stop(struct radeon_device *rdev);
|
*/
|
||||||
extern int r600_cp_start(struct radeon_device *rdev);
|
|
||||||
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
|
|
||||||
extern int r600_cp_resume(struct radeon_device *rdev);
|
|
||||||
extern void r600_cp_fini(struct radeon_device *rdev);
|
|
||||||
extern int r600_count_pipe_bits(uint32_t val);
|
|
||||||
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
|
|
||||||
extern int r600_pcie_gart_init(struct radeon_device *rdev);
|
|
||||||
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
|
||||||
extern int r600_ib_test(struct radeon_device *rdev);
|
|
||||||
extern int r600_ring_test(struct radeon_device *rdev);
|
|
||||||
extern void r600_scratch_init(struct radeon_device *rdev);
|
|
||||||
extern int r600_blit_init(struct radeon_device *rdev);
|
|
||||||
extern void r600_blit_fini(struct radeon_device *rdev);
|
|
||||||
extern int r600_init_microcode(struct radeon_device *rdev);
|
|
||||||
extern int r600_asic_reset(struct radeon_device *rdev);
|
|
||||||
/* r600 irq */
|
|
||||||
extern int r600_irq_init(struct radeon_device *rdev);
|
|
||||||
extern void r600_irq_fini(struct radeon_device *rdev);
|
|
||||||
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
|
|
||||||
extern int r600_irq_set(struct radeon_device *rdev);
|
|
||||||
extern void r600_irq_suspend(struct radeon_device *rdev);
|
|
||||||
extern void r600_disable_interrupts(struct radeon_device *rdev);
|
|
||||||
extern void r600_rlc_stop(struct radeon_device *rdev);
|
|
||||||
/* r600 audio */
|
|
||||||
extern int r600_audio_init(struct radeon_device *rdev);
|
|
||||||
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
|
|
||||||
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
|
|
||||||
extern int r600_audio_channels(struct radeon_device *rdev);
|
|
||||||
extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
|
|
||||||
extern int r600_audio_rate(struct radeon_device *rdev);
|
|
||||||
extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
|
|
||||||
extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
|
|
||||||
extern void r600_audio_schedule_polling(struct radeon_device *rdev);
|
|
||||||
extern void r600_audio_enable_polling(struct drm_encoder *encoder);
|
|
||||||
extern void r600_audio_disable_polling(struct drm_encoder *encoder);
|
|
||||||
extern void r600_audio_fini(struct radeon_device *rdev);
|
|
||||||
extern void r600_hdmi_init(struct drm_encoder *encoder);
|
|
||||||
extern void r600_hdmi_enable(struct drm_encoder *encoder);
|
extern void r600_hdmi_enable(struct drm_encoder *encoder);
|
||||||
extern void r600_hdmi_disable(struct drm_encoder *encoder);
|
extern void r600_hdmi_disable(struct drm_encoder *encoder);
|
||||||
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
|
||||||
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
|
|
||||||
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
|
|
||||||
|
|
||||||
extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
|
|
||||||
extern void r700_cp_stop(struct radeon_device *rdev);
|
|
||||||
extern void r700_cp_fini(struct radeon_device *rdev);
|
|
||||||
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
|
|
||||||
extern int evergreen_irq_set(struct radeon_device *rdev);
|
|
||||||
extern int evergreen_blit_init(struct radeon_device *rdev);
|
|
||||||
extern void evergreen_blit_fini(struct radeon_device *rdev);
|
|
||||||
|
|
||||||
extern int ni_init_microcode(struct radeon_device *rdev);
|
extern int ni_init_microcode(struct radeon_device *rdev);
|
||||||
extern int btc_mc_load_microcode(struct radeon_device *rdev);
|
extern int btc_mc_load_microcode(struct radeon_device *rdev);
|
||||||
|
@ -1524,14 +1465,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
|
||||||
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
|
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* evergreen */
|
|
||||||
struct evergreen_mc_save {
|
|
||||||
u32 vga_control[6];
|
|
||||||
u32 vga_render_control;
|
|
||||||
u32 vga_hdp_control;
|
|
||||||
u32 crtc_control[6];
|
|
||||||
};
|
|
||||||
|
|
||||||
#include "radeon_object.h"
|
#include "radeon_object.h"
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
|
||||||
void r100_fini(struct radeon_device *rdev);
|
void r100_fini(struct radeon_device *rdev);
|
||||||
int r100_suspend(struct radeon_device *rdev);
|
int r100_suspend(struct radeon_device *rdev);
|
||||||
int r100_resume(struct radeon_device *rdev);
|
int r100_resume(struct radeon_device *rdev);
|
||||||
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
|
|
||||||
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
|
||||||
void r100_vga_set_state(struct radeon_device *rdev, bool state);
|
void r100_vga_set_state(struct radeon_device *rdev, bool state);
|
||||||
bool r100_gpu_is_lockup(struct radeon_device *rdev);
|
bool r100_gpu_is_lockup(struct radeon_device *rdev);
|
||||||
int r100_asic_reset(struct radeon_device *rdev);
|
int r100_asic_reset(struct radeon_device *rdev);
|
||||||
|
@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
|
||||||
extern int r300_cs_parse(struct radeon_cs_parser *p);
|
extern int r300_cs_parse(struct radeon_cs_parser *p);
|
||||||
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
||||||
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
|
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
|
||||||
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
|
|
||||||
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
|
||||||
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
||||||
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
|
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
|
||||||
extern void r300_set_reg_safe(struct radeon_device *rdev);
|
extern void r300_set_reg_safe(struct radeon_device *rdev);
|
||||||
|
@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
|
||||||
void rs400_gart_disable(struct radeon_device *rdev);
|
void rs400_gart_disable(struct radeon_device *rdev);
|
||||||
void rs400_gart_fini(struct radeon_device *rdev);
|
void rs400_gart_fini(struct radeon_device *rdev);
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* rs600.
|
* rs600.
|
||||||
*/
|
*/
|
||||||
|
@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
|
||||||
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
|
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
|
||||||
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
||||||
void rv515_ring_start(struct radeon_device *rdev);
|
void rv515_ring_start(struct radeon_device *rdev);
|
||||||
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
|
|
||||||
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
|
||||||
void rv515_bandwidth_update(struct radeon_device *rdev);
|
void rv515_bandwidth_update(struct radeon_device *rdev);
|
||||||
int rv515_resume(struct radeon_device *rdev);
|
int rv515_resume(struct radeon_device *rdev);
|
||||||
int rv515_suspend(struct radeon_device *rdev);
|
int rv515_suspend(struct radeon_device *rdev);
|
||||||
|
@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
|
||||||
int r600_cs_parse(struct radeon_cs_parser *p);
|
int r600_cs_parse(struct radeon_cs_parser *p);
|
||||||
void r600_fence_ring_emit(struct radeon_device *rdev,
|
void r600_fence_ring_emit(struct radeon_device *rdev,
|
||||||
struct radeon_fence *fence);
|
struct radeon_fence *fence);
|
||||||
int r600_irq_process(struct radeon_device *rdev);
|
|
||||||
int r600_irq_set(struct radeon_device *rdev);
|
|
||||||
bool r600_gpu_is_lockup(struct radeon_device *rdev);
|
bool r600_gpu_is_lockup(struct radeon_device *rdev);
|
||||||
int r600_asic_reset(struct radeon_device *rdev);
|
int r600_asic_reset(struct radeon_device *rdev);
|
||||||
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
|
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
|
||||||
uint32_t tiling_flags, uint32_t pitch,
|
uint32_t tiling_flags, uint32_t pitch,
|
||||||
uint32_t offset, uint32_t obj_size);
|
uint32_t offset, uint32_t obj_size);
|
||||||
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
|
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
|
||||||
|
int r600_ib_test(struct radeon_device *rdev);
|
||||||
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
|
||||||
int r600_ring_test(struct radeon_device *rdev);
|
int r600_ring_test(struct radeon_device *rdev);
|
||||||
int r600_copy_blit(struct radeon_device *rdev,
|
int r600_copy_blit(struct radeon_device *rdev,
|
||||||
|
@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
|
||||||
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
|
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
|
||||||
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
|
||||||
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
|
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
|
||||||
|
bool r600_card_posted(struct radeon_device *rdev);
|
||||||
|
void r600_cp_stop(struct radeon_device *rdev);
|
||||||
|
int r600_cp_start(struct radeon_device *rdev);
|
||||||
|
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
|
||||||
|
int r600_cp_resume(struct radeon_device *rdev);
|
||||||
|
void r600_cp_fini(struct radeon_device *rdev);
|
||||||
|
int r600_count_pipe_bits(uint32_t val);
|
||||||
|
int r600_mc_wait_for_idle(struct radeon_device *rdev);
|
||||||
|
int r600_pcie_gart_init(struct radeon_device *rdev);
|
||||||
|
void r600_scratch_init(struct radeon_device *rdev);
|
||||||
|
int r600_blit_init(struct radeon_device *rdev);
|
||||||
|
void r600_blit_fini(struct radeon_device *rdev);
|
||||||
|
int r600_init_microcode(struct radeon_device *rdev);
|
||||||
|
/* r600 irq */
|
||||||
|
int r600_irq_process(struct radeon_device *rdev);
|
||||||
|
int r600_irq_init(struct radeon_device *rdev);
|
||||||
|
void r600_irq_fini(struct radeon_device *rdev);
|
||||||
|
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
|
||||||
|
int r600_irq_set(struct radeon_device *rdev);
|
||||||
|
void r600_irq_suspend(struct radeon_device *rdev);
|
||||||
|
void r600_disable_interrupts(struct radeon_device *rdev);
|
||||||
|
void r600_rlc_stop(struct radeon_device *rdev);
|
||||||
|
/* r600 audio */
|
||||||
|
int r600_audio_init(struct radeon_device *rdev);
|
||||||
|
int r600_audio_tmds_index(struct drm_encoder *encoder);
|
||||||
|
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
|
||||||
|
int r600_audio_channels(struct radeon_device *rdev);
|
||||||
|
int r600_audio_bits_per_sample(struct radeon_device *rdev);
|
||||||
|
int r600_audio_rate(struct radeon_device *rdev);
|
||||||
|
uint8_t r600_audio_status_bits(struct radeon_device *rdev);
|
||||||
|
uint8_t r600_audio_category_code(struct radeon_device *rdev);
|
||||||
|
void r600_audio_schedule_polling(struct radeon_device *rdev);
|
||||||
|
void r600_audio_enable_polling(struct drm_encoder *encoder);
|
||||||
|
void r600_audio_disable_polling(struct drm_encoder *encoder);
|
||||||
|
void r600_audio_fini(struct radeon_device *rdev);
|
||||||
|
void r600_hdmi_init(struct drm_encoder *encoder);
|
||||||
|
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
|
||||||
|
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
|
||||||
|
/* r600 blit */
|
||||||
|
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
|
||||||
|
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
|
||||||
|
void r600_kms_blit_copy(struct radeon_device *rdev,
|
||||||
|
u64 src_gpu_addr, u64 dst_gpu_addr,
|
||||||
|
int size_bytes);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* rv770,rv730,rv710,rv740
|
* rv770,rv730,rv710,rv740
|
||||||
|
@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
|
||||||
void rv770_fini(struct radeon_device *rdev);
|
void rv770_fini(struct radeon_device *rdev);
|
||||||
int rv770_suspend(struct radeon_device *rdev);
|
int rv770_suspend(struct radeon_device *rdev);
|
||||||
int rv770_resume(struct radeon_device *rdev);
|
int rv770_resume(struct radeon_device *rdev);
|
||||||
extern void rv770_pm_misc(struct radeon_device *rdev);
|
void rv770_pm_misc(struct radeon_device *rdev);
|
||||||
extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
||||||
|
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
|
||||||
|
void r700_cp_stop(struct radeon_device *rdev);
|
||||||
|
void r700_cp_fini(struct radeon_device *rdev);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* evergreen
|
* evergreen
|
||||||
*/
|
*/
|
||||||
|
struct evergreen_mc_save {
|
||||||
|
u32 vga_control[6];
|
||||||
|
u32 vga_render_control;
|
||||||
|
u32 vga_hdp_control;
|
||||||
|
u32 crtc_control[6];
|
||||||
|
};
|
||||||
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
|
||||||
int evergreen_init(struct radeon_device *rdev);
|
int evergreen_init(struct radeon_device *rdev);
|
||||||
void evergreen_fini(struct radeon_device *rdev);
|
void evergreen_fini(struct radeon_device *rdev);
|
||||||
|
@ -374,5 +419,15 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
|
||||||
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
|
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
|
||||||
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
|
||||||
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
|
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
|
||||||
|
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
|
||||||
|
int evergreen_blit_init(struct radeon_device *rdev);
|
||||||
|
void evergreen_blit_fini(struct radeon_device *rdev);
|
||||||
|
/* evergreen blit */
|
||||||
|
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
|
||||||
|
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
|
||||||
|
void evergreen_kms_blit_copy(struct radeon_device *rdev,
|
||||||
|
u64 src_gpu_addr, u64 dst_gpu_addr,
|
||||||
|
int size_bytes);
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 
 	size = bsize;
 	n = 1024;
-	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			return -ENOENT;
 		}
 		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
 		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.wdomain = r->write_domain;
 		p->relocs[i].lobj.rdomain = r->read_domains;
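Note: from here on every gobj->driver_private dereference becomes a gem_to_radeon_bo() call. A minimal sketch of what such a helper looks like, assuming the embedded GEM object member is named gem_base as the radeon_object.c hunks further down suggest:

/* Sketch: resolves an embedded drm_gem_object back to its containing radeon_bo. */
static inline struct radeon_bo *gem_to_radeon_bo(struct drm_gem_object *gobj)
{
	return container_of(gobj, struct radeon_bo, gem_base);
}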
@@ -184,7 +184,7 @@ int radeon_wb_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -860,7 +860,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
-		robj = rfb->obj->driver_private;
+		robj = gem_to_radeon_bo(rfb->obj);
 		/* don't unpin kernel fb objects */
 		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 			r = radeon_bo_reserve(robj, false);
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
 	obj = old_radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	work->old_rbo = rbo;
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 
 	/* pin the new buffer */
 	obj = new_radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
 			 work->old_rbo, rbo);
@@ -90,7 +90,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
 
 static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 {
-	struct radeon_bo *rbo = gobj->driver_private;
+	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
 	int ret;
 
 	ret = radeon_bo_reserve(rbo, false);
@@ -128,7 +128,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
 		       aligned_size);
 		return -ENOMEM;
 	}
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 
 	if (fb_tiled)
 		tiling_flags = RADEON_TILING_MACRO;
@@ -202,7 +202,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
 	mode_cmd.depth = sizes->surface_depth;
 
 	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 
 	/* okay we have an object now allocate the framebuffer */
 	info = framebuffer_alloc(0, device);
@@ -403,14 +403,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
 	struct radeon_bo *robj;
 	int size = 0;
 
-	robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+	robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
 	size += radeon_bo_size(robj);
 	return size;
 }
 
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
 {
-	if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
 		return true;
 	return false;
 }
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->gart.table.vram.robj);
 		if (r) {
@@ -32,21 +32,18 @@
 
 int radeon_gem_object_init(struct drm_gem_object *obj)
 {
-	/* we do nothings here */
+	BUG();
 
 	return 0;
 }
 
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
-	struct radeon_bo *robj = gobj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
-	gobj->driver_private = NULL;
 	if (robj) {
 		radeon_bo_unref(&robj);
 	}
-
-	drm_gem_object_release(gobj);
-	kfree(gobj);
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     bool discardable, bool kernel,
 			     struct drm_gem_object **obj)
 {
-	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
 	int r;
 
 	*obj = NULL;
-	gobj = drm_gem_object_alloc(rdev->ddev, size);
-	if (!gobj) {
-		return -ENOMEM;
-	}
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
 				  size, initial_domain, alignment, r);
-		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
-	gobj->driver_private = robj;
-	*obj = gobj;
+	*obj = &robj->gem_base;
+
+	mutex_lock(&rdev->gem.mutex);
+	list_add_tail(&robj->list, &rdev->gem.objects);
+	mutex_unlock(&rdev->gem.mutex);
+
 	return 0;
 }
 
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr)
 {
-	struct radeon_bo *robj = obj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(obj);
 	int r;
 
 	r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 
 void radeon_gem_object_unpin(struct drm_gem_object *obj)
 {
-	struct radeon_bo *robj = obj->driver_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(obj);
 	int r;
 
 	r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	int r;
 
 	/* FIXME: reeimplement */
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	/* work out where to validate the buffer to */
 	domain = wdomain;
 	if (!domain) {
@@ -228,7 +223,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
@@ -247,7 +242,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	*offset_p = radeon_bo_mmap_offset(robj);
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
@@ -274,7 +269,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, &cur_placement, true);
 	switch (cur_placement) {
 	case TTM_PL_VRAM:
@@ -304,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL) {
 		return -ENOENT;
 	}
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_wait(robj, NULL, false);
 	/* callback hw specific functions if any */
 	if (robj->rdev->asic->ioctl_wait_idle)
@@ -325,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	robj = gobj->driver_private;
+	robj = gem_to_radeon_bo(gobj);
 	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
@@ -343,7 +338,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-	rbo = gobj->driver_private;
+	rbo = gem_to_radeon_bo(gobj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		goto out;
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
 	/* Pin framebuffer & get tilling informations */
 	obj = radeon_fb->obj;
-	rbo = obj->driver_private;
+	rbo = gem_to_radeon_bo(obj);
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -520,7 +520,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
-		rbo = radeon_fb->obj->driver_private;
+		rbo = gem_to_radeon_bo(radeon_fb->obj);
 		r = radeon_bo_reserve(rbo, false);
 		if (unlikely(r != 0))
 			return r;
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	list_del_init(&bo->list);
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
+	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
 
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	rbo->placement.num_busy_placement = c;
 }
 
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
 		     struct radeon_bo **bo_ptr)
 {
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	unsigned long max_size = 0;
 	int r;
 
+	size = ALIGN(size, PAGE_SIZE);
+
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
 		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 	}
@@ -118,8 +121,13 @@ retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
+	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+	if (unlikely(r)) {
+		kfree(bo);
+		return r;
+	}
 	bo->rdev = rdev;
-	bo->gobj = gobj;
+	bo->gem_base.driver_private = NULL;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
 	radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
 		return r;
 	}
 	*bo_ptr = bo;
-	if (gobj) {
-		mutex_lock(&bo->rdev->gem.mutex);
-		list_add_tail(&bo->list, &rdev->gem.objects);
-		mutex_unlock(&bo->rdev->gem.mutex);
-	}
 	trace_radeon_bo_create(bo);
 
 	return 0;
 }
 
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
 void radeon_bo_force_delete(struct radeon_device *rdev)
 {
 	struct radeon_bo *bo, *n;
-	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
 	dev_err(rdev->dev, "Userspace still has active objects !\n");
 	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = bo->gobj;
 		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-			gobj, bo, (unsigned long)gobj->size,
-			*((unsigned long *)&gobj->refcount));
+			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+			*((unsigned long *)&bo->gem_base.refcount));
 		mutex_lock(&bo->rdev->gem.mutex);
 		list_del_init(&bo->list);
 		mutex_unlock(&bo->rdev->gem.mutex);
 		radeon_bo_unref(&bo);
-		gobj->driver_private = NULL;
-		drm_gem_object_unreference(gobj);
+		drm_gem_object_unreference(&bo->gem_base);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
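Note: with radeon_bo_create() now calling drm_gem_object_init() itself, the GEM object lives inside the buffer object instead of being allocated separately by the GEM layer. A simplified sketch of the resulting layout; only gem_base comes from this patch, the remaining fields are abridged placeholders for illustration:

/* Sketch of the embedded-object layout; the real struct has more fields. */
struct radeon_bo {
	struct ttm_buffer_object	tbo;		/* TTM backing object */
	struct drm_gem_object		gem_base;	/* embedded GEM object, replaces ->gobj */
	struct list_head		list;		/* membership in rdev->gem.objects */
	struct radeon_device		*rdev;
	/* ... placement, tiling and surface-register state ... */
};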
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
 }
 
 extern int radeon_bo_create(struct radeon_device *rdev,
-				struct drm_gem_object *gobj, unsigned long size,
-				int byte_align,
-				bool kernel, u32 domain,
-				struct radeon_bo **bo_ptr);
+				unsigned long size, int byte_align,
+				bool kernel, u32 domain,
+				struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
 extern void radeon_bo_unref(struct radeon_bo **bo);
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return 0;
 	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
 	/* Allocate 1M object buffer */
-	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
 	if (r) {
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->cp.ring_obj);
 		if (r) {
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		goto out_cleanup;
 	}
 
-	r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
@@ -530,7 +530,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
 	if (r) {
@@ -999,7 +999,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
 	u64 gpu_addr;
 
 	if (rdev->vram_scratch.robj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->vram_scratch.robj);
 		if (r) {
@@ -42,23 +42,25 @@
 #endif
 
 struct drm_mm_node {
-	struct list_head free_stack;
 	struct list_head node_list;
-	unsigned free : 1;
+	struct list_head hole_stack;
+	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
 	unsigned scanned_next_free : 1;
+	unsigned scanned_preceeds_hole : 1;
+	unsigned allocated : 1;
 	unsigned long start;
 	unsigned long size;
 	struct drm_mm *mm;
 };
 
 struct drm_mm {
-	/* List of free memory blocks, most recently freed ordered. */
-	struct list_head free_stack;
-	/* List of all memory nodes, ordered according to the (increasing) start
-	 * address of the memory node. */
-	struct list_head node_list;
+	/* List of all memory nodes that immediatly preceed a free hole. */
+	struct list_head hole_stack;
+	/* head_node.node_list is the list of all memory nodes, ordered
+	 * according to the (increasing) start address of the memory node. */
+	struct drm_mm_node head_node;
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
@@ -70,8 +72,28 @@ struct drm_mm {
 	unsigned scanned_blocks;
 	unsigned long scan_start;
 	unsigned long scan_end;
+	struct drm_mm_node *prev_scanned_node;
 };
 
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+	return node->allocated;
+}
+
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+	return mm->hole_stack.next;
+}
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+						&(mm)->head_node.node_list, \
+						node_list);
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+	for (entry = (mm)->prev_scanned_node, \
+		next = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL; \
+	     entry != NULL; entry = next, \
+		next = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL) \
 /*
  * Basic range manager support (drm_mm.c)
  */
@@ -118,7 +140,15 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
 	return drm_mm_get_block_range_generic(parent, size, alignment,
						start, end, 1);
 }
+extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+			      unsigned long size, unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long start, unsigned long end);
 extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 					      unsigned long size,
 					      unsigned alignment,
@@ -134,11 +164,6 @@ extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
 		       unsigned long size);
 extern void drm_mm_takedown(struct drm_mm *mm);
 extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
-					 unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
-				    unsigned long size, int atomic);
 extern int drm_mm_pre_get(struct drm_mm *mm);
 
 static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
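Note: the new drm_mm_insert_node()/drm_mm_remove_node() pair lets a driver embed the range-manager node in its own object instead of going through drm_mm_search_free()/drm_mm_get_block()/drm_mm_put_block(). A usage sketch under assumed names; struct my_buffer and the two helpers are illustrative and not part of the patch:

/* Illustrative embedding of a drm_mm_node in a driver object. */
struct my_buffer {
	struct drm_mm_node node;	/* embedded; no per-allocation kmalloc needed */
	/* ... driver-specific payload ... */
};

static int my_buffer_grab_range(struct drm_mm *mm, struct my_buffer *buf,
				unsigned long size, unsigned alignment)
{
	/* On success, buf->node holds the assigned start/size. */
	return drm_mm_insert_node(mm, &buf->node, size, alignment);
}

static void my_buffer_release_range(struct my_buffer *buf)
{
	if (drm_mm_node_allocated(&buf->node))
		drm_mm_remove_node(&buf->node);
}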