drm/i915: Thread the pipelining ring through the callers.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 576ae4b8e4
commit 919926aeb3

4 changed files with 18 additions and 20 deletions
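The change is mechanical across all four files: every "bool pipelined" parameter becomes a "struct intel_ring_buffer *pipelined", so a caller names the ring it intends to pipeline against, or passes NULL for a fully synchronous path. A minimal compilable sketch of the before/after shape, using stand-in function names rather than the driver's own (illustration only, not the kernel code):

/* Stand-in declarations for illustration; the real definitions live in
 * the i915 driver headers. */
struct intel_ring_buffer;
struct drm_i915_gem_object;

/* Before: the callee only knew whether the access was pipelined at all. */
int set_to_display_plane_old(struct drm_i915_gem_object *obj,
			     int pipelined /* bool in the old code */);

/* After: the caller threads through the specific ring it will pipeline
 * against, or NULL to request a blocking (non-pipelined) transition. */
int set_to_display_plane_new(struct drm_i915_gem_object *obj,
			     struct intel_ring_buffer *pipelined);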
drivers/gpu/drm/i915/i915_drv.h

@@ -1133,7 +1133,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				       int write);
 int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
-					 bool pipelined);
+					 struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
drivers/gpu/drm/i915/i915_gem.c

@@ -42,11 +42,11 @@ struct change_domains {
 };

 static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
-						   bool pipelined);
+						   struct intel_ring_buffer *pipelined);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					      int write);
+					      bool write);
 static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
@@ -1274,12 +1274,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	mutex_lock(&dev->struct_mutex);
 	BUG_ON(obj->pin_count && !obj->pin_mappable);

-	if (obj->gtt_space) {
-		if (!obj->map_and_fenceable) {
-			ret = i915_gem_object_unbind(obj);
-			if (ret)
-				goto unlock;
-		}
+	if (!obj->map_and_fenceable) {
+		ret = i915_gem_object_unbind(obj);
+		if (ret)
+			goto unlock;
 	}

 	if (!obj->gtt_space) {
@@ -2637,7 +2635,7 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
 	if (reg->gpu) {
 		int ret;

-		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
 		if (ret)
			return ret;

@@ -2817,7 +2815,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
-				       bool pipelined)
+				       struct intel_ring_buffer *pipelined)
 {
 	struct drm_device *dev = obj->base.dev;

@@ -2828,7 +2826,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
 	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 	BUG_ON(obj->base.write_domain);

-	if (pipelined)
+	if (pipelined && pipelined == obj->ring)
		return 0;

 	return i915_gem_object_wait_rendering(obj, true);
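The hunk above is where the extra information pays off: previously any pipelined flush skipped the wait, whereas now the wait is skipped only when the object's outstanding rendering is on the same ring the caller will pipeline against; work pending on a different ring still goes through i915_gem_object_wait_rendering(). A hedged sketch of that decision with simplified stand-in types (not the driver's structures):

/* Sketch of the skip-the-wait decision; names are stand-ins. */
struct ring;				/* stands in for struct intel_ring_buffer */

struct gem_obj {
	struct ring *ring;		/* ring with outstanding writes to this object */
	unsigned int write_domain;	/* nonzero while a GPU write is pending */
};

/* Returns nonzero when the caller must block on outstanding rendering
 * before using the object. */
static int must_wait_for_rendering(const struct gem_obj *obj,
				   const struct ring *pipelined)
{
	if (obj->write_domain == 0)
		return 0;		/* nothing pending */

	/* Commands on a single ring execute in order, so a consumer on
	 * the same ring can simply be queued behind the flush.  A
	 * different ring, or a synchronous caller passing NULL, has no
	 * such ordering guarantee and must wait. */
	if (pipelined && pipelined == obj->ring)
		return 0;

	return 1;
}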
@@ -2892,7 +2890,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
 	if (obj->gtt_space == NULL)
		return -EINVAL;

-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
 	if (ret != 0)
		return ret;

@@ -2931,7 +2929,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
  */
 int
 i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
-				     bool pipelined)
+				     struct intel_ring_buffer *pipelined)
 {
 	uint32_t old_read_domains;
 	int ret;
@@ -2940,7 +2938,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
		return -EINVAL;

-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+	ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
 	if (ret)
		return ret;

@@ -2984,7 +2982,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
  * flushes to occur.
  */
 static int
-i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
drivers/gpu/drm/i915/intel_display.c

@@ -1434,7 +1434,7 @@ out_disable:
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
-			   bool pipelined)
+			   struct intel_ring_buffer *pipelined)
 {
 	u32 alignment;
 	int ret;
@@ -1594,7 +1594,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
-					 false);
+					 NULL);
 	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
@@ -5092,7 +5092,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;

 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring);
 	if (ret)
		goto cleanup_work;

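The two call sites above show the caller-side convention: intel_pipe_set_base passes NULL, forcing a blocking wait on any outstanding rendering, while intel_crtc_page_flip passes &dev_priv->render_ring so the flip can be pipelined behind rendering already queued on that ring. A minimal caller sketch under the same reading; pin_and_fence_stub and the object type are illustrative stand-ins, not intel_pin_and_fence_fb_obj itself:

#include <stddef.h>

struct intel_ring_buffer;
struct framebuffer_obj;

int pin_and_fence_stub(struct framebuffer_obj *obj,
		       struct intel_ring_buffer *pipelined);

/* Modeset-style caller: nothing to pipeline against, so pass NULL and
 * block until outstanding rendering has completed. */
int modeset_path(struct framebuffer_obj *obj)
{
	return pin_and_fence_stub(obj, NULL);
}

/* Page-flip-style caller: the flip is queued on the render ring, so
 * rendering already pending on that same ring needs no CPU-side wait. */
int page_flip_path(struct framebuffer_obj *obj,
		   struct intel_ring_buffer *render_ring)
{
	return pin_and_fence_stub(obj, render_ring);
}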
drivers/gpu/drm/i915/intel_drv.h

@@ -301,7 +301,7 @@ extern void intel_init_emon(struct drm_device *dev);

 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
				      struct drm_i915_gem_object *obj,
-				      bool pipelined);
+				      struct intel_ring_buffer *pipelined);

 extern int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,