Merge branch 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux into drm-next

This is the first radeon and amdgpu pull for drm-next. Highlights include:
- Efficiency improvements to the CS checker for pre-SI asics
- Cursor fixes ported from radeon to amdgpu
- Enable GPU scheduler by default
- Add a bunch of GPUVM debugging options
- Add support for some new atombios opcodes
- Misc cleanups and fixes

* 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux: (42 commits)
  drm/amdgpu: fix lockup when clean pending fences
  drm/amdgpu: add timer to fence to detect scheduler lockup
  drm/amdgpu: add VM CS mapping trace point
  drm/amdgpu: add option to clear VM page tables after every submit
  drm/amdgpu: add option to stop on VM fault
  drm/amdgpu: only print meaningful VM faults
  drm/amdgpu: also trace already allocated VMIDs
  drm/amdgpu: Drop unnecessary #include <linux/vga_switcheroo.h>
  drm/radeon: Drop unnecessary #include <linux/vga_switcheroo.h>
  drm/amdgpu: clean up pageflip interrupt handling
  drm/amdgpu: rework sdma structures
  drm/amdgpu: unpin cursor BOs on suspend and pin them again on resume
  drm/amdgpu/dce8: Fold set_cursor() into show_cursor()
  drm/amdgpu/dce8: Clean up reference counting and pinning of the cursor BOs
  drm/amdgpu/dce8: Move hotspot handling out of set_cursor
  drm/amdgpu/dce8: Re-show the cursor after a modeset (v2)
  drm/amdgpu/dce8: Use cursor_set2 hook for enabling / disabling the HW cursor
  drm/amdgpu/dce11: Fold set_cursor() into show_cursor()
  drm/amdgpu/dce11: Clean up reference counting and pinning of the cursor BOs
  drm/amdgpu/dce11: Move hotspot handling out of set_cursor
  ...
Merged by Dave Airlie on 2015-10-16 09:39:14 +10:00 (commit aa1b36f2bb).
37 changed files with 1075 additions and 821 deletions.


@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
@@ -960,6 +962,11 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_FRAG_64KB (4 << 7)
 #define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER 0
+#define AMDGPU_VM_FAULT_STOP_FIRST 1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+
 struct amdgpu_vm_pt {
 	struct amdgpu_bo *bo;
 	uint64_t addr;
@@ -1708,7 +1715,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
 	/* SDMA firmware */
 	const struct firmware *fw;
 	uint32_t fw_version;
@@ -1718,6 +1725,13 @@
 	bool burst_nop;
 };
 
+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src trap_irq;
+	struct amdgpu_irq_src illegal_inst_irq;
+	int num_instances;
+};
+
 /*
  * Firmware
  */
@@ -2064,9 +2078,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx gfx;
 
 	/* sdma */
-	struct amdgpu_sdma sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src sdma_trap_irq;
-	struct amdgpu_irq_src sdma_illegal_inst_irq;
+	struct amdgpu_sdma sdma;
 
 	/* uvd */
 	bool has_uvd;
@@ -2203,17 +2215,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
 			break;
 
 	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
 	else
 		return NULL;
 }

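The amdgpu.h hunks above are the core of the SDMA rework in this pull: per-engine state moves into struct amdgpu_sdma_instance, and struct amdgpu_sdma becomes a container holding a counted instance array plus the shared trap and illegal-instruction IRQ sources. A minimal user-space sketch of the same ring-to-instance lookup pattern follows; the types and names are simplified stand-ins, not the real amdgpu definitions.

#include <stdio.h>

#define MAX_INSTANCES 2

struct ring { int idx; };

struct sdma_instance {
	struct ring ring;               /* each instance owns one ring */
};

struct sdma {
	struct sdma_instance instance[MAX_INSTANCES];
	int num_instances;              /* how many entries are populated */
};

/* Map a ring pointer back to its owning instance, NULL if it is not an SDMA ring. */
static struct sdma_instance *get_sdma_instance(struct sdma *sdma, struct ring *ring)
{
	int i;

	for (i = 0; i < sdma->num_instances; i++)
		if (&sdma->instance[i].ring == ring)
			return &sdma->instance[i];
	return NULL;
}

int main(void)
{
	struct sdma sdma = { .num_instances = 2 };
	struct ring *r = &sdma.instance[1].ring;

	printf("instance index: %ld\n",
	       (long)(get_sdma_instance(&sdma, r) - sdma.instance));
	return 0;
}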

@@ -25,7 +25,6 @@
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <linux/power_supply.h>
-#include <linux/vga_switcheroo.h>
 #include <acpi/video.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>


@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[0].fw->data;
+			adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[1].fw->data;
+			adev->sdma.instance[1].fw->data;
 		break;
 
 	default:


@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[0].fw->data;
+			adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-			adev->sdma[1].fw->data;
+			adev->sdma.instance[1].fw->data;
 		break;
 
 	default:


@@ -536,7 +536,7 @@ static bool amdgpu_atpx_detect(void)
 	if (has_atpx && vga_count == 2) {
 		acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
-		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+		printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		amdgpu_atpx_priv.atpx_detected = true;
 		return true;


@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "atom.h"
 
-#include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
 /*


@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	case AMDGPU_HW_IP_DMA:
-		if (ring < 2) {
-			*out_ring = &adev->sdma[ring].ring;
+		if (ring < adev->sdma.num_instances) {
+			*out_ring = &adev->sdma.instance[ring].ring;
 		} else {
-			DRM_ERROR("only two SDMA rings are supported\n");
+			DRM_ERROR("only %d SDMA rings are supported\n",
+				  adev->sdma.num_instances);
 			return -EINVAL;
 		}
 		break;
@@ -566,9 +567,24 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 		}
 
 	}
 
-	return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+	r = amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
+
+	if (amdgpu_vm_debug && p->bo_list) {
+		/* Invalidate all BOs to test for userspace bugs */
+		for (i = 0; i < p->bo_list->num_entries; i++) {
+			/* ignore duplicates */
+			bo = p->bo_list->array[i].robj;
+			if (!bo)
+				continue;
+
+			amdgpu_vm_bo_invalidate(adev, bo);
+		}
+	}
+
+	return r;
 }
 
 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,


@@ -1022,7 +1022,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
  * amdgpu_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
- * @state: vga switcheroo state
+ * @state: vga_switcheroo state
  *
  * Callback for the switcheroo driver. Suspends or resumes the
  * the asics before or after it is powered up using ACPI methods.
@@ -1657,11 +1657,21 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	}
 	drm_modeset_unlock_all(dev);
 
-	/* unpin the front buffers */
+	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
 		struct amdgpu_bo *robj;
 
+		if (amdgpu_crtc->cursor_bo) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, false);
+			if (r == 0) {
+				amdgpu_bo_unpin(aobj);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
@@ -1713,6 +1723,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_crtc *crtc;
 	int r;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1746,6 +1757,24 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (r)
 		return r;
 
+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+		if (amdgpu_crtc->cursor_bo) {
+			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+			r = amdgpu_bo_reserve(aobj, false);
+			if (r == 0) {
+				r = amdgpu_bo_pin(aobj,
+						  AMDGPU_GEM_DOMAIN_VRAM,
+						  &amdgpu_crtc->cursor_addr);
+				if (r != 0)
+					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				amdgpu_bo_unreserve(aobj);
+			}
+		}
+	}
+
 	/* blat the mode back in */
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);


@@ -75,11 +75,13 @@ int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
 int amdgpu_vm_size = 8;
 int amdgpu_vm_block_size = -1;
+int amdgpu_vm_fault_stop = 0;
+int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 0;
+int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_enable_semaphores = 1;
+int amdgpu_enable_semaphores = 0;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -141,10 +143,16 @@ module_param_named(vm_size, amdgpu_vm_size, int, 0444);
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
 
+MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
+module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
+
+MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
+module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
+
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))");
+MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
 module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
 
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
@@ -153,7 +161,7 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
-MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
 module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
 
 static struct pci_device_id pciidlist[] = {

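The new vm_fault_stop parameter pairs with the AMDGPU_VM_FAULT_STOP_* values added to amdgpu.h earlier: 0 = never stop (default), 1 = stop on the first fault, 2 = stop on every fault, per the parameter description. Below is a hedged user-space sketch of one plausible way such a policy could be applied; it is illustrative only and is not the driver's actual fault handling.

#include <stdbool.h>
#include <stdio.h>

enum {
	VM_FAULT_STOP_NEVER  = 0,       /* keep going, just report */
	VM_FAULT_STOP_FIRST  = 1,       /* halt on the first fault only */
	VM_FAULT_STOP_ALWAYS = 2,       /* halt on every fault */
};

static int vm_fault_stop = VM_FAULT_STOP_FIRST;   /* module parameter stand-in */
static bool seen_first_fault;

/* Decide whether a VM fault should halt the engine under the current policy. */
static bool should_stop_on_fault(void)
{
	switch (vm_fault_stop) {
	case VM_FAULT_STOP_ALWAYS:
		return true;
	case VM_FAULT_STOP_FIRST:
		if (!seen_first_fault) {
			seen_first_fault = true;
			return true;
		}
		return false;
	default:
		return false;
	}
}

int main(void)
{
	printf("first fault stops:  %d\n", should_stop_on_fault());
	printf("second fault stops: %d\n", should_stop_on_fault());
	return 0;
}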

@@ -628,8 +628,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
 	if (amdgpu_enable_scheduler) {
+		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+		if (timeout == 0) {
+			/*
+			 * FIXME:
+			 * Delayed workqueue cannot use it directly,
+			 * so the scheduler will not use delayed workqueue if
+			 * MAX_SCHEDULE_TIMEOUT is set.
+			 * Currently keep it simple and silly.
+			 */
+			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
 		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-				   amdgpu_sched_hw_submission, ring->name);
+				   amdgpu_sched_hw_submission,
+				   timeout, ring->name);
 		if (r) {
 			DRM_ERROR("Failed to create scheduler on ring %s.\n",
 				  ring->name);

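The scheduler timeout passed to amd_sched_init() above is derived from the existing lockup_timeout module parameter, and a value of 0 (timeout disabled) is mapped to MAX_SCHEDULE_TIMEOUT, as the FIXME explains. A small user-space sketch of that conversion follows; HZ, the helper, and the constant are stand-ins for the kernel definitions.

#include <limits.h>
#include <stdio.h>

#define HZ 250                          /* assumed tick rate for this sketch */
#define MAX_SCHEDULE_TIMEOUT LONG_MAX   /* kernel convention for "wait forever" */

/* user-space stand-in for msecs_to_jiffies() */
static long msecs_to_jiffies(unsigned int ms)
{
	return (long)ms * HZ / 1000;
}

int main(void)
{
	unsigned int lockup_timeout_ms = 0;     /* 0 means the lockup timeout is disabled */
	long timeout = msecs_to_jiffies(lockup_timeout_ms);

	if (timeout == 0)
		timeout = MAX_SCHEDULE_TIMEOUT; /* same fallback as the hunk above */

	printf("scheduler timeout in jiffies: %ld\n", timeout);
	return 0;
}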

@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
-		ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-		ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 1;
 		break;
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_SDMA:
-		if (info->query_fw.index >= 2)
+		if (info->query_fw.index >= adev->sdma.num_instances)
 			return -EINVAL;
-		fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-		fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+		fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+		fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
 		break;
 	default:
 		return -EINVAL;
@@ -489,7 +489,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 *
 * @dev: drm dev pointer
 *
- * Switch vga switcheroo state after last close (all asics).
+ * Switch vga_switcheroo state after last close (all asics).
 */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {


@@ -373,6 +373,10 @@ struct amdgpu_crtc {
 	uint32_t crtc_offset;
 	struct drm_gem_object *cursor_bo;
 	uint64_t cursor_addr;
+	int cursor_x;
+	int cursor_y;
+	int cursor_hot_x;
+	int cursor_hot_y;
 	int cursor_width;
 	int cursor_height;
 	int max_cursor_width;

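The new cursor_x/cursor_y and cursor_hot_x/cursor_hot_y fields let the driver re-show the cursor after a modeset and keep the visible position stable when user space changes the hotspot: the programmed position is shifted by the old-minus-new hotspot delta, which is the arithmetic the dce_v*_crtc_cursor_set2() hunks later in this diff perform. A standalone sketch of that adjustment (the names here are illustrative, not the driver's):

#include <stdio.h>

struct cursor_state {
	int x, y;               /* last programmed CRTC position */
	int hot_x, hot_y;       /* hotspot in effect when x/y were programmed */
};

/*
 * When the hotspot moves, shift the programmed position by the
 * old-minus-new delta so the pixel under the pointer stays put.
 */
static void update_hotspot(struct cursor_state *c, int new_hot_x, int new_hot_y)
{
	if (new_hot_x != c->hot_x || new_hot_y != c->hot_y) {
		c->x += c->hot_x - new_hot_x;
		c->y += c->hot_y - new_hot_y;
		c->hot_x = new_hot_x;
		c->hot_y = new_hot_y;
	}
}

int main(void)
{
	struct cursor_state c = { .x = 100, .y = 80, .hot_x = 0, .hot_y = 0 };

	update_hotspot(&c, 8, 8);       /* hotspot moves 8,8 into the image */
	printf("new position: %d,%d\n", c.x, c.y);      /* 92,72 */
	return 0;
}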

@@ -132,6 +132,8 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 		placements[c].fpfn = 0;
 		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 			TTM_PL_FLAG_VRAM;
+		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
+			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {


@@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);


@@ -111,7 +111,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 		      __entry->offset, __entry->flags)
 );
 
-TRACE_EVENT(amdgpu_vm_bo_update,
+DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
 	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
 	    TP_ARGS(mapping),
 	    TP_STRUCT__entry(
@@ -129,6 +129,16 @@ TRACE_EVENT(amdgpu_vm_bo_update,
 		      __entry->soffset, __entry->eoffset, __entry->flags)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_update,
+	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(mapping)
+);
+
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
+	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_page,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
 		     uint32_t incr, uint32_t flags),


@@ -1072,6 +1072,11 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
 	spin_lock(&glob->lru_lock);
 	ret = drm_mm_dump_table(m, mm);
 	spin_unlock(&glob->lru_lock);
+	if (ttm_pl == TTM_PL_VRAM)
+		seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+			   adev->mman.bdev.man[ttm_pl].size,
+			   atomic64_read(&adev->vram_usage) >> 20,
+			   atomic64_read(&adev->vram_vis_usage) >> 20);
 	return ret;
 }


@@ -147,8 +147,10 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	/* check if the id is still valid */
 	if (vm_id->id && vm_id->last_id_use &&
-	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
+	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
+		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
 		return 0;
+	}
 
 	/* we definately need to flush */
 	vm_id->pd_gpu_addr = ~0ll;
@@ -850,6 +852,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			return r;
 	}
 
+	if (trace_amdgpu_vm_bo_mapping_enabled()) {
+		list_for_each_entry(mapping, &bo_va->valids, list)
+			trace_amdgpu_vm_bo_mapping(mapping);
+
+		list_for_each_entry(mapping, &bo_va->invalids, list)
+			trace_amdgpu_vm_bo_mapping(mapping);
+	}
+
 	spin_lock(&vm->status_lock);
 	list_splice_init(&bo_va->invalids, &bo_va->valids);
 	list_del_init(&bo_va->vm_status);


@@ -685,6 +685,27 @@ static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
 	}
 }
 
+static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint64_t val64;
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+
+	SDEBUG(" src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG(" src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	if (src != 0) {
+		val64 = dst;
+		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
+		do_div(val64, src);
+		ctx->ctx->divmul[0] = lower_32_bits(val64);
+		ctx->ctx->divmul[1] = upper_32_bits(val64);
+	} else {
+		ctx->ctx->divmul[0] = 0;
+		ctx->ctx->divmul[1] = 0;
+	}
+}
+
 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
 {
 	/* functionally, a nop */
@@ -788,6 +809,20 @@ static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
 	ctx->ctx->divmul[0] = dst * src;
 }
 
+static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint64_t val64;
+	uint8_t attr = U8((*ptr)++);
+	uint32_t dst, src;
+
+	SDEBUG(" src1: ");
+	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
+	SDEBUG(" src2: ");
+	src = atom_get_src(ctx, attr, ptr);
+	val64 = (uint64_t)dst * (uint64_t)src;
+	ctx->ctx->divmul[0] = lower_32_bits(val64);
+	ctx->ctx->divmul[1] = upper_32_bits(val64);
+}
+
 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
 {
 	/* nothing */
@@ -1022,7 +1057,15 @@ static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
 
 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
 {
-	printk(KERN_INFO "unimplemented!\n");
+	uint8_t val = U8((*ptr)++);
+	SDEBUG("DEBUG output: 0x%02X\n", val);
+}
+
+static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
+{
+	uint16_t val = U16(*ptr);
+	(*ptr) += val + 2;
+	SDEBUG("PROCESSDS output: 0x%02X\n", val);
 }
 
 static struct {
@@ -1151,7 +1194,13 @@ static struct {
 	atom_op_shr, ATOM_ARG_FB}, {
 	atom_op_shr, ATOM_ARG_PLL}, {
 	atom_op_shr, ATOM_ARG_MC}, {
-	atom_op_debug, 0},};
+	atom_op_debug, 0}, {
+	atom_op_processds, 0}, {
+	atom_op_mul32, ATOM_ARG_PS}, {
+	atom_op_mul32, ATOM_ARG_WS}, {
+	atom_op_div32, ATOM_ARG_PS}, {
+	atom_op_div32, ATOM_ARG_WS},
+};
 
 static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
 {

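The new MUL32 and DIV32 opcodes work on full 32-bit operands and keep a 64-bit intermediate split across the interpreter's divmul[0]/divmul[1] pair (low word and high word); DIV32 likewise takes divmul[1] as the high word of its dividend. The following user-space illustration of that register-pair convention uses plain 64-bit division instead of do_div(), and the names are stand-ins rather than the atom interpreter's:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the interpreter's divmul[] result registers */
static uint32_t divmul[2];

/* MUL32: full 64-bit product, low word in divmul[0], high word in divmul[1] */
static void mul32(uint32_t dst, uint32_t src)
{
	uint64_t val64 = (uint64_t)dst * (uint64_t)src;

	divmul[0] = (uint32_t)val64;
	divmul[1] = (uint32_t)(val64 >> 32);
}

/* DIV32: divide the 64-bit value (divmul[1]:dst) by src, quotient back into divmul[] */
static void div32(uint32_t dst, uint32_t src)
{
	uint64_t val64 = ((uint64_t)divmul[1] << 32) | dst;

	if (src != 0) {
		val64 /= src;                   /* plain division instead of do_div() */
		divmul[0] = (uint32_t)val64;
		divmul[1] = (uint32_t)(val64 >> 32);
	} else {
		divmul[0] = 0;
		divmul[1] = 0;
	}
}

int main(void)
{
	mul32(0x80000000u, 10);                 /* product needs more than 32 bits */
	printf("product  = 0x%08x%08x\n", divmul[1], divmul[0]);
	div32(0, 10);                           /* divide it straight back down */
	printf("quotient = 0x%08x%08x\n", divmul[1], divmul[0]);
	return 0;
}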

@@ -60,7 +60,7 @@
 #define ATOM_CT_PS_MASK 0x7F
 #define ATOM_CT_CODE_PTR 6
 
-#define ATOM_OP_CNT 123
+#define ATOM_OP_CNT 127
 #define ATOM_OP_EOT 91
 #define ATOM_CASE_MAGIC 0x63


@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{ {
const char *chip_name; const char *chip_name;
char fw_name[30]; char fw_name[30];
int err, i; int err = 0, i;
DRM_DEBUG("\n"); DRM_DEBUG("\n");
@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
default: BUG(); default: BUG();
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0) if (i == 0)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
else else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err) if (err)
goto out; goto out;
err = amdgpu_ucode_validate(adev->sdma[i].fw); err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
} }
out: out:
if (err) { if (err) {
printk(KERN_ERR printk(KERN_ERR
"cik_sdma: Failed to load firmware \"%s\"\n", "cik_sdma: Failed to load firmware \"%s\"\n",
fw_name); fw_name);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma[i].fw); release_firmware(adev->sdma.instance[i].fw);
adev->sdma[i].fw = NULL; adev->sdma.instance[i].fw = NULL;
} }
} }
return err; return err;
@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring) static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
} }
@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1; u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
} }
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
int i; int i;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask; u32 ref_and_mask;
if (ring == &ring->adev->sdma[0].ring) if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK; ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK; ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
*/ */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev) static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl; u32 rb_cntl;
int i; int i;
@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
(adev->mman.buffer_funcs_ring == sdma1)) (adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK; rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
cik_sdma_rlc_stop(adev); cik_sdma_rlc_stop(adev);
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable) if (enable)
me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK; me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
u32 wb_offset; u32 wb_offset;
int i, j, r; int i, j, r;
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma[i].ring; ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4); wb_offset = (ring->rptr_offs * 4);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
u32 fw_size; u32 fw_size;
int i, j; int i, j;
if (!adev->sdma[0].fw || !adev->sdma[1].fw)
return -EINVAL;
/* halt the MEs */ /* halt the MEs */
cik_sdma_enable(adev, false); cik_sdma_enable(adev, false);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; if (!adev->sdma.instance[i].fw)
return -EINVAL;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header); amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma[i].feature_version >= 20) if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma[i].burst_nop = true; adev->sdma.instance[i].burst_nop = true;
fw_data = (const __le32 *) fw_data = (const __le32 *)
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); (adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
for (j = 0; j < fw_size; j++) for (j = 0; j < fw_size; j++)
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
} }
return 0; return 0;
@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
*/ */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib) static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
u32 pad_count; u32 pad_count;
int i; int i;
@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
cik_sdma_set_ring_funcs(adev); cik_sdma_set_ring_funcs(adev);
cik_sdma_set_irq_funcs(adev); cik_sdma_set_irq_funcs(adev);
cik_sdma_set_buffer_funcs(adev); cik_sdma_set_buffer_funcs(adev);
@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
{ {
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r; int r, i;
r = cik_sdma_init_microcode(adev); r = cik_sdma_init_microcode(adev);
if (r) { if (r) {
@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
} }
/* SDMA trap event */ /* SDMA trap event */
r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
ring = &adev->sdma[0].ring; for (i = 0; i < adev->sdma.num_instances; i++) {
ring->ring_obj = NULL; ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring = &adev->sdma[1].ring; sprintf(ring->name, "sdma%d", i);
ring->ring_obj = NULL; r = amdgpu_ring_init(adev, ring, 256 * 1024,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
ring = &adev->sdma[0].ring; &adev->sdma.trap_irq,
sprintf(ring->name, "sdma0"); (i == 0) ?
r = amdgpu_ring_init(adev, ring, 256 * 1024, AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, AMDGPU_RING_TYPE_SDMA);
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, if (r)
AMDGPU_RING_TYPE_SDMA); return r;
if (r) }
return r;
ring = &adev->sdma[1].ring;
sprintf(ring->name, "sdma1");
r = amdgpu_ring_init(adev, ring, 256 * 1024,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_RING_TYPE_SDMA);
if (r)
return r;
return r; return r;
} }
@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
static int cik_sdma_sw_fini(void *handle) static int cik_sdma_sw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
amdgpu_ring_fini(&adev->sdma[0].ring); for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma[1].ring); amdgpu_ring_fini(&adev->sdma.instance[i].ring);
return 0; return 0;
} }
@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
dev_info(adev->dev, "CIK SDMA registers\n"); dev_info(adev->dev, "CIK SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2)); RREG32(mmSRBM_STATUS2));
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n", dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
case 0: case 0:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[0].ring); amdgpu_fence_process(&adev->sdma.instance[0].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
case 1: case 1:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[1].ring); amdgpu_fence_process(&adev->sdma.instance[1].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs; int i;
adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
for (i = 0; i < adev->sdma.num_instances; i++)
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = { static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev) static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs; adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
} }
/** /**
@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{ {
if (adev->mman.buffer_funcs == NULL) { if (adev->mman.buffer_funcs == NULL) {
adev->mman.buffer_funcs = &cik_sdma_buffer_funcs; adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
} }
} }
@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{ {
if (adev->vm_manager.vm_pte_funcs == NULL) { if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
} }
} }


@ -2499,26 +2499,19 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp; u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
} }
static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
uint64_t gpu_addr) int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(gpu_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(gpu_addr));
}
static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
@ -2538,26 +2531,40 @@ static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
y = 0; y = 0;
} }
dce_v10_0_lock_cursor(crtc, true);
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
dce_v10_0_lock_cursor(crtc, false);
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
return 0; return 0;
} }
static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc, static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
struct drm_file *file_priv, int x, int y)
uint32_t handle, {
uint32_t width, int ret;
uint32_t height)
dce_v10_0_lock_cursor(crtc, true);
ret = dce_v10_0_cursor_move_locked(crtc, x, y);
dce_v10_0_lock_cursor(crtc, false);
return ret;
}
static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height,
int32_t hot_x,
int32_t hot_y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct amdgpu_bo *robj; struct amdgpu_bo *aobj;
uint64_t gpu_addr;
int ret; int ret;
if (!handle) { if (!handle) {
@ -2579,41 +2586,71 @@ static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT; return -ENOENT;
} }
robj = gem_to_amdgpu_bo(obj); aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (unlikely(ret != 0)) if (ret != 0) {
goto fail; drm_gem_object_unreference_unlocked(obj);
ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, return ret;
0, 0, &gpu_addr); }
amdgpu_bo_unreserve(robj);
if (ret) ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
goto fail; amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height; amdgpu_crtc->cursor_height = height;
dce_v10_0_lock_cursor(crtc, true); dce_v10_0_lock_cursor(crtc, true);
dce_v10_0_set_cursor(crtc, obj, gpu_addr);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
dce_v10_0_cursor_move_locked(crtc, x, y);
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v10_0_show_cursor(crtc); dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false); dce_v10_0_lock_cursor(crtc, false);
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(robj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(robj); amdgpu_bo_unreserve(aobj);
} }
drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
} }
amdgpu_crtc->cursor_bo = obj; amdgpu_crtc->cursor_bo = obj;
return 0; return 0;
fail: }
drm_gem_object_unreference_unlocked(obj);
return ret; static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
}
} }
static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@ -2641,7 +2678,7 @@ static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
} }
static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.cursor_set = dce_v10_0_crtc_cursor_set, .cursor_set2 = dce_v10_0_crtc_cursor_set2,
.cursor_move = dce_v10_0_crtc_cursor_move, .cursor_move = dce_v10_0_crtc_cursor_move,
.gamma_set = dce_v10_0_crtc_gamma_set, .gamma_set = dce_v10_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config, .set_config = amdgpu_crtc_set_config,
@ -2774,6 +2811,7 @@ static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
amdgpu_atombios_crtc_scaler_setup(crtc); amdgpu_atombios_crtc_scaler_setup(crtc);
dce_v10_0_cursor_reset(crtc);
/* update the hw version fpr dpm */ /* update the hw version fpr dpm */
amdgpu_crtc->hw_mode = *adjusted_mode; amdgpu_crtc->hw_mode = *adjusted_mode;
@ -3267,37 +3305,20 @@ static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
unsigned type, unsigned type,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
u32 reg, reg_block; u32 reg;
/* now deal with page flip IRQ */
switch (type) { if (type >= adev->mode_info.num_crtc) {
case AMDGPU_PAGEFLIP_IRQ_D1: DRM_ERROR("invalid pageflip crtc %d\n", type);
reg_block = CRTC0_REGISTER_OFFSET; return -EINVAL;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", type);
return -EINVAL;
} }
reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE) if (state == AMDGPU_IRQ_STATE_DISABLE)
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else else
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0; return 0;
} }
@ -3306,7 +3327,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
int reg_block;
unsigned long flags; unsigned long flags;
unsigned crtc_id; unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc; struct amdgpu_crtc *amdgpu_crtc;
@ -3315,33 +3335,15 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
crtc_id = (entry->src_id - 8) >> 1; crtc_id = (entry->src_id - 8) >> 1;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* ack the interrupt */ if (crtc_id >= adev->mode_info.num_crtc) {
switch(crtc_id){ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
case AMDGPU_PAGEFLIP_IRQ_D1: return -EINVAL;
reg_block = CRTC0_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
return -EINVAL;
} }
if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */ /* IRQ could occur when in initial stage */
if (amdgpu_crtc == NULL) if (amdgpu_crtc == NULL)


@ -2476,26 +2476,19 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp; u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
} }
static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
uint64_t gpu_addr) int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(gpu_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(gpu_addr));
}
static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
@ -2515,26 +2508,40 @@ static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
y = 0; y = 0;
} }
dce_v11_0_lock_cursor(crtc, true);
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
dce_v11_0_lock_cursor(crtc, false);
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
return 0; return 0;
} }
static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc, static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
struct drm_file *file_priv, int x, int y)
uint32_t handle, {
uint32_t width, int ret;
uint32_t height)
dce_v11_0_lock_cursor(crtc, true);
ret = dce_v11_0_cursor_move_locked(crtc, x, y);
dce_v11_0_lock_cursor(crtc, false);
return ret;
}
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height,
int32_t hot_x,
int32_t hot_y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct amdgpu_bo *robj; struct amdgpu_bo *aobj;
uint64_t gpu_addr;
int ret; int ret;
if (!handle) { if (!handle) {
@ -2556,41 +2563,71 @@ static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT; return -ENOENT;
} }
robj = gem_to_amdgpu_bo(obj); aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (unlikely(ret != 0)) if (ret != 0) {
goto fail; drm_gem_object_unreference_unlocked(obj);
ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, return ret;
0, 0, &gpu_addr); }
amdgpu_bo_unreserve(robj);
if (ret) ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
goto fail; amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height; amdgpu_crtc->cursor_height = height;
dce_v11_0_lock_cursor(crtc, true); dce_v11_0_lock_cursor(crtc, true);
dce_v11_0_set_cursor(crtc, obj, gpu_addr);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
dce_v11_0_cursor_move_locked(crtc, x, y);
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v11_0_show_cursor(crtc); dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false); dce_v11_0_lock_cursor(crtc, false);
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(robj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(robj); amdgpu_bo_unreserve(aobj);
} }
drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
} }
amdgpu_crtc->cursor_bo = obj; amdgpu_crtc->cursor_bo = obj;
return 0; return 0;
fail: }
drm_gem_object_unreference_unlocked(obj);
return ret; static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
}
} }
static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@ -2618,7 +2655,7 @@ static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
} }
static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.cursor_set = dce_v11_0_crtc_cursor_set, .cursor_set2 = dce_v11_0_crtc_cursor_set2,
.cursor_move = dce_v11_0_crtc_cursor_move, .cursor_move = dce_v11_0_crtc_cursor_move,
.gamma_set = dce_v11_0_crtc_gamma_set, .gamma_set = dce_v11_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config, .set_config = amdgpu_crtc_set_config,
@ -2751,6 +2788,7 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
amdgpu_atombios_crtc_scaler_setup(crtc); amdgpu_atombios_crtc_scaler_setup(crtc);
dce_v11_0_cursor_reset(crtc);
/* update the hw version fpr dpm */ /* update the hw version fpr dpm */
amdgpu_crtc->hw_mode = *adjusted_mode; amdgpu_crtc->hw_mode = *adjusted_mode;
@ -3243,37 +3281,20 @@ static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
unsigned type, unsigned type,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
u32 reg, reg_block; u32 reg;
/* now deal with page flip IRQ */
switch (type) { if (type >= adev->mode_info.num_crtc) {
case AMDGPU_PAGEFLIP_IRQ_D1: DRM_ERROR("invalid pageflip crtc %d\n", type);
reg_block = CRTC0_REGISTER_OFFSET; return -EINVAL;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", type);
return -EINVAL;
} }
reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE) if (state == AMDGPU_IRQ_STATE_DISABLE)
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else else
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0; return 0;
} }
@ -3282,7 +3303,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
int reg_block;
unsigned long flags; unsigned long flags;
unsigned crtc_id; unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc; struct amdgpu_crtc *amdgpu_crtc;
@ -3291,33 +3311,15 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
crtc_id = (entry->src_id - 8) >> 1; crtc_id = (entry->src_id - 8) >> 1;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* ack the interrupt */ if (crtc_id >= adev->mode_info.num_crtc) {
switch(crtc_id){ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
case AMDGPU_PAGEFLIP_IRQ_D1: return -EINVAL;
reg_block = CRTC0_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
return -EINVAL;
} }
if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */ /* IRQ could occur when in initial stage */
if(amdgpu_crtc == NULL) if(amdgpu_crtc == NULL)
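The dce_v11_0 changes above replace the per-CRTC switch statements with a bounds check against mode_info.num_crtc and a lookup in the driver's crtc_offsets[] table. Below is a minimal sketch of that pattern; the offset values and helper names are placeholders for illustration, not the real CRTC register offsets.

/* Placeholder offsets; the driver uses the CRTCn_REGISTER_OFFSET values
 * from its register headers. */
static const unsigned example_crtc_offsets[] = {
	0x0000, 0x0200, 0x0400, 0x0600, 0x0800, 0x0a00
};

static int example_pageflip_reg_offset(unsigned type, unsigned num_crtc,
				       unsigned *offset)
{
	if (type >= num_crtc)
		return -EINVAL;	/* reject out-of-range CRTC indices */
	*offset = example_crtc_offsets[type];
	return 0;
}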

View file

@ -2411,26 +2411,19 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
CUR_CONTROL__CURSOR_EN_MASK | CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
} }
static void dce_v8_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
uint64_t gpu_addr) int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(gpu_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
gpu_addr & 0xffffffff);
}
static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private; struct amdgpu_device *adev = crtc->dev->dev_private;
@ -2450,26 +2443,40 @@ static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
y = 0; y = 0;
} }
dce_v8_0_lock_cursor(crtc, true);
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
dce_v8_0_lock_cursor(crtc, false);
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
return 0; return 0;
} }
static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc, static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
struct drm_file *file_priv, int x, int y)
uint32_t handle, {
uint32_t width, int ret;
uint32_t height)
dce_v8_0_lock_cursor(crtc, true);
ret = dce_v8_0_cursor_move_locked(crtc, x, y);
dce_v8_0_lock_cursor(crtc, false);
return ret;
}
static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height,
int32_t hot_x,
int32_t hot_y)
{ {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj; struct drm_gem_object *obj;
struct amdgpu_bo *robj; struct amdgpu_bo *aobj;
uint64_t gpu_addr;
int ret; int ret;
if (!handle) { if (!handle) {
@ -2491,41 +2498,71 @@ static int dce_v8_0_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT; return -ENOENT;
} }
robj = gem_to_amdgpu_bo(obj); aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (unlikely(ret != 0)) if (ret != 0) {
goto fail; drm_gem_object_unreference_unlocked(obj);
ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, return ret;
0, 0, &gpu_addr); }
amdgpu_bo_unreserve(robj);
if (ret) ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
goto fail; amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
amdgpu_crtc->cursor_width = width; amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height; amdgpu_crtc->cursor_height = height;
dce_v8_0_lock_cursor(crtc, true); dce_v8_0_lock_cursor(crtc, true);
dce_v8_0_set_cursor(crtc, obj, gpu_addr);
if (hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
dce_v8_0_cursor_move_locked(crtc, x, y);
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v8_0_show_cursor(crtc); dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false); dce_v8_0_lock_cursor(crtc, false);
unpin: unpin:
if (amdgpu_crtc->cursor_bo) { if (amdgpu_crtc->cursor_bo) {
robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(robj, false); ret = amdgpu_bo_reserve(aobj, false);
if (likely(ret == 0)) { if (likely(ret == 0)) {
amdgpu_bo_unpin(robj); amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(robj); amdgpu_bo_unreserve(aobj);
} }
drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
} }
amdgpu_crtc->cursor_bo = obj; amdgpu_crtc->cursor_bo = obj;
return 0; return 0;
fail: }
drm_gem_object_unreference_unlocked(obj);
return ret; static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
}
} }
static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
@ -2553,7 +2590,7 @@ static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
} }
static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.cursor_set = dce_v8_0_crtc_cursor_set, .cursor_set2 = dce_v8_0_crtc_cursor_set2,
.cursor_move = dce_v8_0_crtc_cursor_move, .cursor_move = dce_v8_0_crtc_cursor_move,
.gamma_set = dce_v8_0_crtc_gamma_set, .gamma_set = dce_v8_0_crtc_gamma_set,
.set_config = amdgpu_crtc_set_config, .set_config = amdgpu_crtc_set_config,
@ -2693,6 +2730,7 @@ static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
amdgpu_atombios_crtc_scaler_setup(crtc); amdgpu_atombios_crtc_scaler_setup(crtc);
dce_v8_0_cursor_reset(crtc);
/* update the hw version for dpm */ /* update the hw version for dpm */
amdgpu_crtc->hw_mode = *adjusted_mode; amdgpu_crtc->hw_mode = *adjusted_mode;
@ -3274,37 +3312,20 @@ static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
unsigned type, unsigned type,
enum amdgpu_interrupt_state state) enum amdgpu_interrupt_state state)
{ {
u32 reg, reg_block; u32 reg;
/* now deal with page flip IRQ */
switch (type) { if (type >= adev->mode_info.num_crtc) {
case AMDGPU_PAGEFLIP_IRQ_D1: DRM_ERROR("invalid pageflip crtc %d\n", type);
reg_block = CRTC0_REGISTER_OFFSET; return -EINVAL;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", type);
return -EINVAL;
} }
reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE) if (state == AMDGPU_IRQ_STATE_DISABLE)
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else else
WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0; return 0;
} }
@ -3313,7 +3334,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
int reg_block;
unsigned long flags; unsigned long flags;
unsigned crtc_id; unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc; struct amdgpu_crtc *amdgpu_crtc;
@ -3322,33 +3342,15 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
crtc_id = (entry->src_id - 8) >> 1; crtc_id = (entry->src_id - 8) >> 1;
amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* ack the interrupt */ if (crtc_id >= adev->mode_info.num_crtc) {
switch(crtc_id){ DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
case AMDGPU_PAGEFLIP_IRQ_D1: return -EINVAL;
reg_block = CRTC0_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D2:
reg_block = CRTC1_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D3:
reg_block = CRTC2_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D4:
reg_block = CRTC3_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D5:
reg_block = CRTC4_REGISTER_OFFSET;
break;
case AMDGPU_PAGEFLIP_IRQ_D6:
reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
return -EINVAL;
} }
if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */ /* IRQ could occur when in initial stage */
if (amdgpu_crtc == NULL) if (amdgpu_crtc == NULL)
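dce_v8_0 gets the same page-flip and cursor treatment, and its cursor_set2 hook adjusts the programmed cursor position by the change in hotspot before storing the new hotspot. A sketch of that adjustment follows; the helper name is made up and the values are illustrative only.

/* When the hotspot moves from (old_hot_x, old_hot_y) to (hot_x, hot_y),
 * the position written to CUR_POSITION shifts by the difference. */
static void example_adjust_for_hotspot(int *x, int *y,
				       int old_hot_x, int old_hot_y,
				       int hot_x, int hot_y)
{
	*x += old_hot_x - hot_x;
	*y += old_hot_y - hot_y;
}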

View file

@ -903,6 +903,191 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
return 0; return 0;
} }
static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
switch (adev->asic_type) {
case CHIP_TOPAZ:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_cu_per_sh = 6;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_FIJI:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 16;
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TONGA:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 8;
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CARRIZO:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
case 0x84:
case 0xc8:
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
break;
case 0xc5:
case 0x81:
case 0x85:
case 0xc9:
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
break;
case 0xc7:
case 0x87:
case 0xcb:
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
break;
}
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
break;
default:
adev->gfx.config.max_shader_engines = 2;
adev->gfx.config.max_tile_pipes = 4;
adev->gfx.config.max_cu_per_sh = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
}
mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
/* Get memory bank mapping mode. */
tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
/* Validate settings in case only one DIMM installed. */
if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
dimm00_addr_map = 0;
if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
dimm01_addr_map = 0;
if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
dimm10_addr_map = 0;
if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
dimm11_addr_map = 0;
/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
adev->gfx.config.mem_row_size_in_kb = 2;
else
adev->gfx.config.mem_row_size_in_kb = 1;
} else {
tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (adev->gfx.config.mem_row_size_in_kb > 4)
adev->gfx.config.mem_row_size_in_kb = 4;
}
adev->gfx.config.shader_engine_tile_size = 32;
adev->gfx.config.num_gpus = 1;
adev->gfx.config.multi_gpu_tile_size = 64;
/* fix up row size */
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
break;
case 2:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
break;
case 4:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
}
static int gfx_v8_0_sw_init(void *handle) static int gfx_v8_0_sw_init(void *handle)
{ {
int i, r; int i, r;
@ -1010,6 +1195,8 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.ce_ram_size = 0x8000; adev->gfx.ce_ram_size = 0x8000;
gfx_v8_0_gpu_early_init(adev);
return 0; return 0;
} }
@ -2043,203 +2230,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
{ {
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp; u32 tmp;
int i; int i;
switch (adev->asic_type) {
case CHIP_TOPAZ:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_cu_per_sh = 6;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_FIJI:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 16;
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TONGA:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 8;
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CARRIZO:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
switch (adev->pdev->revision) {
case 0xc4:
case 0x84:
case 0xc8:
case 0xcc:
/* B10 */
adev->gfx.config.max_cu_per_sh = 8;
break;
case 0xc5:
case 0x81:
case 0x85:
case 0xc9:
case 0xcd:
/* B8 */
adev->gfx.config.max_cu_per_sh = 6;
break;
case 0xc6:
case 0xca:
case 0xce:
/* B6 */
adev->gfx.config.max_cu_per_sh = 6;
break;
case 0xc7:
case 0x87:
case 0xcb:
default:
/* B4 */
adev->gfx.config.max_cu_per_sh = 4;
break;
}
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
break;
default:
adev->gfx.config.max_shader_engines = 2;
adev->gfx.config.max_tile_pipes = 4;
adev->gfx.config.max_cu_per_sh = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
}
tmp = RREG32(mmGRBM_CNTL); tmp = RREG32(mmGRBM_CNTL);
tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
WREG32(mmGRBM_CNTL, tmp); WREG32(mmGRBM_CNTL, tmp);
mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
/* Get memory bank mapping mode. */
tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
/* Validate settings in case only one DIMM installed. */
if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
dimm00_addr_map = 0;
if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
dimm01_addr_map = 0;
if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
dimm10_addr_map = 0;
if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
dimm11_addr_map = 0;
/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
adev->gfx.config.mem_row_size_in_kb = 2;
else
adev->gfx.config.mem_row_size_in_kb = 1;
} else {
tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (adev->gfx.config.mem_row_size_in_kb > 4)
adev->gfx.config.mem_row_size_in_kb = 4;
}
adev->gfx.config.shader_engine_tile_size = 32;
adev->gfx.config.num_gpus = 1;
adev->gfx.config.multi_gpu_tile_size = 64;
/* fix up row size */
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
break;
case 2:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
break;
case 4:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET,
gb_addr_config & 0x70); adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET,
gb_addr_config & 0x70); adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev); gfx_v8_0_tiling_mode_table_init(adev);
@ -2256,13 +2263,13 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
if (i == 0) { if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC); tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED); SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp); WREG32(mmSH_MEM_CONFIG, tmp);
} else { } else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC); tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC); tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE, tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED); SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp); WREG32(mmSH_MEM_CONFIG, tmp);
} }
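For gfx_v8_0, the per-ASIC limits and the GB_ADDR_CONFIG value are now computed once in gfx_v8_0_gpu_early_init(), called from sw_init, and cached in adev->gfx.config, so gpu_init() only programs the cached value. The row-size fix-up encodes the memory row size as a small register field; here is a sketch of that mapping, assuming only the 1/2/4 KB cases handled above (the helper name is illustrative).

static unsigned example_row_size_field(unsigned mem_row_size_in_kb)
{
	/* 1 KB -> 0, 2 KB -> 1, 4 KB -> 2 (log2 of the row size in KB) */
	switch (mem_row_size_in_kb) {
	case 2:
		return 1;
	case 4:
		return 2;
	case 1:
	default:
		return 0;
	}
}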

View file

@ -435,6 +435,33 @@ static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
return 0; return 0;
} }
/**
* gmc_v7_0_set_fault_enable_default - update VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/** /**
* gmc_v7_0_gart_enable - gart enable * gmc_v7_0_gart_enable - gart enable
* *
@ -523,15 +550,13 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = RREG32(mmVM_CONTEXT1_CNTL); tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9); amdgpu_vm_block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp); WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
else
gmc_v7_0_set_fault_enable_default(adev, true);
if (adev->asic_type == CHIP_KAVERI) { if (adev->asic_type == CHIP_KAVERI) {
tmp = RREG32(mmCHUB_CONTROL); tmp = RREG32(mmCHUB_CONTROL);
@ -1262,6 +1287,15 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v7_0_set_fault_enable_default(adev, false);
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data); entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@ -1269,8 +1303,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status); status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0; return 0;
} }
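The new amdgpu_vm_fault_stop module parameter selects how VM faults are handled: 0 (never stop) keeps redirecting faults to the default page, 1 (stop on first fault) disables the redirect once a fault has been seen, and 2 (always stop) disables it from GART enable onward. A minimal sketch of that policy using the AMDGPU_VM_FAULT_STOP_* constants; the helper is illustrative only and not part of the driver.

/* Whether VM faults should currently be redirected to the default page. */
static bool example_vm_faults_redirected(int fault_stop, bool fault_seen)
{
	if (fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		return false;
	if (fault_stop == AMDGPU_VM_FAULT_STOP_FIRST && fault_seen)
		return false;
	return true;	/* AMDGPU_VM_FAULT_STOP_NEVER */
}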

View file

@ -549,6 +549,35 @@ static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
return 0; return 0;
} }
/**
* gmc_v8_0_set_fault_enable_default - update VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/** /**
* gmc_v8_0_gart_enable - gart enable * gmc_v8_0_gart_enable - gart enable
* *
@ -663,6 +692,10 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
amdgpu_vm_block_size - 9); amdgpu_vm_block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp); WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v8_0_set_fault_enable_default(adev, false);
else
gmc_v8_0_set_fault_enable_default(adev, true);
gmc_v8_0_gart_flush_gpu_tlb(adev, 0); gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@ -1262,6 +1295,15 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v8_0_set_fault_enable_default(adev, false);
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data); entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@ -1269,8 +1311,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status); status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0; return 0;
} }

View file

@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{ {
const char *chip_name; const char *chip_name;
char fw_name[30]; char fw_name[30];
int err, i; int err = 0, i;
struct amdgpu_firmware_info *info = NULL; struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL; const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr; const struct sdma_firmware_header_v1_0 *hdr;
@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
default: BUG(); default: BUG();
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0) if (i == 0)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err) if (err)
goto out; goto out;
err = amdgpu_ucode_validate(adev->sdma[i].fw); err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
if (err) if (err)
goto out; goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma[i].feature_version >= 20) if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma[i].burst_nop = true; adev->sdma.instance[i].burst_nop = true;
if (adev->firmware.smu_load) { if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma[i].fw; info->fw = adev->sdma.instance[i].fw;
header = (const struct common_firmware_header *)info->fw->data; header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size += adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@ -164,9 +164,9 @@ out:
printk(KERN_ERR printk(KERN_ERR
"sdma_v2_4: Failed to load firmware \"%s\"\n", "sdma_v2_4: Failed to load firmware \"%s\"\n",
fw_name); fw_name);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma[i].fw); release_firmware(adev->sdma.instance[i].fw);
adev->sdma[i].fw = NULL; adev->sdma.instance[i].fw = NULL;
} }
} }
return err; return err;
@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
return wptr; return wptr;
@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
} }
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
int i; int i;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{ {
u32 ref_and_mask = 0; u32 ref_and_mask = 0;
if (ring == &ring->adev->sdma[0].ring) if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
*/ */
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl; u32 rb_cntl, ib_cntl;
int i; int i;
@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
(adev->mman.buffer_funcs_ring == sdma1)) (adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
sdma_v2_4_rlc_stop(adev); sdma_v2_4_rlc_stop(adev);
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable) if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
u32 wb_offset; u32 wb_offset;
int i, j, r; int i, j, r;
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma[i].ring; ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4); wb_offset = (ring->rptr_offs * 4);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
const __le32 *fw_data; const __le32 *fw_data;
u32 fw_size; u32 fw_size;
int i, j; int i, j;
bool smc_loads_fw = false; /* XXX fix me */
if (!adev->sdma[0].fw || !adev->sdma[1].fw)
return -EINVAL;
/* halt the MEs */ /* halt the MEs */
sdma_v2_4_enable(adev, false); sdma_v2_4_enable(adev, false);
if (smc_loads_fw) { for (i = 0; i < adev->sdma.num_instances; i++) {
/* XXX query SMC for fw load complete */ if (!adev->sdma.instance[i].fw)
} else { return -EINVAL;
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header);
amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; fw_data = (const __le32 *)
fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data +
(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); for (j = 0; j < fw_size; j++)
for (j = 0; j < fw_size; j++) WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
}
} }
return 0; return 0;
@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
*/ */
static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
u32 pad_count; u32 pad_count;
int i; int i;
@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
sdma_v2_4_set_ring_funcs(adev); sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev); sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev); sdma_v2_4_set_vm_pte_funcs(adev);
@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
static int sdma_v2_4_sw_init(void *handle) static int sdma_v2_4_sw_init(void *handle)
{ {
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
int r; int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */ /* SDMA trap event */
r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
return r; return r;
} }
ring = &adev->sdma[0].ring; for (i = 0; i < adev->sdma.num_instances; i++) {
ring->ring_obj = NULL; ring = &adev->sdma.instance[i].ring;
ring->use_doorbell = false; ring->ring_obj = NULL;
ring->use_doorbell = false;
ring = &adev->sdma[1].ring; sprintf(ring->name, "sdma%d", i);
ring->ring_obj = NULL; r = amdgpu_ring_init(adev, ring, 256 * 1024,
ring->use_doorbell = false; SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
ring = &adev->sdma[0].ring; (i == 0) ?
sprintf(ring->name, "sdma0"); AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
r = amdgpu_ring_init(adev, ring, 256 * 1024, AMDGPU_RING_TYPE_SDMA);
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, if (r)
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, return r;
AMDGPU_RING_TYPE_SDMA); }
if (r)
return r;
ring = &adev->sdma[1].ring;
sprintf(ring->name, "sdma1");
r = amdgpu_ring_init(adev, ring, 256 * 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_RING_TYPE_SDMA);
if (r)
return r;
return r; return r;
} }
@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
static int sdma_v2_4_sw_fini(void *handle) static int sdma_v2_4_sw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
amdgpu_ring_fini(&adev->sdma[0].ring); for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma[1].ring); amdgpu_ring_fini(&adev->sdma.instance[i].ring);
return 0; return 0;
} }
@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
dev_info(adev->dev, "VI SDMA registers\n"); dev_info(adev->dev, "VI SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2)); RREG32(mmSRBM_STATUS2));
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
case 0: case 0:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[0].ring); amdgpu_fence_process(&adev->sdma.instance[0].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
case 1: case 1:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[1].ring); amdgpu_fence_process(&adev->sdma.instance[1].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1345,8 +1331,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs; int i;
adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
for (i = 0; i < adev->sdma.num_instances; i++)
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@ -1360,9 +1348,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
} }
/** /**
@ -1428,7 +1416,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{ {
if (adev->mman.buffer_funcs == NULL) { if (adev->mman.buffer_funcs == NULL) {
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
} }
} }
@ -1443,7 +1431,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{ {
if (adev->vm_manager.vm_pte_funcs == NULL) { if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
} }
} }
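The reworked SDMA structures move per-engine state into adev->sdma.instance[] with an explicit num_instances count, so the loops above no longer hard-code two engines. A minimal sketch of the resulting iteration pattern; the helper name is made up for illustration.

static void example_for_each_sdma(struct amdgpu_device *adev,
				  void (*fn)(struct amdgpu_sdma_instance *sdma))
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		fn(&adev->sdma.instance[i]);
}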

View file

@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{ {
const char *chip_name; const char *chip_name;
char fw_name[30]; char fw_name[30];
int err, i; int err = 0, i;
struct amdgpu_firmware_info *info = NULL; struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL; const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr; const struct sdma_firmware_header_v1_0 *hdr;
@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
default: BUG(); default: BUG();
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0) if (i == 0)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
if (err) if (err)
goto out; goto out;
err = amdgpu_ucode_validate(adev->sdma[i].fw); err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
if (err) if (err)
goto out; goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma[i].feature_version >= 20) if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma[i].burst_nop = true; adev->sdma.instance[i].burst_nop = true;
if (adev->firmware.smu_load) { if (adev->firmware.smu_load) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma[i].fw; info->fw = adev->sdma.instance[i].fw;
header = (const struct common_firmware_header *)info->fw->data; header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size += adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@ -235,9 +235,9 @@ out:
printk(KERN_ERR printk(KERN_ERR
"sdma_v3_0: Failed to load firmware \"%s\"\n", "sdma_v3_0: Failed to load firmware \"%s\"\n",
fw_name); fw_name);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma[i].fw); release_firmware(adev->sdma.instance[i].fw);
adev->sdma[i].fw = NULL; adev->sdma.instance[i].fw = NULL;
} }
} }
return err; return err;
@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
/* XXX check if swapping is necessary on BE */ /* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else { } else {
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
} }
@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = ring->wptr << 2; adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
WDOORBELL32(ring->doorbell_index, ring->wptr << 2); WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
} else { } else {
int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
} }
@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
int i; int i;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{ {
u32 ref_and_mask = 0; u32 ref_and_mask = 0;
if (ring == &ring->adev->sdma[0].ring) if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
*/ */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl; u32 rb_cntl, ib_cntl;
int i; int i;
@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
(adev->mman.buffer_funcs_ring == sdma1)) (adev->mman.buffer_funcs_ring == sdma1))
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl; u32 f32_cntl;
int i; int i;
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
if (enable) if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
sdma_v3_0_rlc_stop(adev); sdma_v3_0_rlc_stop(adev);
} }
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable) if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
u32 doorbell; u32 doorbell;
int i, j, r; int i, j, r;
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma[i].ring; ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4); wb_offset = (ring->rptr_offs * 4);
mutex_lock(&adev->srbm_mutex); mutex_lock(&adev->srbm_mutex);
@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;

-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	sdma_v3_0_enable(adev, false);

-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 		fw_data = (const __le32 *)
-			(adev->sdma[i].fw->data +
+			(adev->sdma.instance[i].fw->data +
 			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 		for (j = 0; j < fw_size; j++)
 			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}

 	return 0;
@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
  */
 static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
-	int r;
+	int r, i;

 	if (!adev->firmware.smu_load) {
 		r = sdma_v3_0_load_microcode(adev);
 		if (r)
 			return r;
 	} else {
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA0);
-		if (r)
-			return -EINVAL;
-		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-						AMDGPU_UCODE_ID_SDMA1);
-		if (r)
-			return -EINVAL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+							(i == 0) ?
+							AMDGPU_UCODE_ID_SDMA0 :
+							AMDGPU_UCODE_ID_SDMA1);
+			if (r)
+				return -EINVAL;
+		}
 	}

 	/* unhalt the MEs */
@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
*/ */
static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib)
{ {
struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
u32 pad_count; u32 pad_count;
int i; int i;
@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
default:
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
break;
}
sdma_v3_0_set_ring_funcs(adev); sdma_v3_0_set_ring_funcs(adev);
sdma_v3_0_set_buffer_funcs(adev); sdma_v3_0_set_buffer_funcs(adev);
sdma_v3_0_set_vm_pte_funcs(adev); sdma_v3_0_set_vm_pte_funcs(adev);
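The asic_type switch above is what makes the rest of this file instance-count agnostic: every VI part currently falls through to SDMA_MAX_INSTANCE, but a later asic with fewer engines only has to add a case here. A minimal sketch of what that would look like, purely illustrative (CHIP_FOO is a placeholder, not a chip handled by this commit):

	switch (adev->asic_type) {
	case CHIP_FOO:	/* hypothetical part with a single SDMA engine */
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}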
@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle)
static int sdma_v3_0_sw_init(void *handle) static int sdma_v3_0_sw_init(void *handle)
{ {
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
int r; int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */ /* SDMA trap event */
r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
/* SDMA Privileged inst */ /* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
if (r) if (r)
return r; return r;
@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle)
 		return r;
 	}

-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = true;
-	ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = true;
+		ring->doorbell_index = (i == 0) ?
+			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}

 	return r;
 }
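Everything that used to differ between the duplicated sdma0/sdma1 blocks is now derived from the loop index: the ring name comes from sprintf(ring->name, "sdma%d", i), while the doorbell and trap IRQ type are still picked with two-way (i == 0) ? ... : ... selectors. If a part with more than two engines ever shows up, those selectors are the only pieces that would need to grow; a table indexed by the instance number is one obvious way to do it. The arrays below are an illustration of that idea, not code from this commit:

	/* hypothetical per-instance lookup tables */
	static const u32 sdma_doorbell[] = {
		AMDGPU_DOORBELL_sDMA_ENGINE0,
		AMDGPU_DOORBELL_sDMA_ENGINE1,
	};
	static const unsigned sdma_trap_type[] = {
		AMDGPU_SDMA_IRQ_TRAP0,
		AMDGPU_SDMA_IRQ_TRAP1,
	};

	ring->doorbell_index = sdma_doorbell[i];
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
			     &adev->sdma.trap_irq, sdma_trap_type[i],
			     AMDGPU_RING_TYPE_SDMA);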
@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle)
 static int sdma_v3_0_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;

-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

 	return 0;
 }
@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle)
dev_info(adev->dev, "VI SDMA registers\n"); dev_info(adev->dev, "VI SDMA registers\n");
dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
RREG32(mmSRBM_STATUS2)); RREG32(mmSRBM_STATUS2));
for (i = 0; i < SDMA_MAX_INSTANCE; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
case 0: case 0:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[0].ring); amdgpu_fence_process(&adev->sdma.instance[0].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
case 1: case 1:
switch (queue_id) { switch (queue_id) {
case 0: case 0:
amdgpu_fence_process(&adev->sdma[1].ring); amdgpu_fence_process(&adev->sdma.instance[1].ring);
break; break;
case 1: case 1:
/* XXX compute */ /* XXX compute */
@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs;
-	adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
 }
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
} }
/** /**
@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{ {
if (adev->mman.buffer_funcs == NULL) { if (adev->mman.buffer_funcs == NULL) {
adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
} }
} }
@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{ {
if (adev->vm_manager.vm_pte_funcs == NULL) { if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
} }
} }
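The NOP-padding and IB-padding callbacks earlier in this file resolve their owning engine through amdgpu_get_sdma_instance(), which is declared elsewhere and not part of this diff. Given the new adev->sdma layout, a plausible shape for that helper is the sketch below; treat it as orientation only, not the actual implementation:

static struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	/* find the instance whose ring is the one we were handed */
	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			return &adev->sdma.instance[i];

	return NULL;	/* not an SDMA ring */
}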


@ -327,19 +327,49 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_sched_fence *s_fence = struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb); container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched; struct amd_gpu_scheduler *sched = s_fence->sched;
unsigned long flags;
atomic_dec(&sched->hw_rq_count); atomic_dec(&sched->hw_rq_count);
amd_sched_fence_signal(s_fence); amd_sched_fence_signal(s_fence);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
cancel_delayed_work_sync(&s_fence->dwork);
spin_lock_irqsave(&sched->fence_list_lock, flags);
list_del_init(&s_fence->list);
spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
fence_put(&s_fence->base); fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker); wake_up_interruptible(&sched->wake_up_worker);
} }
static void amd_sched_fence_work_func(struct work_struct *work)
{
struct amd_sched_fence *s_fence =
container_of(work, struct amd_sched_fence, dwork.work);
struct amd_gpu_scheduler *sched = s_fence->sched;
struct amd_sched_fence *entity, *tmp;
unsigned long flags;
DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
/* Clean all pending fences */
spin_lock_irqsave(&sched->fence_list_lock, flags);
list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
DRM_ERROR(" fence no %d\n", entity->base.seqno);
cancel_delayed_work(&entity->dwork);
list_del_init(&entity->list);
fence_put(&entity->base);
}
spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
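Together with the hunk above and the arming code added to amd_sched_main() below, this gives the scheduler a per-job watchdog: a delayed work item is scheduled with the ring's timeout when a job is pushed to the hardware, cancelled again when its fence signals in time, and it only ever runs if the hardware stopped making progress, at which point every still-pending fence is flushed. Boiled down to the two halves of that hand-off (a condensation of the surrounding diff, not separate code):

	/* submission side (amd_sched_main): arm the watchdog for this job */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
		schedule_delayed_work(&s_fence->dwork, sched->timeout);
	}

	/* completion side (amd_sched_process_job): job finished in time, disarm */
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT)
		cancel_delayed_work_sync(&s_fence->dwork);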
static int amd_sched_main(void *param) static int amd_sched_main(void *param)
{ {
struct sched_param sparam = {.sched_priority = 1}; struct sched_param sparam = {.sched_priority = 1};
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
int r, count; int r, count;
spin_lock_init(&sched->fence_list_lock);
INIT_LIST_HEAD(&sched->fence_list);
sched_setscheduler(current, SCHED_FIFO, &sparam); sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
@ -347,6 +377,7 @@ static int amd_sched_main(void *param)
struct amd_sched_fence *s_fence; struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job; struct amd_sched_job *sched_job;
struct fence *fence; struct fence *fence;
unsigned long flags;
wait_event_interruptible(sched->wake_up_worker, wait_event_interruptible(sched->wake_up_worker,
kthread_should_stop() || kthread_should_stop() ||
@ -357,6 +388,15 @@ static int amd_sched_main(void *param)
entity = sched_job->s_entity; entity = sched_job->s_entity;
s_fence = sched_job->s_fence; s_fence = sched_job->s_fence;
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
schedule_delayed_work(&s_fence->dwork, sched->timeout);
spin_lock_irqsave(&sched->fence_list_lock, flags);
list_add_tail(&s_fence->list, &sched->fence_list);
spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
atomic_inc(&sched->hw_rq_count); atomic_inc(&sched->hw_rq_count);
fence = sched->ops->run_job(sched_job); fence = sched->ops->run_job(sched_job);
if (fence) { if (fence) {
@ -392,11 +432,12 @@ static int amd_sched_main(void *param)
*/ */
int amd_sched_init(struct amd_gpu_scheduler *sched, int amd_sched_init(struct amd_gpu_scheduler *sched,
struct amd_sched_backend_ops *ops, struct amd_sched_backend_ops *ops,
unsigned hw_submission, const char *name) unsigned hw_submission, long timeout, const char *name)
{ {
sched->ops = ops; sched->ops = ops;
sched->hw_submission_limit = hw_submission; sched->hw_submission_limit = hw_submission;
sched->name = name; sched->name = name;
sched->timeout = timeout;
amd_sched_rq_init(&sched->sched_rq); amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq); amd_sched_rq_init(&sched->kernel_rq);
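Callers of amd_sched_init() now pass the watchdog period, in jiffies, between the submission limit and the name; MAX_SCHEDULE_TIMEOUT disables the mechanism, which is what the timeout != MAX_SCHEDULE_TIMEOUT guards above key off. A hedged example of a call site after this change; the ops pointer, submission count and timeout value are placeholders for whatever the driver actually uses, not names taken from this diff:

	/* sched, my_sched_ops, hw_submission and lockup_timeout_ms are the caller's own values */
	r = amd_sched_init(sched, &my_sched_ops, hw_submission,
			   msecs_to_jiffies(lockup_timeout_ms),	/* or MAX_SCHEDULE_TIMEOUT to disable */
			   "gfx");
	if (r)
		return r;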


@ -68,6 +68,8 @@ struct amd_sched_fence {
struct amd_gpu_scheduler *sched; struct amd_gpu_scheduler *sched;
spinlock_t lock; spinlock_t lock;
void *owner; void *owner;
struct delayed_work dwork;
struct list_head list;
}; };
struct amd_sched_job { struct amd_sched_job {
@ -103,18 +105,21 @@ struct amd_sched_backend_ops {
struct amd_gpu_scheduler { struct amd_gpu_scheduler {
struct amd_sched_backend_ops *ops; struct amd_sched_backend_ops *ops;
uint32_t hw_submission_limit; uint32_t hw_submission_limit;
long timeout;
const char *name; const char *name;
struct amd_sched_rq sched_rq; struct amd_sched_rq sched_rq;
struct amd_sched_rq kernel_rq; struct amd_sched_rq kernel_rq;
wait_queue_head_t wake_up_worker; wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled; wait_queue_head_t job_scheduled;
atomic_t hw_rq_count; atomic_t hw_rq_count;
struct list_head fence_list;
spinlock_t fence_list_lock;
struct task_struct *thread; struct task_struct *thread;
}; };
int amd_sched_init(struct amd_gpu_scheduler *sched, int amd_sched_init(struct amd_gpu_scheduler *sched,
struct amd_sched_backend_ops *ops, struct amd_sched_backend_ops *ops,
uint32_t hw_submission, const char *name); uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched); void amd_sched_fini(struct amd_gpu_scheduler *sched);
int amd_sched_entity_init(struct amd_gpu_scheduler *sched, int amd_sched_entity_init(struct amd_gpu_scheduler *sched,


@ -34,6 +34,8 @@
#define MAX(a,b) (((a)>(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b))
#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc); struct radeon_bo_list **cs_reloc);
struct evergreen_cs_track { struct evergreen_cs_track {
@ -84,6 +86,7 @@ struct evergreen_cs_track {
u32 htile_surface; u32 htile_surface;
struct radeon_bo *htile_bo; struct radeon_bo *htile_bo;
unsigned long indirect_draw_buffer_size; unsigned long indirect_draw_buffer_size;
const unsigned *reg_safe_bm;
}; };
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@ -444,7 +447,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
* command stream. * command stream.
*/ */
if (!surf.mode) { if (!surf.mode) {
volatile u32 *ib = p->ib.ptr; uint32_t *ib = p->ib.ptr;
unsigned long tmp, nby, bsize, size, min = 0; unsigned long tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */ /* find the height the ddx wants */
@ -1083,41 +1086,18 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 }

 /**
- * evergreen_cs_check_reg() - check if register is authorized or not
+ * evergreen_cs_handle_reg() - process registers that need special handling.
  * @parser: parser structure holding parsing context
  * @reg: register we are testing
  * @idx: index into the cs buffer
- *
- * This function will test against evergreen_reg_safe_bm and return 0
- * if register is safe. If register is not flag as safe this function
- * will test it against a list of register needind special handling.
  */
-static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
 	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
 	struct radeon_bo_list *reloc;
-	u32 last_reg;
-	u32 m, i, tmp, *ib;
+	u32 tmp, *ib;
 	int r;

-	if (p->rdev->family >= CHIP_CAYMAN)
-		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-	else
-		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
-
-	i = (reg >> 7);
-	if (i >= last_reg) {
-		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
-		return -EINVAL;
-	}
-	m = 1 << ((reg >> 2) & 31);
-	if (p->rdev->family >= CHIP_CAYMAN) {
-		if (!(cayman_reg_safe_bm[i] & m))
-			return 0;
-	} else {
-		if (!(evergreen_reg_safe_bm[i] & m))
-			return 0;
-	}
 	ib = p->ib.ptr;
 	switch (reg) {
 	/* force following reg to 0 in an attempt to disable out buffer
@ -1764,29 +1744,27 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	return 0;
 }

-static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+/**
+ * evergreen_is_safe_reg() - check if register is authorized or not
+ * @parser: parser structure holding parsing context
+ * @reg: register we are testing
+ *
+ * This function will test against reg_safe_bm and return true
+ * if register is safe or false otherwise.
+ */
+static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
 {
-	u32 last_reg, m, i;
-
-	if (p->rdev->family >= CHIP_CAYMAN)
-		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
-	else
-		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+	struct evergreen_cs_track *track = p->track;
+	u32 m, i;

 	i = (reg >> 7);
-	if (i >= last_reg) {
-		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	if (unlikely(i >= REG_SAFE_BM_SIZE)) {
 		return false;
 	}
 	m = 1 << ((reg >> 2) & 31);
-	if (p->rdev->family >= CHIP_CAYMAN) {
-		if (!(cayman_reg_safe_bm[i] & m))
-			return true;
-	} else {
-		if (!(evergreen_reg_safe_bm[i] & m))
-			return true;
-	}
-	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+	if (!(track->reg_safe_bm[i] & m))
+		return true;
+
 	return false;
 }
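Both the removed open-coded check and the new helper use the same bit packing: one bit per register dword, so reg >> 2 is the register index, its low five bits select a bit within a 32-bit bitmap word, and reg >> 7 selects the word (each word therefore covers 128 bytes of register space). A standalone toy program, with a made-up bitmap, just to make the indexing concrete:

#include <stdio.h>

/* toy stand-in for evergreen_reg_safe_bm / cayman_reg_safe_bm:
 * a set bit means "needs special handling", a clear bit means "safe" */
static const unsigned reg_safe_bm[4] = { 0, 0, 0x80000000u, 0 };

#define REG_SAFE_BM_SIZE (sizeof(reg_safe_bm) / sizeof(reg_safe_bm[0]))

static int is_safe(unsigned reg)
{
	unsigned word = reg >> 7;                /* 32 regs x 4 bytes = 128 bytes per bitmap word */
	unsigned bit  = 1u << ((reg >> 2) & 31); /* one bit per 4-byte register */

	if (word >= REG_SAFE_BM_SIZE)
		return 0;                        /* out of range: treat as forbidden */
	return !(reg_safe_bm[word] & bit);
}

int main(void)
{
	/* 0x17c lands in word 2, bit 31 -- flagged above, so not safe */
	printf("0x017c -> word %u, bit %u, safe=%d\n",
	       0x17cu >> 7, (0x17cu >> 2) & 31, is_safe(0x17c));
	/* 0x100 lands in word 2, bit 0 -- clear, so safe */
	printf("0x0100 -> word %u, bit %u, safe=%d\n",
	       0x100u >> 7, (0x100u >> 2) & 31, is_safe(0x100));
	return 0;
}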
@ -1795,7 +1773,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{ {
struct radeon_bo_list *reloc; struct radeon_bo_list *reloc;
struct evergreen_cs_track *track; struct evergreen_cs_track *track;
volatile u32 *ib; uint32_t *ib;
unsigned idx; unsigned idx;
unsigned i; unsigned i;
unsigned start_reg, end_reg, reg; unsigned start_reg, end_reg, reg;
@ -2321,9 +2299,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
 			return -EINVAL;
 		}
-		for (i = 0; i < pkt->count; i++) {
-			reg = start_reg + (4 * i);
-			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+			if (evergreen_is_safe_reg(p, reg))
+				continue;
+			r = evergreen_cs_handle_reg(p, reg, idx);
 			if (r)
 				return r;
 		}
@ -2337,9 +2316,10 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
 			return -EINVAL;
 		}
-		for (i = 0; i < pkt->count; i++) {
-			reg = start_reg + (4 * i);
-			r = evergreen_cs_check_reg(p, reg, idx+1+i);
+		for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
+			if (evergreen_is_safe_reg(p, reg))
+				continue;
+			r = evergreen_cs_handle_reg(p, reg, idx);
 			if (r)
 				return r;
 		}
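The rewritten SET_CONFIG_REG / SET_CONTEXT_REG loops walk the register range directly and keep reg and idx in lock-step, so the dword carrying each register's value is always ib[idx] when evergreen_cs_handle_reg() is called, and only registers flagged in the bitmap take that slow path at all; bitmap hits simply continue. A tiny standalone illustration of the reg/idx pairing (the register offset and packet layout are made up):

#include <stdio.h>

int main(void)
{
	/* hypothetical packet: 3 consecutive registers starting at 0x28000,
	 * with the packet header sitting at ib[10] */
	unsigned start_reg = 0x28000, count = 3, idx = 10;
	unsigned end_reg = start_reg + (count - 1) * 4;
	unsigned reg;

	for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++)
		printf("reg 0x%05x takes its value from ib[%u]\n", reg, idx);
	return 0;
}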
@ -2594,8 +2574,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		} else {
 			/* SRC is a reg. */
 			reg = radeon_get_ib_value(p, idx+1) << 2;
-			if (!evergreen_is_safe_reg(p, reg, idx+1))
+			if (!evergreen_is_safe_reg(p, reg)) {
+				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+					 reg, idx + 1);
 				return -EINVAL;
+			}
 		}
if (idx_value & 0x2) { if (idx_value & 0x2) {
u64 offset; u64 offset;
@ -2618,8 +2601,11 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		} else {
 			/* DST is a reg. */
 			reg = radeon_get_ib_value(p, idx+3) << 2;
-			if (!evergreen_is_safe_reg(p, reg, idx+3))
+			if (!evergreen_is_safe_reg(p, reg)) {
+				dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
+					 reg, idx + 3);
 				return -EINVAL;
+			}
 		}
break; break;
case PACKET3_NOP: case PACKET3_NOP:
@ -2644,11 +2630,15 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 	if (track == NULL)
 		return -ENOMEM;
 	evergreen_cs_track_init(track);
-	if (p->rdev->family >= CHIP_CAYMAN)
+	if (p->rdev->family >= CHIP_CAYMAN) {
 		tmp = p->rdev->config.cayman.tile_config;
-	else
+		track->reg_safe_bm = cayman_reg_safe_bm;
+	} else {
 		tmp = p->rdev->config.evergreen.tile_config;
+		track->reg_safe_bm = evergreen_reg_safe_bm;
+	}
+	BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
+	BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
switch (tmp & 0xf) { switch (tmp & 0xf) {
case 0: case 0:
track->npipes = 1; track->npipes = 1;
@ -2757,7 +2747,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
u32 header, cmd, count, sub_cmd; u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr; uint32_t *ib = p->ib.ptr;
u32 idx; u32 idx;
u64 src_offset, dst_offset, dst2_offset; u64 src_offset, dst_offset, dst2_offset;
int r; int r;


@ -25,7 +25,6 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/power_supply.h> #include <linux/power_supply.h>
#include <linux/vga_switcheroo.h>
#include <acpi/video.h> #include <acpi/video.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>


@ -31,7 +31,6 @@
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h> #include <drm/radeon_drm.h>
#include <linux/vgaarb.h> #include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h" #include "radeon_reg.h"
#include "radeon.h" #include "radeon.h"
#include "radeon_asic.h" #include "radeon_asic.h"


@ -535,7 +535,7 @@ static bool radeon_atpx_detect(void)
if (has_atpx && vga_count == 2) { if (has_atpx && vga_count == 2) {
acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name); acpi_method_name);
radeon_atpx_priv.atpx_detected = true; radeon_atpx_priv.atpx_detected = true;
return true; return true;


@ -30,7 +30,6 @@
#include "radeon.h" #include "radeon.h"
#include "atom.h" #include "atom.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/acpi.h> #include <linux/acpi.h>
/* /*


@ -1197,7 +1197,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
* radeon_switcheroo_set_state - set switcheroo state * radeon_switcheroo_set_state - set switcheroo state
* *
* @pdev: pci dev pointer * @pdev: pci dev pointer
* @state: vga switcheroo state * @state: vga_switcheroo state
* *
* Callback for the switcheroo driver. Suspends or resumes the * Callback for the switcheroo driver. Suspends or resumes the
* the asics before or after it is powered up using ACPI methods. * the asics before or after it is powered up using ACPI methods.


@ -602,7 +602,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
* *
* @dev: drm dev pointer * @dev: drm dev pointer
* *
* Switch vga switcheroo state after last close (all asics). * Switch vga_switcheroo state after last close (all asics).
*/ */
void radeon_driver_lastclose_kms(struct drm_device *dev) void radeon_driver_lastclose_kms(struct drm_device *dev)
{ {