This is the 4.4.201 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3K9lIACgkQONu9yGCS
 aT4PXw/9ExBjUrU6NzDZXAr8h+mR3D9lDzZb3KC3Jn/8bzARG7OHx2i3wsYqQB3p
 a+A5HViZlBJCRl70CkfPmUAgQ0OpLQDnrkW3XPRzpa/x+xE1IBKM1nkmvxofD3Jh
 HRW8eD8YcIR2vqzYhYiSpqKYMfMYcKfSl7XYs6QBGMsRcbDs2O8+KP+S4Z5wm3ZO
 aCJ6v3GVWhOosE4oDklXm4OxhIQ12IQMtP66j4RskF31wd3iXoUzTQkxJxTLWHpK
 D8e+7oFUCVDRB8kdfmsNOL/HCkazqvZ9ZsuU91P6/f91S9vimzaR7xOzk7XZRxSF
 FRDbe3uwWWvscs4E4MU3cqHQXO1PePdGalty2pzMKQxQzLyh4VOF13d2GmlOjac6
 BV7Yim8En5SSsGh3V1VhRbLBodboFp8paLVBQoXBDJ0ErpTCwxxCzfKfK/+QJ0RD
 esdrcl+iAuz4CFJQLBwfB4iFJDG31lD3sc8IWQ9bx4FDQzZxtPf2UPJJCGF6JvCS
 eiGqO5blbhasuvsGxgBVdAdlpXDssGI6LDDZPy5nxGkMtFs/3Ic6OtjS0V3NQEHt
 2zdeYmGkiZ0OSTYUnlXUfhm1NAp8m3HMGvTD4VU8UDx+cnI2p9FF103/6X4m0dui
 0+7cGeWnAlKxORmOV8C49Pc0OXQ8SJzxoiTF4rF7KU+n1loypgY=
 =IvZj
 -----END PGP SIGNATURE-----

Merge 4.4.201 into android-4.4-p

Changes in 4.4.201
	CDC-NCM: handle incomplete transfer of MTU
	net: fix data-race in neigh_event_send()
	NFC: fdp: fix incorrect free object
	NFC: st21nfca: fix double free
	qede: fix NULL pointer deref in __qede_remove()
	nfc: netlink: fix double device reference drop
	ALSA: bebob: fix to detect configured source of sampling clock for Focusrite Saffire Pro i/o series
	ALSA: hda/ca0132 - Fix possible workqueue stall
	mm, vmstat: hide /proc/pagetypeinfo from normal users
	dump_stack: avoid the livelock of the dump_lock
	perf tools: Fix time sorting
	drm/radeon: fix si_enable_smc_cac() failed issue
	ceph: fix use-after-free in __ceph_remove_cap()
	iio: imu: adis16480: make sure provided frequency is positive
	netfilter: nf_tables: Align nft_expr private data to 64-bit
	netfilter: ipset: Fix an error code in ip_set_sockfn_get()
	can: usb_8dev: fix use-after-free on disconnect
	can: c_can: c_can_poll(): only read status register after status IRQ
	can: peak_usb: fix a potential out-of-sync while decoding packets
	can: gs_usb: gs_can_open(): prevent memory leak
	can: peak_usb: fix slab info leak
	drivers: usb: usbip: Add missing break statement to switch
	configfs: fix a deadlock in configfs_symlink()
	PCI: tegra: Enable Relaxed Ordering only for Tegra20 & Tegra30
	scsi: qla2xxx: fixup incorrect usage of host_byte
	scsi: lpfc: Honor module parameter lpfc_use_adisc
	ipvs: move old_secure_tcp into struct netns_ipvs
	bonding: fix unexpected IFF_BONDING bit unset
	usb: fsl: Check memory resource before releasing it
	usb: gadget: udc: atmel: Fix interrupt storm in FIFO mode.
	usb: gadget: composite: Fix possible double free memory bug
	usb: gadget: configfs: fix concurrent issue between composite APIs
	perf/x86/amd/ibs: Fix reading of the IBS OpData register and thus precise RIP validity
	USB: Skip endpoints with 0 maxpacket length
	scsi: qla2xxx: stop timer in shutdown path
	net: hisilicon: Fix "Trying to free already-free IRQ"
	NFSv4: Don't allow a cached open with a revoked delegation
	igb: Fix constant media auto sense switching when no cable is connected
	e1000: fix memory leaks
	can: flexcan: disable completely the ECC mechanism
	mm/filemap.c: don't initiate writeback if mapping has no dirty pages
	cgroup,writeback: don't switch wbs immediately on dead wbs if the memcg is dead
	net: prevent load/store tearing on sk->sk_stamp
	drm/i915/gtt: Add read only pages to gen8_pte_encode
	drm/i915/gtt: Read-only pages for insert_entries on bdw+
	drm/i915/gtt: Disable read-only support under GVT
	drm/i915: Rename gen7 cmdparser tables
	drm/i915: Disable Secure Batches for gen6+
	drm/i915: Remove Master tables from cmdparser
	drm/i915: Add support for mandatory cmdparsing
	drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
	drm/i915: Allow parsing of unsized batches
	drm/i915: Add gen9 BCS cmdparsing
	drm/i915/cmdparser: Add support for backward jumps
	drm/i915/cmdparser: Ignore Length operands during command matching
	drm/i915: Lower RM timeout to avoid DSI hard hangs
	drm/i915/gen8+: Add RC6 CTX corruption WA
	drm/i915/cmdparser: Fix jump whitelist clearing
	Linux 4.4.201

Change-Id: Ifc1fa5b9734f244745b862c6dbf7e34b73245806
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2019-11-14 14:39:48 +08:00
commit ef0b39d33a
62 changed files with 933 additions and 295 deletions


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 200
+SUBLEVEL = 201
 EXTRAVERSION =
 NAME = Blurry Fish Butt


@@ -555,7 +555,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
 	if (event->attr.sample_type & PERF_SAMPLE_RAW)
 		offset_max = perf_ibs->offset_max;
 	else if (check_rip)
-		offset_max = 2;
+		offset_max = 3;
 	else
 		offset_max = 1;
 	do {


@ -50,13 +50,11 @@
* granting userspace undue privileges. There are three categories of privilege. * granting userspace undue privileges. There are three categories of privilege.
* *
* First, commands which are explicitly defined as privileged or which should * First, commands which are explicitly defined as privileged or which should
* only be used by the kernel driver. The parser generally rejects such * only be used by the kernel driver. The parser rejects such commands
* commands, though it may allow some from the drm master process.
* *
* Second, commands which access registers. To support correct/enhanced * Second, commands which access registers. To support correct/enhanced
* userspace functionality, particularly certain OpenGL extensions, the parser * userspace functionality, particularly certain OpenGL extensions, the parser
* provides a whitelist of registers which userspace may safely access (for both * provides a whitelist of registers which userspace may safely access
* normal and drm master processes).
* *
* Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
* The parser always rejects such commands. * The parser always rejects such commands.
@ -81,9 +79,9 @@
* in the per-ring command tables. * in the per-ring command tables.
* *
* Other command table entries map fairly directly to high level categories * Other command table entries map fairly directly to high level categories
* mentioned above: rejected, master-only, register whitelist. The parser * mentioned above: rejected, register whitelist. The parser implements a number
* implements a number of checks, including the privileged memory checks, via a * of checks, including the privileged memory checks, via a general bitmasking
* general bitmasking mechanism. * mechanism.
*/ */
#define STD_MI_OPCODE_MASK 0xFF800000 #define STD_MI_OPCODE_MASK 0xFF800000
@ -94,7 +92,7 @@
#define CMD(op, opm, f, lm, fl, ...) \ #define CMD(op, opm, f, lm, fl, ...) \
{ \ { \
.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \
.cmd = { (op), (opm) }, \ .cmd = { (op) & (opm), (opm) }, \
.length = { (lm) }, \ .length = { (lm) }, \
__VA_ARGS__ \ __VA_ARGS__ \
} }
@ -109,14 +107,13 @@
#define R CMD_DESC_REJECT #define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER #define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK #define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
/* Command Mask Fixed Len Action /* Command Mask Fixed Len Action
---------------------------------------------------------- */ ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = { static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ), CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ), CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, M ), CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ), CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ), CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ), CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
@ -146,7 +143,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
}; };
static const struct drm_i915_cmd_descriptor render_cmds[] = { static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
CMD( MI_FLUSH, SMI, F, 1, S ), CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_PREDICATE, SMI, F, 1, S ), CMD( MI_PREDICATE, SMI, F, 1, S ),
@ -213,7 +210,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ), CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_RS_CONTEXT, SMI, F, 1, S ), CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
@ -229,7 +226,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ), CMD( GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS, S3D, !F, 0x1FF, S ),
}; };
static const struct drm_i915_cmd_descriptor video_cmds[] = { static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@ -273,7 +270,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
CMD( MFX_WAIT, SMFX, F, 1, S ), CMD( MFX_WAIT, SMFX, F, 1, S ),
}; };
static const struct drm_i915_cmd_descriptor vecs_cmds[] = { static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
CMD( MI_ARB_ON_OFF, SMI, F, 1, R ), CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
CMD( MI_SET_APPID, SMI, F, 1, S ), CMD( MI_SET_APPID, SMI, F, 1, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B, CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
@ -311,7 +308,7 @@ static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
}}, ), }}, ),
}; };
static const struct drm_i915_cmd_descriptor blt_cmds[] = { static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ), CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B, CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, B,
.bits = {{ .bits = {{
@ -345,10 +342,62 @@ static const struct drm_i915_cmd_descriptor blt_cmds[] = {
}; };
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
}; };
/*
* For Gen9 we can still rely on the h/w to enforce cmd security, and only
* need to re-enforce the register access checks. We therefore only need to
* teach the cmdparser how to find the end of each command, and identify
* register accesses. The table doesn't need to reject any commands, and so
* the only commands listed here are:
* 1) Those that touch registers
* 2) Those that do not have the default 8-bit length
*
* Note that the default MI length mask chosen for this table is 0xFF, not
* the 0x3F used on older devices. This is because the vast majority of MI
* cmds on Gen9 use a standard 8-bit Length field.
* All the Gen9 blitter instructions are standard 0xFF length mask, and
* none allow access to non-general registers, so in fact no BLT cmds are
* included in the table at all.
*
*/
static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
/*
* We allow BB_START but apply further checks. We just sanitize the
* basic fields here.
*/
CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = ~SMI,
.expected = (MI_BATCH_PPGTT_HSW | 1),
}}, ),
};
#undef CMD #undef CMD
#undef SMI #undef SMI
#undef S3D #undef S3D
@ -359,40 +408,44 @@ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
#undef R #undef R
#undef W #undef W
#undef B #undef B
#undef M
static const struct drm_i915_cmd_table gen7_render_cmds[] = { static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) }, { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
}; };
static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = { static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ render_cmds, ARRAY_SIZE(render_cmds) }, { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) }, { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
}; };
static const struct drm_i915_cmd_table gen7_video_cmds[] = { static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ video_cmds, ARRAY_SIZE(video_cmds) }, { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
}; };
static const struct drm_i915_cmd_table hsw_vebox_cmds[] = { static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ vecs_cmds, ARRAY_SIZE(vecs_cmds) }, { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
}; };
static const struct drm_i915_cmd_table gen7_blt_cmds[] = { static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) }, { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
}; };
static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
{ common_cmds, ARRAY_SIZE(common_cmds) }, { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
{ blt_cmds, ARRAY_SIZE(blt_cmds) }, { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) }, { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
}; };
static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
};
/* /*
* Register whitelists, sorted by increasing register offset. * Register whitelists, sorted by increasing register offset.
*/ */
@ -426,6 +479,10 @@ struct drm_i915_reg_descriptor {
#define REG64(addr) \ #define REG64(addr) \
REG32(addr), REG32(addr + sizeof(u32)) REG32(addr), REG32(addr + sizeof(u32))
#define REG64_IDX(_reg, idx) \
{ .addr = _reg(idx) }, \
{ .addr = _reg ## _UDW(idx) }
static const struct drm_i915_reg_descriptor gen7_render_regs[] = { static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED), REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT), REG64(HS_INVOCATION_COUNT),
@ -479,17 +536,27 @@ static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
REG32(BCS_SWCTRL), REG32(BCS_SWCTRL),
}; };
static const struct drm_i915_reg_descriptor ivb_master_regs[] = { static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
REG32(FORCEWAKE_MT), REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
REG32(DERRMR), REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)), REG32(BCS_SWCTRL),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)), REG64_IDX(BCS_GPR, 0),
}; REG64_IDX(BCS_GPR, 1),
REG64_IDX(BCS_GPR, 2),
static const struct drm_i915_reg_descriptor hsw_master_regs[] = { REG64_IDX(BCS_GPR, 3),
REG32(FORCEWAKE_MT), REG64_IDX(BCS_GPR, 4),
REG32(DERRMR), REG64_IDX(BCS_GPR, 5),
REG64_IDX(BCS_GPR, 6),
REG64_IDX(BCS_GPR, 7),
REG64_IDX(BCS_GPR, 8),
REG64_IDX(BCS_GPR, 9),
REG64_IDX(BCS_GPR, 10),
REG64_IDX(BCS_GPR, 11),
REG64_IDX(BCS_GPR, 12),
REG64_IDX(BCS_GPR, 13),
REG64_IDX(BCS_GPR, 14),
REG64_IDX(BCS_GPR, 15),
}; };
#undef REG64 #undef REG64
@ -550,6 +617,17 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0; return 0;
} }
static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
return 0xFF;
DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
return 0;
}
static bool validate_cmds_sorted(struct intel_engine_cs *ring, static bool validate_cmds_sorted(struct intel_engine_cs *ring,
const struct drm_i915_cmd_table *cmd_tables, const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count) int cmd_table_count)
@ -608,9 +686,7 @@ static bool check_sorted(int ring_id,
static bool validate_regs_sorted(struct intel_engine_cs *ring) static bool validate_regs_sorted(struct intel_engine_cs *ring)
{ {
return check_sorted(ring->id, ring->reg_table, ring->reg_count) && return check_sorted(ring->id, ring->reg_table, ring->reg_count);
check_sorted(ring->id, ring->master_reg_table,
ring->master_reg_count);
} }
struct cmd_node { struct cmd_node {
@ -691,63 +767,61 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
int cmd_table_count; int cmd_table_count;
int ret; int ret;
if (!IS_GEN7(ring->dev)) if (!IS_GEN7(ring->dev) && !(IS_GEN9(ring->dev) && ring->id == BCS))
return 0; return 0;
switch (ring->id) { switch (ring->id) {
case RCS: case RCS:
if (IS_HASWELL(ring->dev)) { if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_render_ring_cmds; cmd_tables = hsw_render_ring_cmd_table;
cmd_table_count = cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds); ARRAY_SIZE(hsw_render_ring_cmd_table);
} else { } else {
cmd_tables = gen7_render_cmds; cmd_tables = gen7_render_cmd_table;
cmd_table_count = ARRAY_SIZE(gen7_render_cmds); cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
} }
ring->reg_table = gen7_render_regs; ring->reg_table = gen7_render_regs;
ring->reg_count = ARRAY_SIZE(gen7_render_regs); ring->reg_count = ARRAY_SIZE(gen7_render_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
}
ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask; ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break; break;
case VCS: case VCS:
cmd_tables = gen7_video_cmds; cmd_tables = gen7_video_cmd_table;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds); cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break; break;
case BCS: case BCS:
if (IS_HASWELL(ring->dev)) { ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
cmd_tables = hsw_blt_ring_cmds; if (IS_GEN9(ring->dev)) {
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); cmd_tables = gen9_blt_cmd_table;
cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
ring->get_cmd_length_mask =
gen9_blt_get_cmd_length_mask;
/* BCS Engine unsafe without parser */
ring->requires_cmd_parser = 1;
}
else if (IS_HASWELL(ring->dev)) {
cmd_tables = hsw_blt_ring_cmd_table;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
} else { } else {
cmd_tables = gen7_blt_cmds; cmd_tables = gen7_blt_cmd_table;
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
} }
if (IS_GEN9(ring->dev)) {
ring->reg_table = gen9_blt_regs;
ring->reg_count = ARRAY_SIZE(gen9_blt_regs);
} else {
ring->reg_table = gen7_blt_regs; ring->reg_table = gen7_blt_regs;
ring->reg_count = ARRAY_SIZE(gen7_blt_regs); ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
if (IS_HASWELL(ring->dev)) {
ring->master_reg_table = hsw_master_regs;
ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
} else {
ring->master_reg_table = ivb_master_regs;
ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
} }
ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break; break;
case VECS: case VECS:
cmd_tables = hsw_vebox_cmds; cmd_tables = hsw_vebox_cmd_table;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
/* VECS can use the same length_mask function as VCS */ /* VECS can use the same length_mask function as VCS */
ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break; break;
@ -769,7 +843,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
return ret; return ret;
} }
ring->needs_cmd_parser = true; ring->using_cmd_parser = true;
return 0; return 0;
} }
@ -783,7 +857,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
*/ */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring) void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{ {
if (!ring->needs_cmd_parser) if (!ring->using_cmd_parser)
return; return;
fini_hash_table(ring); fini_hash_table(ring);
@ -949,30 +1023,9 @@ unpin_src:
return ret ? ERR_PTR(ret) : dst; return ret ? ERR_PTR(ret) : dst;
} }
/** static int check_cmd(const struct intel_engine_cs *ring,
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
if (!ring->needs_cmd_parser)
return false;
if (!USES_PPGTT(ring->dev))
return false;
return (i915.enable_cmd_parser == 1);
}
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc, const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length, const u32 *cmd, u32 length,
const bool is_master,
bool *oacontrol_set) bool *oacontrol_set)
{ {
if (desc->flags & CMD_DESC_REJECT) { if (desc->flags & CMD_DESC_REJECT) {
@ -980,12 +1033,6 @@ static bool check_cmd(const struct intel_engine_cs *ring,
return false; return false;
} }
if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
*cmd);
return false;
}
if (desc->flags & CMD_DESC_REGISTER) { if (desc->flags & CMD_DESC_REGISTER) {
/* /*
* Get the distance between individual register offset * Get the distance between individual register offset
@ -1002,11 +1049,6 @@ static bool check_cmd(const struct intel_engine_cs *ring,
find_reg(ring->reg_table, ring->reg_count, find_reg(ring->reg_table, ring->reg_count,
reg_addr); reg_addr);
if (!reg && is_master)
reg = find_reg(ring->master_reg_table,
ring->master_reg_count,
reg_addr);
if (!reg) { if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n", DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
reg_addr, *cmd, ring->id); reg_addr, *cmd, ring->id);
@ -1091,16 +1133,113 @@ static bool check_cmd(const struct intel_engine_cs *ring,
return true; return true;
} }
static int check_bbstart(struct intel_context *ctx,
u32 *cmd, u64 offset, u32 length,
u32 batch_len,
u64 batch_start,
u64 shadow_batch_start)
{
u64 jump_offset, jump_target;
u32 target_cmd_offset, target_cmd_index;
/* For igt compatibility on older platforms */
if (CMDPARSER_USES_GGTT(ctx->i915)) {
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
return -EACCES;
}
if (length != 3) {
DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
length);
return -EINVAL;
}
jump_target = *(u64*)(cmd+1);
jump_offset = jump_target - batch_start;
/*
* Any underflow of jump_target is guaranteed to be outside the range
* of a u32, so >= test catches both too large and too small
*/
if (jump_offset >= batch_len) {
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
jump_target);
return -EINVAL;
}
/*
* This cannot overflow a u32 because we already checked jump_offset
* is within the BB, and the batch_len is a u32
*/
target_cmd_offset = lower_32_bits(jump_offset);
target_cmd_index = target_cmd_offset / sizeof(u32);
*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
if (target_cmd_index == offset)
return 0;
if (ctx->jump_whitelist_cmds <= target_cmd_index) {
DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
return -EINVAL;
} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target);
return -EINVAL;
}
return 0;
}
static void init_whitelist(struct intel_context *ctx, u32 batch_len)
{
const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
const u32 exact_size = BITS_TO_LONGS(batch_cmds);
u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
unsigned long *next_whitelist;
if (CMDPARSER_USES_GGTT(ctx->i915))
return;
if (batch_cmds <= ctx->jump_whitelist_cmds) {
bitmap_zero(ctx->jump_whitelist, batch_cmds);
return;
}
again:
next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
if (next_whitelist) {
kfree(ctx->jump_whitelist);
ctx->jump_whitelist = next_whitelist;
ctx->jump_whitelist_cmds =
next_size * BITS_PER_BYTE * sizeof(long);
return;
}
if (next_size > exact_size) {
next_size = exact_size;
goto again;
}
DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
return;
}
#define LENGTH_BIAS 2 #define LENGTH_BIAS 2
/** /**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ctx: the context in which the batch is to execute
* @ring: the ring on which the batch is to execute * @ring: the ring on which the batch is to execute
* @batch_obj: the batch buffer in question * @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question * @user_batch_start: Canonical base address of original user batch
* @batch_start_offset: byte offset in the batch at which execution starts * @batch_start_offset: byte offset in the batch at which execution starts
* @batch_len: length of the commands in batch_obj * @batch_len: length of the commands in batch_obj
* @is_master: is the submitting process the drm master? * @shadow_batch_obj: copy of the batch buffer in question
* @shadow_batch_start: Canonical base address of shadow_batch_obj
* *
* Parses the specified batch buffer looking for privilege violations as * Parses the specified batch buffer looking for privilege violations as
* described in the overview. * described in the overview.
@ -1108,14 +1247,16 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing * if the batch appears legal but should use hardware parsing
*/ */
int i915_parse_cmds(struct intel_engine_cs *ring, int i915_parse_cmds(struct intel_context *ctx,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj, u64 user_batch_start,
u32 batch_start_offset, u32 batch_start_offset,
u32 batch_len, u32 batch_len,
bool is_master) struct drm_i915_gem_object *shadow_batch_obj,
u64 shadow_batch_start)
{ {
u32 *cmd, *batch_base, *batch_end; u32 *cmd, *batch_base, *batch_end, offset = 0;
struct drm_i915_cmd_descriptor default_desc = { 0 }; struct drm_i915_cmd_descriptor default_desc = { 0 };
bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
int ret = 0; int ret = 0;
@ -1127,6 +1268,8 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
return PTR_ERR(batch_base); return PTR_ERR(batch_base);
} }
init_whitelist(ctx, batch_len);
/* /*
* We use the batch length as size because the shadow object is as * We use the batch length as size because the shadow object is as
* large or larger and copy_batch() will write MI_NOPs to the extra * large or larger and copy_batch() will write MI_NOPs to the extra
@ -1150,16 +1293,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break; break;
} }
/*
* If the batch buffer contains a chained batch, return an
* error that tells the caller to abort and dispatch the
* workload as a non-secure batch.
*/
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
ret = -EACCES;
break;
}
if (desc->flags & CMD_DESC_FIXED) if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed; length = desc->length.fixed;
else else
@ -1174,13 +1307,23 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break; break;
} }
if (!check_cmd(ring, desc, cmd, length, is_master, if (!check_cmd(ring, desc, cmd, length, &oacontrol_set)) {
&oacontrol_set)) { ret = CMDPARSER_USES_GGTT(ring->dev) ? -EINVAL : -EACCES;
ret = -EINVAL;
break; break;
} }
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
ret = check_bbstart(ctx, cmd, offset, length,
batch_len, user_batch_start,
shadow_batch_start);
break;
}
if (ctx->jump_whitelist_cmds > offset)
set_bit(offset, ctx->jump_whitelist);
cmd += length; cmd += length;
offset += length;
} }
if (oacontrol_set) { if (oacontrol_set) {
@ -1206,7 +1349,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
* *
* Return: the current version number of the cmd parser * Return: the current version number of the cmd parser
*/ */
int i915_cmd_parser_get_version(void) int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{ {
/* /*
* Command parser version history * Command parser version history
@ -1218,6 +1361,7 @@ int i915_cmd_parser_get_version(void)
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register. * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers. * 5. GPGPU dispatch compute indirect registers.
* 10. Gen9 only - Supports the new ppgtt based BLIT parser
*/ */
return 5; return CMDPARSER_USES_GGTT(dev_priv) ? 5 : 10;
} }


@@ -133,7 +133,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_HAS_SECURE_BATCHES:
-		value = capable(CAP_SYS_ADMIN);
+		value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN);
 		break;
 	case I915_PARAM_HAS_PINNED_BATCHES:
 		value = 1;
@@ -145,7 +145,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = 1;
 		break;
 	case I915_PARAM_CMD_PARSER_VERSION:
-		value = i915_cmd_parser_get_version();
+		value = i915_cmd_parser_get_version(dev_priv);
 		break;
 	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
 		value = 1;


@@ -698,6 +698,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 		return ret;
 	}

+	i915_rc6_ctx_wa_suspend(dev_priv);
+
 	pci_disable_device(drm_dev->pdev);
 	/*
 	 * During hibernation on some platforms the BIOS may try to access
@@ -849,6 +851,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	intel_uncore_sanitize(dev);
 	intel_power_domains_init_hw(dev_priv);

+	i915_rc6_ctx_wa_resume(dev_priv);
+
 	return ret;
 }


@@ -891,6 +891,12 @@ struct intel_context {
 		int pin_count;
 	} engine[I915_NUM_RINGS];

+	/* jump_whitelist: Bit array for tracking cmds during cmdparsing */
+	unsigned long *jump_whitelist;
+
+	/* jump_whitelist_cmds: No of cmd slots available */
+	uint32_t jump_whitelist_cmds;
+
 	struct list_head link;
 };

@@ -1153,6 +1159,7 @@ struct intel_gen6_power_mgmt {
 	bool client_boost;

 	bool enabled;
+	bool ctx_corrupted;
 	struct delayed_work delayed_resume_work;
 	unsigned boosts;

@@ -2539,6 +2546,9 @@ struct drm_i915_cmd_table {
 #define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
 #define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
 #define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+
+#define HAS_SECURE_BATCHES(dev_priv) (INTEL_INFO(dev_priv)->gen < 6)
+
 #define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
 				 __I915__(dev)->ellc_size)
@@ -2553,8 +2563,18 @@ struct drm_i915_cmd_table {
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

+/*
+ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+ * All later gens can run the final buffer from the ppgtt
+ */
+#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv)
+
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

+#define NEEDS_RC6_CTX_CORRUPTION_WA(dev)	\
+	(IS_BROADWELL(dev) || INTEL_INFO(dev)->gen == 9)
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -3276,16 +3296,19 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

 /* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
 void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
 bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_context *cxt,
+		    struct intel_engine_cs *ring,
 		    struct drm_i915_gem_object *batch_obj,
-		    struct drm_i915_gem_object *shadow_batch_obj,
+		    u64 user_batch_start,
 		    u32 batch_start_offset,
 		    u32 batch_len,
-		    bool is_master);
+		    struct drm_i915_gem_object *shadow_batch_obj,
+		    u64 shadow_batch_start);

 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);


@@ -157,6 +157,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	if (i915.enable_execlists)
 		intel_lr_context_free(ctx);

+	kfree(ctx->jump_whitelist);
+
 	/*
 	 * This context is going away and we need to remove all VMAs still
 	 * around. This is to handle imported shared objects for which
@@ -246,6 +248,9 @@ __create_hw_context(struct drm_device *dev,
 	ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;

+	ctx->jump_whitelist = NULL;
+	ctx->jump_whitelist_cmds = 0;
+
 	return ctx;

 err_out:


@ -1123,17 +1123,52 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0; return 0;
} }
static struct i915_vma*
shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *pin_vm = vm;
u64 flags;
int ret;
/*
* PPGTT backed shadow buffers must be mapped RO, to prevent
* post-scan tampering
*/
if (CMDPARSER_USES_GGTT(dev_priv)) {
flags = PIN_GLOBAL;
pin_vm = &dev_priv->gtt.base;
} else if (vm->has_read_only) {
flags = PIN_USER;
obj->gt_ro = 1;
} else {
DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
return ERR_PTR(-EINVAL);
}
ret = i915_gem_object_pin(obj, pin_vm, 0, flags);
if (ret)
return ERR_PTR(ret);
else
return i915_gem_obj_to_vma(obj, pin_vm);
}
static struct drm_i915_gem_object* static struct drm_i915_gem_object*
i915_gem_execbuffer_parse(struct intel_engine_cs *ring, i915_gem_execbuffer_parse(struct intel_context *ctx,
struct intel_engine_cs *ring,
struct drm_i915_gem_exec_object2 *shadow_exec_entry, struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb, struct eb_vmas *eb,
struct i915_address_space *vm,
struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *batch_obj,
u32 batch_start_offset, u32 batch_start_offset,
u32 batch_len, u32 batch_len)
bool is_master)
{ {
struct drm_i915_gem_object *shadow_batch_obj; struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma; struct i915_vma *vma;
struct i915_vma *user_vma = list_entry(eb->vmas.prev,
typeof(*user_vma), exec_list);
u64 batch_start;
u64 shadow_batch_start;
int ret; int ret;
shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool, shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
@ -1141,24 +1176,34 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
if (IS_ERR(shadow_batch_obj)) if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj; return shadow_batch_obj;
ret = i915_parse_cmds(ring, vma = shadow_batch_pin(shadow_batch_obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
batch_start = user_vma->node.start + batch_start_offset;
shadow_batch_start = vma->node.start;
ret = i915_parse_cmds(ctx,
ring,
batch_obj, batch_obj,
shadow_batch_obj, batch_start,
batch_start_offset, batch_start_offset,
batch_len, batch_len,
is_master); shadow_batch_obj,
if (ret) shadow_batch_start);
goto err; if (ret) {
WARN_ON(vma->pin_count == 0);
ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0); vma->pin_count--;
if (ret)
goto err; goto err;
}
i915_gem_object_unpin_pages(shadow_batch_obj); i915_gem_object_unpin_pages(shadow_batch_obj);
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry; vma->exec_entry = shadow_exec_entry;
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
drm_gem_object_reference(&shadow_batch_obj->base); drm_gem_object_reference(&shadow_batch_obj->base);
@ -1170,7 +1215,14 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
err: err:
i915_gem_object_unpin_pages(shadow_batch_obj); i915_gem_object_unpin_pages(shadow_batch_obj);
if (ret == -EACCES) /* unhandled chained batch */
/*
* Unsafe GGTT-backed buffers can still be submitted safely
* as non-secure.
* For PPGTT backing however, we have no choice but to forcibly
* reject unsafe buffers
*/
if (CMDPARSER_USES_GGTT(batch_obj->base.dev) && (ret == -EACCES))
return batch_obj; return batch_obj;
else else
return ERR_PTR(ret); return ERR_PTR(ret);
@ -1322,6 +1374,13 @@ eb_get_batch(struct eb_vmas *eb)
return vma->obj; return vma->obj;
} }
static inline bool use_cmdparser(const struct intel_engine_cs *ring,
u32 batch_len)
{
return ring->requires_cmd_parser ||
(ring->using_cmd_parser && batch_len && USES_PPGTT(ring->dev));
}
static int static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file, struct drm_file *file,
@ -1351,6 +1410,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags = 0; dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) { if (args->flags & I915_EXEC_SECURE) {
/* Return -EPERM to trigger fallback code on old binaries. */
if (!HAS_SECURE_BATCHES(dev_priv))
return -EPERM;
if (!file->is_master || !capable(CAP_SYS_ADMIN)) if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
@ -1489,16 +1552,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} }
params->args_batch_start_offset = args->batch_start_offset; params->args_batch_start_offset = args->batch_start_offset;
if (i915_needs_cmd_parser(ring) && args->batch_len) { if (use_cmdparser(ring, args->batch_len)) {
struct drm_i915_gem_object *parsed_batch_obj; struct drm_i915_gem_object *parsed_batch_obj;
parsed_batch_obj = i915_gem_execbuffer_parse(ring, u32 batch_off = args->batch_start_offset;
u32 batch_len = args->batch_len;
if (batch_len == 0)
batch_len = batch_obj->base.size - batch_off;
parsed_batch_obj = i915_gem_execbuffer_parse(ctx, ring,
&shadow_exec_entry, &shadow_exec_entry,
eb, eb, vm,
batch_obj, batch_obj,
args->batch_start_offset, batch_off,
args->batch_len, batch_len);
file->is_master);
if (IS_ERR(parsed_batch_obj)) { if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj); ret = PTR_ERR(parsed_batch_obj);
goto err; goto err;
@ -1508,17 +1575,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* parsed_batch_obj == batch_obj means batch not fully parsed: * parsed_batch_obj == batch_obj means batch not fully parsed:
* Accept, but don't promote to secure. * Accept, but don't promote to secure.
*/ */
if (parsed_batch_obj != batch_obj) { if (parsed_batch_obj != batch_obj) {
/* if (CMDPARSER_USES_GGTT(dev_priv))
* Batch parsed and accepted:
*
* Set the DISPATCH_SECURE bit to remove the NON_SECURE
* bit from MI_BATCH_BUFFER_START commands issued in
* the dispatch_execbuffer implementations. We
* specifically don't want that set on batches the
* command parser has accepted.
*/
dispatch_flags |= I915_DISPATCH_SECURE; dispatch_flags |= I915_DISPATCH_SECURE;
params->args_batch_start_offset = 0; params->args_batch_start_offset = 0;
batch_obj = parsed_batch_obj; batch_obj = parsed_batch_obj;


@ -119,7 +119,8 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
(enable_ppgtt == 0 || !has_aliasing_ppgtt)) (enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0; return 0;
if (enable_ppgtt == 1) /* Full PPGTT is required by the Gen9 cmdparser */
if (enable_ppgtt == 1 && INTEL_INFO(dev)->gen != 9)
return 1; return 1;
if (enable_ppgtt == 2 && has_full_ppgtt) if (enable_ppgtt == 2 && has_full_ppgtt)
@ -152,7 +153,8 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
{ {
u32 pte_flags = 0; u32 pte_flags = 0;
/* Currently applicable only to VLV */ /* Applicable to VLV, and gen8+ */
pte_flags = 0;
if (vma->obj->gt_ro) if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
@ -172,11 +174,14 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
static gen8_pte_t gen8_pte_encode(dma_addr_t addr, static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level, enum i915_cache_level level,
bool valid) bool valid, u32 flags)
{ {
gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr; pte |= addr;
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~_PAGE_RW;
switch (level) { switch (level) {
case I915_CACHE_NONE: case I915_CACHE_NONE:
pte |= PPAT_UNCACHED_INDEX; pte |= PPAT_UNCACHED_INDEX;
@ -460,7 +465,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
gen8_pte_t scratch_pte; gen8_pte_t scratch_pte;
scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true); I915_CACHE_LLC, true, 0);
fill_px(vm->dev, pt, scratch_pte); fill_px(vm->dev, pt, scratch_pte);
} }
@ -757,8 +762,9 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{ {
struct i915_hw_ppgtt *ppgtt = struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base); container_of(vm, struct i915_hw_ppgtt, base);
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), gen8_pte_t scratch_pte =
I915_CACHE_LLC, use_scratch); gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch, 0);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) { if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
@ -779,7 +785,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp, struct i915_page_directory_pointer *pdp,
struct sg_page_iter *sg_iter, struct sg_page_iter *sg_iter,
uint64_t start, uint64_t start,
enum i915_cache_level cache_level) enum i915_cache_level cache_level,
u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base); container_of(vm, struct i915_hw_ppgtt, base);
@ -799,7 +806,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
pt_vaddr[pte] = pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(sg_iter), gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
cache_level, true); cache_level, true, flags);
if (++pte == GEN8_PTES) { if (++pte == GEN8_PTES) {
kunmap_px(ppgtt, pt_vaddr); kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL; pt_vaddr = NULL;
@ -820,7 +827,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages, struct sg_table *pages,
uint64_t start, uint64_t start,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 unused) u32 flags)
{ {
struct i915_hw_ppgtt *ppgtt = struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base); container_of(vm, struct i915_hw_ppgtt, base);
@ -830,7 +837,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (!USES_FULL_48BIT_PPGTT(vm->dev)) { if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level); cache_level, flags);
} else { } else {
struct i915_page_directory_pointer *pdp; struct i915_page_directory_pointer *pdp;
uint64_t templ4, pml4e; uint64_t templ4, pml4e;
@ -838,7 +845,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) { gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level); start, cache_level, flags);
} }
} }
} }
@ -1447,7 +1454,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint64_t start = ppgtt->base.start; uint64_t start = ppgtt->base.start;
uint64_t length = ppgtt->base.total; uint64_t length = ppgtt->base.total;
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true); I915_CACHE_LLC, true, 0);
if (!USES_FULL_48BIT_PPGTT(vm->dev)) { if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m); gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
@ -1515,6 +1522,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.clear_range = gen8_ppgtt_clear_range; ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.unbind_vma = ppgtt_unbind_vma; ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma; ppgtt->base.bind_vma = ppgtt_bind_vma;
/*
* From bdw, there is support for read-only pages in the PPGTT.
*
* XXX GVT is not honouring the lack of RW in the PTE bits.
*/
ppgtt->base.has_read_only = !intel_vgpu_active(ppgtt->base.dev);
ppgtt->debug_dump = gen8_dump_ppgtt; ppgtt->debug_dump = gen8_dump_ppgtt;
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
@ -2343,7 +2358,7 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st, struct sg_table *st,
uint64_t start, uint64_t start,
enum i915_cache_level level, u32 unused) enum i915_cache_level level, u32 flags)
{ {
struct drm_i915_private *dev_priv = vm->dev->dev_private; struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT; unsigned first_entry = start >> PAGE_SHIFT;
@ -2357,7 +2372,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
addr = sg_dma_address(sg_iter.sg) + addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT); (sg_iter.sg_pgoffset << PAGE_SHIFT);
gen8_set_pte(&gtt_entries[i], gen8_set_pte(&gtt_entries[i],
gen8_pte_encode(addr, level, true)); gen8_pte_encode(addr, level, true, flags));
i++; i++;
} }
@ -2370,7 +2385,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/ */
if (i != 0) if (i != 0)
WARN_ON(readq(&gtt_entries[i-1]) WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true)); != gen8_pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We /* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates * want to flush the TLBs only after we're certain all the PTE updates
@ -2444,7 +2459,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, I915_CACHE_LLC,
use_scratch); use_scratch, 0);
for (i = 0; i < num_entries; i++) for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte); gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base); readl(gtt_base);
@ -2510,7 +2525,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (ret) if (ret)
return ret; return ret;
/* Currently applicable only to VLV */ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (obj->gt_ro) if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY; pte_flags |= PTE_READ_ONLY;
@ -2653,6 +2669,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(ggtt_vm, dev_priv); i915_address_space_init(ggtt_vm, dev_priv);
ggtt_vm->total += PAGE_SIZE; ggtt_vm->total += PAGE_SIZE;
/* Only VLV supports read-only GGTT mappings */
ggtt_vm->has_read_only = IS_VALLEYVIEW(dev_priv);
if (intel_vgpu_active(dev)) { if (intel_vgpu_active(dev)) {
ret = intel_vgt_balloon(dev); ret = intel_vgt_balloon(dev);
if (ret) if (ret)


@@ -307,6 +307,9 @@ struct i915_address_space {
 	 */
 	struct list_head inactive_list;

+	/* Some systems support read-only mappings for GGTT and/or PPGTT */
+	bool has_read_only:1;
+
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
 				 enum i915_cache_level level,


@@ -170,6 +170,8 @@
 #define   ECOCHK_PPGTT_WT_HSW		(0x2<<3)
 #define   ECOCHK_PPGTT_WB_HSW		(0x3<<3)

+#define GEN8_RC6_CTX_INFO		0x8504
+
 #define GAC_ECO_BITS			0x14090
 #define   ECOBITS_SNB_BIT		(1<<13)
 #define   ECOBITS_PPGTT_CACHE64B	(3<<8)
@@ -511,6 +513,10 @@
  */
 #define BCS_SWCTRL			0x22200

+/* There are 16 GPR registers */
+#define BCS_GPR(n)	(0x22600 + (n) * 8)
+#define BCS_GPR_UDW(n)	(0x22600 + (n) * 8 + 4)
+
 #define GPGPU_THREADS_DISPATCHED	0x2290
 #define HS_INVOCATION_COUNT		0x2300
 #define DS_INVOCATION_COUNT		0x2308
@@ -1567,6 +1573,7 @@ enum skl_disp_power_wells {
 #define RING_IMR(base)		((base)+0xa8)
 #define RING_HWSTAM(base)	((base)+0x98)
 #define RING_TIMESTAMP(base)	((base)+0x358)
+#define RING_TIMESTAMP_UDW(base) ((base) + 0x358 + 4)
 #define TAIL_ADDR		0x001FFFF8
 #define HEAD_WRAP_COUNT		0xFFE00000
 #define HEAD_WRAP_ONE		0x00200000
@@ -5704,6 +5711,10 @@ enum skl_disp_power_wells {
 #define   GAMMA_MODE_MODE_12BIT	(2 << 0)
 #define   GAMMA_MODE_MODE_SPLIT	(3 << 0)

+/* Display Internal Timeout Register */
+#define RM_TIMEOUT		0x42060
+#define  MMIO_TIMEOUT_US(us)	((us) << 0)
+
 /* interrupts */
 #define DE_MASTER_IRQ_CONTROL	(1 << 31)
 #define DE_SPRITEB_FLIP_DONE	(1 << 29)


@@ -10747,6 +10747,10 @@ void intel_mark_busy(struct drm_device *dev)
 		return;

 	intel_runtime_pm_get(dev_priv);
+
+	if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv))
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	i915_update_gfx_val(dev_priv);
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_busy(dev_priv);
@@ -10765,6 +10769,11 @@ void intel_mark_idle(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6)
 		gen6_rps_idle(dev->dev_private);

+	if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) {
+		i915_rc6_ctx_wa_check(dev_priv);
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	}
+
 	intel_runtime_pm_put(dev_priv);
 }


@@ -1410,6 +1410,9 @@ void intel_enable_gt_powersave(struct drm_device *dev);
 void intel_disable_gt_powersave(struct drm_device *dev);
 void intel_suspend_gt_powersave(struct drm_device *dev);
 void intel_reset_gt_powersave(struct drm_device *dev);
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
 void gen6_update_ring_freq(struct drm_device *dev);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);


@@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct drm_device *dev)
  */
 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+/*
+ * Lower the display internal timeout.
+ * This is needed to avoid any hard hangs when DSI port PLL
+ * is off and a MMIO access is attempted by any privilege
+ * application, using batch buffers or any other means.
+ */
+I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
 }
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -4591,30 +4599,42 @@ void intel_set_rps(struct drm_device *dev, u8 val)
 gen6_set_rps(dev, val);
 }
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 I915_WRITE(GEN6_RC_CONTROL, 0);
+}
+static void gen9_disable_rps(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
 I915_WRITE(GEN9_PG_ENABLE, 0);
 }
+static void gen6_disable_rc6(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = dev->dev_private;
+I915_WRITE(GEN6_RC_CONTROL, 0);
+}
 static void gen6_disable_rps(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
-I915_WRITE(GEN6_RC_CONTROL, 0);
 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 }
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rc6(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 I915_WRITE(GEN6_RC_CONTROL, 0);
 }
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rc6(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4818,7 +4838,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
 /* 3a: Enable RC6 */
-if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+if (!dev_priv->rps.ctx_corrupted &&
+intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
 "on" : "off");
@@ -4841,7 +4862,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 */
 if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+INTEL_INFO(dev)->gen == 9)
 I915_WRITE(GEN9_PG_ENABLE, 0);
 else
 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -4884,7 +4905,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 /* 3: Enable RC6 */
-if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+if (!dev_priv->rps.ctx_corrupted &&
+intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 intel_print_rc6_info(dev, rc6_mask);
 if (IS_BROADWELL(dev))
@@ -6128,10 +6150,101 @@ static void intel_init_emon(struct drm_device *dev)
 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
+static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
+{
+return !I915_READ(GEN8_RC6_CTX_INFO);
+}
+static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
+{
+if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+return;
+if (i915_rc6_ctx_corrupted(i915)) {
+DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
+i915->rps.ctx_corrupted = true;
+intel_runtime_pm_get(i915);
+}
+}
+static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
+{
+if (i915->rps.ctx_corrupted) {
+intel_runtime_pm_put(i915);
+i915->rps.ctx_corrupted = false;
+}
+}
+/**
+ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
+ */
+void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
+{
+if (i915->rps.ctx_corrupted)
+intel_runtime_pm_put(i915);
+}
+/**
+ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
+ * @i915: i915 device
+ *
+ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
+ */
+void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
+{
+if (!i915->rps.ctx_corrupted)
+return;
+if (i915_rc6_ctx_corrupted(i915)) {
+intel_runtime_pm_get(i915);
+return;
+}
+DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
+i915->rps.ctx_corrupted = false;
+}
+static void intel_disable_rc6(struct drm_device *dev);
+/**
+ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
+ * @i915: i915 device
+ *
+ * Check if an RC6 CTX corruption has happened since the last check and if so
+ * disable RC6 and runtime power management.
+ *
+ * Return false if no context corruption has happened since the last call of
+ * this function, true otherwise.
+ */
+bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
+{
+if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
+return false;
+if (i915->rps.ctx_corrupted)
+return false;
+if (!i915_rc6_ctx_corrupted(i915))
+return false;
+DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
+intel_disable_rc6(i915->dev);
+i915->rps.ctx_corrupted = true;
+intel_runtime_pm_get_noresume(i915);
+return true;
+}
 void intel_init_gt_powersave(struct drm_device *dev)
 {
 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+i915_rc6_ctx_wa_init(to_i915(dev));
 if (IS_CHERRYVIEW(dev))
 cherryview_init_gt_powersave(dev);
 else if (IS_VALLEYVIEW(dev))
@@ -6144,6 +6257,8 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
 return;
 else if (IS_VALLEYVIEW(dev))
 valleyview_cleanup_gt_powersave(dev);
+i915_rc6_ctx_wa_cleanup(to_i915(dev));
 }
 static void gen6_suspend_rps(struct drm_device *dev)
@@ -6176,6 +6291,38 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
 gen6_rps_idle(dev_priv);
 }
+static void __intel_disable_rc6(struct drm_device *dev)
+{
+if (INTEL_INFO(dev)->gen >= 9)
+gen9_disable_rc6(dev);
+else if (IS_CHERRYVIEW(dev))
+cherryview_disable_rc6(dev);
+else if (IS_VALLEYVIEW(dev))
+valleyview_disable_rc6(dev);
+else
+gen6_disable_rc6(dev);
+}
+static void intel_disable_rc6(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = to_i915(dev);
+mutex_lock(&dev_priv->rps.hw_lock);
+__intel_disable_rc6(dev);
+mutex_unlock(&dev_priv->rps.hw_lock);
+}
+static void intel_disable_rps(struct drm_device *dev)
+{
+if (IS_CHERRYVIEW(dev) || IS_VALLEYVIEW(dev))
+return;
+if (INTEL_INFO(dev)->gen >= 9)
+gen9_disable_rps(dev);
+else
+gen6_disable_rps(dev);
+}
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6186,16 +6333,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 intel_suspend_gt_powersave(dev);
 mutex_lock(&dev_priv->rps.hw_lock);
-if (INTEL_INFO(dev)->gen >= 9)
-gen9_disable_rps(dev);
-else if (IS_CHERRYVIEW(dev))
-cherryview_disable_rps(dev);
-else if (IS_VALLEYVIEW(dev))
-valleyview_disable_rps(dev);
-else
-gen6_disable_rps(dev);
+__intel_disable_rc6(dev);
+intel_disable_rps(dev);
 dev_priv->rps.enabled = false;
 mutex_unlock(&dev_priv->rps.hw_lock);
 }
 }

View file

@@ -2058,6 +2058,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 struct intel_ringbuffer *ringbuf)
 {
+struct drm_i915_private *dev_priv = to_i915(dev);
+struct i915_address_space *vm = &dev_priv->gtt.base;
 struct drm_i915_gem_object *obj;
 obj = NULL;
@@ -2068,7 +2070,11 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 if (obj == NULL)
 return -ENOMEM;
-/* mark ring buffers as read-only from GPU side by default */
+/*
+ * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+ * if supported by the platform's GGTT.
+ */
+if (vm->has_read_only)
 obj->gt_ro = 1;
 ringbuf->obj = obj;

View file

@@ -314,7 +314,8 @@ struct intel_engine_cs {
 volatile u32 *cpu_page;
 } scratch;
-bool needs_cmd_parser;
+bool using_cmd_parser;
+bool requires_cmd_parser;
 /*
 * Table of commands the command parser needs to know about

View file

@@ -1956,6 +1956,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 case 0x682C:
 si_pi->cac_weights = cac_weights_cape_verde_pro;
 si_pi->dte_data = dte_data_sun_xt;
+update_dte_from_pl2 = true;
 break;
 case 0x6825:
 case 0x6827:

View file

@@ -266,8 +266,11 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
 struct adis16480 *st = iio_priv(indio_dev);
 unsigned int t;
+if (val < 0 || val2 < 0)
+return -EINVAL;
 t = val * 1000 + val2 / 1000;
-if (t <= 0)
+if (t == 0)
 return -EINVAL;
 t = 2460000 / t;

View file

@@ -1719,6 +1719,7 @@ err_detach:
 slave_disable_netpoll(new_slave);
 err_close:
+if (!netif_is_bond_master(slave_dev))
 slave_dev->priv_flags &= ~IFF_BONDING;
 dev_close(slave_dev);
@@ -1915,6 +1916,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 dev_set_mtu(slave_dev, slave->original_mtu);
+if (!netif_is_bond_master(slave_dev))
 slave_dev->priv_flags &= ~IFF_BONDING;
 bond_free_slave(slave);

View file

@@ -97,6 +97,9 @@
 #define BTR_TSEG2_SHIFT 12
 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+/* interrupt register */
+#define INT_STS_PENDING 0x8000
 /* brp extension register */
 #define BRP_EXT_BRPE_MASK 0x0f
 #define BRP_EXT_BRPE_SHIFT 0
@@ -1029,10 +1032,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
 u16 curr, last = priv->last_status;
 int work_done = 0;
+/* Only read the status register if a status interrupt was pending */
+if (atomic_xchg(&priv->sie_pending, 0)) {
 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
 /* Ack status on C_CAN. D_CAN is self clearing */
 if (priv->type != BOSCH_D_CAN)
 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+} else {
+/* no change detected ... */
+curr = last;
+}
 /* handle state changes */
 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1083,10 +1092,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
 {
 struct net_device *dev = (struct net_device *)dev_id;
 struct c_can_priv *priv = netdev_priv(dev);
+int reg_int;
-if (!priv->read_reg(priv, C_CAN_INT_REG))
+reg_int = priv->read_reg(priv, C_CAN_INT_REG);
+if (!reg_int)
 return IRQ_NONE;
+/* save for later use */
+if (reg_int & INT_STS_PENDING)
+atomic_set(&priv->sie_pending, 1);
 /* disable all interrupts and schedule the NAPI */
 c_can_irq_control(priv, false);
 napi_schedule(&priv->napi);

View file

@@ -198,6 +198,7 @@ struct c_can_priv {
 struct net_device *dev;
 struct device *device;
 atomic_t tx_active;
+atomic_t sie_pending;
 unsigned long tx_dir;
 int last_status;
 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);

View file

@@ -923,6 +923,7 @@ static int flexcan_chip_start(struct net_device *dev)
 reg_mecr = flexcan_read(&regs->mecr);
 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
 flexcan_write(reg_mecr, &regs->mecr);
+reg_mecr |= FLEXCAN_MECR_ECCDIS;
 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
 FLEXCAN_MECR_FANCEI_MSK);
 flexcan_write(reg_mecr, &regs->mecr);

View file

@@ -617,6 +617,7 @@ static int gs_can_open(struct net_device *netdev)
 rc);
 usb_unanchor_urb(urb);
+usb_free_urb(urb);
 break;
 }

View file

@@ -108,7 +108,7 @@ struct pcan_usb_msg_context {
 u8 *end;
 u8 rec_cnt;
 u8 rec_idx;
-u8 rec_data_idx;
+u8 rec_ts_idx;
 struct net_device *netdev;
 struct pcan_usb *pdev;
 };
@@ -552,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
 mc->ptr += PCAN_USB_CMD_ARGS;
 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
-int err = pcan_usb_decode_ts(mc, !mc->rec_idx);
+int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
 if (err)
 return err;
+/* Next packet in the buffer will have a timestamp on a single
+ * byte
+ */
+mc->rec_ts_idx++;
 }
 switch (f) {
@@ -638,10 +643,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 cf->can_dlc = get_can_dlc(rec_len);
-/* first data packet timestamp is a word */
-if (pcan_usb_decode_ts(mc, !mc->rec_data_idx))
+/* Only first packet timestamp is a word */
+if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
 goto decode_failed;
+/* Next packet in the buffer will have a timestamp on a single byte */
+mc->rec_ts_idx++;
 /* read data */
 memset(cf->data, 0x0, sizeof(cf->data));
 if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -695,7 +703,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
 /* handle normal can frames here */
 } else {
 err = pcan_usb_decode_data(&mc, sl);
-mc.rec_data_idx++;
 }
} }

View file

@@ -776,7 +776,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 dev = netdev_priv(netdev);
 /* allocate a buffer large enough to send commands */
-dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
+dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
 if (!dev->cmd_buf) {
 err = -ENOMEM;
 goto lbl_free_candev;

View file

@@ -1010,9 +1010,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
 netdev_info(priv->netdev, "device disconnected\n");
 unregister_netdev(priv->netdev);
-free_candev(priv->netdev);
 unlink_all_urbs(priv);
+free_candev(priv->netdev);
 }
 }

View file

@@ -950,7 +950,6 @@ static int hip04_remove(struct platform_device *pdev)
 hip04_free_ring(ndev, d);
 unregister_netdev(ndev);
-free_irq(ndev->irq, ndev);
 of_node_put(priv->phy_node);
 cancel_work_sync(&priv->tx_timeout_task);
 free_netdev(ndev);

View file

@@ -628,6 +628,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
 for (i = 0; i < adapter->num_rx_queues; i++)
 rxdr[i].count = rxdr->count;
+err = 0;
 if (netif_running(adapter->netdev)) {
 /* Try to get new resources before deleting old */
 err = e1000_setup_all_rx_resources(adapter);
@@ -648,14 +649,13 @@
 adapter->rx_ring = rxdr;
 adapter->tx_ring = txdr;
 err = e1000_up(adapter);
-if (err)
-goto err_setup;
 }
 kfree(tx_old);
 kfree(rx_old);
 clear_bit(__E1000_RESETTING, &adapter->flags);
-return 0;
+return err;
 err_setup_tx:
 e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -667,7 +667,6 @@ err_alloc_rx:
 err_alloc_tx:
 if (netif_running(adapter->netdev))
 e1000_up(adapter);
-err_setup:
 clear_bit(__E1000_RESETTING, &adapter->flags);
 return err;
 }

View file

@@ -1673,7 +1673,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
 if ((hw->phy.media_type == e1000_media_type_copper) &&
 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
 swap_now = true;
-} else if (!(connsw & E1000_CONNSW_SERDESD)) {
+} else if ((hw->phy.media_type != e1000_media_type_copper) &&
+!(connsw & E1000_CONNSW_SERDESD)) {
 /* copper signal takes time to appear */
 if (adapter->copper_tries < 4) {
 adapter->copper_tries++;

View file

@@ -1465,8 +1465,16 @@ enum qede_remove_mode {
 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 {
 struct net_device *ndev = pci_get_drvdata(pdev);
-struct qede_dev *edev = netdev_priv(ndev);
-struct qed_dev *cdev = edev->cdev;
+struct qede_dev *edev;
+struct qed_dev *cdev;
+if (!ndev) {
+dev_info(&pdev->dev, "Device has already been removed\n");
+return;
+}
+edev = netdev_priv(ndev);
+cdev = edev->cdev;
 DP_INFO(edev, "Starting qede_remove\n");

View file

@@ -533,8 +533,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
 /* read current mtu value from device */
 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
-0, iface_no, &max_datagram_size, 2);
-if (err < 0) {
+0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
+if (err < sizeof(max_datagram_size)) {
 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
 goto out;
 }
@@ -545,7 +545,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
-0, iface_no, &max_datagram_size, 2);
+0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
 if (err < 0)
 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");

View file

@@ -278,7 +278,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
 *fw_vsc_cfg, len);
 if (r) {
-devm_kfree(dev, fw_vsc_cfg);
+devm_kfree(dev, *fw_vsc_cfg);
 goto vsc_read_err;
 }
 } else {

View file

@@ -726,6 +726,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
 NFC_PROTO_FELICA_MASK;
 } else {
 kfree_skb(nfcid_skb);
+nfcid_skb = NULL;
 /* P2P in type A */
 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
 ST21NFCA_RF_READER_F_NFCID1,

View file

@@ -586,12 +586,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
-/* Tegra PCIE requires relaxed ordering */
+/* Tegra20 and Tegra30 PCIE requires relaxed ordering */
 static void tegra_pcie_relax_enable(struct pci_dev *dev)
 {
 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {

View file

@@ -759,9 +759,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 if (!(vport->fc_flag & FC_PT2PT)) {
 /* Check config parameter use-adisc or FCP-2 */
-if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
-(ndlp->nlp_type & NLP_FCP_TARGET))) {
+(ndlp->nlp_type & NLP_FCP_TARGET)))) {
 spin_lock_irq(shost->host_lock);
 ndlp->nlp_flag |= NLP_NPR_ADISC;
 spin_unlock_irq(shost->host_lock);

View file

@@ -252,7 +252,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 srb_t *sp;
 const char *type;
 int req_sg_cnt, rsp_sg_cnt;
-int rval = (DRIVER_ERROR << 16);
+int rval = (DID_ERROR << 16);
 uint16_t nextlid = 0;
 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
@@ -426,7 +426,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 struct Scsi_Host *host = bsg_job->shost;
 scsi_qla_host_t *vha = shost_priv(host);
 struct qla_hw_data *ha = vha->hw;
-int rval = (DRIVER_ERROR << 16);
+int rval = (DID_ERROR << 16);
 int req_sg_cnt, rsp_sg_cnt;
 uint16_t loop_id;
 struct fc_port *fcport;
@@ -1910,7 +1910,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
 struct Scsi_Host *host = bsg_job->shost;
 scsi_qla_host_t *vha = shost_priv(host);
 struct qla_hw_data *ha = vha->hw;
-int rval = (DRIVER_ERROR << 16);
+int rval = (DID_ERROR << 16);
 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 srb_t *sp;
 int req_sg_cnt = 0, rsp_sg_cnt = 0;

View file

@@ -2962,6 +2962,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
 /* Stop currently executing firmware. */
 qla2x00_try_to_stop_firmware(vha);
+/* Disable timer */
+if (vha->timer_active)
+qla2x00_stop_timer(vha);
 /* Turn adapter off line */
 vha->flags.online = 0;

View file

@@ -314,6 +314,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 /* Validate the wMaxPacketSize field */
 maxp = usb_endpoint_maxp(&endpoint->desc);
+if (maxp == 0) {
+dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+cfgno, inum, asnum, d->bEndpointAddress);
+goto skip_to_next_endpoint_or_interface_descriptor;
+}
 /* Find the highest legal maxpacket size for this endpoint */
 i = 0; /* additional transactions per microframe */

View file

@@ -2074,14 +2074,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
 usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
 kfree(cdev->os_desc_req->buf);
+cdev->os_desc_req->buf = NULL;
 usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+cdev->os_desc_req = NULL;
 }
 if (cdev->req) {
 if (cdev->setup_pending)
 usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
 kfree(cdev->req->buf);
+cdev->req->buf = NULL;
 usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+cdev->req = NULL;
 }
 cdev->next_string_id = 0;
 device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);

View file

@@ -87,6 +87,8 @@ struct gadget_info {
 bool use_os_desc;
 char b_vendor_code;
 char qw_sign[OS_STRING_QW_SIGN_LEN];
+spinlock_t spinlock;
+bool unbind;
 #ifdef CONFIG_USB_CONFIGFS_UEVENT
 bool connected;
 bool sw_connected;
@@ -1277,6 +1279,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
 int ret;
 /* the gi->lock is hold by the caller */
+gi->unbind = 0;
 cdev->gadget = gadget;
 set_gadget_data(gadget, cdev);
 ret = composite_dev_prepare(composite, cdev);
@@ -1463,19 +1466,116 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
 struct usb_composite_dev *cdev;
 struct gadget_info *gi;
+unsigned long flags;
 /* the gi->lock is hold by the caller */
 cdev = get_gadget_data(gadget);
 gi = container_of(cdev, struct gadget_info, cdev);
+spin_lock_irqsave(&gi->spinlock, flags);
+gi->unbind = 1;
+spin_unlock_irqrestore(&gi->spinlock, flags);
 kfree(otg_desc[0]);
 otg_desc[0] = NULL;
 purge_configs_funcs(gi);
 composite_dev_cleanup(cdev);
 usb_ep_autoconfig_reset(cdev->gadget);
+spin_lock_irqsave(&gi->spinlock, flags);
 cdev->gadget = NULL;
 set_gadget_data(gadget, NULL);
+spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+static int configfs_composite_setup(struct usb_gadget *gadget,
+const struct usb_ctrlrequest *ctrl)
+{
+struct usb_composite_dev *cdev;
+struct gadget_info *gi;
+unsigned long flags;
+int ret;
+cdev = get_gadget_data(gadget);
+if (!cdev)
+return 0;
+gi = container_of(cdev, struct gadget_info, cdev);
+spin_lock_irqsave(&gi->spinlock, flags);
+cdev = get_gadget_data(gadget);
+if (!cdev || gi->unbind) {
+spin_unlock_irqrestore(&gi->spinlock, flags);
+return 0;
+}
+ret = composite_setup(gadget, ctrl);
+spin_unlock_irqrestore(&gi->spinlock, flags);
+return ret;
+}
+static void configfs_composite_disconnect(struct usb_gadget *gadget)
+{
+struct usb_composite_dev *cdev;
+struct gadget_info *gi;
+unsigned long flags;
+cdev = get_gadget_data(gadget);
+if (!cdev)
+return;
+gi = container_of(cdev, struct gadget_info, cdev);
+spin_lock_irqsave(&gi->spinlock, flags);
+cdev = get_gadget_data(gadget);
+if (!cdev || gi->unbind) {
+spin_unlock_irqrestore(&gi->spinlock, flags);
+return;
+}
+composite_disconnect(gadget);
+spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+static void configfs_composite_suspend(struct usb_gadget *gadget)
+{
+struct usb_composite_dev *cdev;
+struct gadget_info *gi;
+unsigned long flags;
+cdev = get_gadget_data(gadget);
+if (!cdev)
+return;
+gi = container_of(cdev, struct gadget_info, cdev);
+spin_lock_irqsave(&gi->spinlock, flags);
+cdev = get_gadget_data(gadget);
+if (!cdev || gi->unbind) {
+spin_unlock_irqrestore(&gi->spinlock, flags);
+return;
+}
+composite_suspend(gadget);
+spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+static void configfs_composite_resume(struct usb_gadget *gadget)
+{
+struct usb_composite_dev *cdev;
+struct gadget_info *gi;
+unsigned long flags;
+cdev = get_gadget_data(gadget);
+if (!cdev)
+return;
+gi = container_of(cdev, struct gadget_info, cdev);
+spin_lock_irqsave(&gi->spinlock, flags);
+cdev = get_gadget_data(gadget);
+if (!cdev || gi->unbind) {
+spin_unlock_irqrestore(&gi->spinlock, flags);
+return;
+}
+composite_resume(gadget);
+spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 #ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -1559,12 +1659,12 @@ static const struct usb_gadget_driver configfs_driver_template = {
 .reset = android_disconnect,
 .disconnect = android_disconnect,
 #else
-.setup = composite_setup,
-.reset = composite_disconnect,
-.disconnect = composite_disconnect,
+.setup = configfs_composite_setup,
+.reset = configfs_composite_disconnect,
+.disconnect = configfs_composite_disconnect,
 #endif
-.suspend = composite_suspend,
-.resume = composite_resume,
+.suspend = configfs_composite_suspend,
+.resume = configfs_composite_resume,
 .max_speed = USB_SPEED_SUPER,
 .driver = {

View file

@@ -403,8 +403,10 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
 next_fifo_transaction(ep, req);
 if (req->last_transaction) {
 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
+if (ep_is_control(ep))
 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
 } else {
+if (ep_is_control(ep))
 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
 }

View file

@@ -2570,7 +2570,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
 dma_pool_destroy(udc_controller->td_pool);
 free_irq(udc_controller->irq, udc_controller);
 iounmap(dr_regs);
-if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
 release_mem_region(res->start, resource_size(res));
 /* free udc --wait for the release() finished */

View file

@@ -303,6 +303,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 default:
 break;
 }
+break;
 default:
 usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
 wValue);

View file

@@ -926,6 +926,11 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+/* remove from inode's cap rbtree, and clear auth cap */
+rb_erase(&cap->ci_node, &ci->i_caps);
+if (ci->i_auth_cap == cap)
+ci->i_auth_cap = NULL;
 /* remove from session list */
 spin_lock(&session->s_cap_lock);
 if (session->s_cap_iterator == cap) {
@@ -961,11 +966,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 spin_unlock(&session->s_cap_lock);
-/* remove from inode list */
-rb_erase(&cap->ci_node, &ci->i_caps);
-if (ci->i_auth_cap == cap)
-ci->i_auth_cap = NULL;
 if (removed)
 ceph_put_cap(mdsc, cap);

View file

@@ -157,10 +157,41 @@ int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname
 !type->ct_item_ops->allow_link)
 goto out_put;
+/*
+ * This is really sick. What they wanted was a hybrid of
+ * link(2) and symlink(2) - they wanted the target resolved
+ * at syscall time (as link(2) would've done), be a directory
+ * (which link(2) would've refused to do) *AND* be a deep
+ * fucking magic, making the target busy from rmdir POV.
+ * symlink(2) is nothing of that sort, and the locking it
+ * gets matches the normal symlink(2) semantics. Without
+ * attempts to resolve the target (which might very well
+ * not even exist yet) done prior to locking the parent
+ * directory. This perversion, OTOH, needs to resolve
+ * the target, which would lead to obvious deadlocks if
+ * attempted with any directories locked.
+ *
+ * Unfortunately, that garbage is userland ABI and we should've
+ * said "no" back in 2005. Too late now, so we get to
+ * play very ugly games with locking.
+ *
+ * Try *ANYTHING* of that sort in new code, and you will
+ * really regret it. Just ask yourself - what could a BOFH
+ * do to me and do I want to find it out first-hand?
+ *
+ * AV, a thoroughly annoyed bastard.
+ */
+inode_unlock(dir);
 ret = get_target(symname, &path, &target_item, dentry->d_sb);
+inode_lock(dir);
 if (ret)
 goto out_put;
+if (dentry->d_inode || d_unhashed(dentry))
+ret = -EEXIST;
+else
+ret = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+if (!ret)
 ret = type->ct_item_ops->allow_link(parent_item, target_item);
 if (!ret) {
 mutex_lock(&configfs_symlink_mutex);

View file

@@ -582,10 +582,13 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 spin_unlock(&inode->i_lock);
 /*
-* A dying wb indicates that the memcg-blkcg mapping has changed
-* and a new wb is already serving the memcg. Switch immediately.
+* A dying wb indicates that either the blkcg associated with the
+* memcg changed or the associated memcg is dying. In the first
+* case, a replacement wb should already be available and we should
+* refresh the wb immediately. In the second case, trying to
+* refresh will keep failing.
 */
-if (unlikely(wb_dying(wbc->wb)))
+if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
 inode_switch_wbs(inode, wbc->wb_id);
 }

View file

@@ -52,6 +52,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
 return false;
 }
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+struct nfs_delegation *delegation;
+delegation = rcu_dereference(NFS_I(inode)->delegation);
+if (nfs4_is_valid_delegation(delegation, 0))
+return delegation;
+return NULL;
+}
 static int
 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
 {

View file

@@ -58,6 +58,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);

View file

@@ -1243,8 +1243,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
 return 0;
 if ((delegation->type & fmode) != fmode)
 return 0;
-if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
-return 0;
 switch (claim) {
 case NFS4_OPEN_CLAIM_NULL:
 case NFS4_OPEN_CLAIM_FH:
@@ -1473,7 +1471,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 {
 struct nfs4_state *state = opendata->state;
-struct nfs_inode *nfsi = NFS_I(state->inode);
 struct nfs_delegation *delegation;
 int open_mode = opendata->o_arg.open_flags;
 fmode_t fmode = opendata->o_arg.fmode;
@@ -1490,7 +1487,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 }
 spin_unlock(&state->owner->so_lock);
 rcu_read_lock();
-delegation = rcu_dereference(nfsi->delegation);
+delegation = nfs4_get_valid_delegation(state->inode);
 if (!can_open_delegated(delegation, fmode, claim)) {
 rcu_read_unlock();
 break;
@@ -1981,7 +1978,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
 goto out_no_action;
 rcu_read_lock();
-delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+delegation = nfs4_get_valid_delegation(data->state->inode);
 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
 goto unlock_no_action;
 rcu_read_unlock();

View file

@@ -880,6 +880,7 @@ struct netns_ipvs {
 struct delayed_work defense_work; /* Work handler */
 int drop_rate;
 int drop_counter;
+int old_secure_tcp;
 atomic_t dropentry;
 /* locks in ctl.c */
 spinlock_t dropentry_lock; /* drop entry handling */

View file

@@ -425,8 +425,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
 unsigned long now = jiffies;
-if (neigh->used != now)
-neigh->used = now;
+if (READ_ONCE(neigh->used) != now)
+WRITE_ONCE(neigh->used, now);
 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
 return __neigh_event_send(neigh, skb);
 return 0;

View file

@@ -648,7 +648,8 @@ struct nft_expr_ops {
 */
 struct nft_expr {
 const struct nft_expr_ops *ops;
-unsigned char data[];
+unsigned char data[]
+__attribute__((aligned(__alignof__(u64))));
 };
 static inline void *nft_expr_priv(const struct nft_expr *expr)

View file

@@ -2182,7 +2182,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
 return kt;
 #else
-return sk->sk_stamp;
+return READ_ONCE(sk->sk_stamp);
 #endif
 }
@@ -2193,7 +2193,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
 sk->sk_stamp = kt;
 write_sequnlock(&sk->sk_stamp_seq);
 #else
-sk->sk_stamp = kt;
+WRITE_ONCE(sk->sk_stamp, kt);
 #endif
 }

View file

@@ -44,7 +44,12 @@ retry:
 was_locked = 1;
 } else {
 local_irq_restore(flags);
-cpu_relax();
+/*
+ * Wait for the lock to release before jumping to
+ * atomic_cmpxchg() in order to mitigate the thundering herd
+ * problem.
+ */
+do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
 goto retry;
 }

View file

@@ -340,7 +340,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 .range_end = end,
 };
-if (!mapping_cap_writeback_dirty(mapping))
+if (!mapping_cap_writeback_dirty(mapping) ||
+!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 return 0;
 wbc_attach_fdatawrite_inode(&wbc, mapping->host);

View file

@@ -1589,7 +1589,7 @@ static int __init setup_vmstat(void)
 #endif
 #ifdef CONFIG_PROC_FS
 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
-proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+proc_create("pagetypeinfo", 0400, NULL, &pagetypeinfo_file_ops);
 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif

View file

@@ -1930,8 +1930,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
 }
 req_version->version = IPSET_PROTOCOL;
-ret = copy_to_user(user, req_version,
-sizeof(struct ip_set_req_version));
+if (copy_to_user(user, req_version,
+sizeof(struct ip_set_req_version)))
+ret = -EFAULT;
 goto done;
 }
 case IP_SET_OP_GET_BYNAME: {
@@ -1988,7 +1989,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
 } /* end of switch(op) */
 copy:
-ret = copy_to_user(user, data, copylen);
+if (copy_to_user(user, data, copylen))
+ret = -EFAULT;
 done:
 vfree(data);

View file

@@ -97,7 +97,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
 static void update_defense_level(struct netns_ipvs *ipvs)
 {
 struct sysinfo i;
-static int old_secure_tcp = 0;
 int availmem;
 int nomem;
 int to_change = -1;
@@ -178,35 +177,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
 spin_lock(&ipvs->securetcp_lock);
 switch (ipvs->sysctl_secure_tcp) {
 case 0:
-if (old_secure_tcp >= 2)
+if (ipvs->old_secure_tcp >= 2)
 to_change = 0;
 break;
 case 1:
 if (nomem) {
-if (old_secure_tcp < 2)
+if (ipvs->old_secure_tcp < 2)
 to_change = 1;
 ipvs->sysctl_secure_tcp = 2;
 } else {
-if (old_secure_tcp >= 2)
+if (ipvs->old_secure_tcp >= 2)
 to_change = 0;
 }
 break;
 case 2:
 if (nomem) {
-if (old_secure_tcp < 2)
+if (ipvs->old_secure_tcp < 2)
 to_change = 1;
 } else {
-if (old_secure_tcp >= 2)
+if (ipvs->old_secure_tcp >= 2)
 to_change = 0;
 ipvs->sysctl_secure_tcp = 1;
 }
 break;
 case 3:
-if (old_secure_tcp < 2)
+if (ipvs->old_secure_tcp < 2)
 to_change = 1;
 break;
 }
-old_secure_tcp = ipvs->sysctl_secure_tcp;
+ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
 if (to_change >= 0)
 ip_vs_protocol_timeout_change(ipvs,
 ipvs->sysctl_secure_tcp > 1);

View file

@@ -1066,7 +1066,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
 local = nfc_llcp_find_local(dev);
 if (!local) {
-nfc_put_device(dev);
 rc = -ENODEV;
 goto exit;
 }
@@ -1126,7 +1125,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
 local = nfc_llcp_find_local(dev);
 if (!local) {
-nfc_put_device(dev);
 rc = -ENODEV;
 goto exit;
 }

View file

@@ -28,6 +28,8 @@
 #define SAFFIRE_CLOCK_SOURCE_SPDIF 1
 /* clock sources as returned from register of Saffire Pro 10 and 26 */
+#define SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK 0x000000ff
+#define SAFFIREPRO_CLOCK_SOURCE_DETECT_MASK 0x0000ff00
 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
 #define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
 #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
@@ -190,6 +192,7 @@ saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
 map = saffirepro_clk_maps[1];
 /* In a case that this driver cannot handle the value of register. */
+value &= SAFFIREPRO_CLOCK_SOURCE_SELECT_MASK;
 if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
 err = -EIO;
 goto end;

View file

@@ -4440,7 +4440,7 @@ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
 /* Delay enabling the HP amp, to let the mic-detection
 * state machine run.
 */
-cancel_delayed_work_sync(&spec->unsol_hp_work);
+cancel_delayed_work(&spec->unsol_hp_work);
 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
 tbl = snd_hda_jack_tbl_get(codec, cb->nid);
 if (tbl)

View file

@@ -1080,7 +1080,7 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
 }
 }
-static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
+static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
 {
 struct perf_hpp_fmt *fmt;
 int64_t cmp = 0;